# archivebox init
# archivebox add
import os
import subprocess
from pathlib import Path
import json, shutil
import sqlite3

from archivebox.config.common import STORAGE_CONFIG

from .fixtures import *
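
# The `process` and `disable_extractors_dict` fixtures used throughout this file
# come from .fixtures. Rough shape (an assumption for readability, not the actual
# definitions): `process` runs `archivebox init` inside tmp_path and returns the
# completed process, and `disable_extractors_dict` is a copy of os.environ with
# the extractor flags switched off so each test enables only what it needs, e.g.:
#
#   @pytest.fixture
#   def process(tmp_path):
#       os.chdir(tmp_path)
#       return subprocess.run(['archivebox', 'init'], capture_output=True)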

# Directories need the execute bit, so derive dir perms from the configured file
# perms (e.g. OUTPUT_PERMISSIONS of '644' -> '755').
DIR_PERMISSIONS = STORAGE_CONFIG.OUTPUT_PERMISSIONS.replace('6', '7').replace('4', '5')

def test_init(tmp_path, process):
    assert "Initializing a new ArchiveBox" in process.stdout.decode("utf-8")

def test_update(tmp_path, process):
    os.chdir(tmp_path)
    update_process = subprocess.run(['archivebox', 'init'], capture_output=True)
    assert "updating existing ArchiveBox" in update_process.stdout.decode("utf-8")

def test_add_link(tmp_path, process, disable_extractors_dict):
    disable_extractors_dict.update({"USE_WGET": "true"})
    os.chdir(tmp_path)
    add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                                 capture_output=True, env=disable_extractors_dict)
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]

    assert "index.json" in [x.name for x in archived_item_path.iterdir()]

    with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
        output_json = json.load(f)
    assert "Example Domain" == output_json['history']['title'][0]['output']

    with open(archived_item_path / "index.html", "r", encoding="utf-8") as f:
        output_html = f.read()
    assert "Example Domain" in output_html

def test_add_link_support_stdin(tmp_path, process, disable_extractors_dict):
    disable_extractors_dict.update({"USE_WGET": "true"})
    os.chdir(tmp_path)
    stdin_process = subprocess.Popen(["archivebox", "add"],
                                     stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     env=disable_extractors_dict)
    stdin_process.communicate(input="http://127.0.0.1:8080/static/example.com.html".encode())
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]

    assert "index.json" in [x.name for x in archived_item_path.iterdir()]

    with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
        output_json = json.load(f)
    assert "Example Domain" == output_json['history']['title'][0]['output']

def test_correct_permissions_output_folder(tmp_path, process):
    index_files = ['index.sqlite3', 'archive']
    for file in index_files:
        file_path = tmp_path / file
        assert oct(file_path.stat().st_mode)[-3:] in (STORAGE_CONFIG.OUTPUT_PERMISSIONS, DIR_PERMISSIONS)

def test_correct_permissions_add_command_results(tmp_path, process, disable_extractors_dict):
    os.chdir(tmp_path)
    add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                                 capture_output=True, env=disable_extractors_dict)
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
    for path in archived_item_path.iterdir():
        assert oct(path.stat().st_mode)[-3:] in (STORAGE_CONFIG.OUTPUT_PERMISSIONS, DIR_PERMISSIONS)

def test_collision_urls_different_timestamps(tmp_path, process, disable_extractors_dict):
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'],
                   capture_output=True, env=disable_extractors_dict)

    archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
    first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))

    # Rewrite the first snapshot's index.json to claim the second snapshot's URL,
    # creating two entries with the same URL but different timestamps.
    json_index = str(first_archive / "index.json")
    with open(json_index, "r", encoding="utf-8") as f:
        link_details = json.loads(f.read())
    link_details["url"] = "http://127.0.0.1:8080/static/iana.org.html"
    with open(json_index, "w", encoding="utf-8") as f:
        json.dump(link_details, f)

    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    # 1 from duplicated url, 1 from corrupted index
    assert "Skipped adding 2 invalid link data directories" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0

def test_collision_timestamps_different_urls(tmp_path, process, disable_extractors_dict):
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'],
                   capture_output=True, env=disable_extractors_dict)

    archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
    first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))
    archive_folders.remove(first_archive.name)

    # Rewrite the first snapshot's index.json to claim the second snapshot's
    # timestamp, creating two entries with the same timestamp but different URLs.
    json_index = str(first_archive / "index.json")
    with open(json_index, "r", encoding="utf-8") as f:
        link_details = json.loads(f.read())
    link_details["timestamp"] = archive_folders[0]
    with open(json_index, "w", encoding="utf-8") as f:
        json.dump(link_details, f)

    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Skipped adding 1 invalid link data directories" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0

def test_orphaned_folders(tmp_path, process, disable_extractors_dict):
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)

    # Export the current snapshots to a JSON index, then delete them from the
    # SQLite index so their archive folders become orphaned.
    list_process = subprocess.run(["archivebox", "list", "--json", "--with-headers"], capture_output=True)
    with open(tmp_path / "index.json", "wb") as f:
        f.write(list_process.stdout)

    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    c.execute("DELETE from core_snapshot")
    conn.commit()
    conn.close()

    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Added 1 orphaned links from existing JSON index" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0

def test_unrecognized_folders(tmp_path, process, disable_extractors_dict):
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    (tmp_path / "archive" / "some_random_folder").mkdir()

    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Skipped adding 1 invalid link data directories" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0

def test_tags_migration(tmp_path, disable_extractors_dict):
    # Start from a fixture collection that still uses the pre-migration schema.
    base_sqlite_path = Path(__file__).parent / 'tags_migration'
    if os.path.exists(tmp_path):
        shutil.rmtree(tmp_path)
    shutil.copytree(str(base_sqlite_path), tmp_path)
    os.chdir(tmp_path)

    # Record the old core_snapshot.tags values before running the migration.
    conn = sqlite3.connect("index.sqlite3")
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    c.execute("SELECT id, tags from core_snapshot")
    snapshots = c.fetchall()
    snapshots_dict = {sn['id']: sn['tags'] for sn in snapshots}
    conn.commit()
    conn.close()

    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)

    conn = sqlite3.connect("index.sqlite3")
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    c.execute("""
        SELECT core_snapshot.id, core_tag.name from core_snapshot
        JOIN core_snapshot_tags on core_snapshot_tags.snapshot_id=core_snapshot.id
        JOIN core_tag on core_tag.id=core_snapshot_tags.tag_id
    """)
    tags = c.fetchall()
    conn.commit()
    conn.close()

    for tag in tags:
        snapshot_id = tag["id"]
        tag_name = tag["name"]
        # Check that each migrated tag appears in the snapshot's old tags field
        assert tag_name in snapshots_dict[snapshot_id]