# test_extractors.py — integration tests for ArchiveBox extractor toggles
import json as pyjson

from .fixtures import *
  3. def test_singlefile_works(tmp_path, process, disable_extractors_dict):
  4. disable_extractors_dict.update({"USE_SINGLEFILE": "true"})
  5. add_process = subprocess.run(['archivebox', 'add', 'https://example.com'],
  6. capture_output=True, env=disable_extractors_dict)
  7. archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
  8. output_file = archived_item_path / "singlefile.html"
  9. assert output_file.exists()
  10. def test_readability_works(tmp_path, process, disable_extractors_dict):
  11. disable_extractors_dict.update({"USE_READABILITY": "true"})
  12. add_process = subprocess.run(['archivebox', 'add', 'https://example.com'],
  13. capture_output=True, env=disable_extractors_dict)
  14. archived_item_path = list(tmp_path.glob("archive/**/*"))[0]
  15. output_file = archived_item_path / "readability" / "content.html"
  16. assert output_file.exists()
  17. def test_htmltotext_works(tmp_path, process, disable_extractors_dict):
  18. disable_extractors_dict.update({"SAVE_HTMLTOTEXT": "true"})
  19. add_process = subprocess.run(['archivebox', 'add', 'https://example.com'],
  20. capture_output=True, env=disable_extractors_dict)
  21. archived_item_path = list(tmp_path.glob("archive/**/*"))[0]
  22. output_file = archived_item_path / "htmltotext.txt"
  23. assert output_file.exists()
  24. def test_use_node_false_disables_readability_and_singlefile(tmp_path, process, disable_extractors_dict):
  25. disable_extractors_dict.update({"USE_READABILITY": "true", "SAVE_DOM": "true", "USE_SINGLEFILE": "true", "USE_NODE": "false"})
  26. add_process = subprocess.run(['archivebox', 'add', 'https://example.com'],
  27. capture_output=True, env=disable_extractors_dict)
  28. output_str = add_process.stdout.decode("utf-8")
  29. assert "> singlefile" not in output_str
  30. assert "> readability" not in output_str
  31. def test_headers_retrieved(tmp_path, process, disable_extractors_dict):
  32. disable_extractors_dict.update({"SAVE_HEADERS": "true"})
  33. add_process = subprocess.run(['archivebox', 'add', 'https://example.com'],
  34. capture_output=True, env=disable_extractors_dict)
  35. archived_item_path = list(tmp_path.glob("archive/**/*"))[0]
  36. output_file = archived_item_path / "headers.json"
  37. assert output_file.exists()
  38. with open(output_file, 'r', encoding='utf-8') as f:
  39. headers = pyjson.load(f)
  40. assert 'Content-Type' in headers or 'content-type' in headers