json.py

__package__ = 'archivebox.index'

import os
import sys
import json as pyjson

from pathlib import Path
from datetime import datetime, timezone
from typing import List, Optional, Iterator, Any, Union

from .schema import Link
from ..system import atomic_write
from ..util import enforce_types
from ..config import (
    VERSION,
    OUTPUT_DIR,
    FOOTER_INFO,
    DEPENDENCIES,
    JSON_INDEX_FILENAME,
    ARCHIVE_DIR_NAME,
    ANSI,
)

MAIN_INDEX_HEADER = {
    'info': 'This is an index of site data archived by ArchiveBox: The self-hosted web archive.',
    'schema': 'archivebox.index.json',
    'copyright_info': FOOTER_INFO,
    'meta': {
        'project': 'ArchiveBox',
        'version': VERSION,
        'git_sha': VERSION,  # not used anymore, but kept for backwards compatibility
        'website': 'https://ArchiveBox.io',
        'docs': 'https://github.com/ArchiveBox/ArchiveBox/wiki',
        'source': 'https://github.com/ArchiveBox/ArchiveBox',
        'issues': 'https://github.com/ArchiveBox/ArchiveBox/issues',
        'dependencies': DEPENDENCIES,
    },
}


@enforce_types
def generate_json_index_from_links(links: List[Link], with_headers: bool):
    """render the given links as a JSON string, optionally wrapped in the main index headers"""

    if with_headers:
        output = {
            **MAIN_INDEX_HEADER,
            'num_links': len(links),
            'updated': datetime.now(timezone.utc),
            'last_run_cmd': sys.argv,
            'links': links,
        }
    else:
        output = links
    return to_json(output, indent=4, sort_keys=True)
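
# Usage sketch (illustrative, not from the original source): with a list of
# Link objects in hand, the rendered index can be written out atomically.
# `links` stands in for any List[Link], e.g. the output of
# parse_json_main_index() below:
#
#     index_json = generate_json_index_from_links(links, with_headers=True)
#     atomic_write(str(Path(OUTPUT_DIR) / JSON_INDEX_FILENAME), index_json)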


@enforce_types
def parse_json_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]:
    """parse an archive index json file and return the list of links"""

    index_path = Path(out_dir) / JSON_INDEX_FILENAME
    if index_path.exists():
        with open(index_path, 'r', encoding='utf-8') as f:
            try:
                links = pyjson.load(f)['links']
                if links:
                    Link.from_json(links[0])
            except Exception as err:
                print("    {lightyellow}! Found an index.json in the project root but couldn't load links from it: {} {}{reset}".format(
                    err.__class__.__name__,
                    err,
                    **ANSI,
                ))
                return ()

            for link_json in links:
                try:
                    yield Link.from_json(link_json)
                except KeyError:
                    try:
                        detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp']
                        yield parse_json_link_details(str(detail_index_path))
                    except KeyError:
                        # as a last resort, try to guess the missing values from the existing ones
                        try:
                            yield Link.from_json(link_json, guess=True)
                        except KeyError:
                            print("    {lightyellow}! Failed to load the index.json from {}{reset}".format(detail_index_path, **ANSI))
                            continue
    return ()
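
# Usage sketch (illustrative): parse_json_main_index() is a generator, so
# links are parsed lazily and an unreadable top-level index yields nothing:
#
#     for link in parse_json_main_index(OUTPUT_DIR):
#         print(link.timestamp, link.url)
#
# `timestamp` and `url` are assumed Link fields, based on their use as JSON
# keys elsewhere in this file.
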
  80. ### Link Details Index

@enforce_types
def write_json_link_details(link: Link, out_dir: Optional[str]=None) -> None:
    """write a json file with some info about the link"""

    out_dir = out_dir or link.link_dir
    path = Path(out_dir) / JSON_INDEX_FILENAME
    atomic_write(str(path), link._asdict(extended=True))


@enforce_types
def parse_json_link_details(out_dir: Union[Path, str], guess: Optional[bool]=False) -> Optional[Link]:
    """load the json link index from a given directory"""

    existing_index = Path(out_dir) / JSON_INDEX_FILENAME
    if existing_index.exists():
        with open(existing_index, 'r', encoding='utf-8') as f:
            try:
                link_json = pyjson.load(f)
                return Link.from_json(link_json, guess)
            except pyjson.JSONDecodeError:
                pass
    return None
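
# Round-trip sketch (illustrative): write_json_link_details() and
# parse_json_link_details() mirror each other over a snapshot directory:
#
#     write_json_link_details(link, out_dir=link.link_dir)
#     reloaded = parse_json_link_details(link.link_dir)
#     assert reloaded is not None and reloaded.url == link.url
#
# `link.url` is an assumed Link field; `link.link_dir` is used the same way
# in write_json_link_details() above.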


@enforce_types
def parse_json_links_details(out_dir: Union[Path, str]) -> Iterator[Link]:
    """read through all the archive data folders and return the parsed links"""

    for entry in os.scandir(Path(out_dir) / ARCHIVE_DIR_NAME):
        if entry.is_dir(follow_symlinks=True):
            if (Path(entry.path) / JSON_INDEX_FILENAME).exists():
                try:
                    link = parse_json_link_details(entry.path)
                except KeyError:
                    link = None

                if link:
                    yield link

  111. ### Helpers

class ExtendedEncoder(pyjson.JSONEncoder):
    """
    Extended json serializer that supports serializing several model
    fields and objects
    """

    def default(self, obj):
        cls_name = obj.__class__.__name__

        if hasattr(obj, '_asdict'):
            return obj._asdict()

        elif isinstance(obj, bytes):
            return obj.decode()

        elif isinstance(obj, datetime):
            return obj.isoformat()

        elif isinstance(obj, Exception):
            return '{}: {}'.format(obj.__class__.__name__, obj)

        elif cls_name in ('dict_items', 'dict_keys', 'dict_values'):
            return tuple(obj)

        return pyjson.JSONEncoder.default(self, obj)


@enforce_types
def to_json(obj: Any, indent: Optional[int]=4, sort_keys: bool=True, cls=ExtendedEncoder) -> str:
    return pyjson.dumps(obj, indent=indent, sort_keys=sort_keys, cls=cls)
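
# Usage sketch (illustrative): ExtendedEncoder lets to_json() serialize values
# the stock JSONEncoder rejects, e.g. datetimes and bytes:
#
#     to_json({'fetched': datetime.now(timezone.utc), 'raw': b'abc'})
#     # returns indented JSON with the datetime rendered as an ISO-8601
#     # string and the bytes decoded to "abc"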