  1. """
  2. WARNING: THIS FILE IS ALL LEGACY CODE TO BE REMOVED.
  3. DO NOT ADD ANY NEW FEATURES TO THIS FILE, NEW CODE GOES HERE: core/models.py
  4. """
  5. __package__ = 'archivebox.index'
  6. from pathlib import Path
  7. from datetime import datetime, timezone, timedelta
  8. from typing import List, Dict, Any, Optional, Union
  9. from dataclasses import dataclass, asdict, field, fields
  10. from django.utils.functional import cached_property
  11. from ..system import get_dir_size
  12. from ..util import ts_to_date_str, parse_date
  13. from ..config import OUTPUT_DIR, ARCHIVE_DIR_NAME, FAVICON_PROVIDER


class ArchiveError(Exception):
    def __init__(self, message, hints=None):
        super().__init__(message)
        self.hints = hints


LinkDict = Dict[str, Any]

ArchiveOutput = Union[str, Exception, None]


@dataclass(frozen=True)
class ArchiveResult:
    cmd: List[str]
    pwd: Optional[str]
    cmd_version: Optional[str]
    output: ArchiveOutput
    status: str
    start_ts: datetime
    end_ts: datetime
    index_texts: Union[List[str], None] = None
    schema: str = 'ArchiveResult'

    def __post_init__(self):
        self.typecheck()

    def _asdict(self):
        return asdict(self)

    def typecheck(self) -> None:
        assert self.schema == self.__class__.__name__
        assert isinstance(self.status, str) and self.status
        assert isinstance(self.start_ts, datetime)
        assert isinstance(self.end_ts, datetime)
        assert isinstance(self.cmd, list)
        assert all(isinstance(arg, str) and arg for arg in self.cmd)

        # TODO: replace emptystrings in these three with None / remove them from the DB
        assert self.pwd is None or isinstance(self.pwd, str)
        assert self.cmd_version is None or isinstance(self.cmd_version, str)
        assert self.output is None or isinstance(self.output, (str, Exception))

    @classmethod
    def guess_ts(_cls, dict_info):
        from ..util import parse_date
        parsed_timestamp = parse_date(dict_info["timestamp"])
        start_ts = parsed_timestamp
        end_ts = parsed_timestamp + timedelta(seconds=int(dict_info["duration"]))
        return start_ts, end_ts
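
    # Illustrative sketch (untested, values invented for illustration): guess_ts()
    # reconstructs missing start/end timestamps of a legacy record from its bookmark
    # timestamp plus the recorded duration in seconds, e.g.:
    #
    #   start_ts, end_ts = ArchiveResult.guess_ts({"timestamp": "1618506000.0", "duration": "12"})
    #   # end_ts - start_ts == timedelta(seconds=12)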

    @classmethod
    def from_json(cls, json_info, guess=False):
        from ..util import parse_date

        info = {
            key: val
            for key, val in json_info.items()
            if key in cls.field_names()
        }
        if guess:
            keys = info.keys()
            if "start_ts" not in keys:
                info["start_ts"], info["end_ts"] = cls.guess_ts(json_info)
            else:
                info['start_ts'] = parse_date(info['start_ts'])
                info['end_ts'] = parse_date(info['end_ts'])
            if "pwd" not in keys:
                info["pwd"] = str(Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / json_info["timestamp"])
            if "cmd_version" not in keys:
                info["cmd_version"] = "Undefined"
            if "cmd" not in keys:
                info["cmd"] = []
        else:
            info['start_ts'] = parse_date(info['start_ts'])
            info['end_ts'] = parse_date(info['end_ts'])
            info['cmd_version'] = info.get('cmd_version')

        if type(info["cmd"]) is str:
            info["cmd"] = [info["cmd"]]

        return cls(**info)

    def to_dict(self, *keys) -> dict:
        if keys:
            return {k: v for k, v in asdict(self).items() if k in keys}
        return asdict(self)

    def to_json(self, indent=4, sort_keys=True) -> str:
        from .json import to_json
        return to_json(self, indent=indent, sort_keys=sort_keys)

    def to_csv(self, cols: Optional[List[str]]=None, separator: str=',', ljust: int=0) -> str:
        from .csv import to_csv
        return to_csv(self, cols=cols or self.field_names(), separator=separator, ljust=ljust)

    @classmethod
    def field_names(cls):
        return [f.name for f in fields(cls)]

    @property
    def duration(self) -> int:
        return (self.end_ts - self.start_ts).seconds
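
# Illustrative usage sketch (untested, field values invented for illustration):
# an ArchiveResult round-trips through from_json()/to_json(), and duration is
# derived from the parsed start/end timestamps:
#
#   result = ArchiveResult.from_json({
#       'cmd': ['wget', '--mirror', 'https://example.com'],
#       'pwd': '/data/archive/1618506000.0',
#       'cmd_version': '1.21',
#       'output': 'example.com/index.html',
#       'status': 'succeeded',
#       'start_ts': '2021-04-15 17:00:00',
#       'end_ts': '2021-04-15 17:00:12',
#   })
#   assert result.duration == 12
#   json_str = result.to_json(indent=2)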


@dataclass(frozen=True)
class Link:
    timestamp: str
    url: str
    title: Optional[str]
    tags: Optional[str]
    sources: List[str]
    history: Dict[str, List[ArchiveResult]] = field(default_factory=lambda: {})
    updated: Optional[datetime] = None
    schema: str = 'Link'

    def __str__(self) -> str:
        return f'[{self.timestamp}] {self.url} "{self.title}"'

    def __post_init__(self):
        self.typecheck()

    def overwrite(self, **kwargs):
        """pure functional version of dict.update that returns a new instance"""
        return Link(**{**self._asdict(), **kwargs})
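
    # Illustrative example (untested, values invented): Link is a frozen dataclass,
    # so fields are never mutated in place; overwrite() returns a modified copy instead:
    #
    #   renamed = link.overwrite(title='New title')
    #   assert renamed is not link and renamed.url == link.url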

    def __eq__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __gt__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        if not self.timestamp or not other.timestamp:
            return
        return float(self.timestamp) > float(other.timestamp)

    def typecheck(self) -> None:
        from ..config import stderr, ANSI
        try:
            assert self.schema == self.__class__.__name__
            assert isinstance(self.timestamp, str) and self.timestamp
            assert self.timestamp.replace('.', '').isdigit()
            assert isinstance(self.url, str) and '://' in self.url
            assert self.updated is None or isinstance(self.updated, datetime)
            assert self.title is None or (isinstance(self.title, str) and self.title)
            assert self.tags is None or isinstance(self.tags, str)

            assert isinstance(self.sources, list)
            assert all(isinstance(source, str) and source for source in self.sources)

            assert isinstance(self.history, dict)
            for method, results in self.history.items():
                assert isinstance(method, str) and method
                assert isinstance(results, list)
                assert all(isinstance(result, ArchiveResult) for result in results)
        except Exception:
            stderr('{red}[X] Error while loading link! [{}] {} "{}"{reset}'.format(self.timestamp, self.url, self.title, **ANSI))
            raise

    def _asdict(self, extended=False):
        info = {
            'schema': 'Link',
            'url': self.url,
            'title': self.title or None,
            'timestamp': self.timestamp,
            'updated': self.updated or None,
            'tags': self.tags or None,
            'sources': self.sources or [],
            'history': self.history or {},
        }
        if extended:
            info.update({
                'snapshot_id': self.snapshot_id,
                'link_dir': self.link_dir,
                'archive_path': self.archive_path,

                'hash': self.url_hash,
                'base_url': self.base_url,
                'scheme': self.scheme,
                'domain': self.domain,
                'path': self.path,
                'basename': self.basename,
                'extension': self.extension,
                'is_static': self.is_static,

                'tags_str': (self.tags or '').strip(','),  # only used to render static index in index/html.py, remove if no longer needed there
                'icons': None,  # only used to render static index in index/html.py, remove if no longer needed there

                'bookmarked_date': self.bookmarked_date,
                'updated_date': self.updated_date,
                'oldest_archive_date': self.oldest_archive_date,
                'newest_archive_date': self.newest_archive_date,

                'is_archived': self.is_archived,
                'num_outputs': self.num_outputs,
                'num_failures': self.num_failures,

                'latest': self.latest_outputs(),
                'canonical': self.canonical_outputs(),
            })
        return info
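
    # Note (assumption based on the fields above, keys abridged): _asdict() returns only
    # the stored fields by default, while _asdict(extended=True) also evaluates the derived
    # properties listed above (snapshot_id, latest_outputs(), canonical_outputs(), ...),
    # which can touch the database and the filesystem:
    #
    #   link._asdict()                # url, title, timestamp, updated, tags, sources, history
    #   link._asdict(extended=True)   # + link_dir, base_url, is_archived, latest, canonical, ...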

    def as_snapshot(self):
        from core.models import Snapshot
        return Snapshot.objects.get(url=self.url)

    @classmethod
    def from_json(cls, json_info, guess=False):
        from ..util import parse_date

        info = {
            key: val
            for key, val in json_info.items()
            if key in cls.field_names()
        }
        info['updated'] = parse_date(info.get('updated'))
        info['sources'] = info.get('sources') or []

        json_history = info.get('history') or {}
        cast_history = {}

        for method, method_history in json_history.items():
            cast_history[method] = []
            for json_result in method_history:
                assert isinstance(json_result, dict), 'Items in Link["history"][method] must be dicts'
                cast_result = ArchiveResult.from_json(json_result, guess)
                cast_history[method].append(cast_result)

        info['history'] = cast_history
        return cls(**info)

    def to_json(self, indent=4, sort_keys=True) -> str:
        from .json import to_json
        return to_json(self, indent=indent, sort_keys=sort_keys)

    def to_csv(self, cols: Optional[List[str]]=None, separator: str=',', ljust: int=0) -> str:
        from .csv import to_csv
        return to_csv(self, cols=cols or self.field_names(), separator=separator, ljust=ljust)

    @cached_property
    def snapshot_id(self):
        from core.models import Snapshot
        return str(Snapshot.objects.only('id').get(url=self.url).id)

    @classmethod
    def field_names(cls):
        return [f.name for f in fields(cls)]

    @property
    def link_dir(self) -> str:
        from ..config import CONFIG
        return str(Path(CONFIG['ARCHIVE_DIR']) / self.timestamp)

    @property
    def archive_path(self) -> str:
        from ..config import ARCHIVE_DIR_NAME
        return '{}/{}'.format(ARCHIVE_DIR_NAME, self.timestamp)

    @property
    def archive_size(self) -> float:
        try:
            return get_dir_size(self.archive_path)[0]
        except Exception:
            return 0

    ### URL Helpers

    @property
    def url_hash(self):
        from ..util import hashurl
        return hashurl(self.url)

    @property
    def scheme(self) -> str:
        from ..util import scheme
        return scheme(self.url)

    @property
    def extension(self) -> str:
        from ..util import extension
        return extension(self.url)

    @property
    def domain(self) -> str:
        from ..util import domain
        return domain(self.url)

    @property
    def path(self) -> str:
        from ..util import path
        return path(self.url)

    @property
    def basename(self) -> str:
        from ..util import basename
        return basename(self.url)

    @property
    def base_url(self) -> str:
        from ..util import base_url
        return base_url(self.url)

    ### Pretty Printing Helpers

    @property
    def bookmarked_date(self) -> Optional[str]:
        max_ts = (datetime.now(timezone.utc) + timedelta(days=30)).timestamp()

        if self.timestamp and self.timestamp.replace('.', '').isdigit():
            if 0 < float(self.timestamp) < max_ts:
                return ts_to_date_str(datetime.fromtimestamp(float(self.timestamp)))
            else:
                return str(self.timestamp)
        return None

    @property
    def updated_date(self) -> Optional[str]:
        return ts_to_date_str(self.updated) if self.updated else None

    @property
    def archive_dates(self) -> List[datetime]:
        return [
            parse_date(result.start_ts)
            for method in self.history.keys()
            for result in self.history[method]
        ]

    @property
    def oldest_archive_date(self) -> Optional[datetime]:
        return min(self.archive_dates, default=None)

    @property
    def newest_archive_date(self) -> Optional[datetime]:
        return max(self.archive_dates, default=None)

    ### Archive Status Helpers

    @property
    def num_outputs(self) -> int:
        return self.as_snapshot().num_outputs

    @property
    def num_failures(self) -> int:
        return sum(1
                   for method in self.history.keys()
                   for result in self.history[method]
                   if result.status == 'failed')

    @property
    def is_static(self) -> bool:
        from ..util import is_static_file
        return is_static_file(self.url)

    @property
    def is_archived(self) -> bool:
        from ..config import ARCHIVE_DIR
        from ..util import domain

        output_paths = (
            domain(self.url),
            'output.pdf',
            'screenshot.png',
            'output.html',
            'media',
            'singlefile.html',
        )

        return any(
            (Path(ARCHIVE_DIR) / self.timestamp / path).exists()
            for path in output_paths
        )

    def latest_outputs(self, status: Optional[str]=None) -> Dict[str, ArchiveOutput]:
        """get the latest output that each archive method produced for link"""

        ARCHIVE_METHODS = (
            'title', 'favicon', 'wget', 'warc', 'singlefile', 'pdf',
            'screenshot', 'dom', 'git', 'media', 'archive_org',
        )
        latest: Dict[str, ArchiveOutput] = {}
        for archive_method in ARCHIVE_METHODS:
            # get most recent successful result in history for each archive method
            history = self.history.get(archive_method) or []
            history = list(filter(lambda result: result.output, reversed(history)))
            if status is not None:
                history = list(filter(lambda result: result.status == status, history))

            if history:
                latest[archive_method] = history[0].output
            else:
                latest[archive_method] = None
        return latest
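
    # Illustrative return shape (values invented): one entry per archive method,
    # mapping to its most recent non-empty output, or None if nothing was produced:
    #
    #   link.latest_outputs()
    #   # {'title': 'Example Domain', 'wget': 'example.com/index.html',
    #   #  'pdf': None, 'screenshot': None, ...}
    #
    #   link.latest_outputs(status='succeeded')   # restrict to results marked succeeded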

    def canonical_outputs(self) -> Dict[str, Optional[str]]:
        """predict the expected output paths that should be present after archiving"""

        from ..extractors.wget import wget_output_path

        # TODO: banish this awful duplication from the codebase and import these
        # from their respective extractor files
        canonical = {
            'index_path': 'index.html',
            'favicon_path': 'favicon.ico',
            'google_favicon_path': FAVICON_PROVIDER.format(self.domain),
            'wget_path': wget_output_path(self),
            'warc_path': 'warc/',
            'singlefile_path': 'singlefile.html',
            'readability_path': 'readability/content.html',
            'mercury_path': 'mercury/content.html',
            'htmltotext_path': 'htmltotext.txt',
            'pdf_path': 'output.pdf',
            'screenshot_path': 'screenshot.png',
            'dom_path': 'output.html',
            'archive_org_path': 'https://web.archive.org/web/{}'.format(self.base_url),
            'git_path': 'git/',
            'media_path': 'media/',
            'headers_path': 'headers.json',
        }
        if self.is_static:
            # static binary files like PDF and images are handled slightly differently.
            # they're just downloaded once and aren't archived separately multiple times,
            # so the wget, screenshot, & pdf urls should all point to the same file
            static_path = wget_output_path(self)
            canonical.update({
                'title': self.basename,
                'wget_path': static_path,
                'pdf_path': static_path,
                'screenshot_path': static_path,
                'dom_path': static_path,
                'singlefile_path': static_path,
                'readability_path': static_path,
                'mercury_path': static_path,
                'htmltotext_path': static_path,
            })
        return canonical
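
    # Illustrative result (paths abridged, values invented): the mapping above predicts
    # where each extractor should have written its output, relative to link_dir.
    # For a typical non-static page:
    #
    #   link.canonical_outputs()
    #   # {'index_path': 'index.html', 'wget_path': 'example.com/index.html',
    #   #  'pdf_path': 'output.pdf', 'screenshot_path': 'screenshot.png', ...}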