__package__ = 'archivebox.core'

from typing import Optional, Dict, Iterable
from django_stubs_ext.db.models import TypedModelMeta

import json

from pathlib import Path

from django.db import models
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.core.cache import cache
from django.urls import reverse, reverse_lazy
from django.db.models import Case, When, Value, IntegerField
from django.contrib import admin
from django.conf import settings

from abid_utils.models import ABIDModel, ABIDField, AutoDateTimeField

from queues.tasks import bg_archive_snapshot

from ..system import get_dir_size
from ..util import parse_date, base_url
from ..index.schema import Link
from ..index.html import snapshot_icons
from ..extractors import ARCHIVE_METHODS_INDEXING_PRECEDENCE, EXTRACTORS


# class BaseModel(models.Model):
#     # TODO: migrate all models to a shared base class with all our standard fields and helpers:
#     #       ulid/created_at/modified_at/created_by/is_deleted/as_json/from_json/etc.
#     #
#     # id = models.AutoField(primary_key=True, serialize=False, verbose_name='ID')
#     # ulid = models.CharField(max_length=26, null=True, blank=True, db_index=True, unique=True)
#
#     class Meta(TypedModelMeta):
#         abstract = True


class Tag(ABIDModel):
    """
    Based on django-taggit model + ABID base.
    """
    abid_prefix = 'tag_'
    abid_ts_src = 'self.created_at'
    abid_uri_src = 'self.slug'
    abid_subtype_src = '"03"'
    abid_rand_src = 'self.id'
    abid_drift_allowed = True

    id = models.UUIDField(primary_key=True, default=None, null=False, editable=False, unique=True, verbose_name='ID')
    abid = ABIDField(prefix=abid_prefix)

    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=False, related_name='tag_set')
    created_at = AutoDateTimeField(default=None, null=False, db_index=True)
    modified_at = models.DateTimeField(auto_now=True)

    name = models.CharField(unique=True, blank=False, max_length=100)
    slug = models.SlugField(unique=True, blank=False, max_length=100, editable=False)
    # slug is autoset on save from name, never set it manually

    snapshot_set: models.Manager['Snapshot']

    class Meta(TypedModelMeta):
        verbose_name = "Tag"
        verbose_name_plural = "Tags"

    def __str__(self):
        return self.name

    def slugify(self, tag, i=None):
        slug = slugify(tag)
        if i is not None:
            slug += "_%d" % i
        return slug

    def save(self, *args, **kwargs):
        if self._state.adding and not self.slug:
            self.slug = self.slugify(self.name)

            # if the name is different but the slug conflicts with another tag's slug, append a counter
            # with transaction.atomic():
            slugs = set(
                type(self)
                ._default_manager.filter(slug__startswith=self.slug)
                .values_list("slug", flat=True)
            )

            i = None
            while True:
                slug = self.slugify(self.name, i)
                if slug not in slugs:
                    self.slug = slug
                    return super().save(*args, **kwargs)
                i = 1 if i is None else i + 1
        else:
            return super().save(*args, **kwargs)

    @property
    def api_url(self) -> str:
        # /api/v1/core/tag/{uulid}
        return reverse_lazy('api-1:get_tag', args=[self.abid])   # + f'?api_key={get_or_create_api_token(request.user)}'

    @property
    def api_docs_url(self) -> str:
        return '/api/v1/docs#/Core%20Models/api_v1_core_get_tag'


class SnapshotTag(models.Model):
    id = models.AutoField(primary_key=True)

    snapshot = models.ForeignKey('Snapshot', db_column='snapshot_id', on_delete=models.CASCADE, to_field='id')
    tag = models.ForeignKey(Tag, db_column='tag_id', on_delete=models.CASCADE, to_field='id')

    class Meta:
        db_table = 'core_snapshot_tags'
        unique_together = [('snapshot', 'tag')]
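

# default manager for Snapshot: prefetches tags and archive results up-front so that
# properties like tags_str() and num_outputs can be computed without per-row queries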
class SnapshotManager(models.Manager):
    def get_queryset(self):
        return super().get_queryset().prefetch_related('tags', 'archiveresult_set')   # .annotate(archiveresult_count=models.Count('archiveresult')).distinct()


class Snapshot(ABIDModel):
    abid_prefix = 'snp_'
    abid_ts_src = 'self.created_at'
    abid_uri_src = 'self.url'
    abid_subtype_src = '"01"'
    abid_rand_src = 'self.id'
    abid_drift_allowed = True

    id = models.UUIDField(primary_key=True, default=None, null=False, editable=False, unique=True, verbose_name='ID')
    abid = ABIDField(prefix=abid_prefix)

    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=False, related_name='snapshot_set')
    created_at = AutoDateTimeField(default=None, null=False, db_index=True)    # loaded from self._init_timestamp
    modified_at = models.DateTimeField(auto_now=True)

    # legacy ts fields
    bookmarked_at = AutoDateTimeField(default=None, null=False, editable=True, db_index=True)
    downloaded_at = models.DateTimeField(default=None, null=True, editable=False, db_index=True, blank=True)

    url = models.URLField(unique=True, db_index=True)
    timestamp = models.CharField(max_length=32, unique=True, db_index=True, editable=False)
    tags = models.ManyToManyField(Tag, blank=True, through=SnapshotTag, related_name='snapshot_set', through_fields=('snapshot', 'tag'))
    title = models.CharField(max_length=512, null=True, blank=True, db_index=True)

    keys = ('url', 'timestamp', 'title', 'tags', 'downloaded_at')

    archiveresult_set: models.Manager['ArchiveResult']

    objects = SnapshotManager()
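
    # backfill bookmarked_at from created_at (or the init timestamp) if it was never set explicitly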
    def save(self, *args, **kwargs):
        if not self.bookmarked_at:
            self.bookmarked_at = self.created_at or self._init_timestamp

        super().save(*args, **kwargs)
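
    # run the archiving extractors for this snapshot via the background task queue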
    def archive(self, overwrite=False, methods=None):
        result = bg_archive_snapshot(self, overwrite=overwrite, methods=methods)
        return result

    def __repr__(self) -> str:
        title = (self.title_stripped or '-')[:64]
        return f'[{self.timestamp}] {self.url[:64]} ({title})'

    def __str__(self) -> str:
        title = (self.title_stripped or '-')[:64]
        return f'[{self.timestamp}] {self.url[:64]} ({title})'

    @classmethod
    def from_json(cls, info: dict):
        info = {k: v for k, v in info.items() if k in cls.keys}
        return cls(**info)

    def as_json(self, *args) -> dict:
        args = args or self.keys
        return {
            key: getattr(self, key) if key != 'tags' else self.tags_str(nocache=False)
            for key in args
        }

    def as_link(self) -> Link:
        return Link.from_json(self.as_json())

    def as_link_with_details(self) -> Link:
        from ..index import load_link_details
        return load_link_details(self.as_link())
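
    # the cache key includes the latest downloaded/bookmarked timestamp, so a re-archived
    # snapshot gets a fresh cache entry instead of reusing a stale one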
    @admin.display(description='Tags')
    def tags_str(self, nocache=True) -> str | None:
        calc_tags_str = lambda: ','.join(sorted(tag.name for tag in self.tags.all()))
        cache_key = f'{self.pk}-{(self.downloaded_at or self.bookmarked_at).timestamp()}-tags'

        if hasattr(self, '_prefetched_objects_cache') and 'tags' in self._prefetched_objects_cache:
            # tags are pre-fetched already, use them directly (best because db is always freshest)
            tags_str = calc_tags_str()
            return tags_str

        if nocache:
            tags_str = calc_tags_str()
            cache.set(cache_key, tags_str)
            return tags_str

        return cache.get_or_set(cache_key, calc_tags_str)

    def icons(self) -> str:
        return snapshot_icons(self)

    @property
    def api_url(self) -> str:
        # /api/v1/core/snapshot/{uulid}
        return reverse_lazy('api-1:get_snapshot', args=[self.abid])   # + f'?api_key={get_or_create_api_token(request.user)}'

    @property
    def api_docs_url(self) -> str:
        return '/api/v1/docs#/Core%20Models/api_v1_core_get_snapshot'

    def get_absolute_url(self):
        return f'/{self.archive_path}'

    @cached_property
    def title_stripped(self) -> str:
        return (self.title or '').replace("\n", " ").replace("\r", "")

    @cached_property
    def extension(self) -> str:
        from ..util import extension
        return extension(self.url)

    @cached_property
    def bookmarked(self):
        return parse_date(self.timestamp)

    @cached_property
    def bookmarked_date(self):
        # TODO: remove this
        return self.bookmarked

    @cached_property
    def is_archived(self):
        return self.as_link().is_archived

    @cached_property
    def num_outputs(self) -> int:
        # DONT DO THIS: it will trigger a separate query for every snapshot
        # return self.archiveresult_set.filter(status='succeeded').count()
        # this is better:
        return sum((1 for result in self.archiveresult_set.all() if result.status == 'succeeded'))

    @cached_property
    def base_url(self):
        return base_url(self.url)

    @cached_property
    def link_dir(self):
        return str(settings.CONFIG.ARCHIVE_DIR / self.timestamp)

    @cached_property
    def archive_path(self):
        return '{}/{}'.format(settings.CONFIG.ARCHIVE_DIR_NAME, self.timestamp)
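
    # total on-disk size of this snapshot's output dir, cached so the dir tree isn't re-walked on every page load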
    @cached_property
    def archive_size(self):
        cache_key = f'{str(self.pk)[:12]}-{(self.downloaded_at or self.bookmarked_at).timestamp()}-size'

        def calc_dir_size():
            try:
                return get_dir_size(self.link_dir)[0]
            except Exception:
                return 0

        return cache.get_or_set(cache_key, calc_dir_size)
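
    # URL of the most recent successful screenshot output (if any), using the prefetched
    # archiveresult_set when available to avoid an extra query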
    @cached_property
    def thumbnail_url(self) -> Optional[str]:
        if hasattr(self, '_prefetched_objects_cache') and 'archiveresult_set' in self._prefetched_objects_cache:
            result = (sorted(
                (
                    result
                    for result in self.archiveresult_set.all()
                    if result.extractor == 'screenshot' and result.status == 'succeeded' and result.output
                ),
                key=lambda result: result.created_at,
            ) or [None])[-1]
        else:
            result = self.archiveresult_set.filter(
                extractor='screenshot',
                status='succeeded'
            ).only('output').last()

        if result:
            return reverse('Snapshot', args=[f'{str(self.timestamp)}/{result.output}'])
        return None

    @cached_property
    def headers(self) -> Optional[Dict[str, str]]:
        try:
            return json.loads((Path(self.link_dir) / 'headers.json').read_text(encoding='utf-8').strip())
        except Exception:
            pass
        return None

    @cached_property
    def status_code(self) -> Optional[str]:
        return self.headers.get('Status-Code') if self.headers else None

    @cached_property
    def history(self) -> dict:
        # TODO: use ArchiveResult for this instead of json
        return self.as_link_with_details().history

    @cached_property
    def latest_title(self) -> Optional[str]:
        if self.title:
            return self.title   # whoopdedoo that was easy

        # check if the ArchiveResult set has already been prefetched, if so use it instead of fetching it from the db again
        if hasattr(self, '_prefetched_objects_cache') and 'archiveresult_set' in self._prefetched_objects_cache:
            try:
                return (sorted(
                    (
                        result.output.strip()
                        for result in self.archiveresult_set.all()
                        if result.extractor == 'title' and result.status == 'succeeded' and result.output
                    ),
                    key=lambda title: len(title),
                ) or [None])[-1]
            except IndexError:
                pass

        try:
            # take the longest successful title from the ArchiveResult db history
            return sorted(
                self.archiveresult_set
                    .filter(extractor='title', status='succeeded', output__isnull=False)
                    .values_list('output', flat=True),
                key=lambda r: len(r),
            )[-1]
        except IndexError:
            pass

        try:
            # take the longest successful title from the Link json index file history
            return sorted(
                (
                    result.output.strip()
                    for result in self.history['title']
                    if result.status == 'succeeded' and result.output.strip()
                ),
                key=lambda r: len(r),
            )[-1]
        except (KeyError, IndexError):
            pass

        return None
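
    # replace this snapshot's current tag set with the given tag names, creating Tag rows as needed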
    def save_tags(self, tags: Iterable[str]=()) -> None:
        tags_id = []
        for tag in tags:
            if tag.strip():
                tags_id.append(Tag.objects.get_or_create(name=tag)[0].pk)
        self.tags.clear()
        self.tags.add(*tags_id)

    # def get_storage_dir(self, create=True, symlink=True) -> Path:
    #     date_str = self.bookmarked_at.strftime('%Y%m%d')
    #     domain_str = domain(self.url)
    #     abs_storage_dir = Path(settings.CONFIG.ARCHIVE_DIR) / 'snapshots' / date_str / domain_str / str(self.ulid)
    #     if create and not abs_storage_dir.is_dir():
    #         abs_storage_dir.mkdir(parents=True, exist_ok=True)
    #     if symlink:
    #         LINK_PATHS = [
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'all_by_id' / str(self.ulid),
    #             # Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'snapshots_by_id' / str(self.ulid),
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'snapshots_by_date' / date_str / domain_str / str(self.ulid),
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'snapshots_by_domain' / domain_str / date_str / str(self.ulid),
    #         ]
    #         for link_path in LINK_PATHS:
    #             link_path.parent.mkdir(parents=True, exist_ok=True)
    #             try:
    #                 link_path.symlink_to(abs_storage_dir)
    #             except FileExistsError:
    #                 link_path.unlink()
    #                 link_path.symlink_to(abs_storage_dir)
    #     return abs_storage_dir


class ArchiveResultManager(models.Manager):
    def indexable(self, sorted: bool = True):
        """Return only ArchiveResults containing text suitable for full-text search (sorted in order of typical result quality)"""
        INDEXABLE_METHODS = [ r[0] for r in ARCHIVE_METHODS_INDEXING_PRECEDENCE ]
        qs = self.get_queryset().filter(extractor__in=INDEXABLE_METHODS, status='succeeded')

        if sorted:
            precedence = [
                When(extractor=method, then=Value(precedence))
                for method, precedence in ARCHIVE_METHODS_INDEXING_PRECEDENCE
            ]
            qs = qs.annotate(
                indexing_precedence=Case(
                    *precedence,
                    default=Value(1000),
                    output_field=IntegerField()
                )
            ).order_by('indexing_precedence')

        return qs


class ArchiveResult(ABIDModel):
    abid_prefix = 'res_'
    abid_ts_src = 'self.snapshot.created_at'
    abid_uri_src = 'self.snapshot.url'
    abid_subtype_src = 'self.extractor'
    abid_rand_src = 'self.id'
    abid_drift_allowed = True

    EXTRACTOR_CHOICES = (
        ('htmltotext', 'htmltotext'),
        ('git', 'git'),
        ('singlefile', 'singlefile'),
        ('media', 'media'),
        ('archive_org', 'archive_org'),
        ('readability', 'readability'),
        ('mercury', 'mercury'),
        ('favicon', 'favicon'),
        ('pdf', 'pdf'),
        ('headers', 'headers'),
        ('screenshot', 'screenshot'),
        ('dom', 'dom'),
        ('title', 'title'),
        ('wget', 'wget'),
    )
    STATUS_CHOICES = [
        ("succeeded", "succeeded"),
        ("failed", "failed"),
        ("skipped", "skipped")
    ]

    id = models.UUIDField(primary_key=True, default=None, null=False, editable=False, unique=True, verbose_name='ID')
    abid = ABIDField(prefix=abid_prefix)

    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=False, related_name='archiveresult_set')
    created_at = AutoDateTimeField(default=None, null=False, db_index=True)
    modified_at = models.DateTimeField(auto_now=True)

    snapshot = models.ForeignKey(Snapshot, on_delete=models.CASCADE, to_field='id', db_column='snapshot_id')
    extractor = models.CharField(choices=EXTRACTOR_CHOICES, max_length=32)
    cmd = models.JSONField()
    pwd = models.CharField(max_length=256)
    cmd_version = models.CharField(max_length=128, default=None, null=True, blank=True)
    output = models.CharField(max_length=1024)
    start_ts = models.DateTimeField(db_index=True)
    end_ts = models.DateTimeField()
    status = models.CharField(max_length=16, choices=STATUS_CHOICES)

    objects = ArchiveResultManager()

    class Meta(TypedModelMeta):
        verbose_name = 'Archive Result'
        verbose_name_plural = 'Archive Results Log'

    def __str__(self):
        # return f'[{self.abid}] 📅 {self.start_ts.strftime("%Y-%m-%d %H:%M")} 📄 {self.extractor} {self.snapshot.url}'
        return self.extractor

    @cached_property
    def snapshot_dir(self):
        return Path(self.snapshot.link_dir)

    @property
    def api_url(self) -> str:
        # /api/v1/core/archiveresult/{uulid}
        return reverse_lazy('api-1:get_archiveresult', args=[self.abid])   # + f'?api_key={get_or_create_api_token(request.user)}'

    @property
    def api_docs_url(self) -> str:
        return '/api/v1/docs#/Core%20Models/api_v1_core_get_archiveresult'

    def get_absolute_url(self):
        return f'/{self.snapshot.archive_path}/{self.output_path()}'

    @property
    def extractor_module(self):
        return EXTRACTORS[self.extractor]

    def output_path(self) -> str:
        """return the canonical output filename or directory name within the snapshot dir"""
        return self.extractor_module.get_output_path()

    def embed_path(self) -> str:
        """
        return the actual runtime-calculated path to the file on-disk that
        should be used for user-facing iframe embeds of this result
        """
        if get_embed_path_func := getattr(self.extractor_module, 'get_embed_path', None):
            return get_embed_path_func(self)

        return self.extractor_module.get_output_path()
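
    # fall back to the old output filename scheme computed from the legacy Link schema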
    def legacy_output_path(self):
        link = self.snapshot.as_link()
        return link.canonical_outputs().get(f'{self.extractor}_path')

    def output_exists(self) -> bool:
        return Path(self.output_path()).exists()

    # def get_storage_dir(self, create=True, symlink=True):
    #     date_str = self.snapshot.bookmarked_at.strftime('%Y%m%d')
    #     domain_str = domain(self.snapshot.url)
    #     abs_storage_dir = Path(settings.CONFIG.ARCHIVE_DIR) / 'results' / date_str / domain_str / self.extractor / str(self.ulid)
    #     if create and not abs_storage_dir.is_dir():
    #         abs_storage_dir.mkdir(parents=True, exist_ok=True)
    #     if symlink:
    #         LINK_PATHS = [
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'all_by_id' / str(self.ulid),
    #             # Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'results_by_id' / str(self.ulid),
    #             # Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'results_by_date' / date_str / domain_str / self.extractor / str(self.ulid),
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'results_by_domain' / domain_str / date_str / self.extractor / str(self.ulid),
    #             Path(settings.CONFIG.ARCHIVE_DIR).parent / 'index' / 'results_by_type' / self.extractor / date_str / domain_str / str(self.ulid),
    #         ]
    #         for link_path in LINK_PATHS:
    #             link_path.parent.mkdir(parents=True, exist_ok=True)
    #             try:
    #                 link_path.symlink_to(abs_storage_dir)
    #             except FileExistsError:
    #                 link_path.unlink()
    #                 link_path.symlink_to(abs_storage_dir)
    #     return abs_storage_dir

    # def symlink_index(self, create=True):
    #     abs_result_dir = self.get_storage_dir(create=create)