html.py 8.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209
  1. __package__ = 'archivebox.index'
  2. from pathlib import Path
  3. from datetime import datetime, timezone
  4. from collections import defaultdict
  5. from typing import List, Optional, Iterator, Mapping
  6. from django.utils.html import format_html, mark_safe # type: ignore
  7. from django.core.cache import cache
  8. import abx
  9. from archivebox.misc.system import atomic_write
  10. from archivebox.misc.util import (
  11. enforce_types,
  12. ts_to_date_str,
  13. urlencode,
  14. htmlencode,
  15. urldecode,
  16. )
  17. from archivebox.config import CONSTANTS, DATA_DIR, VERSION
  18. from archivebox.config.common import SERVER_CONFIG
  19. from archivebox.config.version import get_COMMIT_HASH
  20. from archivebox.misc.logging_util import printable_filesize
  21. from .schema import Link
# Django template names used by the renderers below
MAIN_INDEX_TEMPLATE = 'static_index.html'      # full main index, with page headers
MINIMAL_INDEX_TEMPLATE = 'minimal_index.html'  # header-less variant of the main index
LINK_DETAILS_TEMPLATE = 'snapshot.html'        # per-snapshot detail page
TITLE_LOADING_MSG = 'Not yet archived...'      # placeholder title shown until archiving completes
  26. ### Main Links Index
  27. @enforce_types
  28. def parse_html_main_index(out_dir: Path=DATA_DIR) -> Iterator[str]:
  29. """parse an archive index html file and return the list of urls"""
  30. index_path = Path(out_dir) / CONSTANTS.HTML_INDEX_FILENAME
  31. if index_path.exists():
  32. with open(index_path, 'r', encoding='utf-8') as f:
  33. for line in f:
  34. if 'class="link-url"' in line:
  35. yield line.split('"')[1]
  36. return ()
  37. @enforce_types
  38. def generate_index_from_links(links: List[Link], with_headers: bool):
  39. if with_headers:
  40. output = main_index_template(links)
  41. else:
  42. output = main_index_template(links, template=MINIMAL_INDEX_TEMPLATE)
  43. return output
  44. @enforce_types
  45. def main_index_template(links: List[Link], template: str=MAIN_INDEX_TEMPLATE) -> str:
  46. """render the template for the entire main index"""
  47. return render_django_template(template, {
  48. 'version': VERSION,
  49. 'git_sha': get_COMMIT_HASH() or VERSION,
  50. 'num_links': str(len(links)),
  51. 'date_updated': datetime.now(timezone.utc).strftime('%Y-%m-%d'),
  52. 'time_updated': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M'),
  53. 'links': [link._asdict(extended=True) for link in links],
  54. 'FOOTER_INFO': SERVER_CONFIG.FOOTER_INFO,
  55. })
  56. ### Link Details Index
  57. @enforce_types
  58. def write_html_link_details(link: Link, out_dir: Optional[str]=None) -> None:
  59. out_dir = out_dir or link.link_dir
  60. rendered_html = link_details_template(link)
  61. atomic_write(str(Path(out_dir) / CONSTANTS.HTML_INDEX_FILENAME), rendered_html)
@enforce_types
def link_details_template(link: Link) -> str:
    """Render the per-snapshot detail page (snapshot.html) for a single link."""
    # imported lazily; wget extractor plugin provides the canonical output path
    from abx_plugin_wget_extractor.wget import wget_output_path

    # read the live flat config so the page reflects current settings
    SAVE_ARCHIVE_DOT_ORG = abx.pm.hook.get_FLAT_CONFIG().SAVE_ARCHIVE_DOT_ORG

    link_info = link._asdict(extended=True)

    return render_django_template(LINK_DETAILS_TEMPLATE, {
        **link_info,
        # flatten the canonical output paths into top-level template vars
        **link_info['canonical'],
        'title': htmlencode(
            link.title
            or (link.base_url if link.is_archived else TITLE_LOADING_MSG)
        ),
        'url_str': htmlencode(urldecode(link.base_url)),
        # prefer the wget output as the iframe target, else fall back
        'archive_url': urlencode(
            wget_output_path(link)
            or (link.domain if link.is_archived else '')
        ) or 'about:blank',
        'extension': link.extension or 'html',
        'tags': link.tags or 'untagged',
        'size': printable_filesize(link.archive_size) if link.archive_size else 'pending',
        'status': 'archived' if link.is_archived else 'not yet archived',
        'status_color': 'success' if link.is_archived else 'danger',
        'oldest_archive_date': ts_to_date_str(link.oldest_archive_date),
        'SAVE_ARCHIVE_DOT_ORG': SAVE_ARCHIVE_DOT_ORG,
        'PREVIEW_ORIGINALS': SERVER_CONFIG.PREVIEW_ORIGINALS,
    })
  88. @enforce_types
  89. def render_django_template(template: str, context: Mapping[str, str]) -> str:
  90. """render a given html template string with the given template content"""
  91. from django.template.loader import render_to_string
  92. return render_to_string(template, context)
  93. def snapshot_icons(snapshot) -> str:
  94. cache_key = f'result_icons:{snapshot.pk}:{(snapshot.downloaded_at or snapshot.modified_at or snapshot.created_at or snapshot.bookmarked_at).timestamp()}'
  95. def calc_snapshot_icons():
  96. from core.models import ArchiveResult
  97. # start = datetime.now(timezone.utc)
  98. if hasattr(snapshot, '_prefetched_objects_cache') and 'archiveresult_set' in snapshot._prefetched_objects_cache:
  99. archive_results = [
  100. result
  101. for result in snapshot.archiveresult_set.all()
  102. if result.status == "succeeded" and result.output
  103. ]
  104. else:
  105. archive_results = snapshot.archiveresult_set.filter(status="succeeded", output__isnull=False)
  106. # import ipdb; ipdb.set_trace()
  107. link = snapshot.as_link()
  108. path = link.archive_path
  109. canon = link.canonical_outputs()
  110. output = ""
  111. output_template = '<a href="/{}/{}" class="exists-{}" title="{}">{}</a> &nbsp;'
  112. icons = {
  113. "singlefile": "❶",
  114. "wget": "🆆",
  115. "dom": "🅷",
  116. "pdf": "📄",
  117. "screenshot": "💻",
  118. "media": "📼",
  119. "git": "🅶",
  120. "archive_org": "🏛",
  121. "readability": "🆁",
  122. "mercury": "🅼",
  123. "warc": "📦"
  124. }
  125. exclude = ["favicon", "title", "headers", "htmltotext", "archive_org"]
  126. # Missing specific entry for WARC
  127. extractor_outputs = defaultdict(lambda: None)
  128. for extractor, _ in ArchiveResult.EXTRACTOR_CHOICES:
  129. for result in archive_results:
  130. if result.extractor == extractor and result:
  131. extractor_outputs[extractor] = result
  132. for extractor, _ in ArchiveResult.EXTRACTOR_CHOICES:
  133. if extractor not in exclude:
  134. existing = extractor_outputs[extractor] and extractor_outputs[extractor].status == 'succeeded' and extractor_outputs[extractor].output
  135. # Check filesystsem to see if anything is actually present (too slow, needs optimization/caching)
  136. # if existing:
  137. # existing = (Path(path) / existing)
  138. # if existing.is_file():
  139. # existing = True
  140. # elif existing.is_dir():
  141. # existing = any(existing.glob('*.*'))
  142. output += format_html(output_template, path, canon[f"{extractor}_path"], str(bool(existing)),
  143. extractor, icons.get(extractor, "?"))
  144. if extractor == "wget":
  145. # warc isn't technically it's own extractor, so we have to add it after wget
  146. # get from db (faster but less thurthful)
  147. exists = extractor_outputs[extractor] and extractor_outputs[extractor].status == 'succeeded' and extractor_outputs[extractor].output
  148. # get from filesystem (slower but more accurate)
  149. # exists = list((Path(path) / canon["warc_path"]).glob("*.warc.gz"))
  150. output += format_html(output_template, path, canon["warc_path"], str(bool(exists)), "warc", icons.get("warc", "?"))
  151. if extractor == "archive_org":
  152. # The check for archive_org is different, so it has to be handled separately
  153. # get from db (faster)
  154. exists = extractor in extractor_outputs and extractor_outputs[extractor] and extractor_outputs[extractor].status == 'succeeded' and extractor_outputs[extractor].output
  155. # get from filesystem (slower)
  156. # target_path = Path(path) / "archive.org.txt"
  157. # exists = target_path.exists()
  158. output += '<a href="{}" class="exists-{}" title="{}">{}</a> '.format(canon["archive_org_path"], str(exists),
  159. "archive_org", icons.get("archive_org", "?"))
  160. result = format_html('<span class="files-icons" style="font-size: 1.1em; opacity: 0.8; min-width: 240px; display: inline-block">{}<span>', mark_safe(output))
  161. # end = datetime.now(timezone.utc)
  162. # print(((end - start).total_seconds()*1000) // 1, 'ms')
  163. return result
  164. cache_result = cache.get(cache_key)
  165. if cache_result:
  166. return cache_result
  167. fresh_result = calc_snapshot_icons()
  168. cache.set(cache_key, fresh_result, timeout=60 * 60 * 24)
  169. return fresh_result