archive_methods.py

import os

from functools import wraps
from collections import defaultdict
from datetime import datetime

from stdlib_patches import run, PIPE, DEVNULL
from index import (
    write_link_index,
    patch_links_index,
    load_json_link_index,
)
from config import (
    CURL_BINARY,
    GIT_BINARY,
    WGET_BINARY,
    YOUTUBEDL_BINARY,
    FETCH_FAVICON,
    FETCH_TITLE,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    SUBMIT_ARCHIVE_DOT_ORG,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    OUTPUT_DIR,
    GIT_DOMAINS,
    GIT_SHA,
    WGET_USER_AGENT,
    CHECK_SSL_VALIDITY,
    COOKIES_FILE,
)
from util import (
    domain,
    extension,
    without_query,
    without_fragment,
    fetch_page_title,
    is_static_file,
    progress,
    chmod_file,
    check_link_structure,
    wget_output_path,
    chrome_args,
)
from logs import (
    _LAST_RUN_STATS,
    log_link_archiving_started,
    log_link_archiving_failed,
)


class ArchiveError(Exception):
    def __init__(self, message, hints=None):
        super().__init__(message)
        self.hints = hints
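

# NOTE: print_error_hints is called throughout this file but was neither defined
# nor imported in the source as given. The helper below is a minimal, assumed
# reconstruction that only covers the calls made in this file: it prints the
# error, any hints (from the argument or from an ArchiveError), and a
# copy-pasteable version of the failed command.
def print_error_hints(cmd, pwd, err=None, hints=None, prefix=' '):
    hints = hints or getattr(err, 'hints', None)

    print(' {}Failed: {} {}{}'.format(ANSI['red'], err.__class__.__name__, err, ANSI['reset']))
    if hints:
        hints = (hints,) if isinstance(hints, str) else hints
        for hint in hints:
            print('{}    {}'.format(prefix, hint))

    # quote args containing whitespace so the printed command can be re-run by hand
    quoted_cmd = ' '.join('"{}"'.format(arg) if ' ' in arg else arg for arg in cmd)
    print('{}Run to see full output:'.format(prefix))
    print('{}    cd {};'.format(prefix, pwd))
    print('{}    {}'.format(prefix, quoted_cmd))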


def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    ARCHIVE_METHODS = (
        (FETCH_TITLE, fetch_title),
        (FETCH_FAVICON, fetch_favicon),
        (FETCH_WGET, fetch_wget),
        (FETCH_PDF, fetch_pdf),
        (FETCH_SCREENSHOT, fetch_screenshot),
        (FETCH_DOM, fetch_dom),
        (FETCH_GIT, fetch_git),
        (FETCH_MEDIA, fetch_media),
        (SUBMIT_ARCHIVE_DOT_ORG, archive_dot_org),
    )
    active_methods = [method for toggle, method in ARCHIVE_METHODS if toggle]

    try:
        is_new = not os.path.exists(link_dir)
        if is_new:
            os.makedirs(link_dir)

        link = load_json_link_index(link_dir, link)
        log_link_archiving_started(link_dir, link, is_new)

        for archive_method in active_methods:
            archive_method(link_dir, link, overwrite=overwrite)

        write_link_index(link_dir, link)
        patch_links_index(link)
    except Exception as err:
        print(' ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))

    return link
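

# Illustrative call (hypothetical values; link dicts normally come from the
# parsed index via load_json_link_index, and the folder is named after the
# link's timestamp per the docstring above):
#
#   link = {'url': 'https://example.com', 'timestamp': '1554999999', 'title': None}
#   archive_link(os.path.join(OUTPUT_DIR, 'archive', link['timestamp']), link)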


def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize the latest and history json fields on the link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print(' √ {}'.format(method))
                result = None
            else:
                print(' > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success (skipped runs are
            # counted in the stats but not appended to the history)
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            else:
                if isinstance(result.get('output'), Exception):
                    history_entry['status'] = 'failed'
                else:
                    # 'succeded' [sic]: must match the stats key used by logs._LAST_RUN_STATS
                    history_entry['status'] = 'succeded'
                    link['latest'][method] = result['output']
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)

            _LAST_RUN_STATS[history_entry['status']] += 1
            return link
        return timed_fetch_func
    return decorator
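

# Example of the link structure this decorator maintains (illustrative values,
# not an exhaustive schema -- the extra fields come from each fetch function's
# returned result dict):
#
#   link = {
#       'url': 'https://example.com',
#       'latest': {'wget': 'example.com/index.html'},
#       'history': {
#           'wget': [{
#               'timestamp': '1554999999',
#               'status': 'succeded',   # [sic], see note above
#               'duration': '1234',
#               'cmd': ['wget', ...],
#               'output': 'example.com/index.html',
#           }],
#       },
#   }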


@attach_result_to_link('title')
def fetch_title(link_dir, link, timeout=TIMEOUT):
    """try to guess the page's title from its content"""

    # if link already has a valid title, skip it
    if link['title'] and not link['title'].lower().startswith('http'):
        return {'output': link['title'], 'status': 'skipped'}

    if is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    title = None  # stays None if fetch_page_title raises, so the check below can't NameError
    end = progress(timeout, prefix=' ')
    try:
        title = fetch_page_title(link['url'], timeout=timeout, progress=False)
        end()
        output = title
    except Exception as e:
        end()
        output = e
        print(' {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))

    if title and title.strip():
        link['title'] = title
        output = title

    return {
        'cmd': 'fetch_page_title("{}")'.format(link['url']),
        'output': output,
    }


@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    output = 'favicon.ico'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        CURL_BINARY,
        '--max-time', str(timeout),
        '--location',
        '--output', output,
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),
    ]

    end = progress(timeout, prefix=' ')
    try:
        run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('wget')
def fetch_wget(link_dir, link, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, domain(link['url']))
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    if FETCH_WARC:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    CMD = [
        WGET_BINARY,
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '-e', 'robots=off',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if FETCH_WARC else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if FETCH_WARC else ()),
        *(('--page-requisites',) if FETCH_WGET_REQUISITES else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]

    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        output = wget_output_path(link)

        output_tail = [
            line.strip()
            for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
            if line.strip()
        ]

        # parse out number of files downloaded from the last line of output:
        # "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if output_tail and 'Downloaded:' in output_tail[-1]
            else 0
        )

        # check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            hints = (
                'Got wget response code {}:\n'.format(result.returncode),
                *output_tail,
            )
            if b'403: Forbidden' in result.stderr:
                raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)
            if b'404: Not Found' in result.stderr:
                raise ArchiveError('404 Not Found', hints)
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise ArchiveError('500 Internal Server Error', hints)
            raise ArchiveError('Got an error from the server', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT):
    """print PDF of site to file using chrome --headless"""

    if is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    output = 'output.pdf'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_args(timeout=timeout),
        '--print-to-pdf',
        link['url'],
    ]

    end = progress(timeout, prefix=' ')
    hints = None
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to print PDF', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e, hints=hints)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT):
    """take screenshot of site using chrome --headless"""

    if is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    output = 'screenshot.png'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_args(timeout=timeout),
        '--screenshot',
        link['url'],
    ]

    end = progress(timeout, prefix=' ')
    hints = None
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to take screenshot', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e, hints=hints)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT):
    """print HTML of site to file using chrome --dump-dom"""

    if is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    output = 'output.html'
    output_path = os.path.join(link_dir, output)
    if os.path.exists(output_path):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_args(timeout=timeout),
        '--dump-dom',
        link['url'],
    ]

    end = progress(timeout, prefix=' ')
    hints = None
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = result.stderr.decode()
            raise ArchiveError('Failed to fetch DOM', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e, hints=hints)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    is_clonable_url = (
        domain(link['url']) in GIT_DOMAINS
        or extension(link['url']) == 'git'
    )
    if is_static_file(link['url']) or not is_clonable_url:
        return {'output': None, 'status': 'skipped'}

    output = 'git'
    output_path = os.path.join(link_dir, 'git')
    if os.path.exists(output_path):
        return {'output': output, 'status': 'skipped'}

    os.makedirs(output_path, exist_ok=True)
    CMD = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]

    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        end()

        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            hints = 'got git response code {}:'.format(result.returncode)
            raise ArchiveError('Failed git download', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
    """download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = 'media'
    output_path = os.path.join(link_dir, 'media')
    if os.path.exists(output_path) and not overwrite:
        return {'output': output, 'status': 'skipped'}

    os.makedirs(output_path, exist_ok=True)
    CMD = [
        YOUTUBEDL_BINARY,
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--all-subs',
        '--extract-audio',
        '--keep-video',
        '--ignore-errors',
        '--geo-bypass',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]

    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        end()
        chmod_file(output, cwd=link_dir)
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                    or b'HTTP Error 404' in result.stderr
                    or b'HTTP Error 403' in result.stderr
                    or b'URL could be a direct video link' in result.stderr
                    or b'Unable to extract container ID' in result.stderr):
                # these happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                hints = (
                    'got youtubedl response code {}:'.format(result.returncode),
                    *result.stderr.decode().split('\n'),
                )
                raise ArchiveError('Failed to download media', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


def parse_archive_dot_org_response(response):
    # parse archive.org response headers, lowercasing all the header
    # names and collecting their values into a dict of lists
    headers = defaultdict(list)
    for header in response.splitlines():
        if b':' not in header or not header.strip():
            continue
        name, val = header.decode().split(':', 1)
        headers[name.lower().strip()].append(val.strip())

    # get successful archive url in "content-location" header, plus any errors
    content_location = headers['content-location']
    errors = headers['x-archive-wayback-runtime-error']
    return content_location, errors
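

# Illustrative example (hypothetical response bytes, shaped like the curl --head
# output parsed above):
#
#   >>> parse_archive_dot_org_response(
#   ...     b'HTTP/2 200\r\n'
#   ...     b'Content-Location: /web/20190401000000/https://example.com\r\n'
#   ... )
#   (['/web/20190401000000/https://example.com'], [])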


@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    output = 'archive.org.txt'
    archive_org_url = None

    path = os.path.join(link_dir, output)
    if os.path.exists(path):
        with open(path, 'r') as f:
            archive_org_url = f.read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    # be nice to the Archive.org people and show them where all this
    # ArchiveBox traffic is coming from
    user_agent = 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA)
    CMD = [
        CURL_BINARY,
        '--location',
        '--head',
        '--user-agent', user_agent,
        '--max-time', str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]

    end = progress(timeout, prefix=' ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)
        end()
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = 'https://web.archive.org{}'.format(content_location[0])
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    if not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url, write the
        # submit url so it gets resubmitted to archive.org. That way, when the
        # user visits the URL in person it will attempt to re-archive the page,
        # and if it fails again it shows the nicer error message explaining why
        # the url was rejected.
        archive_org_url = archive_org_url or submit_url
        with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file(output, cwd=link_dir)
        output = archive_org_url

    return {
        'cmd': CMD,
        'output': output,
    }