archive_methods.py

import os

from functools import wraps
from collections import defaultdict
from datetime import datetime

from index import (
    write_link_index,
    patch_links_index,
    load_json_link_index,
)
from config import (
    CURL_BINARY,
    GIT_BINARY,
    WGET_BINARY,
    YOUTUBEDL_BINARY,
    FETCH_FAVICON,
    FETCH_TITLE,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    SUBMIT_ARCHIVE_DOT_ORG,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    OUTPUT_DIR,
    GIT_DOMAINS,
    GIT_SHA,
    WGET_USER_AGENT,
    CHECK_SSL_VALIDITY,
    COOKIES_FILE,
)
from util import (
    domain,
    extension,
    without_query,
    without_fragment,
    fetch_page_title,
    is_static_file,
    TimedProgress,
    chmod_file,
    wget_output_path,
    chrome_args,
    check_link_structure,
    run, PIPE, DEVNULL,
)
from logs import (
    _LAST_RUN_STATS,
    log_link_archiving_started,
    log_link_archiving_finished,
    log_archive_method_starting,
    log_archive_method_finished,
)


class ArchiveError(Exception):
    def __init__(self, message, hints=None):
        super().__init__(message)
        self.hints = hints


def archive_link(link_dir, link):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    ARCHIVE_METHODS = (
        ('title', should_fetch_title, fetch_title),
        ('favicon', should_fetch_favicon, fetch_favicon),
        ('wget', should_fetch_wget, fetch_wget),
        ('pdf', should_fetch_pdf, fetch_pdf),
        ('screenshot', should_fetch_screenshot, fetch_screenshot),
        ('dom', should_fetch_dom, fetch_dom),
        ('git', should_fetch_git, fetch_git),
        ('media', should_fetch_media, fetch_media),
        ('archive_org', should_fetch_archive_dot_org, archive_dot_org),
    )

    try:
        is_new = not os.path.exists(link_dir)
        if is_new:
            os.makedirs(link_dir)

        link = load_json_link_index(link_dir, link)
        log_link_archiving_started(link_dir, link, is_new)

        skipped_entirely = True
        for method_name, should_run, method_function in ARCHIVE_METHODS:
            if method_name not in link['history']:
                link['history'][method_name] = []
            if method_name not in link['latest']:
                link['latest'][method_name] = None

            if not should_run(link_dir, link):
                continue

            if skipped_entirely:
                skipped_entirely = False
                print()

            log_archive_method_starting(method_name)
            result = method_function(link_dir, link)
            log_archive_method_finished(result)

            link['history'][method_name].append(result)
            if result['status'] == 'succeeded':
                link['latest'][method_name] = result['output']

            if result['status'] != 'skipped':
                made_changes = True

            _LAST_RUN_STATS[result['status']] += 1

        write_link_index(link_dir, link)
        patch_links_index(link)
        log_link_archiving_finished(link_dir, link, is_new, skipped_entirely)

    except Exception as err:
        print(' ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))
        raise

    return link
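
# A sketch of the result dict that each fetch_* method below returns (the
# 'cmd'/'pwd'/'output'/'status' keys come from the return statements in this
# file; the remaining keys are whatever TimedProgress exposes via timer.stats):
#
#     {
#         'cmd': [...],              # command that was run (or shown for logging)
#         'pwd': link_dir,           # directory the command ran in
#         'output': 'relative/path'  # or an Exception instance on failure
#         'status': 'succeeded',     # or 'failed'
#         **timer.stats,             # timing info from TimedProgress
#     }
#
# archive_link() appends this dict to link['history'][method_name] and, when
# status == 'succeeded', stores the 'output' value in link['latest'][method_name].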


def should_fetch_title(link_dir, link):
    # if link already has valid title, skip it
    if link['title'] and not link['title'].lower().startswith('http'):
        return False

    if is_static_file(link['url']):
        return False

    return FETCH_TITLE


def fetch_title(link_dir, link, timeout=TIMEOUT):
    """try to guess the page's title from its content"""

    output = None
    # cmd is recorded for the result dict and logs only; the actual request
    # is made by fetch_page_title() below, not by running this pipeline
    cmd = [
        CURL_BINARY,
        link['url'],
        '|',
        'grep',
        '<title>',
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        output = fetch_page_title(link['url'], timeout=timeout, progress=False)
        if not output:
            raise ArchiveError('Unable to detect page title')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_favicon(link_dir, link):
    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return False

    return FETCH_FAVICON


def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    output = 'favicon.ico'
    cmd = [
        CURL_BINARY,
        '--max-time', str(timeout),
        '--location',
        '--output', output,
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_wget(link_dir, link):
    output_path = wget_output_path(link)
    if output_path and os.path.exists(os.path.join(link_dir, output_path)):
        return False

    return FETCH_WGET


def fetch_wget(link_dir, link, timeout=TIMEOUT):
    """download full site using wget"""

    if FETCH_WARC:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    output = None
    cmd = [
        WGET_BINARY,
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '-e', 'robots=off',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if FETCH_WARC else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if FETCH_WARC else ()),
        *(('--page-requisites',) if FETCH_WGET_REQUISITES else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        output = wget_output_path(link)

        # parse out number of files downloaded from last line of stderr:
        #  "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        output_tail = [
            line.strip()
            for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
            if line.strip()
        ]
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            hints = (
                'Got wget response code: {}.'.format(result.returncode),
                *output_tail,
            )
            if b'403: Forbidden' in result.stderr:
                raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)
            if b'404: Not Found' in result.stderr:
                raise ArchiveError('404 Not Found', hints)
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise ArchiveError('500 Internal Server Error', hints)
            raise ArchiveError('Got an error from the server', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }
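
# Illustrative example of the stderr tail parsing in fetch_wget() above
# (values are made up, not from a real run): if wget's last output line is
#     "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
# then output_tail[-1].split(' ', 2) == ['Downloaded:', '76', 'files, 4.0M in 1.6s (2.52 MB/s)']
# and files_downloaded == 76.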


def should_fetch_pdf(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return False

    return FETCH_PDF


def fetch_pdf(link_dir, link, timeout=TIMEOUT):
    """print PDF of site to file using chrome --headless"""

    output = 'output.pdf'
    cmd = [
        *chrome_args(timeout=timeout),
        '--print-to-pdf',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to print PDF', hints)

        chmod_file('output.pdf', cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_screenshot(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return False

    return FETCH_SCREENSHOT


def fetch_screenshot(link_dir, link, timeout=TIMEOUT):
    """take screenshot of site using chrome --headless"""

    output = 'screenshot.png'
    cmd = [
        *chrome_args(timeout=timeout),
        '--screenshot',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to take screenshot', hints)

        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_dom(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'output.html')):
        return False

    return FETCH_DOM


def fetch_dom(link_dir, link, timeout=TIMEOUT):
    """print HTML of site to file using chrome --dump-dom"""

    output = 'output.html'
    output_path = os.path.join(link_dir, output)
    cmd = [
        *chrome_args(timeout=timeout),
        '--dump-dom',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        with open(output_path, 'w+') as f:
            result = run(cmd, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)

        if result.returncode:
            hints = result.stderr.decode()
            raise ArchiveError('Failed to fetch DOM', hints)

        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_git(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'git')):
        return False

    is_clonable_url = (
        domain(link['url']) in GIT_DOMAINS
        or extension(link['url']) == 'git'
    )
    if not is_clonable_url:
        return False

    return FETCH_GIT


def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    output = 'git'
    output_path = os.path.join(link_dir, 'git')
    os.makedirs(output_path, exist_ok=True)
    cmd = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)

        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            hints = 'Got git response code: {}.'.format(result.returncode)
            raise ArchiveError('Failed git download', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def should_fetch_media(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'media')):
        return False

    return FETCH_MEDIA


def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = 'media'
    output_path = os.path.join(link_dir, 'media')
    os.makedirs(output_path, exist_ok=True)
    cmd = [
        YOUTUBEDL_BINARY,
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--no-check-certificate',
        '--user-agent',  # NOTE: --user-agent normally takes a value; none is given here, so the next flag may be consumed as its argument
        '--all-subs',
        '--extract-audio',
        '--keep-video',
        '--ignore-errors',
        '--geo-bypass',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),  # redundant: --no-check-certificate is already passed unconditionally above
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        chmod_file(output, cwd=link_dir)
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                    or b'HTTP Error 404' in result.stderr
                    or b'HTTP Error 403' in result.stderr
                    or b'URL could be a direct video link' in result.stderr
                    or b'Unable to extract container ID' in result.stderr):
                # These happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                hints = (
                    'Got youtube-dl response code: {}.'.format(result.returncode),
                    *result.stderr.decode().split('\n'),
                )
                raise ArchiveError('Failed to download media', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }


def parse_archive_dot_org_response(response):
    # Parse archive.org response headers
    headers = defaultdict(list)

    # lowercase all the header names and store in dict
    for header in response.splitlines():
        if b':' not in header or not header.strip():
            continue
        name, val = header.decode().split(':', 1)
        headers[name.lower().strip()].append(val.strip())

    # Get successful archive url in "content-location" header or any errors
    content_location = headers['content-location']
    errors = headers['x-archive-wayback-runtime-error']
    return content_location, errors
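
# Illustrative example (not a real response): given a HEAD response like
#     b"HTTP/2 200\r\nContent-Location: /web/20190101000000/https://example.com\r\n..."
# this returns (['/web/20190101000000/https://example.com'], []), and
# archive_dot_org() below prefixes the path with https://web.archive.org.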


def should_fetch_archive_dot_org(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'archive.org.txt')):
        # if open(path, 'r').read().strip() != 'None':
        return False

    return SUBMIT_ARCHIVE_DOT_ORG


def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    output = 'archive.org.txt'
    archive_org_url = None
    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    cmd = [
        CURL_BINARY,
        '--location',
        '--head',
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),  # be nice to the Archive.org people and show them where all this ArchiveBox traffic is coming from
        '--max-time', str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = 'https://web.archive.org{}'.format(content_location[0])
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    if not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url, write the
        # submit url so it can be resubmitted to archive.org. That way, when the
        # user visits the URL in person, it will attempt to re-archive it, and it'll
        # show the nicer error message explaining why the url was rejected if it fails.
        archive_org_url = archive_org_url or submit_url
        with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = archive_org_url

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }
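
# After archive_dot_org() runs, archive.org.txt (when written) contains either
# the snapshot URL built from the Content-Location header returned by the
# Wayback Machine, or, if the submission was rejected, the
# https://web.archive.org/save/... submit URL so a visitor can retry the submission.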