# archive_methods.py

import os

from collections import defaultdict
from datetime import datetime

from index import (
    write_link_index,
    patch_links_index,
    load_json_link_index,
)
from config import (
    CURL_BINARY,
    GIT_BINARY,
    WGET_BINARY,
    YOUTUBEDL_BINARY,
    FETCH_FAVICON,
    FETCH_TITLE,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    SUBMIT_ARCHIVE_DOT_ORG,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    OUTPUT_DIR,
    GIT_DOMAINS,
    GIT_SHA,
    WGET_USER_AGENT,
    CHECK_SSL_VALIDITY,
    COOKIES_FILE,
)
from util import (
    domain,
    extension,
    without_query,
    without_fragment,
    fetch_page_title,
    is_static_file,
    TimedProgress,
    chmod_file,
    wget_output_path,
    chrome_args,
    check_link_structure,
    run, PIPE, DEVNULL,
)
from logs import (
    log_link_archiving_started,
    log_link_archiving_finished,
    log_archive_method_started,
    log_archive_method_finished,
)

class ArchiveError(Exception):
    def __init__(self, message, hints=None):
        super().__init__(message)
        self.hints = hints

def archive_link(link_dir, link):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    ARCHIVE_METHODS = (
        ('title', should_fetch_title, fetch_title),
        ('favicon', should_fetch_favicon, fetch_favicon),
        ('wget', should_fetch_wget, fetch_wget),
        ('pdf', should_fetch_pdf, fetch_pdf),
        ('screenshot', should_fetch_screenshot, fetch_screenshot),
        ('dom', should_fetch_dom, fetch_dom),
        ('git', should_fetch_git, fetch_git),
        ('media', should_fetch_media, fetch_media),
        ('archive_org', should_fetch_archive_dot_org, archive_dot_org),
    )

    try:
        is_new = not os.path.exists(link_dir)
        if is_new:
            os.makedirs(link_dir)

        link = load_json_link_index(link_dir, link)
        log_link_archiving_started(link_dir, link, is_new)

        skipped_entirely = True
        for method_name, should_run, method_function in ARCHIVE_METHODS:
            if method_name not in link['history']:
                link['history'][method_name] = []
            if method_name not in link['latest']:
                link['latest'][method_name] = None

            if not should_run(link_dir, link):
                continue

            if skipped_entirely:
                skipped_entirely = False
                print()

            log_archive_method_started(method_name)
            result = method_function(link_dir, link)
            log_archive_method_finished(result)

            link['history'][method_name].append(result)
            if result['status'] == 'succeeded':
                link['latest'][method_name] = result['output']

        write_link_index(link_dir, link)
        patch_links_index(link)
        log_link_archiving_finished(link_dir, link, is_new, skipped_entirely)

    except Exception as err:
        print(' ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))
        raise

    return link

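# Rough usage sketch (not part of this module's original flow): callers elsewhere in
# ArchiveBox are expected to pass a parsed link dict plus its per-link output folder.
# The folder layout and the example values below are illustrative assumptions, not exact values.
#
#   link = {
#       'url': 'https://example.com',
#       'timestamp': '1544213984',   # hypothetical; folders are named after the link's timestamp
#       'title': None,
#       'history': {},
#       'latest': {},
#   }
#   link_dir = os.path.join(OUTPUT_DIR, 'archive', link['timestamp'])
#   link = archive_link(link_dir, link)
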
### Archive Method Functions

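# Each method below is a pair: should_fetch_*(link_dir, link) decides whether to run
# (based on config flags and whether the output already exists on disk), and
# fetch_*(link_dir, link, timeout) does the work and returns a result dict of the shape:
#   {'cmd': [...], 'pwd': link_dir, 'output': <filename or Exception>,
#    'status': 'succeeded' | 'failed', **timer.stats}
# archive_link() above records these results in link['history'] and link['latest'].
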
def should_fetch_title(link_dir, link):
    # if link already has valid title, skip it
    if link['title'] and not link['title'].lower().startswith('http'):
        return False

    if is_static_file(link['url']):
        return False

    return FETCH_TITLE

def fetch_title(link_dir, link, timeout=TIMEOUT):
    """try to guess the page's title from its content"""

    output = None
    # cmd is recorded for logging/display only; the actual request is made by fetch_page_title() below
    cmd = [
        CURL_BINARY,
        link['url'],
        '|',
        'grep',
        '<title>',
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        output = fetch_page_title(link['url'], timeout=timeout, progress=False)
        if not output:
            raise ArchiveError('Unable to detect page title')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_favicon(link_dir, link):
    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return False

    return FETCH_FAVICON

def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    output = 'favicon.ico'
    cmd = [
        CURL_BINARY,
        '--max-time', str(timeout),
        '--location',
        '--output', output,
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_wget(link_dir, link):
    output_path = wget_output_path(link)
    if output_path and os.path.exists(os.path.join(link_dir, output_path)):
        return False

    return FETCH_WGET

def fetch_wget(link_dir, link, timeout=TIMEOUT):
    """download full site using wget"""

    if FETCH_WARC:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    output = None
    cmd = [
        WGET_BINARY,
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '-e', 'robots=off',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if FETCH_WARC else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if FETCH_WARC else ()),
        *(('--page-requisites',) if FETCH_WGET_REQUISITES else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        output = wget_output_path(link)

        # parse out number of files downloaded from last line of stderr:
        #   "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        output_tail = [
            line.strip()
            for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
            if line.strip()
        ]
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            hints = (
                'Got wget response code: {}.'.format(result.returncode),
                *output_tail,
            )
            if b'403: Forbidden' in result.stderr:
                raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)
            if b'404: Not Found' in result.stderr:
                raise ArchiveError('404 Not Found', hints)
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise ArchiveError('500 Internal Server Error', hints)
            raise ArchiveError('Got an error from the server', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

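# Note: wget_output_path() (from util) maps the URL to the file wget actually saved. For example,
# a page like https://example.com/page typically ends up under something like example.com/page.html
# because of --adjust-extension and --force-directories; the exact path shown here is an
# illustrative assumption, and util.wget_output_path is the authority.
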
def should_fetch_pdf(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return False

    return FETCH_PDF

def fetch_pdf(link_dir, link, timeout=TIMEOUT):
    """print PDF of site to file using chrome --headless"""

    output = 'output.pdf'
    cmd = [
        *chrome_args(timeout=timeout),
        '--print-to-pdf',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to print PDF', hints)
        chmod_file('output.pdf', cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

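# chrome_args() (from util) is assumed to supply the shared headless-Chrome invocation,
# roughly [CHROME_BINARY, '--headless', '--disable-gpu', ...] plus timeout-related flags;
# the exact flag list is an assumption here. Only the method-specific flag (--print-to-pdf,
# --screenshot, --dump-dom) differs between fetch_pdf, fetch_screenshot, and fetch_dom.
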
def should_fetch_screenshot(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return False

    return FETCH_SCREENSHOT

def fetch_screenshot(link_dir, link, timeout=TIMEOUT):
    """take screenshot of site using chrome --headless"""

    output = 'screenshot.png'
    cmd = [
        *chrome_args(timeout=timeout),
        '--screenshot',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to take screenshot', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_dom(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'output.html')):
        return False

    return FETCH_DOM

def fetch_dom(link_dir, link, timeout=TIMEOUT):
    """print HTML of site to file using chrome --dump-dom"""

    output = 'output.html'
    output_path = os.path.join(link_dir, output)
    cmd = [
        *chrome_args(timeout=timeout),
        '--dump-dom',
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        with open(output_path, 'w+') as f:
            result = run(cmd, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)
        if result.returncode:
            hints = result.stderr.decode()
            raise ArchiveError('Failed to fetch DOM', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_git(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'git')):
        return False

    is_clonable_url = (
        (domain(link['url']) in GIT_DOMAINS)
        or (extension(link['url']) == 'git')
    )
    if not is_clonable_url:
        return False

    return FETCH_GIT

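# Example (illustrative): with GIT_DOMAINS containing e.g. 'github.com', a link like
# https://github.com/pirate/ArchiveBox or any URL ending in .git passes the check above,
# while a plain article URL does not.
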
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    output = 'git'
    output_path = os.path.join(link_dir, 'git')
    os.makedirs(output_path, exist_ok=True)
    cmd = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            hints = 'Got git response code: {}.'.format(result.returncode)
            raise ArchiveError('Failed git download', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_media(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'media')):
        return False

    return FETCH_MEDIA

def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = 'media'
    output_path = os.path.join(link_dir, 'media')
    os.makedirs(output_path, exist_ok=True)
    cmd = [
        YOUTUBEDL_BINARY,
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--all-subs',
        '--extract-audio',
        '--keep-video',
        '--ignore-errors',
        '--geo-bypass',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        chmod_file(output, cwd=link_dir)
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                    or b'HTTP Error 404' in result.stderr
                    or b'HTTP Error 403' in result.stderr
                    or b'URL could be a direct video link' in result.stderr
                    or b'Unable to extract container ID' in result.stderr):
                # These happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                hints = (
                    'Got youtube-dl response code: {}.'.format(result.returncode),
                    *result.stderr.decode().split('\n'),
                )
                raise ArchiveError('Failed to download media', hints)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def should_fetch_archive_dot_org(link_dir, link):
    if is_static_file(link['url']):
        return False

    if os.path.exists(os.path.join(link_dir, 'archive.org.txt')):
        # if open(path, 'r').read().strip() != 'None':
        return False

    return SUBMIT_ARCHIVE_DOT_ORG

def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    output = 'archive.org.txt'
    archive_org_url = None
    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    cmd = [
        CURL_BINARY,
        '--location',
        '--head',
        # be nice to the Archive.org people and show them where all this ArchiveBox traffic is coming from
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),
        '--max-time', str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        result = run(cmd, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = 'https://web.archive.org{}'.format(content_location[0])
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    if not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url, write the submit url so the
        # link gets resubmitted to archive.org. That way, when the user visits the URL in person,
        # it will attempt to re-archive the page and show the nicer error message explaining why
        # the url was rejected if it fails again.
        archive_org_url = archive_org_url or submit_url
        with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = archive_org_url

    return {
        'cmd': cmd,
        'pwd': link_dir,
        'output': output,
        'status': status,
        **timer.stats,
    }

def parse_archive_dot_org_response(response):
    # Parse archive.org response headers
    headers = defaultdict(list)

    # lowercase all the header names and store in dict
    for header in response.splitlines():
        if b':' not in header or not header.strip():
            continue
        name, val = header.decode().split(':', 1)
        headers[name.lower().strip()].append(val.strip())

    # Get successful archive url in "content-location" header or any errors
    content_location = headers['content-location']
    errors = headers['x-archive-wayback-runtime-error']

    return content_location, errors
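
# Sketch of the input this parser expects: the raw header bytes produced by the
# `curl --head https://web.archive.org/save/...` call above (values here are made up for illustration):
#
#   b'HTTP/2 302\r\ncontent-location: /web/20190401000000/https://example.com\r\n...'
#
# which would yield (['/web/20190401000000/https://example.com'], []). A robots.txt refusal instead
# shows up in the x-archive-wayback-runtime-error header, e.g. a value containing 'RobotAccessControlException'.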