archive_methods.py

import os
import re
import sys

from functools import wraps
from collections import defaultdict
from datetime import datetime

from peekable import Peekable

from index import wget_output_path, parse_json_link_index, write_link_index
from links import links_after_timestamp
from config import (
    CHROME_BINARY,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    RESOLUTION,
    CHECK_SSL_VALIDITY,
    SUBMIT_ARCHIVE_DOT_ORG,
    FETCH_FAVICON,
    WGET_USER_AGENT,
    CHROME_USER_DATA_DIR,
    CHROME_SANDBOX,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    ARCHIVE_DIR,
    GIT_DOMAINS,
    GIT_SHA,
)
from util import (
    check_dependencies,
    progress,
    chmod_file,
    pretty_path,
    run, PIPE, DEVNULL,
)

_RESULTS_TOTALS = {   # globals are bad, mmkay
    'skipped': 0,
    'succeeded': 0,
    'failed': 0,
}
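
# The link dicts handled below are produced upstream (by the parsing/index code).
# The fields this module actually reads are 'url', 'timestamp', 'domain', 'type',
# and 'title', plus the 'latest'/'history' fields added by attach_result_to_link.
# A minimal sketch of the expected shape (illustrative values only):
#
#   link = {
#       'url': 'https://example.com/some/page',
#       'timestamp': '1544185623',
#       'domain': 'example.com',
#       'type': None,              # or 'PDF', 'image', 'git', ...
#       'title': 'Some Page',
#   }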

def archive_links(archive_path, links, source=None, resume=None):
    check_dependencies()

    to_archive = Peekable(links_after_timestamp(links, resume))
    idx, link = 0, to_archive.peek(0)

    try:
        for idx, link in enumerate(to_archive):
            link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
            archive_link(link_dir, link)

    except (KeyboardInterrupt, SystemExit, Exception) as e:
        print('{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
            **ANSI,
            now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            idx=idx + 1,
            timestamp=link['timestamp'],
            total=len(links),
        ))
        print('    Continue where you left off by running:')
        print('        {} {}'.format(
            pretty_path(sys.argv[0]),
            link['timestamp'],
        ))
        if not isinstance(e, KeyboardInterrupt):
            raise e
        raise SystemExit(1)

def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    try:
        update_existing = os.path.exists(link_dir)
        if update_existing:
            link = {
                **parse_json_link_index(link_dir),
                **link,
            }
        else:
            os.makedirs(link_dir)

        log_link_archive(link_dir, link, update_existing)

        if FETCH_FAVICON:
            link = fetch_favicon(link_dir, link, overwrite=overwrite)

        if FETCH_WGET:
            link = fetch_wget(link_dir, link, overwrite=overwrite)

        if FETCH_PDF:
            link = fetch_pdf(link_dir, link, overwrite=overwrite)

        if FETCH_SCREENSHOT:
            link = fetch_screenshot(link_dir, link, overwrite=overwrite)

        if FETCH_DOM:
            link = fetch_dom(link_dir, link, overwrite=overwrite)

        if SUBMIT_ARCHIVE_DOT_ORG:
            link = archive_dot_org(link_dir, link, overwrite=overwrite)

        if FETCH_GIT:
            link = fetch_git(link_dir, link, overwrite=overwrite)

        if FETCH_MEDIA:
            link = fetch_media(link_dir, link, overwrite=overwrite)

        write_link_index(link_dir, link)

    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))

    return link

def log_link_archive(link_dir, link, update_existing):
    print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n    {blue}{url}{reset}'.format(
        symbol='*' if update_existing else '+',
        symbol_color=ANSI['black' if update_existing else 'green'],
        now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        **link,
        **ANSI,
    ))

    print('    > {}{}'.format(pretty_path(link_dir), '' if update_existing else ' (new)'))
    if link['type']:
        print('      i {}'.format(link['type']))

def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize methods and history json field on link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print('      √ {}'.format(method))
                result = None
            else:
                print('      > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
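
# Rough sketch of what the decorator above leaves on the link after a decorated
# fetch function runs (illustrative values, not output from a real run):
#
#   link['latest']['wget'] == 'example.com/page/index.html'
#   link['history']['wget'] == [{
#       'timestamp': '1544185623',    # start time, epoch seconds as a string
#       'status': 'succeeded',        # or 'failed' / 'skipped'
#       'duration': '1243',           # milliseconds, as a string
#       'cmd': ['wget', ...],
#       'output': 'example.com/page/index.html',
#   }]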

@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, link['domain'])
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    if warc:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    CMD = [
        'wget',
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if warc else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if warc else ()),
        *(('--page-requisites',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # index.html
        end()
        output = wget_output_path(link, look_in=domain_dir)

        output_tail = ['          ' + line for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:] if line.strip()]

        # parse out number of files downloaded from "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases (wget exits non-zero if any single
        # resource failed, so also require that nothing was downloaded)
        if result.returncode > 0 and files_downloaded < 1:
            print('        Got wget response code {}:'.format(result.returncode))
            print('\n'.join(output_tail))
            if b'403: Forbidden' in result.stderr:
                raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
            if b'404: Not Found' in result.stderr:
                raise Exception('404 Not Found')
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise Exception('500 Internal Server Error')
            raise Exception('Got an error from the server')
    except Exception as e:
        end()
        print('        {}Some resources were skipped: {}{}'.format(ANSI['lightyellow'], e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD).replace(WGET_USER_AGENT, '"{}"'.format(WGET_USER_AGENT))))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print PDF of site to file using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return {'output': 'output.pdf', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--print-to-pdf',
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # output.pdf
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to print PDF')
        chmod_file('output.pdf', cwd=link_dir)
        output = 'output.pdf'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
    """take screenshot of site using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return {'output': 'screenshot.png', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--screenshot',
        '--window-size={}'.format(resolution),
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        # '--full-page',  # TODO: make this actually work using ./bin/screenshot fullPage: true
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # screenshot.png
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to take screenshot')
        chmod_file('screenshot.png', cwd=link_dir)
        output = 'screenshot.png'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print HTML of site to file using chrome --dump-dom"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    output_path = os.path.join(link_dir, 'output.html')

    if os.path.exists(output_path):
        return {'output': 'output.html', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--dump-dom',
        '--timeout={}'.format(timeout * 1000),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)  # output.html
        end()
        if result.returncode:
            print('        ', result.stderr.decode())
            raise Exception('Failed to fetch DOM')
        chmod_file('output.html', cwd=link_dir)
        output = 'output.html'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    path = os.path.join(link_dir, 'archive.org.txt')
    if os.path.exists(path):
        archive_org_url = open(path, 'r').read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])

    success = False
    CMD = [
        'curl',
        '--location',
        '--head',
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),
        '--max-time', str(timeout),
        '--get',
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)  # archive.org.txt
        end()

        # Parse archive.org response headers
        headers = defaultdict(list)

        # lowercase all the header names and store in dict
        for header in result.stdout.splitlines():
            if b':' not in header or not header.strip():
                continue
            name, val = header.decode().split(':', 1)
            headers[name.lower().strip()].append(val.strip())

        # Get successful archive url in "content-location" header or any errors
        content_location = headers['content-location']
        errors = headers['x-archive-wayback-runtime-error']

        if content_location:
            saved_url = 'https://web.archive.org{}'.format(content_location[0])
            success = True
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            output = submit_url
            # raise Exception('Archive.org denied by {}/robots.txt'.format(link['domain']))
        elif errors:
            raise Exception(', '.join(errors))
        else:
            raise Exception('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            {}'.format(' '.join(CMD)))
        output = e

    if success:
        with open(os.path.join(link_dir, 'archive.org.txt'), 'w', encoding='utf-8') as f:
            f.write(saved_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = saved_url

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return {'output': 'favicon.ico', 'status': 'skipped'}

    CMD = [
        'curl',
        '--max-time', str(timeout),
        'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
    ]
    fout = open('{}/favicon.ico'.format(link_dir), 'w')
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir, timeout=timeout)  # favicon.ico
        fout.close()
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        fout.close()
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    # import ipdb; ipdb.set_trace()
    output = os.path.join(link_dir, 'media')
    already_done = os.path.exists(output)  # and os.listdir(output)
    if already_done and not overwrite:
        return {'output': 'media', 'status': 'skipped'}

    os.makedirs(output, exist_ok=True)
    CMD = [
        'youtube-dl',
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--no-check-certificate',
        '--all-subs',
        '-x',
        '-k',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output, timeout=timeout + 1)  # audio/audio.mp3
        chmod_file('media', cwd=link_dir)
        output = 'media'
        end()
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                    or b'HTTP Error 404' in result.stderr
                    or b'HTTP Error 403' in result.stderr
                    or b'URL could be a direct video link' in result.stderr
                    or b'Unable to extract container ID' in result.stderr):
                # These happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                print('        got youtubedl response code {}:'.format(result.returncode))
                print(result.stderr)
                raise Exception('Failed to download media')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    if not (link['domain'] in GIT_DOMAINS
            or link['url'].endswith('.git')
            or link['type'] == 'git'):
        return

    if os.path.exists(os.path.join(link_dir, 'git')):
        return {'output': 'git', 'status': 'skipped'}

    CMD = ['git', 'clone', '--mirror', '--recursive', link['url'].split('#')[0], 'git']
    output = 'git'
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # git/<reponame>
        end()

        if result.returncode > 0:
            print('        got git response code {}:'.format(result.returncode))
            raise Exception('Failed git download')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
    args = [binary, '--headless']  # '--disable-gpu'
    if not CHROME_SANDBOX:
        args.append('--no-sandbox')
    default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome')
    if user_data_dir:
        args.append('--user-data-dir={}'.format(user_data_dir))
    elif os.path.exists(default_profile):
        args.append('--user-data-dir={}'.format(default_profile))
    return args
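
# chrome_headless() only builds the shared prefix of the command; each caller
# appends its own flags and the URL. For example, fetch_screenshot ends up
# running something roughly like this (illustrative values; the actual binary,
# profile dir, resolution, and timeout depend on CHROME_BINARY,
# CHROME_USER_DATA_DIR, RESOLUTION, and TIMEOUT in config):
#
#   chromium-browser --headless --user-data-dir=/path/to/profile \
#       --screenshot --window-size=1440,900 --hide-scrollbars \
#       --timeout=60000 'https://example.com/some/page'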