archive_methods.py

import os
import sys

from functools import wraps
from collections import defaultdict
from datetime import datetime
from subprocess import run, PIPE, DEVNULL

from peekable import Peekable

from index import wget_output_path, parse_json_link_index, write_link_index
from links import links_after_timestamp
from config import (
    CHROME_BINARY,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_GIT,
    RESOLUTION,
    CHECK_SSL_VALIDITY,
    SUBMIT_ARCHIVE_DOT_ORG,
    FETCH_AUDIO,
    FETCH_VIDEO,
    FETCH_FAVICON,
    WGET_USER_AGENT,
    CHROME_USER_DATA_DIR,
    CHROME_SANDBOX,
    TIMEOUT,
    ANSI,
    ARCHIVE_DIR,
)
from util import (
    check_dependencies,
    progress,
    chmod_file,
    pretty_path,
)


_RESULTS_TOTALS = {  # globals are bad, mmkay
    'skipped': 0,
    'succeeded': 0,
    'failed': 0,
}

def archive_links(archive_path, links, source=None, resume=None):
    check_dependencies()

    to_archive = Peekable(links_after_timestamp(links, resume))
    idx, link = 0, to_archive.peek(0)

    try:
        for idx, link in enumerate(to_archive):
            link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
            archive_link(link_dir, link)

    except (KeyboardInterrupt, SystemExit, Exception) as e:
        print('{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
            **ANSI,
            now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            idx=idx + 1,
            timestamp=link['timestamp'],
            total=len(links),
        ))
        print('    Continue where you left off by running:')
        print('        {} {}'.format(
            pretty_path(sys.argv[0]),
            link['timestamp'],
        ))
        if not isinstance(e, KeyboardInterrupt):
            raise e
        raise SystemExit(1)
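
# A hedged usage sketch (not part of the original file; values are
# illustrative): resuming an interrupted run with the timestamp that the
# pause message above prints.
#
#   links = [{'url': 'https://example.com', 'timestamp': '1544077777', ...}, ...]
#   archive_links(ARCHIVE_DIR, links, source='bookmarks.html', resume='1544077777')
#
# links_after_timestamp() yields only the links from the `resume` timestamp
# onward, so the loop picks up where the previous run stopped.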

def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    update_existing = os.path.exists(link_dir)
    if update_existing:
        link = {
            **parse_json_link_index(link_dir),
            **link,
        }
    else:
        os.makedirs(link_dir)

    log_link_archive(link_dir, link, update_existing)

    if FETCH_WGET:
        link = fetch_wget(link_dir, link, overwrite=overwrite)

    if FETCH_PDF:
        link = fetch_pdf(link_dir, link, overwrite=overwrite)

    if FETCH_SCREENSHOT:
        link = fetch_screenshot(link_dir, link, overwrite=overwrite)

    if FETCH_DOM:
        link = fetch_dom(link_dir, link, overwrite=overwrite)

    if SUBMIT_ARCHIVE_DOT_ORG:
        link = archive_dot_org(link_dir, link, overwrite=overwrite)

    # if FETCH_AUDIO:
    #     link = fetch_audio(link_dir, link, overwrite=overwrite)

    # if FETCH_VIDEO:
    #     link = fetch_video(link_dir, link, overwrite=overwrite)

    if FETCH_GIT:
        link = fetch_git(link_dir, link, overwrite=overwrite)

    if FETCH_FAVICON:
        link = fetch_favicon(link_dir, link, overwrite=overwrite)

    write_link_index(link_dir, link)

    return link
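
# Minimal example of driving archive_link directly (hypothetical link dict,
# shaped like the ones the index parsers produce):
#
#   link = {
#       'url': 'https://example.com',
#       'timestamp': '1544077777',
#       'domain': 'example.com',
#       'type': None,
#       'title': 'Example Domain',
#       'tags': '',
#   }
#   link = archive_link(os.path.join(ARCHIVE_DIR, link['timestamp']), link)
#
# Each enabled fetch_* method returns the same dict with its 'latest' and
# 'history' fields updated, which is why the calls above chain assignments.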

def log_link_archive(link_dir, link, update_existing):
    print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n    {blue}{url}{reset}'.format(
        symbol='*' if update_existing else '+',
        symbol_color=ANSI['black' if update_existing else 'green'],
        now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        **link,
        **ANSI,
    ))

    print('    > {}{}'.format(pretty_path(link_dir), '' if update_existing else ' (new)'))
    if link['type']:
        print('      i {}'.format(link['type']))

def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize latest and history json fields on the link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print('    √ {}'.format(method))
                result = None
            else:
                print('    > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
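
# After one wrapped call, the link carries records like the following
# (values are illustrative):
#
#   link['latest']['wget']      == 'example.com/index.html'
#   link['history']['wget'][-1] == {
#       'timestamp': '1544077777',
#       'status': 'succeeded',
#       'duration': '1243',      # milliseconds, as a string
#       'cmd': ['wget', ...],
#       'output': 'example.com/index.html',
#   }
#
# Note that skipped runs are counted in _RESULTS_TOTALS but, as written,
# no history entry is appended for them and 'latest' is left untouched.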

@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, link['domain'])
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    CMD = [
        # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
        *'wget -N -E -np -x -H -k -K -S --restrict-file-names=unix'.split(' '),
        *(('-p',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # index.html
        end()
        output = wget_output_path(link, look_in=domain_dir)

        # Check for common failure cases
        if result.returncode > 0:
            print('        Got wget response code {}:'.format(result.returncode))
            if result.returncode != 8:
                print('\n'.join(
                    '          ' + line
                    for line in (result.stderr or result.stdout).decode().rsplit('\n', 10)[-10:]
                    if line.strip()
                ))
            if b'403: Forbidden' in result.stderr:
                raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
            if b'404: Not Found' in result.stderr:
                raise Exception('404 Not Found')
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise Exception('500 Internal Server Error')
            if result.returncode == 4:
                raise Exception('Failed wget download')
    except Exception as e:
        end()
        print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
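
# For reference, the short wget flags above expand to:
#   -N   --timestamping          only re-download files newer than the local copies
#   -E   --adjust-extension      save HTML/CSS with matching .html/.css extensions
#   -np  --no-parent             never ascend above the starting directory
#   -x   --force-directories     mirror the site's directory structure locally
#   -H   --span-hosts            follow page requisites onto other hosts (CDNs, etc.)
#   -k   --convert-links         rewrite links in saved pages to point at local copies
#   -K   --backup-converted      keep a .orig copy of each file before converting links
#   -S   --server-response       print server headers (parsed by the error checks above)
#   -p   --page-requisites       also fetch the images/CSS/JS needed to render the page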

@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print PDF of site to file using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return {'output': 'output.pdf', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--print-to-pdf',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # output.pdf
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to print PDF')
        chmod_file('output.pdf', cwd=link_dir)
        output = 'output.pdf'
    except Exception as e:
        end()
        print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
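
# For reference, the assembled command is roughly the following (binary and
# profile flags depend on config; the URL is illustrative):
#
#   chromium-browser --headless --print-to-pdf 'https://example.com'
#
# Headless Chrome writes output.pdf into its working directory, which is why
# run() is called with cwd=link_dir here and in the other chrome-based fetchers.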

@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
    """take screenshot of site using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return {'output': 'screenshot.png', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--screenshot',
        '--window-size={}'.format(resolution),
        '--hide-scrollbars',
        # '--full-page',   # TODO: make this actually work using ./bin/screenshot fullPage: true
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # screenshot.png
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to take screenshot')
        chmod_file('screenshot.png', cwd=link_dir)
        output = 'screenshot.png'
    except Exception as e:
        end()
        print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print HTML of site to file using chrome --dump-dom"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    output_path = os.path.join(link_dir, 'output.html')
    if os.path.exists(output_path):
        return {'output': 'output.html', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--dump-dom',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # output.html
        end()
        if result.returncode:
            print('        ', result.stderr.decode())
            raise Exception('Failed to fetch DOM')
        chmod_file('output.html', cwd=link_dir)
        output = 'output.html'
    except Exception as e:
        end()
        print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    path = os.path.join(link_dir, 'archive.org.txt')
    if os.path.exists(path):
        with open(path, 'r') as f:
            archive_org_url = f.read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    success = False

    CMD = ['curl', '-L', '-I', '-X', 'GET', submit_url]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # archive.org.txt
        end()

        # Parse archive.org response headers: lowercase all the header names
        # and collect their values in a dict of lists
        headers = defaultdict(list)
        for header in result.stdout.splitlines():
            if b':' not in header or not header.strip():
                continue
            name, val = header.decode().split(':', 1)
            headers[name.lower().strip()].append(val.strip())

        # Get the successful archive url from the "content-location" header, or any errors
        content_location = headers['content-location']
        errors = headers['x-archive-wayback-runtime-error']

        if content_location:
            saved_url = 'https://web.archive.org{}'.format(content_location[0])
            success = True
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            output = submit_url
            # raise Exception('Archive.org denied by {}/robots.txt'.format(link['domain']))
        elif errors:
            raise Exception(', '.join(errors))
        else:
            raise Exception('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        print('        Visit url to see output:', ' '.join(CMD))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    if success:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(saved_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = saved_url

    return {
        'cmd': CMD,
        'output': output,
    }
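
# The curl -I -L output parsed above looks roughly like this (abridged;
# values are illustrative and the exact headers may vary):
#
#   HTTP/2 200
#   content-location: /web/20181206010203/https://example.com
#   ...
#
# which yields output == 'https://web.archive.org/web/20181206010203/https://example.com'.
# On a robots.txt denial, the response instead carries an
# x-archive-wayback-runtime-error header mentioning 'RobotAccessControlException'.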

@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return {'output': 'favicon.ico', 'status': 'skipped'}

    CMD = ['curl', 'https://www.google.com/s2/favicons?domain={domain}'.format(**link)]
    fout = open('{}/favicon.ico'.format(link_dir), 'wb')  # favicons are binary data
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # favicon.ico
        fout.close()
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        fout.close()
        end()
        print('        Run to see full output:', ' '.join(CMD))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
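
# The underlying request is equivalent to (domain is illustrative):
#
#   curl 'https://www.google.com/s2/favicons?domain=example.com' > favicon.ico
#
# Google's s2/favicons endpoint actually serves a small PNG, despite the
# .ico filename used here.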

# @attach_result_to_link('audio')
# def fetch_audio(link_dir, link, timeout=TIMEOUT):
#     """Download audio rip using youtube-dl"""
#     if link['type'] not in ('soundcloud',)\
#        and 'audio' not in link['tags']:
#         return
#
#     path = os.path.join(link_dir, 'audio')
#
#     if not os.path.exists(path) or overwrite:
#         print('    - Downloading audio')
#         CMD = [
#             "youtube-dl -x --audio-format mp3 --audio-quality 0 -o '%(title)s.%(ext)s'",
#             link['url'],
#         ]
#         end = progress(timeout, prefix='      ')
#         try:
#             result = run(CMD, stdout=DEVNULL, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # audio/audio.mp3
#             end()
#             if result.returncode:
#                 print('     ', result.stderr.decode())
#                 raise Exception('Failed to download audio')
#             chmod_file('audio.mp3', cwd=link_dir)
#             return 'audio.mp3'
#         except Exception as e:
#             end()
#             print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
#             print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
#             raise
#     else:
#         print('    √ Skipping audio download')

# @attach_result_to_link('video')
# def fetch_video(link_dir, link, timeout=TIMEOUT):
#     """Download video rip using youtube-dl"""
#     if link['type'] not in ('youtube', 'youku', 'vimeo')\
#        and 'video' not in link['tags']:
#         return
#
#     path = os.path.join(link_dir, 'video')
#
#     if not os.path.exists(path) or overwrite:
#         print('    - Downloading video')
#         CMD = [
#             "youtube-dl -x --video-format mp4 --audio-quality 0 -o '%(title)s.%(ext)s'",
#             link['url'],
#         ]
#         end = progress(timeout, prefix='      ')
#         try:
#             result = run(CMD, stdout=DEVNULL, stderr=DEVNULL, cwd=link_dir, timeout=timeout + 1)  # video/movie.mp4
#             end()
#             if result.returncode:
#                 print('     ', result.stderr.decode())
#                 raise Exception('Failed to download video')
#             chmod_file('video.mp4', cwd=link_dir)
#             return 'video.mp4'
#         except Exception as e:
#             end()
#             print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
#             print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
#             raise
#     else:
#         print('    √ Skipping video download')

@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    if not (link['domain'] == 'github.com'
            or link['url'].endswith('.git')
            or link['type'] == 'git'):
        return

    if os.path.exists(os.path.join(link_dir, 'git')):
        return {'output': 'git', 'status': 'skipped'}

    CMD = ['git', 'clone', '--recursive', link['url'], 'git']
    output = 'git'
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # git/<reponame>
        end()

        if result.returncode > 0:
            print('        Got git response code {}:'.format(result.returncode))
            raise Exception('Failed git download')
    except Exception as e:
        end()
        print('        Run to see full output:', 'cd {}; {}'.format(link_dir, ' '.join(CMD)))
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }

def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
    args = [binary, '--headless']  # '--disable-gpu'
    if not CHROME_SANDBOX:
        args.append('--no-sandbox')

    # fall back to the default macOS Chrome profile if no user_data_dir is configured
    default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome/Default')
    if user_data_dir:
        args.append('--user-data-dir={}'.format(user_data_dir))
    elif os.path.exists(default_profile):
        args.append('--user-data-dir={}'.format(default_profile))
    return args
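
# Example return value (hypothetical config: CHROME_BINARY='chromium-browser',
# CHROME_SANDBOX=False, and a custom profile dir):
#
#   chrome_headless(user_data_dir='/tmp/chrome-profile')
#   -> ['chromium-browser', '--headless', '--no-sandbox',
#       '--user-data-dir=/tmp/chrome-profile']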