import os
import re
import sys
import time
import json

from urllib.request import Request, urlopen
from urllib.parse import urlparse, quote
from decimal import Decimal
from datetime import datetime
from subprocess import TimeoutExpired, Popen, PIPE, DEVNULL, CompletedProcess, CalledProcessError
from multiprocessing import Process

from config import (
    ANSI,
    IS_TTY,
    TERM_WIDTH,
    REPO_DIR,
    OUTPUT_DIR,
    SOURCES_DIR,
    ARCHIVE_DIR,
    OUTPUT_PERMISSIONS,
    TIMEOUT,
    SHOW_PROGRESS,
    CHECK_SSL_VALIDITY,
    WGET_USER_AGENT,
    CURL_BINARY,
    WGET_BINARY,
    CHROME_BINARY,
    GIT_BINARY,
    YOUTUBEDL_BINARY,
    FETCH_TITLE,
    FETCH_FAVICON,
    FETCH_WGET,
    FETCH_WARC,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_GIT,
    FETCH_MEDIA,
    SUBMIT_ARCHIVE_DOT_ORG,
)

# URL helpers: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url)  # uniq base url used to dedupe links

short_ts = lambda ts: ts.split('.')[0]
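

# Illustrative sketch (not part of the original module): example outputs of the URL
# helpers above, using a hypothetical URL. Defined as an uncalled helper so it has
# no effect at import time.
def _example_url_helpers():
    url = 'https://example.com/path/page.html?q=1#top'   # hypothetical URL
    assert scheme(url) == 'https'
    assert domain(url) == 'example.com'
    assert path(url) == '/path/page.html'
    assert basename(url) == 'page.html'
    assert extension(url) == 'html'
    assert query(url) == 'q=1'
    assert fragment(url) == 'top'
    # base_url drops the scheme so http/https duplicates dedupe to the same key
    assert base_url(url) == 'example.com/path/page.html?q=1#top'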


URL_REGEX = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))[^<"]+'
HTML_TITLE_REGEX = '<title>(.[^<>]+)'


def check_dependencies():
    """Check that all necessary dependencies are installed, and have valid versions"""

    python_vers = '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
    if sys.version_info < (3, 5):
        print('{}[X] Python version is not new enough: {} (>3.5 is required){}'.format(ANSI['red'], python_vers, ANSI['reset']))
        print('    See https://github.com/pirate/ArchiveBox#troubleshooting for help upgrading your Python installation.')
        raise SystemExit(1)

    if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
        if run(['which', CURL_BINARY], stdout=DEVNULL).returncode or run([CURL_BINARY, '--version'], stdout=DEVNULL).returncode:
            print('{red}[X] Missing dependency: curl{reset}'.format(**ANSI))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(CURL_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)

    if FETCH_WGET or FETCH_WARC:
        if run(['which', WGET_BINARY], stdout=DEVNULL).returncode or run([WGET_BINARY, '--version'], stdout=DEVNULL).returncode:
            print('{red}[X] Missing dependency: wget{reset}'.format(**ANSI))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(WGET_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)

    if FETCH_PDF or FETCH_SCREENSHOT or FETCH_DOM:
        if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode:
            print('{}[X] Missing dependency: {}{}'.format(ANSI['red'], CHROME_BINARY, ANSI['reset']))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)

        # parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
        try:
            result = run([CHROME_BINARY, '--version'], stdout=PIPE)
            version_str = result.stdout.decode('utf-8')
            version_lines = re.sub(r"(Google Chrome|Chromium) (\d+?)\.(\d+?)\.(\d+?).*?$", r"\2", version_str).split('\n')
            version = [l for l in version_lines if l.isdigit()][-1]
            if int(version) < 59:
                print(version_lines)
                print('{red}[X] Chrome version must be 59 or greater for headless PDF, screenshot, and DOM saving{reset}'.format(**ANSI))
                print('    See https://github.com/pirate/ArchiveBox for help.')
                raise SystemExit(1)
        except (IndexError, TypeError, OSError):
            print('{red}[X] Failed to parse Chrome version, is it installed properly?{reset}'.format(**ANSI))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)

    if FETCH_GIT:
        if run(['which', GIT_BINARY], stdout=DEVNULL).returncode or run([GIT_BINARY, '--version'], stdout=DEVNULL).returncode:
            print('{red}[X] Missing dependency: git{reset}'.format(**ANSI))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(GIT_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)

    if FETCH_MEDIA:
        if run(['which', YOUTUBEDL_BINARY], stdout=DEVNULL).returncode or run([YOUTUBEDL_BINARY, '--version'], stdout=DEVNULL).returncode:
            print('{red}[X] Missing dependency: youtube-dl{reset}'.format(**ANSI))
            print('    Run ./setup.sh, then confirm it was installed with: {} --version'.format(YOUTUBEDL_BINARY))
            print('    See https://github.com/pirate/ArchiveBox for help.')
            raise SystemExit(1)


def chmod_file(path, cwd='.', permissions=OUTPUT_PERMISSIONS, timeout=30):
    """chmod -R <permissions> <cwd>/<path>"""

    if not os.path.exists(os.path.join(cwd, path)):
        raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))

    chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout)
    if chmod_result.returncode == 1:
        print('    ', chmod_result.stderr.decode())
        raise Exception('Failed to chmod {}/{}'.format(cwd, path))


def progress(seconds=TIMEOUT, prefix=''):
    """Show a (subprocess-controlled) progress bar with a <seconds> timeout,
    returns end() function to instantly finish the progress
    """

    if not SHOW_PROGRESS:
        return lambda: None

    def progress_bar(seconds, prefix):
        """show timer in the form of progress bar, with percentage and seconds remaining"""
        chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#'
        chunks = TERM_WIDTH - len(prefix) - 20  # number of progress chunks to show (aka max bar width)
        try:
            for s in range(seconds * chunks):
                progress = s / chunks / seconds * 100
                bar_width = round(progress/(100/chunks))

                # ████████████████████           0.9% (1/60sec)
                sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
                    prefix,
                    ANSI['green'],
                    (chunk * bar_width).ljust(chunks),
                    ANSI['reset'],
                    round(progress, 1),
                    round(s/chunks),
                    seconds,
                ))
                sys.stdout.flush()
                time.sleep(1 / chunks)

            # ██████████████████████████████████ 100.0% (60/60sec)
            sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format(
                prefix,
                ANSI['red'],
                chunk * chunks,
                ANSI['reset'],
                100.0,
                seconds,
                seconds,
            ))
            sys.stdout.flush()
        except KeyboardInterrupt:
            print()
            pass

    p = Process(target=progress_bar, args=(seconds, prefix))
    p.start()

    def end():
        """immediately finish progress and clear the progressbar line"""
        # protect from double termination
        #if p is None or not hasattr(p, 'kill'):
        #    return
        nonlocal p
        if p is not None:
            p.terminate()
            p = None

        sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset']))  # clear whole terminal line
        sys.stdout.flush()

    return end
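

# Illustrative sketch (not part of the original module): how callers typically pair
# progress() with a blocking step. The timeout and prefix values are hypothetical.
def _example_progress_usage():
    end = progress(seconds=10, prefix='    ')   # starts the bar in a child process
    try:
        time.sleep(2)                           # stand-in for a slow archive step
    finally:
        end()                                   # stop the bar and clear the line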


def pretty_path(path):
    """convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
    return path.replace(REPO_DIR + '/', '')


def save_stdin_source(raw_text):
    if not os.path.exists(SOURCES_DIR):
        os.makedirs(SOURCES_DIR)

    ts = str(datetime.now().timestamp()).split('.', 1)[0]

    source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format('stdin', ts))

    with open(source_path, 'w', encoding='utf-8') as f:
        f.write(raw_text)

    return source_path


def fetch_page_content(url, timeout=TIMEOUT):
    req = Request(url, headers={'User-Agent': WGET_USER_AGENT})

    if CHECK_SSL_VALIDITY:
        resp = urlopen(req, timeout=timeout)
    else:
        import ssl
        insecure = ssl._create_unverified_context()
        resp = urlopen(req, timeout=timeout, context=insecure)

    encoding = resp.headers.get_content_charset() or 'utf-8'
    return resp.read().decode(encoding)
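

# Illustrative sketch (not part of the original module): fetch_page_content() returns
# the decoded HTML body as a str. The URL is hypothetical and fetching it requires
# network access, so this helper is only defined, never called at import time.
def _example_fetch_page_content():
    html = fetch_page_content('https://example.com', timeout=30)
    return html[:500]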


def save_remote_source(url, timeout=TIMEOUT):
    """download a given url's content into downloads/domain.txt"""

    if not os.path.exists(SOURCES_DIR):
        os.makedirs(SOURCES_DIR)

    ts = str(datetime.now().timestamp()).split('.', 1)[0]

    source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format(domain(url), ts))

    print('{}[*] [{}] Downloading {}{}'.format(
        ANSI['green'],
        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        url,
        ANSI['reset'],
    ))
    end = progress(TIMEOUT, prefix='    ')
    try:
        downloaded_xml = fetch_page_content(url, timeout=timeout)
        end()
    except Exception as e:
        end()
        print('{}[!] Failed to download {}{}\n'.format(
            ANSI['red'],
            url,
            ANSI['reset'],
        ))
        print('    ', e)
        raise SystemExit(1)

    with open(source_path, 'w', encoding='utf-8') as f:
        f.write(downloaded_xml)

    print('    > {}'.format(pretty_path(source_path)))

    return source_path


def fetch_page_title(url, timeout=10, progress=SHOW_PROGRESS):
    """Attempt to guess a page's title by downloading the html"""
    if not FETCH_TITLE:
        return None

    try:
        if progress:
            sys.stdout.write('.')
            sys.stdout.flush()

        html = fetch_page_content(url, timeout=timeout)

        match = re.search(HTML_TITLE_REGEX, html)
        return match.group(1).strip() if match else None
    except Exception as err:
        # print('[!] Failed to fetch title because of {}: {}'.format(
        #     err.__class__.__name__,
        #     err,
        # ))
        return None


def str_between(string, start, end=None):
    """(<abc>12345</def>, <abc>, </def>)  ->  12345"""

    content = string.split(start, 1)[-1]
    if end is not None:
        content = content.rsplit(end, 1)[0]

    return content
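

# Illustrative sketch (not part of the original module): str_between() slices the text
# between two markers, matching the example in its docstring; the second call shows the
# behavior when no end marker is given.
def _example_str_between():
    assert str_between('<abc>12345</def>', '<abc>', '</def>') == '12345'
    assert str_between('prefix: value', 'prefix: ') == 'value'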


def get_link_type(link):
    """Certain types of links need to be handled specially, this figures out when that's the case"""

    if link['base_url'].endswith('.pdf'):
        return 'PDF'
    elif link['base_url'].rsplit('.', 1)[-1].lower() in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
        return 'image'
    elif 'wikipedia.org' in link['domain']:
        return 'wiki'
    elif 'youtube.com' in link['domain']:
        return 'youtube'
    elif 'soundcloud.com' in link['domain']:
        return 'soundcloud'
    elif 'youku.com' in link['domain']:
        return 'youku'
    elif 'vimeo.com' in link['domain']:
        return 'vimeo'
    return None
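

# Illustrative sketch (not part of the original module): link dicts here only carry the
# 'base_url' and 'domain' keys that get_link_type() reads; all values are hypothetical.
def _example_get_link_type():
    assert get_link_type({'base_url': 'example.com/report.pdf', 'domain': 'example.com'}) == 'PDF'
    assert get_link_type({'base_url': 'example.com/photo.png', 'domain': 'example.com'}) == 'image'
    assert get_link_type({'base_url': 'youtube.com/watch?v=abc', 'domain': 'youtube.com'}) == 'youtube'
    assert get_link_type({'base_url': 'example.com/article', 'domain': 'example.com'}) is None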


def merge_links(a, b):
    """deterministically merge two links, favoring longer field values over shorter,
    and "cleaner" values over worse ones.
    """
    longer = lambda key: (a[key] if len(a[key]) > len(b[key]) else b[key]) if (a[key] and b[key]) else (a[key] or b[key])
    earlier = lambda key: a[key] if a[key] < b[key] else b[key]

    url = longer('url')
    longest_title = longer('title')
    cleanest_title = a['title'] if '://' not in (a['title'] or '') else b['title']
    link = {
        'timestamp': earlier('timestamp'),
        'url': url,
        'domain': domain(url),
        'base_url': base_url(url),
        'tags': longer('tags'),
        'title': longest_title if '://' not in (longest_title or '') else cleanest_title,
        'sources': list(set(a.get('sources', []) + b.get('sources', []))),
    }
    link['type'] = get_link_type(link)
    return link
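

# Illustrative sketch (not part of the original module): merging two entries for the
# same page keeps the earlier timestamp, the longer non-URL title, and the union of
# sources. All values below are hypothetical.
def _example_merge_links():
    a = {'url': 'https://example.com/post', 'timestamp': '1546473120', 'title': 'https://example.com/post', 'tags': '', 'sources': ['feed.rss']}
    b = {'url': 'https://example.com/post', 'timestamp': '1546473999', 'title': 'An Example Post', 'tags': 'blog', 'sources': ['bookmarks.html']}
    merged = merge_links(a, b)
    assert merged['timestamp'] == '1546473120'
    assert merged['title'] == 'An Example Post'
    assert merged['tags'] == 'blog'
    assert set(merged['sources']) == {'feed.rss', 'bookmarks.html'}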


def find_link(folder, links):
    """for a given archive folder, find the corresponding link object in links"""
    url = parse_url(folder)
    if url:
        for link in links:
            if (link['base_url'] in url) or (url in link['url']):
                return link

    timestamp = folder.split('.')[0]
    for link in links:
        if link['timestamp'].startswith(timestamp):
            if link['domain'] in os.listdir(os.path.join(ARCHIVE_DIR, folder)):
                return link  # careful now, this isn't safe for most ppl
            if link['domain'] in parse_url(folder):
                return link
    return None


def parse_url(folder):
    """for a given archive folder, figure out what url it's for"""
    link_json = os.path.join(ARCHIVE_DIR, folder, 'index.json')
    if os.path.exists(link_json):
        with open(link_json, 'r') as f:
            try:
                link_json = f.read().strip()
                if link_json:
                    link = json.loads(link_json)
                    return link['base_url']
            except ValueError:
                print('File contains invalid JSON: {}!'.format(link_json))

    archive_org_txt = os.path.join(ARCHIVE_DIR, folder, 'archive.org.txt')
    if os.path.exists(archive_org_txt):
        with open(archive_org_txt, 'r') as f:
            original_link = f.read().strip().split('/http', 1)[-1]
            with_scheme = 'http{}'.format(original_link)
            return with_scheme

    return ''


def manually_merge_folders(source, target):
    """prompt for user input to resolve a conflict between two archive folders"""

    if not IS_TTY:
        return

    fname = lambda path: path.split('/')[-1]

    print('    {} and {} have conflicting files, which do you want to keep?'.format(fname(source), fname(target)))
    print('      - [enter]: do nothing (keep both)')
    print('      - a: prefer files from {}'.format(source))
    print('      - b: prefer files from {}'.format(target))
    print('      - q: quit and resolve the conflict manually')
    try:
        answer = input('> ').strip().lower()
    except KeyboardInterrupt:
        answer = 'q'

    assert answer in ('', 'a', 'b', 'q'), 'Invalid choice.'

    if answer == 'q':
        print('\nJust run ArchiveBox again to pick up where you left off.')
        raise SystemExit(0)
    elif answer == '':
        return

    files_in_source = set(os.listdir(source))
    files_in_target = set(os.listdir(target))
    for file in files_in_source:
        if file in files_in_target:
            to_delete = target if answer == 'a' else source
            run(['rm', '-Rf', os.path.join(to_delete, file)])
        run(['mv', os.path.join(source, file), os.path.join(target, file)])

    if not set(os.listdir(source)):
        run(['rm', '-Rf', source])


def fix_folder_path(archive_path, link_folder, link):
    """given a folder, merge it to the canonical 'correct' path for the given link object"""
    source = os.path.join(archive_path, link_folder)
    target = os.path.join(archive_path, link['timestamp'])

    url_in_folder = parse_url(source)
    if not (url_in_folder in link['base_url']
            or link['base_url'] in url_in_folder):
        raise ValueError('The link does not match the url for this folder.')

    if not os.path.exists(target):
        # target doesn't exist so nothing needs merging, simply move A to B
        run(['mv', source, target])
    else:
        # target folder exists, check for conflicting files and attempt manual merge
        files_in_source = set(os.listdir(source))
        files_in_target = set(os.listdir(target))
        conflicting_files = files_in_source & files_in_target

        if not conflicting_files:
            for file in files_in_source:
                run(['mv', os.path.join(source, file), os.path.join(target, file)])

    if os.path.exists(source):
        files_in_source = set(os.listdir(source))
        if files_in_source:
            manually_merge_folders(source, target)
        else:
            run(['rm', '-R', source])


def migrate_data():
    # migrate old folder to new OUTPUT folder
    old_dir = os.path.join(REPO_DIR, 'html')
    if os.path.exists(old_dir):
        print('[!] WARNING: Moved old output folder "html" to new location: {}'.format(OUTPUT_DIR))
        run(['mv', old_dir, OUTPUT_DIR], timeout=10)


def cleanup_archive(archive_path, links):
    """move any incorrectly named folders to their canonical locations"""

    # for each folder that exists, see if we can match it up with a known good link
    # if we can, then merge the two folders (TODO: if not, move it to lost & found)

    unmatched = []
    bad_folders = []

    if not os.path.exists(archive_path):
        return

    for folder in os.listdir(archive_path):
        try:
            files = os.listdir(os.path.join(archive_path, folder))
        except NotADirectoryError:
            continue

        if files:
            link = find_link(folder, links)
            if link is None:
                unmatched.append(folder)
                continue

            if folder != link['timestamp']:
                bad_folders.append((folder, link))
        else:
            # delete empty folders
            run(['rm', '-R', os.path.join(archive_path, folder)])

    if bad_folders and IS_TTY and input('[!] Cleanup archive? y/[n]: ') == 'y':
        print('[!] Fixing {} improperly named folders in archive...'.format(len(bad_folders)))
        for folder, link in bad_folders:
            fix_folder_path(archive_path, folder, link)
    elif bad_folders:
        print('[!] Warning! {} folders need to be merged, fix by running ArchiveBox.'.format(len(bad_folders)))

    if unmatched:
        print('[!] Warning! {} unrecognized folders in html/archive/'.format(len(unmatched)))
        print('    ' + '\n    '.join(unmatched))


def wget_output_path(link, look_in=None):
    """calculate the path to the wgetted .html file, since wget may
    adjust some paths to be different than the base_url path.

    See docs on wget --adjust-extension (-E)
    """
    # if we have it stored, always prefer the actual output path to the computed one
    if link.get('latest', {}).get('wget'):
        return link['latest']['wget']

    urlencode = lambda s: quote(s, encoding='utf-8', errors='replace')

    if link['type'] in ('PDF', 'image'):
        return urlencode(link['base_url'])

    # Since the wget algorithm for -E (appending .html) is incredibly complex,
    # instead of trying to emulate it here, we just look in the output folder
    # to see what html file wget actually created as the output
    wget_folder = link['base_url'].rsplit('/', 1)[0].split('/')
    look_in = os.path.join(ARCHIVE_DIR, link['timestamp'], *wget_folder)

    if look_in and os.path.exists(look_in):
        html_files = [
            f for f in os.listdir(look_in)
            if re.search(r".+\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M)
        ]
        if html_files:
            return urlencode(os.path.join(*wget_folder, html_files[0]))

    return None

    # If finding the actual output file didn't work, fall back to the buggy
    # implementation of the wget .html appending algorithm:
    # split_url = link['url'].split('#', 1)
    # query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else ''
    #
    # if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M):
    #     # already ends in .html
    #     return urlencode(link['base_url'])
    # else:
    #     # .html needs to be appended
    #     without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0]
    #     if without_scheme.endswith('/'):
    #         if query:
    #             return urlencode('#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]]))
    #         return urlencode('#'.join([without_scheme + 'index.html', *split_url[1:]]))
    #     else:
    #         if query:
    #             return urlencode('#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]]))
    #         elif '/' in without_scheme:
    #             return urlencode('#'.join([without_scheme + '.html', *split_url[1:]]))
    #         return urlencode(link['base_url'] + '/index.html')
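

# Illustrative sketch (not part of the original module): when a link already records the
# file wget produced (under link['latest']['wget']), wget_output_path() returns that
# stored path directly instead of searching the archive folder. All values are hypothetical.
def _example_wget_output_path():
    link = {
        'url': 'https://example.com/page',
        'base_url': 'example.com/page',
        'timestamp': '1546473120',
        'type': None,
        'latest': {'wget': 'example.com/page.html'},
    }
    assert wget_output_path(link) == 'example.com/page.html'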


def derived_link_info(link):
    """extend link info with the archive urls and other derived data"""

    url = link['url']

    link_info = {
        **link,
        'title': link['title'] or url,
        'date': datetime.fromtimestamp(Decimal(link['timestamp'])).strftime('%Y-%m-%d %H:%M'),
        'base_url': base_url(url),
        'domain': domain(url),
        'basename': basename(url),
        'path': path(url),

        # Archive Method Output URLs
        'favicon_url': 'archive/{timestamp}/favicon.ico'.format(**link),
        'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
        'files_url': 'archive/{timestamp}/index.html'.format(**link),
        'archive_url': 'archive/{}/{}'.format(link['timestamp'], wget_output_path(link) or 'index.html'),
        'warc_url': 'archive/{timestamp}/warc'.format(**link),
        'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link),
        'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link),
        'dom_link': 'archive/{timestamp}/output.html'.format(**link),
        'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**link),
        'git_url': 'archive/{timestamp}/git'.format(**link),
        'media_url': 'archive/{timestamp}/media'.format(**link),
    }

    # PDF and images are handled slightly differently:
    # wget, screenshot, & pdf urls all point to the same file
    if link['type'] in ('PDF', 'image'):
        link_info.update({
            'archive_url': 'archive/{timestamp}/{base_url}'.format(**link),
            'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link),
            'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link),
            'dom_link': 'archive/{timestamp}/{base_url}'.format(**link),
            'title': link['title'] or basename(link['url']),
        })
    return link_info


def run(*popenargs, input=None, capture_output=False, timeout=None, check=False, **kwargs):
    """Patched version of subprocess.run that fixes blocking IO, which made timeout= ineffective"""

    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = PIPE

    if capture_output:
        if ('stdout' in kwargs) or ('stderr' in kwargs):
            raise ValueError('stdout and stderr arguments may not be used '
                             'with capture_output.')
        kwargs['stdout'] = PIPE
        kwargs['stderr'] = PIPE

    with Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = process.communicate(input, timeout=timeout)
        except TimeoutExpired:
            process.kill()
            try:
                stdout, stderr = process.communicate(input, timeout=2)
            except:
                pass
            raise TimeoutExpired(popenargs[0][0], timeout)
        except BaseException:
            process.kill()
            # We don't call process.wait() as .__exit__ does that for us.
            raise
        retcode = process.poll()
        if check and retcode:
            raise CalledProcessError(retcode, process.args,
                                     output=stdout, stderr=stderr)

    return CompletedProcess(process.args, retcode, stdout, stderr)
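

# Illustrative sketch (not part of the original module): run() mirrors the subprocess.run()
# interface, so a quick command invocation looks like this. Assumes a POSIX system with
# 'echo' available on PATH.
def _example_run_usage():
    result = run(['echo', 'hello'], stdout=PIPE, timeout=5)
    assert result.returncode == 0
    assert result.stdout.decode().strip() == 'hello'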


def check_link_structure(link):
    assert isinstance(link, dict)
    assert isinstance(link.get('url'), str)
    assert len(link['url']) > 2


def check_links_structure(links):
    assert isinstance(links, list)
    if links:
        check_link_structure(links[0])