import os
import re
import sys
import time

from urllib.request import Request, urlopen
from urllib.parse import urlparse, quote
from decimal import Decimal
from datetime import datetime
from multiprocessing import Process
from subprocess import (
    Popen,
    PIPE,
    DEVNULL,
    CompletedProcess,
    TimeoutExpired,
    CalledProcessError,
)

from config import (
    ANSI,
    TERM_WIDTH,
    SOURCES_DIR,
    ARCHIVE_DIR,
    OUTPUT_PERMISSIONS,
    TIMEOUT,
    SHOW_PROGRESS,
    FETCH_TITLE,
    ARCHIVE_DIR_NAME,
    CHECK_SSL_VALIDITY,
    WGET_USER_AGENT,
    CHROME_OPTIONS,
)
from logs import pretty_path


### Parsing Helpers

# URL parsing: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url)  # unique base url used to dedupe links

short_ts = lambda ts: ts.split('.')[0]
urlencode = lambda s: quote(s, encoding='utf-8', errors='replace')
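
# For reference, a quick sketch of what these helpers return for a
# hypothetical URL (values are just what urlparse produces):
#   url = 'https://example.com/path/page.html?v=1#top'
#   scheme(url)    -> 'https'
#   domain(url)    -> 'example.com'
#   path(url)      -> '/path/page.html'
#   basename(url)  -> 'page.html'
#   extension(url) -> 'html'
#   query(url)     -> 'v=1'
#   fragment(url)  -> 'top'
#   base_url(url)  -> 'example.com/path/page.html?v=1#top'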

URL_REGEX = re.compile(
    r'http[s]?://'                    # start matching from allowed schemes
    r'(?:[a-zA-Z]|[0-9]'              # followed by allowed alphanum characters
    r'|[$-_@.&+]|[!*\(\),]'           # or allowed symbols
    r'|(?:%[0-9a-fA-F][0-9a-fA-F]))'  # or allowed percent-encoded bytes
    r'[^\]\[\(\)<>\"\'\s]+',          # stop parsing at these symbols
    re.IGNORECASE,
)
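
# A minimal sketch of the regex in action on hypothetical text:
#   re.findall(URL_REGEX, 'see https://example.com/a.html and http://example.org?q=1')
#   -> ['https://example.com/a.html', 'http://example.org?q=1']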

HTML_TITLE_REGEX = re.compile(
    r'<title.*?>'   # start matching text after the <title> tag
    r'(.[^<>]+)',   # capture everything up to the next tag symbol
    re.IGNORECASE | re.MULTILINE | re.DOTALL | re.UNICODE,
)

STATICFILE_EXTENSIONS = {
    # 99.999% of the time, URLs ending in these extensions are static files
    # that can be downloaded as-is, not html pages that need to be rendered
    'gif', 'jpeg', 'jpg', 'png', 'tif', 'tiff', 'wbmp', 'ico', 'jng', 'bmp',
    'svg', 'svgz', 'webp', 'ps', 'eps', 'ai',
    'mp3', 'mp4', 'm4a', 'mpeg', 'mpg', 'mkv', 'mov', 'webm', 'm4v', 'flv', 'wmv', 'avi', 'ogg', 'ts', 'm3u8',
    'pdf', 'txt', 'rtf', 'rtfd', 'doc', 'docx', 'ppt', 'pptx', 'xls', 'xlsx',
    'atom', 'rss', 'css', 'js', 'json',
    'dmg', 'iso', 'img',
    'rar', 'war', 'hqx', 'zip', 'gz', 'bz2', '7z',

    # Less common extensions to consider adding later
    # jar, swf, bin, com, exe, dll, deb
    # ear, hqx, eot, wmlc, kml, kmz, cco, jardiff, jnlp, run, msi, msp, msm,
    # pl, pm, prc, pdb, rar, rpm, sea, sit, tcl, tk, der, pem, crt, xpi, xspf,
    # ra, mng, asx, asf, 3gpp, 3gp, mid, midi, kar, jad, wml, htc, mml

    # These are always treated as pages, not as static files, never add them:
    # html, htm, shtml, xhtml, xml, aspx, php, cgi
}
  80. ### Checks & Tests
  81. def check_link_structure(link):
  82. """basic sanity check invariants to make sure the data is valid"""
  83. assert isinstance(link, dict)
  84. assert isinstance(link.get('url'), str)
  85. assert len(link['url']) > 2
  86. assert len(re.findall(URL_REGEX, link['url'])) == 1
  87. if 'history' in link:
  88. assert isinstance(link['history'], dict), 'history must be a Dict'
  89. for key, val in link['history'].items():
  90. assert isinstance(key, str)
  91. assert isinstance(val, list), 'history must be a Dict[str, List], got: {}'.format(link['history'])
  92. def check_links_structure(links):
  93. """basic sanity check invariants to make sure the data is valid"""
  94. assert isinstance(links, list)
  95. if links:
  96. check_link_structure(links[0])


def check_url_parsing_invariants():
    """Check that plain text regex URL parsing works as expected"""

    # this is the last line of defense to make sure the URL_REGEX isn't
    # misbehaving, as the consequences could be disastrous and lead to many
    # incorrect/badly parsed links being added to the archive
    test_urls = '''
    https://example1.com/what/is/happening.html?what=1#how-about-this=1
    https://example2.com/what/is/happening/?what=1#how-about-this=1
    HTtpS://example3.com/what/is/happening/?what=1#how-about-this=1f
    https://example4.com/what/is/happening.html
    https://example5.com/
    https://example6.com
    <test>http://example7.com</test>
    [https://example8.com/what/is/this.php?what=1]
    [and http://example9.com?what=1&other=3#and-thing=2]
    <what>https://example10.com#and-thing=2 "</about>
    abc<this["https://example11.com/what/is#and-thing=2?whoami=23&where=1"]that>def
    sdflkf[what](https://example12.com/who/what.php?whoami=1#whatami=2)?am=hi
    example13.bada
    and example14.badb
    <or>htt://example15.badc</that>
    '''
    # print('\n'.join(re.findall(URL_REGEX, test_urls)))
    assert len(re.findall(URL_REGEX, test_urls)) == 12


### Random Helpers

def save_stdin_source(raw_text):
    """save raw stdin text into output/sources/stdin-<timestamp>.txt"""
    if not os.path.exists(SOURCES_DIR):
        os.makedirs(SOURCES_DIR)

    ts = str(datetime.now().timestamp()).split('.', 1)[0]

    source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format('stdin', ts))

    with open(source_path, 'w', encoding='utf-8') as f:
        f.write(raw_text)

    return source_path


def save_remote_source(url, timeout=TIMEOUT):
    """download a given url's content into output/sources/domain-<timestamp>.txt"""
    if not os.path.exists(SOURCES_DIR):
        os.makedirs(SOURCES_DIR)

    ts = str(datetime.now().timestamp()).split('.', 1)[0]

    source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format(domain(url), ts))

    print('{}[*] [{}] Downloading {}{}'.format(
        ANSI['green'],
        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        url,
        ANSI['reset'],
    ))
    timer = TimedProgress(timeout, prefix='      ')
    try:
        downloaded_xml = download_url(url, timeout=timeout)
        timer.end()
    except Exception as e:
        timer.end()
        print('{}[!] Failed to download {}{}\n'.format(
            ANSI['red'],
            url,
            ANSI['reset'],
        ))
        print('    ', e)
        raise SystemExit(1)

    with open(source_path, 'w', encoding='utf-8') as f:
        f.write(downloaded_xml)

    print('    > {}'.format(pretty_path(source_path)))

    return source_path


def fetch_page_title(url, timeout=10, progress=SHOW_PROGRESS):
    """Attempt to guess a page's title by downloading the html"""
    if not FETCH_TITLE:
        return None

    try:
        if progress:
            sys.stdout.write('.')
            sys.stdout.flush()

        html = download_url(url, timeout=timeout)
        match = re.search(HTML_TITLE_REGEX, html)
        return match.group(1).strip() if match else None
    except Exception as err:  # noqa
        # print('[!] Failed to fetch title because of {}: {}'.format(
        #     err.__class__.__name__,
        #     err,
        # ))
        return None


def wget_output_path(link):
    """calculate the path to the wgetted .html file, since wget may
    adjust some paths to be different than the base_url path.

    See docs on wget --adjust-extension (-E)
    """
    if is_static_file(link['url']):
        return without_scheme(without_fragment(link['url']))

    # Wget downloads can save in a number of different ways depending on the url:
    #    https://example.com
    #       > output/archive/<timestamp>/example.com/index.html
    #    https://example.com?v=zzVa_tX1OiI
    #       > output/archive/<timestamp>/example.com/index.html?v=zzVa_tX1OiI.html
    #    https://www.example.com/?v=zzVa_tX1OiI
    #       > output/archive/<timestamp>/example.com/index.html?v=zzVa_tX1OiI.html
    #    https://example.com/abc
    #       > output/archive/<timestamp>/example.com/abc.html
    #    https://example.com/abc/
    #       > output/archive/<timestamp>/example.com/abc/index.html
    #    https://example.com/abc?v=zzVa_tX1OiI.html
    #       > output/archive/<timestamp>/example.com/abc?v=zzVa_tX1OiI.html
    #    https://example.com/abc/?v=zzVa_tX1OiI.html
    #       > output/archive/<timestamp>/example.com/abc/index.html?v=zzVa_tX1OiI.html
    #    https://example.com/abc/test.html
    #       > output/archive/<timestamp>/example.com/abc/test.html
    #    https://example.com/abc/test?v=zzVa_tX1OiI
    #       > output/archive/<timestamp>/example.com/abc/test?v=zzVa_tX1OiI.html
    #    https://example.com/abc/test/?v=zzVa_tX1OiI
    #       > output/archive/<timestamp>/example.com/abc/test/index.html?v=zzVa_tX1OiI.html
    # There's also lots of complexity around how the urlencoding and renaming
    # is done for pages with query and hash fragments or extensions like shtml / htm / php / etc

    # Since the wget algorithm for -E (appending .html) is incredibly complex,
    # and there's no way to get the computed output path from wget,
    # in order to avoid having to reverse-engineer how wget calculates it,
    # we just look in the output folder and read the filename wget used from the filesystem
    link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
    full_path = without_fragment(without_query(path(link['url']))).strip('/')
    search_dir = os.path.join(
        link_dir,
        domain(link['url']),
        full_path,
    )

    for _ in range(4):
        if os.path.exists(search_dir):
            if os.path.isdir(search_dir):
                html_files = [
                    f for f in os.listdir(search_dir)
                    if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M)
                ]
                if html_files:
                    path_from_link_dir = search_dir.split(link_dir)[-1].strip('/')
                    return os.path.join(path_from_link_dir, html_files[0])

        # Move up one directory level
        search_dir = search_dir.rsplit('/', 1)[0]

        if search_dir == link_dir:
            break

    return None
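
# Example (hypothetical link dict; the real result depends on what wget
# actually wrote to disk, since this function reads the filesystem):
#   wget_output_path({'url': 'https://example.com/abc/', 'timestamp': '1544077600'})
#   -> 'example.com/abc/index.html'  (if wget saved an index.html there)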


### String Manipulation & Logging Helpers

def str_between(string, start, end=None):
    """(<abc>12345</def>, <abc>, </def>)  ->  12345"""
    content = string.split(start, 1)[-1]
    if end is not None:
        content = content.rsplit(end, 1)[0]

    return content


### Link Helpers

def merge_links(a, b):
    """deterministically merge two links, favoring longer field values over shorter,
    and "cleaner" values over worse ones.
    """
    longer = lambda key: (a[key] if len(a[key]) > len(b[key]) else b[key]) if (a[key] and b[key]) else (a[key] or b[key])
    earlier = lambda key: a[key] if a[key] < b[key] else b[key]

    url = longer('url')
    longest_title = longer('title')
    cleanest_title = a['title'] if '://' not in (a['title'] or '') else b['title']
    return {
        'url': url,
        'timestamp': earlier('timestamp'),
        'title': longest_title if '://' not in (longest_title or '') else cleanest_title,
        'tags': longer('tags'),
        'sources': list(set(a.get('sources', []) + b.get('sources', []))),
    }
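
# A sketch of the merge behavior on two hypothetical link dicts:
#   a = {'url': 'https://example.com', 'timestamp': '1544077601', 'title': 'Example', 'tags': '', 'sources': ['a.txt']}
#   b = {'url': 'https://example.com', 'timestamp': '1544077600', 'title': None, 'tags': 'web', 'sources': ['b.txt']}
#   merge_links(a, b)
#   -> {'url': 'https://example.com', 'timestamp': '1544077600', 'title': 'Example',
#       'tags': 'web', 'sources': ['a.txt', 'b.txt']}   # sources order not guaranteed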


def is_static_file(url):
    """Certain URLs just point to a single static file, and
    don't need to be re-archived in many formats
    """
    # TODO: the proper way is with MIME type detection, not using extension
    return extension(url) in STATICFILE_EXTENSIONS


def derived_link_info(link):
    """extend link info with the archive urls and other derived data"""
    url = link['url']
    to_date_str = lambda ts: datetime.fromtimestamp(Decimal(ts)).strftime('%Y-%m-%d %H:%M')

    extended_info = {
        **link,
        'link_dir': '{}/{}'.format(ARCHIVE_DIR_NAME, link['timestamp']),
        'bookmarked_date': to_date_str(link['timestamp']),
        'updated_date': to_date_str(link['updated']) if 'updated' in link else None,
        'domain': domain(url),
        'path': path(url),
        'basename': basename(url),
        'extension': extension(url),
        'base_url': base_url(url),
        'is_static': is_static_file(url),
        'is_archived': os.path.exists(os.path.join(
            ARCHIVE_DIR,
            link['timestamp'],
            domain(url),
        )),
        'num_outputs': len([entry for entry in latest_output(link).values() if entry]),
    }

    # Archive Method Output URLs
    extended_info.update({
        'index_url': 'index.html',
        'favicon_url': 'favicon.ico',
        'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**extended_info),
        'archive_url': wget_output_path(link),
        'warc_url': 'warc',
        'pdf_url': 'output.pdf',
        'screenshot_url': 'screenshot.png',
        'dom_url': 'output.html',
        'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**extended_info),
        'git_url': 'git',
        'media_url': 'media',
    })

    # static binary files like PDF and images are handled slightly differently.
    # they're just downloaded once and aren't archived separately multiple times,
    # so the wget, screenshot, & pdf urls should all point to the same file
    if is_static_file(url):
        extended_info.update({
            'title': basename(url),
            'archive_url': base_url(url),
            'pdf_url': base_url(url),
            'screenshot_url': base_url(url),
            'dom_url': base_url(url),
        })

    return extended_info
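
# A sketch of a few derived fields for a hypothetical link
# (assuming ARCHIVE_DIR_NAME == 'archive'; archive-method URLs like pdf_url
# are relative to link_dir):
#   info = derived_link_info({'url': 'https://example.com/page.html', 'timestamp': '1544077600', 'title': 'Example', 'tags': '', 'sources': []})
#   info['domain']    -> 'example.com'
#   info['extension'] -> 'html'
#   info['link_dir']  -> 'archive/1544077600'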


def latest_output(link, status=None):
    """get the latest output that each archive method produced for link"""
    latest = {
        'title': None,
        'favicon': None,
        'wget': None,
        'warc': None,
        'pdf': None,
        'screenshot': None,
        'dom': None,
        'git': None,
        'media': None,
        'archive_org': None,
    }
    for archive_method in latest.keys():
        # get the most recent successful result in history for each archive method
        history = link.get('history', {}).get(archive_method) or []
        history = filter(lambda result: result['output'], reversed(history))
        if status is not None:
            history = filter(lambda result: result['status'] == status, history)

        history = list(history)
        if history:
            latest[archive_method] = history[0]['output']

    return latest


### Python / System Helpers

def run(*popenargs, input=None, capture_output=False, timeout=None, check=False, **kwargs):
    """Patched version of subprocess.run() with a fix for blocking io making timeout ineffective"""

    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = PIPE

    if capture_output:
        if ('stdout' in kwargs) or ('stderr' in kwargs):
            raise ValueError('stdout and stderr arguments may not be used '
                             'with capture_output.')
        kwargs['stdout'] = PIPE
        kwargs['stderr'] = PIPE

    with Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = process.communicate(input, timeout=timeout)
        except TimeoutExpired:
            process.kill()
            try:
                stdout, stderr = process.communicate(input, timeout=2)
            except Exception:
                pass
            raise TimeoutExpired(popenargs[0][0], timeout)
        except BaseException:
            process.kill()
            # We don't call process.wait() as .__exit__ does that for us.
            raise

        retcode = process.poll()
        if check and retcode:
            raise CalledProcessError(retcode, process.args,
                                     output=stdout, stderr=stderr)

    return CompletedProcess(process.args, retcode, stdout, stderr)
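
# Usage mirrors the stdlib subprocess.run() API, e.g. (hypothetical command):
#   result = run(['echo', 'hello'], capture_output=True, timeout=5)
#   result.returncode  -> 0
#   result.stdout      -> b'hello\n'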


def progress_bar(seconds, prefix):
    """show timer in the form of progress bar, with percentage and seconds remaining"""
    chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#'
    chunks = TERM_WIDTH - len(prefix) - 20  # number of progress chunks to show (aka max bar width)
    try:
        for s in range(seconds * chunks):
            progress = s / chunks / seconds * 100
            bar_width = round(progress/(100/chunks))

            # ████████████████████           0.9% (1/60sec)
            sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
                prefix,
                ANSI['green'],
                (chunk * bar_width).ljust(chunks),
                ANSI['reset'],
                round(progress, 1),
                round(s/chunks),
                seconds,
            ))
            sys.stdout.flush()
            time.sleep(1 / chunks)

        # ██████████████████████████████████ 100.0% (60/60sec)
        sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format(
            prefix,
            ANSI['red'],
            chunk * chunks,
            ANSI['reset'],
            100.0,
            seconds,
            seconds,
        ))
        sys.stdout.flush()
    except KeyboardInterrupt:
        print()


class TimedProgress:
    """Show a progress bar and measure elapsed time until .end() is called"""

    def __init__(self, seconds, prefix=''):
        if SHOW_PROGRESS:
            self.p = Process(target=progress_bar, args=(seconds, prefix))
            self.p.start()

        self.stats = {
            'start_ts': datetime.now(),
            'end_ts': None,
            'duration': None,
        }

    def end(self):
        """immediately end progress, clear the progressbar line, and save end_ts"""
        end_ts = datetime.now()
        self.stats.update({
            'end_ts': end_ts,
            'duration': (end_ts - self.stats['start_ts']).seconds,
        })

        if SHOW_PROGRESS:
            # protect from double termination
            if self.p is not None:
                self.p.terminate()
                self.p = None

            sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset']))  # clear whole terminal line
            sys.stdout.flush()
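
# Typical usage (a sketch; do_the_slow_thing is a hypothetical long-running call):
#   timer = TimedProgress(TIMEOUT, prefix='      ')
#   try:
#       do_the_slow_thing()
#   finally:
#       timer.end()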


def download_url(url, timeout=TIMEOUT):
    """Download the contents of a remote url and return the text"""
    req = Request(url, headers={'User-Agent': WGET_USER_AGENT})

    if CHECK_SSL_VALIDITY:
        resp = urlopen(req, timeout=timeout)
    else:
        import ssl
        insecure = ssl._create_unverified_context()
        resp = urlopen(req, timeout=timeout, context=insecure)

    encoding = resp.headers.get_content_charset() or 'utf-8'
    return resp.read().decode(encoding)
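
# Example (hypothetical URL; returns the decoded response body as text):
#   html = download_url('https://example.com', timeout=30)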


def chmod_file(path, cwd='.', permissions=OUTPUT_PERMISSIONS, timeout=30):
    """chmod -R <permissions> <cwd>/<path>"""
    if not os.path.exists(os.path.join(cwd, path)):
        raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))

    chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout)
    if chmod_result.returncode != 0:
        print('     ', chmod_result.stderr.decode())
        raise Exception('Failed to chmod {}/{}'.format(cwd, path))


def chrome_args(**options):
    """helper to build up a chrome shell command with arguments"""
    options = {**CHROME_OPTIONS, **options}

    cmd_args = [options['CHROME_BINARY']]

    if options['CHROME_HEADLESS']:
        cmd_args += ('--headless',)

    if not options['CHROME_SANDBOX']:
        # dont use GPU or sandbox when running inside docker container
        cmd_args += ('--no-sandbox', '--disable-gpu')

    if not options['CHECK_SSL_VALIDITY']:
        cmd_args += ('--disable-web-security', '--ignore-certificate-errors')

    if options['CHROME_USER_AGENT']:
        cmd_args += ('--user-agent={}'.format(options['CHROME_USER_AGENT']),)

    if options['RESOLUTION']:
        cmd_args += ('--window-size={}'.format(options['RESOLUTION']),)

    if options['TIMEOUT']:
        cmd_args += ('--timeout={}'.format(options['TIMEOUT'] * 1000),)

    if options['CHROME_USER_DATA_DIR']:
        cmd_args.append('--user-data-dir={}'.format(options['CHROME_USER_DATA_DIR']))

    return cmd_args
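
# A sketch of the resulting argv (flags vary with the CHROME_OPTIONS config):
#   chrome_args(CHROME_BINARY='chromium-browser', TIMEOUT=60)
#   -> ['chromium-browser', '--headless', ..., '--timeout=60000']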