__package__ = 'archivebox'

import re
import requests
import json as pyjson

from typing import List, Optional, Any
from pathlib import Path
from inspect import signature
from functools import wraps
from hashlib import sha256
from urllib.parse import urlparse, quote, unquote
from html import escape, unescape
from datetime import datetime, timezone
from dateparser import parse as dateparser
from requests.exceptions import RequestException, ReadTimeout

from .vendor.base32_crockford import encode as base32_encode  # type: ignore
from w3lib.encoding import html_body_declared_encoding, http_content_type_encoding

from os.path import lexists
from os import remove as remove_file

try:
    import chardet
    detect_encoding = lambda rawdata: chardet.detect(rawdata)["encoding"]
except ImportError:
    detect_encoding = lambda rawdata: "utf-8"

### Parsing Helpers

# All of these are (str) -> str
# shortcuts to: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme.lower()
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url)  # uniq base url used to dedupe links

without_www = lambda url: url.replace('://www.', '://', 1)
without_trailing_slash = lambda url: url[:-1] if url[-1] == '/' else url.replace('/?', '?')
hashurl = lambda url: base32_encode(int(sha256(base_url(url).encode('utf-8')).hexdigest(), 16))[:20]

urlencode = lambda s: s and quote(s, encoding='utf-8', errors='replace')
urldecode = lambda s: s and unquote(s)
htmlencode = lambda s: s and escape(s, quote=True)
htmldecode = lambda s: s and unescape(s)

short_ts = lambda ts: str(parse_date(ts).timestamp()).split('.')[0]
ts_to_date_str = lambda ts: ts and parse_date(ts).strftime('%Y-%m-%d %H:%M')
ts_to_iso = lambda ts: ts and parse_date(ts).isoformat()
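
# Example usage of the helpers above (illustrative values only):
#   >>> scheme('HTTPS://Example.com/a/b.PDF?q=1#frag')
#   'https'
#   >>> extension('https://example.com/report.PDF')
#   'pdf'
#   >>> base_url('https://www.example.com/page?q=1#frag')
#   'www.example.com/page?q=1#frag'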

COLOR_REGEX = re.compile(r'\[(?P<arg_1>\d+)(;(?P<arg_2>\d+)(;(?P<arg_3>\d+))?)?m')

URL_REGEX = re.compile(
    r'(?=(' +
    r'http[s]?://' +          # start matching from allowed schemes
    r'(?:[a-zA-Z]|[0-9]' +    # followed by allowed alphanum characters
    r'|[-_$@.&+!*\(\),]' +    # or allowed symbols (keep hyphen first to match literal hyphen)
    r'|[^\u0000-\u007F])+' +  # or allowed unicode bytes
    r'[^\]\[<>"\'\s]+' +      # stop parsing at these symbols
    r'))',
    re.IGNORECASE | re.UNICODE,
)

def parens_are_matched(string: str, open_char='(', close_char=')'):
    """check that all parentheses in a string are balanced and nested properly"""
    count = 0
    for c in string:
        if c == open_char:
            count += 1
        elif c == close_char:
            count -= 1
        if count < 0:
            return False
    return count == 0
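
# Example: balanced parens pass, while an extra closing paren fails:
#   >>> parens_are_matched('/a(b)c')
#   True
#   >>> parens_are_matched('/a(b)c).x(y)z')
#   False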

def fix_url_from_markdown(url_str: str) -> str:
    """
    cleanup a regex-parsed url that may contain dangling trailing parens from markdown link syntax
    helpful to fix URLs parsed from markdown e.g.
    input:  https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).somemoretext
    result: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def
    """
    trimmed_url = url_str

    # cut off one trailing character at a time
    # until parens are balanced e.g. /a(b)c).x(y)z -> /a(b)c
    while not parens_are_matched(trimmed_url):
        trimmed_url = trimmed_url[:-1]

    # make sure trimmed url is still valid
    if re.findall(URL_REGEX, trimmed_url):
        return trimmed_url

    return url_str

def find_all_urls(urls_str: str):
    for url in re.findall(URL_REGEX, urls_str):
        yield fix_url_from_markdown(url)
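
# Example: URLs pasted inside markdown link syntax come out with the dangling
# trailing paren trimmed off by fix_url_from_markdown:
#   >>> list(find_all_urls('see [docs](https://example.com/a_(b)) for more'))
#   ['https://example.com/a_(b)']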

def is_static_file(url: str):
    # TODO: the proper way is with MIME type detection + ext, not only extension
    from .config import STATICFILE_EXTENSIONS
    return extension(url).lower() in STATICFILE_EXTENSIONS
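
# Example (result depends on STATICFILE_EXTENSIONS in config):
#   is_static_file('https://example.com/report.pdf')  # True if 'pdf' is configured as static
#   is_static_file('https://example.com/about')       # False, no file extension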

def enforce_types(func):
    """
    Enforce function arg and kwarg types at runtime using its python3 type hints
    """
    # TODO: check return type as well

    @wraps(func)
    def typechecked_function(*args, **kwargs):
        sig = signature(func)

        def check_argument_type(arg_key, arg_val):
            try:
                annotation = sig.parameters[arg_key].annotation
            except KeyError:
                annotation = None

            if annotation is not None and annotation.__class__ is type:
                if not isinstance(arg_val, annotation):
                    raise TypeError(
                        '{}(..., {}: {}) got unexpected {} argument {}={}'.format(
                            func.__name__,
                            arg_key,
                            annotation.__name__,
                            type(arg_val).__name__,
                            arg_key,
                            str(arg_val)[:64],
                        )
                    )

        # check args
        for arg_val, arg_key in zip(args, sig.parameters):
            check_argument_type(arg_key, arg_val)

        # check kwargs
        for arg_key, arg_val in kwargs.items():
            check_argument_type(arg_key, arg_val)

        return func(*args, **kwargs)

    return typechecked_function
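
# Example: plain (non-generic) annotations are checked on every call:
#   @enforce_types
#   def greet(name: str) -> str:
#       return 'hello ' + name
#
#   greet('world')  # ok
#   greet(123)      # TypeError: greet(..., name: str) got unexpected int argument name=123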

def docstring(text: Optional[str]):
    """attach the given docstring to the decorated function"""
    def decorator(func):
        if text:
            func.__doc__ = text
        return func
    return decorator
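
# Example: handy when the docstring has to be built dynamically:
#   @docstring('Greets the caller.')
#   def greet(): ...
#
#   greet.__doc__  # 'Greets the caller.'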

@enforce_types
def str_between(string: str, start: str, end: str=None) -> str:
    """(<abc>12345</def>, <abc>, </def>) -> 12345"""
    content = string.split(start, 1)[-1]
    if end is not None:
        content = content.rsplit(end, 1)[0]

    return content

@enforce_types
def parse_date(date: Any) -> Optional[datetime]:
    """Parse unix timestamps, iso format, and human-readable strings"""
    if date is None:
        return None

    if isinstance(date, datetime):
        if date.tzinfo is None:
            return date.replace(tzinfo=timezone.utc)

        assert date.tzinfo.utcoffset(datetime.now()).seconds == 0, 'Refusing to load a non-UTC date!'
        return date

    if isinstance(date, (float, int)):
        date = str(date)

    if isinstance(date, str):
        return dateparser(date, settings={'TIMEZONE': 'UTC'}).replace(tzinfo=timezone.utc)

    raise ValueError('Tried to parse invalid date! {}'.format(date))
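
# Example: all of these return timezone-aware UTC datetimes (string parsing
# is delegated to the dateparser library):
#   parse_date('2023-05-01T12:00:00')
#   parse_date(1683000000)       # floats/ints are stringified and parsed as unix timestamps
#   parse_date('May 1st, 2023')  # human-readable strings work too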

@enforce_types
def download_url(url: str, timeout: int=None) -> str:
    """Download the contents of a remote url and return the text"""
    from .config import TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT
    timeout = timeout or TIMEOUT
    response = requests.get(
        url,
        headers={'User-Agent': WGET_USER_AGENT},
        verify=CHECK_SSL_VALIDITY,
        timeout=timeout,
    )

    content_type = response.headers.get('Content-Type', '')
    encoding = http_content_type_encoding(content_type) or html_body_declared_encoding(response.text)

    if encoding is not None:
        response.encoding = encoding

    return response.text
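
# Example (requires network access; TIMEOUT, CHECK_SSL_VALIDITY, and
# WGET_USER_AGENT are read from config):
#   html = download_url('https://example.com', timeout=30)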

@enforce_types
def get_headers(url: str, timeout: int=None) -> str:
    """Fetch the headers of a remote url and return them as a JSON string"""
    from .config import TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT
    timeout = timeout or TIMEOUT

    try:
        response = requests.head(
            url,
            headers={'User-Agent': WGET_USER_AGENT},
            verify=CHECK_SSL_VALIDITY,
            timeout=timeout,
            allow_redirects=True,
        )
        if response.status_code >= 400:
            raise RequestException
    except ReadTimeout:
        raise
    except RequestException:
        response = requests.get(
            url,
            headers={'User-Agent': WGET_USER_AGENT},
            verify=CHECK_SSL_VALIDITY,
            timeout=timeout,
            stream=True,
        )

    return pyjson.dumps(
        {
            'Status-Code': response.status_code,
            **dict(response.headers),
        },
        indent=4,
    )
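
# Example: falls back to a streaming GET for servers that reject HEAD
# requests (actual headers vary by server):
#   print(get_headers('https://example.com'))
#   # {
#   #     "Status-Code": 200,
#   #     "Content-Type": "text/html; charset=UTF-8",
#   #     ...
#   # }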

@enforce_types
def chrome_args(**options) -> List[str]:
    """helper to build up a chrome shell command with arguments"""
    from .config import CHROME_OPTIONS, CHROME_VERSION

    options = {**CHROME_OPTIONS, **options}

    if not options['CHROME_BINARY']:
        raise Exception('Could not find any CHROME_BINARY installed on your system')

    cmd_args = [options['CHROME_BINARY']]

    if options['CHROME_HEADLESS']:
        chrome_major_version = int(re.search(r'\s(\d+)\.\d', CHROME_VERSION)[1])
        if chrome_major_version >= 111:
            cmd_args += ("--headless=new",)
        else:
            cmd_args += ('--headless',)

    if not options['CHROME_SANDBOX']:
        # assume this means we are running inside a docker container
        # in docker, GPU support is limited, sandboxing is unnecessary,
        # and SHM is limited to 64MB by default (which is too low to be usable).
        cmd_args += (
            "--no-sandbox",
            "--no-zygote",
            "--disable-dev-shm-usage",
            "--disable-software-rasterizer",
            "--run-all-compositor-stages-before-draw",
            "--hide-scrollbars",
            "--window-size=1440,2000",
            "--autoplay-policy=no-user-gesture-required",
            "--no-first-run",
            "--use-fake-ui-for-media-stream",
            "--use-fake-device-for-media-stream",
            "--disable-sync",
        )

    if not options['CHECK_SSL_VALIDITY']:
        cmd_args += ('--disable-web-security', '--ignore-certificate-errors')

    if options['CHROME_USER_AGENT']:
        cmd_args += ('--user-agent={}'.format(options['CHROME_USER_AGENT']),)

    if options['RESOLUTION']:
        cmd_args += ('--window-size={}'.format(options['RESOLUTION']),)

    if options['CHROME_TIMEOUT']:
        cmd_args += ('--timeout={}'.format(options['CHROME_TIMEOUT'] * 1000),)

    if options['CHROME_USER_DATA_DIR']:
        cmd_args.append('--user-data-dir={}'.format(options['CHROME_USER_DATA_DIR']))

    return cmd_args
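
# Example (a sketch; defaults are merged in from CHROME_OPTIONS in config,
# and the binary path shown is illustrative):
#   chrome_args(CHROME_TIMEOUT=60, RESOLUTION='1440,2000')
#   # ['/usr/bin/chromium-browser', '--headless=new', ..., '--window-size=1440,2000', '--timeout=60000']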

def chrome_cleanup():
    """
    Cleans up any state or runtime files that chrome leaves behind when killed by
    a timeout or other error
    """
    from .config import IN_DOCKER

    if IN_DOCKER and lexists("/home/archivebox/.config/chromium/SingletonLock"):
        remove_file("/home/archivebox/.config/chromium/SingletonLock")

def ansi_to_html(text):
    """
    Based on: https://stackoverflow.com/questions/19212665/python-converting-ansi-color-codes-to-html
    """
    from .config import COLOR_DICT

    TEMPLATE = '<span style="color: rgb{}"><br>'
    text = text.replace('[m', '</span>')

    def single_sub(match):
        argsdict = match.groupdict()
        if argsdict['arg_3'] is None:
            if argsdict['arg_2'] is None:
                _, color = 0, argsdict['arg_1']
            else:
                _, color = argsdict['arg_1'], argsdict['arg_2']
        else:
            _, color = argsdict['arg_3'], argsdict['arg_2']

        return TEMPLATE.format(COLOR_DICT[color][0])

    return COLOR_REGEX.sub(single_sub, text)
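
# Example (output shape only; the actual rgb values come from COLOR_DICT in config):
#   ansi_to_html('[36mcyan text[m')
#   # '<span style="color: rgb(...)"><br>cyan text</span>'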

class AttributeDict(dict):
    """Helper to allow accessing dict values via Example.key or Example['key']"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Recursively convert nested dicts to AttributeDicts (optional):
        # for key, val in self.items():
        #     if isinstance(val, dict) and type(val) is not AttributeDict:
        #         self[key] = AttributeDict(val)

    def __getattr__(self, attr: str) -> Any:
        return dict.__getitem__(self, attr)

    def __setattr__(self, attr: str, value: Any) -> None:
        return dict.__setitem__(self, attr, value)
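
# Example: attribute access and item access are interchangeable:
#   config = AttributeDict({'TIMEOUT': 60})
#   config.TIMEOUT == config['TIMEOUT'] == 60  # True
#   config.MISSING                             # raises KeyError (not AttributeError)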

class ExtendedEncoder(pyjson.JSONEncoder):
    """
    Extended json serializer that supports serializing several model
    fields and objects
    """

    def default(self, obj):
        cls_name = obj.__class__.__name__

        if hasattr(obj, '_asdict'):
            return obj._asdict()

        elif isinstance(obj, bytes):
            return obj.decode()

        elif isinstance(obj, datetime):
            return obj.isoformat()

        elif isinstance(obj, Exception):
            return '{}: {}'.format(obj.__class__.__name__, obj)

        elif isinstance(obj, Path):
            return str(obj)

        elif cls_name in ('dict_items', 'dict_keys', 'dict_values'):
            return tuple(obj)

        return pyjson.JSONEncoder.default(self, obj)
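
# Example: serializes values the stdlib JSONEncoder would reject:
#   pyjson.dumps({
#       'ts': datetime(2023, 5, 1, tzinfo=timezone.utc),
#       'dir': Path('/data/archive'),
#       'err': ValueError('bad url'),
#   }, cls=ExtendedEncoder)
#   # '{"ts": "2023-05-01T00:00:00+00:00", "dir": "/data/archive", "err": "ValueError: bad url"}'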

### URL PARSING TESTS / ASSERTIONS

# they run at runtime because I like having them inline in this file,
# I like the peace of mind knowing it's enforced at runtime across all OS's (in case the regex engine ever has any weird locale-specific quirks),
# and these assertions are basically instant, so not a big performance cost to do it on startup

assert fix_url_from_markdown('/a(b)c).x(y)z') == '/a(b)c'
assert fix_url_from_markdown('https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).link(with)_trailingtext') == 'https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def'

URL_REGEX_TESTS = [
    ('https://example.com', ['https://example.com']),
    ('http://abc-file234example.com/abc?def=abc&23423=sdfsdf#abc=234&234=a234', ['http://abc-file234example.com/abc?def=abc&23423=sdfsdf#abc=234&234=a234']),
    ('https://twitter.com/share?url=https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ abc', ['https://twitter.com/share?url=https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ', 'https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ']),
    ('<a href="https://twitter.com/share#url=https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ"> abc', ['https://twitter.com/share#url=https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ', 'https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ']),
    ('///a', []),
    ('http://', []),
    ('http://../', ['http://../']),
    ('http://-error-.invalid/', ['http://-error-.invalid/']),
    ('https://a(b)c+1#2?3&4/', ['https://a(b)c+1#2?3&4/']),
    ('http://उदाहरण.परीक्षा', ['http://उदाहरण.परीक्षा']),
    ('http://例子.测试', ['http://例子.测试']),
    ('http://➡.ws/䨹 htps://abc.1243?234', ['http://➡.ws/䨹']),
    ('http://⌘.ws">https://exa+mple.com//:abc ', ['http://⌘.ws', 'https://exa+mple.com//:abc']),
    ('http://مثال.إختبار/abc?def=ت&ب=abc#abc=234', ['http://مثال.إختبار/abc?def=ت&ب=abc#abc=234']),
    ('http://-.~_!$&()*+,;=:%40:80%2f::::::@example.c\'om', ['http://-.~_!$&()*+,;=:%40:80%2f::::::@example.c']),
    ('http://us:pa@ex.com:42/http://ex.co:19/a?_d=4#-a=2.3', ['http://us:pa@ex.com:42/http://ex.co:19/a?_d=4#-a=2.3', 'http://ex.co:19/a?_d=4#-a=2.3']),
    ('http://code.google.com/events/#&product=browser', ['http://code.google.com/events/#&product=browser']),
    ('http://foo.bar?q=Spaces should be encoded', ['http://foo.bar?q=Spaces']),
    ('http://foo.com/blah_(wikipedia)#c(i)t[e]-1', ['http://foo.com/blah_(wikipedia)#c(i)t']),
    ('http://foo.com/(something)?after=parens', ['http://foo.com/(something)?after=parens']),
    ('http://foo.com/unicode_(✪)_in_parens) abc', ['http://foo.com/unicode_(✪)_in_parens']),
    ('http://foo.bar/?q=Test%20URL-encoded%20stuff', ['http://foo.bar/?q=Test%20URL-encoded%20stuff']),
    ('[xyz](http://a.b/?q=(Test)%20U)RL-encoded%20stuff', ['http://a.b/?q=(Test)%20U']),
    ('[xyz](http://a.b/?q=(Test)%20U)-ab https://abc+123', ['http://a.b/?q=(Test)%20U', 'https://abc+123']),
    ('[xyz](http://a.b/?q=(Test)%20U) https://a(b)c+12)3', ['http://a.b/?q=(Test)%20U', 'https://a(b)c+12']),
    ('[xyz](http://a.b/?q=(Test)a\nabchttps://a(b)c+12)3', ['http://a.b/?q=(Test)a', 'https://a(b)c+12']),
    ('http://foo.bar/?q=Test%20URL-encoded%20stuff', ['http://foo.bar/?q=Test%20URL-encoded%20stuff']),
]

for urls_str, expected_url_matches in URL_REGEX_TESTS:
    url_matches = list(find_all_urls(urls_str))
    assert url_matches == expected_url_matches, 'FAILED URL_REGEX CHECK!'