archive_methods.py

import os

from functools import wraps
from collections import defaultdict
from datetime import datetime

from index import (
    parse_json_link_index,
    write_link_index,
    update_main_index,
)
from config import (
    CURL_BINARY,
    GIT_BINARY,
    WGET_BINARY,
    YOUTUBEDL_BINARY,
    CHROME_BINARY,
    FETCH_FAVICON,
    FETCH_TITLE,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    RESOLUTION,
    CHECK_SSL_VALIDITY,
    SUBMIT_ARCHIVE_DOT_ORG,
    COOKIES_FILE,
    WGET_USER_AGENT,
    CHROME_USER_AGENT,
    CHROME_USER_DATA_DIR,
    CHROME_HEADLESS,
    CHROME_SANDBOX,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    ARCHIVE_DIR,
    GIT_DOMAINS,
    GIT_SHA,
)
from util import (
    domain,
    without_query,
    without_fragment,
    fetch_page_title,
    is_static_file,
    progress,
    chmod_file,
    pretty_path,
    print_error_hints,
    check_link_structure,
    wget_output_path,
    run, PIPE, DEVNULL,
)

_RESULTS_TOTALS = {   # globals are bad, mmkay
    'skipped': 0,
    'succeeded': 0,
    'failed': 0,
}


def load_link_index(link_dir, link):
    """check for an existing link archive in the given directory,
    and load+merge it into the given link dict
    """
    is_new = not os.path.exists(link_dir)
    if is_new:
        os.makedirs(link_dir)
    else:
        link = {
            **parse_json_link_index(link_dir),
            **link,
        }

    check_link_structure(link)
    print_link_status_line(link_dir, link, is_new)

    return link


class ArchiveError(Exception):
    def __init__(self, message, hints=None):
        super().__init__(message)
        self.hints = hints


def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    ARCHIVE_METHODS = (
        (FETCH_TITLE, fetch_title),
        (FETCH_FAVICON, fetch_favicon),
        (FETCH_WGET, fetch_wget),
        (FETCH_PDF, fetch_pdf),
        (FETCH_SCREENSHOT, fetch_screenshot),
        (FETCH_DOM, fetch_dom),
        (FETCH_GIT, fetch_git),
        (FETCH_MEDIA, fetch_media),
        (SUBMIT_ARCHIVE_DOT_ORG, archive_dot_org),
    )
    active_methods = [method for toggle, method in ARCHIVE_METHODS if toggle]

    try:
        link = load_link_index(link_dir, link)

        for archive_method in active_methods:
            archive_method(link_dir, link, overwrite=overwrite)

        write_link_index(link_dir, link)
        update_main_index(link)
    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))

    return link
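
# Illustrative usage (assumed caller, not part of this module): the main archive
# loop typically derives link_dir from the link's timestamp, e.g.
#
#   link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
#   link = archive_link(link_dir, link)
#
# so every enabled fetch_* method below writes its output into that folder.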


def print_link_status_line(link_dir, link, is_new):
    print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n    {blue}{url}{reset}'.format(
        symbol='+' if is_new else '*',
        symbol_color=ANSI['green' if is_new else 'black'],
        now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        **{**link, 'title': link['title'] or link['url']},
        **ANSI,
    ))

    print('    > {}{}'.format(pretty_path(link_dir), ' (new)' if is_new else ''))


def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize latest and history json fields on the link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, dont run the fetch function
            if link['latest'][method] and not overwrite:
                print('      √ {}'.format(method))
                result = None
            else:
                print('      > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
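
# For reference (illustrative values only): each fetch_* function below returns a
# dict like {'cmd': CMD, 'output': 'some/relative/path'} or, on failure,
# {'cmd': CMD, 'output': SomeException(...)}. The decorator above folds that into
# a history entry roughly of the form
#   {'timestamp': '1554072345', 'duration': '1234', 'status': 'succeeded',
#    'cmd': [...], 'output': 'output.pdf'}
# appended to link['history'][method], and stores the latest successful output
# under link['latest'][method].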


@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, domain(link['url']))
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    if warc:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    CMD = [
        WGET_BINARY,
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '-e', 'robots=off',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if warc else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if warc else ()),
        *(('--page-requisites',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        output = wget_output_path(link)

        output_tail = [
            line.strip()
            for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
            if line.strip()
        ]

        # parse out number of files downloaded from "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            hints = (
                'Got wget response code {}:\n'.format(result.returncode),
                *output_tail,
            )
            if b'403: Forbidden' in result.stderr:
                raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)
            if b'404: Not Found' in result.stderr:
                raise ArchiveError('404 Not Found', hints)
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise ArchiveError('500 Internal Server Error', hints)
            raise ArchiveError('Got an error from the server', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, **chrome_kwargs):
    """print PDF of site to file using chrome --headless"""

    if is_static_file(link['url']):
        return {'output': wget_output_path(link), 'status': 'skipped'}

    output = 'output.pdf'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_headless(timeout=timeout, **chrome_kwargs),
        '--print-to-pdf',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    hints = None
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to print PDF', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e, hints=hints)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, **chrome_kwargs):
    """take screenshot of site using chrome --headless"""

    if is_static_file(link['url']):
        return {'output': wget_output_path(link), 'status': 'skipped'}

    output = 'screenshot.png'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_headless(timeout=timeout, **chrome_kwargs),
        '--screenshot',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = (result.stderr or result.stdout).decode()
            raise ArchiveError('Failed to take screenshot', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, **chrome_kwargs):
    """print HTML of site to file using chrome --dump-dom"""

    if is_static_file(link['url']):
        return {'output': wget_output_path(link), 'status': 'skipped'}

    output = 'output.html'
    output_path = os.path.join(link_dir, output)
    if os.path.exists(output_path):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        *chrome_headless(timeout=timeout, **chrome_kwargs),
        '--dump-dom',
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            hints = result.stderr.decode()
            raise ArchiveError('Failed to fetch DOM', hints)
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


def parse_archive_dot_org_response(response):
    # Parse archive.org response headers
    headers = defaultdict(list)

    # lowercase all the header names and store in dict
    for header in response.splitlines():
        if b':' not in header or not header.strip():
            continue
        name, val = header.decode().split(':', 1)
        headers[name.lower().strip()].append(val.strip())

    # Get successful archive url in "content-location" header or any errors
    content_location = headers['content-location']
    errors = headers['x-archive-wayback-runtime-error']

    return content_location, errors
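
# For reference (example values, not real output): a successful `curl --head`
# response from archive.org includes a header such as
#   Content-Location: /web/20190401000000/https://example.com
# which this parser returns as (['/web/20190401000000/https://example.com'], []),
# while a blocked submission yields an x-archive-wayback-runtime-error entry instead.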


@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    output = 'archive.org.txt'
    archive_org_url = None

    path = os.path.join(link_dir, output)
    if os.path.exists(path):
        archive_org_url = open(path, 'r').read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    CMD = [
        CURL_BINARY,
        '--location',
        '--head',
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),  # be nice to the Archive.org people and show them where all this ArchiveBox traffic is coming from
        '--max-time', str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)
        end()
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = 'https://web.archive.org{}'.format(content_location[0])
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    if not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url, write the
        # url to resubmit it to archive.org. This is so when the user visits
        # the URL in person, it will attempt to re-archive it, and it'll show the
        # nicer error message explaining why the url was rejected if it fails.
        archive_org_url = archive_org_url or submit_url
        with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = archive_org_url

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    output = 'favicon.ico'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        CURL_BINARY,
        '--max-time', str(timeout),
        '--location',
        '--output', output,
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),
    ]
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        chmod_file(output, cwd=link_dir)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('title')
def fetch_title(link_dir, link, timeout=TIMEOUT):
    """try to guess the page's title from its content"""

    # if link already has valid title, skip it
    if link['title'] and not link['title'].lower().startswith('http'):
        return {'output': link['title'], 'status': 'skipped'}

    if is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    title = None
    end = progress(timeout, prefix='      ')
    try:
        title = fetch_page_title(link['url'], timeout=timeout, progress=False)
        end()
        output = title
    except Exception as e:
        end()
        output = e
        print('      {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))

    if title and title.strip():
        link['title'] = title
        output = title

    return {
        'cmd': 'fetch_page_title("{}")'.format(link['url']),
        'output': output,
    }


@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = 'media'
    output_path = os.path.join(link_dir, 'media')
    if os.path.exists(output_path) and not overwrite:
        return {'output': output, 'status': 'skipped'}

    os.makedirs(output_path, exist_ok=True)
    CMD = [
        YOUTUBEDL_BINARY,
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--all-subs',
        '--extract-audio',
        '--keep-video',
        '--ignore-errors',
        '--geo-bypass',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        chmod_file(output, cwd=link_dir)
        end()
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                or b'HTTP Error 404' in result.stderr
                or b'HTTP Error 403' in result.stderr
                or b'URL could be a direct video link' in result.stderr
                or b'Unable to extract container ID' in result.stderr):
                # These happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                hints = (
                    'got youtubedl response code {}:'.format(result.returncode),
                    *result.stderr.decode().split('\n'),
                )
                raise ArchiveError('Failed to download media', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    url_is_clonable = (
        domain(link['url']) in GIT_DOMAINS
        or link['url'].endswith('.git')
    )
    if not url_is_clonable or is_static_file(link['url']):
        return {'output': None, 'status': 'skipped'}

    output = 'git'
    output_path = os.path.join(link_dir, 'git')
    if os.path.exists(output_path):
        return {'output': output, 'status': 'skipped'}

    os.makedirs(output_path, exist_ok=True)
    CMD = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)
        end()

        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            hints = 'got git response code {}:'.format(result.returncode)
            raise ArchiveError('Failed git download', hints)
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }


def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR,
                    headless=CHROME_HEADLESS, sandbox=CHROME_SANDBOX,
                    check_ssl_validity=CHECK_SSL_VALIDITY, user_agent=CHROME_USER_AGENT,
                    resolution=RESOLUTION, timeout=TIMEOUT):
    global CACHED_USER_DATA_DIR
    user_data_dir = user_data_dir or CACHED_USER_DATA_DIR
    cmd_args = [binary]

    if headless:
        cmd_args += ('--headless',)

    if not sandbox:
        # dont use GPU or sandbox when running inside docker container
        cmd_args += ('--no-sandbox', '--disable-gpu')

    if not check_ssl_validity:
        cmd_args += ('--disable-web-security', '--ignore-certificate-errors')

    if user_agent:
        cmd_args += ('--user-agent={}'.format(user_agent),)

    if resolution:
        cmd_args += ('--window-size={}'.format(resolution),)

    if timeout:
        cmd_args += ('--timeout={}'.format(timeout * 1000),)

    # Find chrome user data directory
    default_profile_paths = (
        '~/.config/chromium',
        '~/.config/google-chrome',
        '~/.config/google-chrome-beta',
        '~/.config/google-chrome-unstable',
        '~/Library/Application Support/Chromium',
        '~/Library/Application Support/Google/Chrome',
        '~/Library/Application Support/Google/Chrome Canary',
        '~/AppData/Local/Chromium/User Data',
        '~/AppData/Local/Google/Chrome/User Data',
        '~/AppData/Local/Google/Chrome SxS/User Data',
    )
    if user_data_dir:
        cmd_args.append('--user-data-dir={}'.format(user_data_dir))
    else:
        for path in default_profile_paths:
            full_path = os.path.expanduser(path)
            if os.path.exists(full_path):
                CACHED_USER_DATA_DIR = full_path
                cmd_args.append('--user-data-dir={}'.format(full_path))
                break

    return cmd_args


CACHED_USER_DATA_DIR = CHROME_USER_DATA_DIR
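
# For reference (illustrative only, the exact flags depend on the config values above):
# chrome_headless() assembles a command roughly along the lines of
#   [CHROME_BINARY, '--headless', '--window-size=1440,900', '--timeout=60000',
#    '--user-data-dir=/home/user/.config/chromium']
# to which fetch_pdf, fetch_screenshot, and fetch_dom each append their own flag
# (--print-to-pdf, --screenshot, or --dump-dom) followed by the url.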