# archive_methods.py

import os

from functools import wraps
from collections import defaultdict
from datetime import datetime

from index import (
    parse_json_link_index,
    write_link_index,
    patch_index_title_hack,
)
from config import (
    CURL_BINARY,
    GIT_BINARY,
    WGET_BINARY,
    YOUTUBEDL_BINARY,
    CHROME_BINARY,
    FETCH_FAVICON,
    FETCH_TITLE,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    RESOLUTION,
    CHECK_SSL_VALIDITY,
    SUBMIT_ARCHIVE_DOT_ORG,
    COOKIES_FILE,
    WGET_USER_AGENT,
    CHROME_USER_DATA_DIR,
    CHROME_HEADLESS,
    CHROME_SANDBOX,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    ARCHIVE_DIR,
    GIT_DOMAINS,
    GIT_SHA,
)
from util import (
    domain,
    without_query,
    without_fragment,
    fetch_page_title,
    progress,
    chmod_file,
    pretty_path,
    print_error_hints,
    check_link_structure,
    wget_output_path,
    run, PIPE, DEVNULL,
)

_RESULTS_TOTALS = {   # globals are bad, mmkay
    'skipped': 0,
    'succeeded': 0,
    'failed': 0,
}

def load_link_index(link_dir, link):
    """check for an existing link archive in the given directory,
    and load+merge it into the given link dict
    """
    is_new = not os.path.exists(link_dir)
    if is_new:
        os.makedirs(link_dir)
    else:
        link = {
            **parse_json_link_index(link_dir),
            **link,
        }

    check_link_structure(link)
    print_link_status_line(link_dir, link, is_new)

    return link
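
# Note on merge order (illustrative): fields loaded from the on-disk index are
# spread first, then overridden by the incoming link, so e.g. a freshly-parsed
# link['title'] wins over a stale one from parse_json_link_index().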

def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    ARCHIVE_METHODS = (
        (FETCH_TITLE, fetch_title),
        (FETCH_FAVICON, fetch_favicon),
        (FETCH_WGET, fetch_wget),
        (FETCH_PDF, fetch_pdf),
        (FETCH_SCREENSHOT, fetch_screenshot),
        (FETCH_DOM, fetch_dom),
        (FETCH_GIT, fetch_git),
        (FETCH_MEDIA, fetch_media),
        (SUBMIT_ARCHIVE_DOT_ORG, archive_dot_org),
    )
    active_methods = [method for toggle, method in ARCHIVE_METHODS if toggle]

    try:
        link = load_link_index(link_dir, link)

        for archive_method in active_methods:
            archive_method(link_dir, link, overwrite=overwrite)

        write_link_index(link_dir, link)
    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))

    return link
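
# Example invocation (hypothetical values, for illustration only):
#
#   link = {'url': 'https://example.com', 'timestamp': '1544045674', 'title': None, ...}
#   link = archive_link(os.path.join(ARCHIVE_DIR, link['timestamp']), link)
#
# Each enabled method runs in order, and each records its result on the link
# via the attach_result_to_link decorator defined below.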

def print_link_status_line(link_dir, link, is_new):
    print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n    {blue}{url}{reset}'.format(
        symbol='+' if is_new else '*',
        symbol_color=ANSI['green' if is_new else 'black'],
        now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        **{**link, 'title': link['title'] or link['url']},
        **ANSI,
    ))

    print('    > {}{}'.format(pretty_path(link_dir), ' (new)' if is_new else ''))
    # if link['type']:
    #     print('      i {}'.format(link['type']))

def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize methods and history json field on link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print('      √ {}'.format(method))
                result = None
            else:
                print('      > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
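
# For illustration, after a successful run a decorated method leaves the link
# looking roughly like this (all field values hypothetical):
#
#   link['latest']['wget']  == 'example.com/index.html'
#   link['history']['wget'] == [
#       {'timestamp': '1544045674', 'status': 'succeeded', 'duration': '1630',
#        'cmd': ['wget', ...], 'output': 'example.com/index.html'},
#   ]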

@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, domain(link['url']))
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    if warc:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    CMD = [
        WGET_BINARY,
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '-e', 'robots=off',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if warc else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if warc else ()),
        *(('--page-requisites',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        output = wget_output_path(link, look_in=domain_dir)

        output_tail = [
            '          ' + line
            for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
            if line.strip()
        ]

        # parse out number of files downloaded from last line of stderr:
        #   "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            print('        Got wget response code {}:'.format(result.returncode))
            print('\n'.join(output_tail))
            if b'403: Forbidden' in result.stderr:
                raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
            if b'404: Not Found' in result.stderr:
                raise Exception('404 Not Found')
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise Exception('500 Internal Server Error')
            raise Exception('Got an error from the server')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print PDF of site to file using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return {'output': 'output.pdf', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--print-to-pdf',
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            print('     ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to print PDF')
        chmod_file('output.pdf', cwd=link_dir)
        output = 'output.pdf'
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
    """take screenshot of site using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return {'output': 'screenshot.png', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--screenshot',
        '--window-size={}'.format(resolution),
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        # '--full-page',  # TODO: make this actually work using ./bin/screenshot fullPage: true
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            print('     ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to take screenshot')
        chmod_file('screenshot.png', cwd=link_dir)
        output = 'screenshot.png'
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print HTML of site to file using chrome --dump-dom"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    output_path = os.path.join(link_dir, 'output.html')
    if os.path.exists(output_path):
        return {'output': 'output.html', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--dump-dom',
        '--timeout={}'.format(timeout * 1000),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        if result.returncode:
            print('     ', result.stderr.decode())
            raise Exception('Failed to fetch DOM')
        chmod_file('output.html', cwd=link_dir)
        output = 'output.html'
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

def parse_archive_dot_org_response(response):
    # Parse archive.org response headers
    headers = defaultdict(list)

    # lowercase all the header names and store in dict
    for header in response.splitlines():
        if b':' not in header or not header.strip():
            continue
        name, val = header.decode().split(':', 1)
        headers[name.lower().strip()].append(val.strip())

    # Get successful archive url in "content-location" header or any errors
    content_location = headers['content-location']
    errors = headers['x-archive-wayback-runtime-error']
    return content_location, errors
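
# Example (hypothetical curl --head output, abbreviated):
#
#   parse_archive_dot_org_response(
#       b'HTTP/2 200\r\n'
#       b'Content-Location: /web/20190101000000/https://example.com\r\n'
#   )
#   # -> (['/web/20190101000000/https://example.com'], [])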

@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    output = 'archive.org.txt'
    archive_org_url = None

    path = os.path.join(link_dir, output)
    if os.path.exists(path):
        archive_org_url = open(path, 'r').read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])
    CMD = [
        CURL_BINARY,
        '--location',
        '--head',
        # be nice to the Archive.org people and show them where all this
        # ArchiveBox traffic is coming from
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),
        '--max-time', str(timeout),
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)
        end()
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            archive_org_url = 'https://web.archive.org{}'.format(content_location[0])
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            archive_org_url = None
            # raise Exception('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))
        elif errors:
            raise Exception(', '.join(errors))
        else:
            raise Exception('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    if not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url, write the
        # submit url so that when the user visits it in person, it will attempt
        # to re-archive the page, and it'll show the nicer error message
        # explaining why the url was rejected if it fails again.
        archive_org_url = archive_org_url or submit_url
        with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = archive_org_url

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    output = 'favicon.ico'
    if os.path.exists(os.path.join(link_dir, output)):
        return {'output': output, 'status': 'skipped'}

    CMD = [
        CURL_BINARY,
        '--max-time', str(timeout),
        '--location',
        '--output', output,
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),
    ]
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('title')
def fetch_title(link_dir, link, timeout=TIMEOUT):
    """try to guess the page's title from its content"""

    # if link already has valid title, skip it
    if link['title'] and not link['title'].lower().startswith('http'):
        return {'output': link['title'], 'status': 'skipped'}

    title = None
    end = progress(timeout, prefix='      ')
    try:
        title = fetch_page_title(link['url'], timeout=timeout, progress=False)
        end()
        output = title
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        output = e

    # titles should show up in the global index immediately for better UX,
    # do a hacky immediate replacement to add them in as we're archiving
    # TODO: figure out how to do this without gnarly string replacement
    if title:
        link['title'] = title
        patch_index_title_hack(link['url'], title)

    return {
        'cmd': 'fetch_page_title("{}")'.format(link['url']),
        'output': output,
    }

@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = os.path.join(link_dir, 'media')
    already_done = os.path.exists(output)  # and os.listdir(output)
    if already_done and not overwrite:
        return {'output': 'media', 'status': 'skipped'}

    os.makedirs(output, exist_ok=True)
    CMD = [
        YOUTUBEDL_BINARY,
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--all-subs',
        '--extract-audio',
        '--keep-video',
        '--ignore-errors',
        '--geo-bypass',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output, timeout=timeout + 1)
        chmod_file('media', cwd=link_dir)
        output = 'media'
        end()
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                    or b'HTTP Error 404' in result.stderr
                    or b'HTTP Error 403' in result.stderr
                    or b'URL could be a direct video link' in result.stderr
                    or b'Unable to extract container ID' in result.stderr):
                # These happen too frequently on non-media pages to warrant printing to console
                pass
            else:
                print('        got youtubedl response code {}:'.format(result.returncode))
                print(result.stderr)
                raise Exception('Failed to download media')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download full site using git"""

    url_is_clonable = (
        domain(link['url']) in GIT_DOMAINS
        or link['url'].endswith('.git')
        or link['type'] == 'git'
    )
    if not url_is_clonable:
        return {'output': None, 'status': 'skipped'}

    git_dir = os.path.join(link_dir, 'git')
    if os.path.exists(git_dir):
        return {'output': 'git', 'status': 'skipped'}

    os.makedirs(git_dir, exist_ok=True)
    output = 'git'
    CMD = [
        GIT_BINARY,
        'clone',
        '--mirror',
        '--recursive',
        *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),
        without_query(without_fragment(link['url'])),
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=git_dir, timeout=timeout + 1)
        end()

        if result.returncode == 128:
            # ignore failed re-download when the folder already exists
            pass
        elif result.returncode > 0:
            print('        got git response code {}:'.format(result.returncode))
            raise Exception('Failed git download')
    except Exception as e:
        end()
        output = e
        print_error_hints(cmd=CMD, pwd=link_dir, err=e)

    return {
        'cmd': CMD,
        'output': output,
    }

def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR, headless=CHROME_HEADLESS, sandbox=CHROME_SANDBOX):
    global USER_DATA_DIR
    user_data_dir = user_data_dir or USER_DATA_DIR
    cmd_args = [binary]
    if headless:
        cmd_args += ('--headless',)
    if not sandbox:
        # don't use GPU or sandbox when running inside docker container
        cmd_args += ('--no-sandbox', '--disable-gpu')

    # Find chrome user data directory
    default_profile_paths = (
        '~/.config/chromium',
        '~/.config/google-chrome',
        '~/.config/google-chrome-beta',
        '~/.config/google-chrome-unstable',
        '~/Library/Application Support/Chromium',
        '~/Library/Application Support/Google/Chrome',
        '~/Library/Application Support/Google/Chrome Canary',
        '~/AppData/Local/Chromium/User Data',
        '~/AppData/Local/Google/Chrome/User Data',
        '~/AppData/Local/Google/Chrome SxS/User Data',
    )
    if user_data_dir:
        cmd_args.append('--user-data-dir={}'.format(user_data_dir))
    else:
        for path in default_profile_paths:
            full_path = os.path.expanduser(path)
            if os.path.exists(full_path):
                USER_DATA_DIR = full_path
                cmd_args.append('--user-data-dir={}'.format(full_path))
                break

    return cmd_args

# cached fallback, updated by chrome_headless() the first time it discovers a
# default profile directory so the filesystem search only happens once
USER_DATA_DIR = CHROME_USER_DATA_DIR
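
# Example return value (hypothetical config: CHROME_HEADLESS=True, CHROME_SANDBOX=False,
# and a chromium profile found at ~/.config/chromium):
#
#   ['/usr/bin/chromium-browser', '--headless', '--no-sandbox', '--disable-gpu',
#    '--user-data-dir=/home/user/.config/chromium']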