
# coding: utf-8

"""
Everything related to parsing links from bookmark services.

For a list of supported services, see the README.md.
For examples of supported files see examples/.

Parsed link schema: {
    'url': 'https://example.com/example/?abc=123&xyc=345#lmnop',
    'domain': 'example.com',
    'base_url': 'example.com/example/',
    'timestamp': '15442123124234',
    'tags': 'abc,def',
    'title': 'Example.com Page Title',
    'sources': ['ril_export.html', 'downloads/getpocket.com.txt'],
}
"""
import re
import json
import xml.etree.ElementTree as etree

from collections import OrderedDict
from datetime import datetime

from util import (
    domain,
    base_url,
    str_between,
    get_link_type,
    fetch_page_title,
    URL_REGEX,
)


def get_parsers(file):
    """return all parsers that might work on a given file; defaults to all of them"""
    return OrderedDict([
        ('pocket', parse_pocket_export),
        ('pinboard', parse_json_export),
        ('bookmarks', parse_bookmarks_export),
        ('rss', parse_rss_export),
        ('pinboard_rss', parse_pinboard_rss_feed),
        ('medium_rss', parse_medium_rss_feed),
        ('plain_text', parse_plain_text),
    ])


def parse_links(path):
    """parse a list of link dictionaries from a bookmark export file"""
    links = []
    with open(path, 'r', encoding='utf-8') as file:
        for parser_func in get_parsers(file).values():
            # try each parser in turn until one succeeds and yields links
            try:
                links += list(parser_func(file))
                if links:
                    break
            except (ValueError, TypeError, IndexError, AttributeError, etree.ParseError):
                # this parser does not understand the file; try the next one
                pass
    return links
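
# Example usage (a minimal sketch; 'bookmarks_export.html' is a hypothetical
# path, and each parser above is tried in order until one yields links):
#
#     links = parse_links('bookmarks_export.html')
#     for link in links:
#         print(link['timestamp'], link['url'])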


def parse_pocket_export(html_file):
    """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)"""
    html_file.seek(0)
    pattern = re.compile(r'^\s*<li><a href="(.+)" time_added="(\d+)" tags="(.*)">(.+)</a></li>', re.UNICODE)
    for line in html_file:
        # example line:
        # <li><a href="http://example.com/" time_added="1478739709" tags="tag1,tag2">example title</a></li>
        match = pattern.search(line)
        if match:
            # remove the old readability proxy prefix to recover the original url
            fixed_url = match.group(1).replace('http://www.readability.com/read?url=', '')
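            # e.g. a hypothetical 'http://www.readability.com/read?url=https://example.com/post'
            # becomes 'https://example.com/post'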
            time = datetime.fromtimestamp(float(match.group(2)))
            info = {
                'url': fixed_url,
                'domain': domain(fixed_url),
                'base_url': base_url(fixed_url),
                'timestamp': str(time.timestamp()),
                'tags': match.group(3),
                'title': match.group(4).replace(' — Readability', '').replace('http://www.readability.com/read?url=', '') or fetch_page_title(fixed_url),
                'sources': [html_file.name],
            }
            info['type'] = get_link_type(info)
            yield info


def parse_json_export(json_file):
    """Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)"""
    json_file.seek(0)
    json_content = json.load(json_file)
    for record in json_content:
        # example record:
        # {"href":"http:\/\/www.reddit.com\/r\/example","description":"title here","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:42Z","shared":"no","toread":"no","tags":"reddit android"}
        if not record:
            continue
        if record.get('timestamp'):
            # chrome/ff histories use a very high-precision timestamp
            timestamp = str(record['timestamp'] / 10000000)
        elif record.get('time'):
            timestamp = str(datetime.strptime(record['time'].split(',', 1)[0], '%Y-%m-%dT%H:%M:%SZ').timestamp())
        elif record.get('created_at'):
            timestamp = str(datetime.strptime(record['created_at'], '%Y-%m-%dT%H:%M:%S%z').timestamp())
        else:
            timestamp = str(datetime.now().timestamp())
        url = record.get('href') or record['url']
        if record.get('description'):
            title = record['description'].replace(' — Readability', '')
        else:
            title = record['title'].strip()
        info = {
            'url': url,
            'domain': domain(url),
            'base_url': base_url(url),
            'timestamp': timestamp,
            'tags': record.get('tags') or '',
            'title': title or fetch_page_title(url),
            'sources': [json_file.name],
        }
        info['type'] = get_link_type(info)
        yield info
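
# A minimal sketch of the timestamp branch above, using a hypothetical
# Pinboard-style record (not taken from a real export):
#
#     record = {'href': 'https://example.com', 'description': 'Example',
#               'time': '2014-06-14T15:51:42Z', 'tags': 'demo'}
#     ts = str(datetime.strptime(record['time'], '%Y-%m-%dT%H:%M:%SZ').timestamp())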


def parse_rss_export(rss_file):
    """Parse RSS XML-format files into links"""
    rss_file.seek(0)
    items = rss_file.read().split('</item>\n<item>')
    for item in items:
        # example item:
        # <item>
        # <title><![CDATA[How JavaScript works: inside the V8 engine]]></title>
        # <category>Unread</category>
        # <link>https://blog.sessionstack.com/how-javascript-works-inside</link>
        # <guid>https://blog.sessionstack.com/how-javascript-works-inside</guid>
        # <pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>
        # </item>
        trailing_removed = item.split('</item>', 1)[0]
        leading_removed = trailing_removed.split('<item>', 1)[-1]
        rows = leading_removed.split('\n')

        def get_row(key):
            return [r for r in rows if r.startswith('<{}>'.format(key))][0]

        title = str_between(get_row('title'), '<![CDATA[', ']]').strip()
        url = str_between(get_row('link'), '<link>', '</link>')
        ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')
        time = datetime.strptime(ts_str, '%a, %d %b %Y %H:%M:%S %z')
        info = {
            'url': url,
            'domain': domain(url),
            'base_url': base_url(url),
            'timestamp': str(time.timestamp()),
            'tags': '',
            'title': title or fetch_page_title(url),
            'sources': [rss_file.name],
        }
        info['type'] = get_link_type(info)
        yield info
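
# A minimal sketch of the row extraction above, assuming str_between returns
# the text between its two delimiters:
#
#     row = '<pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>'
#     ts_str = str_between(row, '<pubDate>', '</pubDate>')
#     time = datetime.strptime(ts_str, '%a, %d %b %Y %H:%M:%S %z')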


def parse_bookmarks_export(html_file):
    """Parse netscape-format bookmarks export files (produced by all major browsers)"""
    html_file.seek(0)
    pattern = re.compile(r'<a href="(.+?)" add_date="(\d+)"[^>]*>(.+)</a>', re.UNICODE | re.IGNORECASE)
    for line in html_file:
        # example line:
        # <DT><A HREF="https://example.com/?q=1+2" ADD_DATE="1497562974" LAST_MODIFIED="1497562974" ICON_URI="https://example.com/favicon.ico" ICON="data:image/png;base64,...">example bookmark title</A>
        match = pattern.search(line)
        if match:
            url = match.group(1)
            time = datetime.fromtimestamp(float(match.group(2)))
            info = {
                'url': url,
                'domain': domain(url),
                'base_url': base_url(url),
                'timestamp': str(time.timestamp()),
                'tags': '',
                'title': match.group(3).strip() or fetch_page_title(url),
                'sources': [html_file.name],
            }
            info['type'] = get_link_type(info)
            yield info
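
# A minimal sketch of the pattern on a hypothetical bookmark line (the HTML
# attributes match case-insensitively thanks to re.IGNORECASE):
#
#     line = '<DT><A HREF="https://example.com/?q=1+2" ADD_DATE="1497562974">example bookmark title</A>'
#     m = re.search(r'<a href="(.+?)" add_date="(\d+)"[^>]*>(.+)</a>', line, re.UNICODE | re.IGNORECASE)
#     assert m.group(2) == '1497562974'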


def parse_pinboard_rss_feed(rss_file):
    """Parse Pinboard RSS feed files into links"""
    rss_file.seek(0)
    root = etree.parse(rss_file).getroot()
    items = root.findall("{http://purl.org/rss/1.0/}item")
    for item in items:
        url = item.find("{http://purl.org/rss/1.0/}link").text
        tags = item.find("{http://purl.org/dc/elements/1.1/}subject").text
        title = item.find("{http://purl.org/rss/1.0/}title").text.strip()
        ts_str = item.find("{http://purl.org/dc/elements/1.1/}date").text
        # Pinboard includes a colon in its date stamp timezone offsets, which
        # Python can't parse. Remove it:
        if ts_str[-3:-2] == ':':
            ts_str = ts_str[:-3] + ts_str[-2:]
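        # e.g. a hypothetical Pinboard date '2014-06-14T15:51:42+02:00'
        # becomes '2014-06-14T15:51:42+0200' before strptime sees it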
        time = datetime.strptime(ts_str, '%Y-%m-%dT%H:%M:%S%z')
        info = {
            'url': url,
            'domain': domain(url),
            'base_url': base_url(url),
            'timestamp': str(time.timestamp()),
            'tags': tags,
            'title': title or fetch_page_title(url),
            'sources': [rss_file.name],
        }
        info['type'] = get_link_type(info)
        yield info


def parse_medium_rss_feed(rss_file):
    """Parse Medium RSS feed files into links"""
    rss_file.seek(0)
    root = etree.parse(rss_file).getroot()
    items = root.find("channel").findall("item")
    for item in items:
        url = item.find("link").text
        title = item.find("title").text.strip()
        ts_str = item.find("pubDate").text
        time = datetime.strptime(ts_str, '%a, %d %b %Y %H:%M:%S %Z')
        info = {
            'url': url,
            'domain': domain(url),
            'base_url': base_url(url),
            'timestamp': str(time.timestamp()),
            'tags': '',
            'title': title or fetch_page_title(url),
            'sources': [rss_file.name],
        }
        info['type'] = get_link_type(info)
        yield info


def parse_plain_text(text_file):
    """Parse raw links from each line in a text file"""
    text_file.seek(0)
    text_content = text_file.readlines()
    for line in text_content:
        if line:
            urls = re.findall(URL_REGEX, line)
            for url in urls:
                # a plain text list carries no title or timestamp, so fall
                # back to fetching the title and stamping the current time
                info = {
                    'url': url,
                    'domain': domain(url),
                    'base_url': base_url(url),
                    'timestamp': str(datetime.now().timestamp()),
                    'tags': '',
                    'title': fetch_page_title(url),
                    'sources': [text_file.name],
                }
                info['type'] = get_link_type(info)
                yield info
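

# A minimal command-line sketch (an assumption, not part of the original
# module): parse the file passed as the first argument and print each link.
if __name__ == '__main__':
    import sys

    for found_link in parse_links(sys.argv[1]):
        print(found_link['timestamp'], found_link['url'])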