# Copyright 2019 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
r"""A drop-in replacement for the re module.

It uses RE2 under the hood, of course, so various PCRE features
(e.g. backreferences, look-around assertions) are not supported.

See https://github.com/google/re2/wiki/Syntax for the canonical
reference, but known syntactic "gotchas" relative to Python are:

  * PCRE supports \Z and \z; RE2 supports \z; Python supports \z,
    but calls it \Z. You must rewrite \Z to \z in pattern strings.

Known differences between this module's API and the re module's API:

  * The error class does not provide any error information as attributes.
  * The Options class replaces the re module's flags with RE2's options as
    gettable/settable properties. Please see re2.h for their documentation.
  * The pattern string and the input string do not have to be the same type.
    Any str will be encoded to UTF-8.
  * The pattern string cannot be str if the options specify Latin-1 encoding.

This module's LRU cache contains a maximum of 128 regular expression objects.
Each regular expression object's underlying RE2 object uses a maximum of 8MiB
of memory (by default). Hence, this module's LRU cache uses a maximum of 1GiB
of memory (by default), but in most cases, it should use much less than that.
"""

import codecs
import functools
import itertools

import _re2


# pybind11 translates C++ exceptions to Python exceptions.
# We use that same Python exception class for consistency.
error = _re2.Error


class Options(_re2.RE2.Options):

  __slots__ = ()

  NAMES = (
      'max_mem',
      'encoding',
      'posix_syntax',
      'longest_match',
      'log_errors',
      'literal',
      'never_nl',
      'dot_nl',
      'never_capture',
      'case_sensitive',
      'perl_classes',
      'word_boundary',
      'one_line',
  )
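

# A sketch of passing options to compile(); the attribute names are the NAMES
# entries above, documented in re2.h (the values here are illustrative only):
#
#   opts = Options()
#   opts.case_sensitive = False
#   opts.max_mem = 16 << 20  # bytes
#   regexp = compile('hello, world', options=opts)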


def compile(pattern, options=None):
  if isinstance(pattern, _Regexp):
    if options:
      raise error('pattern is already compiled, so '
                  'options may not be specified')
    pattern = pattern._pattern
  options = options or Options()
  values = tuple(getattr(options, name) for name in Options.NAMES)
  return _Regexp._make(pattern, values)


def search(pattern, text, options=None):
  return compile(pattern, options=options).search(text)


def match(pattern, text, options=None):
  return compile(pattern, options=options).match(text)


def fullmatch(pattern, text, options=None):
  return compile(pattern, options=options).fullmatch(text)


def finditer(pattern, text, options=None):
  return compile(pattern, options=options).finditer(text)


def findall(pattern, text, options=None):
  return compile(pattern, options=options).findall(text)


def split(pattern, text, maxsplit=0, options=None):
  return compile(pattern, options=options).split(text, maxsplit)


def subn(pattern, repl, text, count=0, options=None):
  return compile(pattern, options=options).subn(repl, text, count)


def sub(pattern, repl, text, count=0, options=None):
  return compile(pattern, options=options).sub(repl, text, count)


def _encode(t):
  return t.encode(encoding='utf-8')


def _decode(b):
  return b.decode(encoding='utf-8')


def escape(pattern):
  if isinstance(pattern, str):
    encoded_pattern = _encode(pattern)
    escaped = _re2.RE2.QuoteMeta(encoded_pattern)
    decoded_escaped = _decode(escaped)
    return decoded_escaped
  else:
    escaped = _re2.RE2.QuoteMeta(pattern)
    return escaped


def purge():
  return _Regexp._make.cache_clear()


_Anchor = _re2.RE2.Anchor
_NULL_SPAN = (-1, -1)


class _Regexp(object):

  __slots__ = ('_pattern', '_regexp')

  @classmethod
  @functools.lru_cache(typed=True)
  def _make(cls, pattern, values):
    options = Options()
    for name, value in zip(Options.NAMES, values):
      setattr(options, name, value)
    return cls(pattern, options)

  def __init__(self, pattern, options):
    self._pattern = pattern
    if isinstance(self._pattern, str):
      if options.encoding == Options.Encoding.LATIN1:
        raise error('string type of pattern is str, but '
                    'encoding specified in options is LATIN1')
      encoded_pattern = _encode(self._pattern)
      self._regexp = _re2.RE2(encoded_pattern, options)
    else:
      self._regexp = _re2.RE2(self._pattern, options)
    if not self._regexp.ok():
      raise error(self._regexp.error())

  def __getstate__(self):
    options = {name: getattr(self.options, name) for name in Options.NAMES}
    return self._pattern, options

  def __setstate__(self, state):
    pattern, options = state
    values = tuple(options[name] for name in Options.NAMES)
    other = _Regexp._make(pattern, values)
    self._pattern = other._pattern
    self._regexp = other._regexp

  def _match(self, anchor, text, pos=None, endpos=None):
    pos = 0 if pos is None else max(0, min(pos, len(text)))
    endpos = len(text) if endpos is None else max(0, min(endpos, len(text)))
    if pos > endpos:
      return
    if isinstance(text, str):
      encoded_text = _encode(text)
      encoded_pos = _re2.CharLenToBytes(encoded_text, 0, pos)
      if endpos == len(text):
        # This is the common case.
        encoded_endpos = len(encoded_text)
      else:
        encoded_endpos = encoded_pos + _re2.CharLenToBytes(
            encoded_text, encoded_pos, endpos - pos)
      decoded_offsets = {0: 0}
      last_offset = 0
      while True:
        spans = self._regexp.Match(anchor, encoded_text, encoded_pos,
                                   encoded_endpos)
        if spans[0] == _NULL_SPAN:
          break

        # This algorithm is linear in the length of encoded_text. Specifically,
        # no matter how many groups there are for a given regular expression or
        # how many iterations through the loop there are for a given generator,
        # this algorithm uses a single, straightforward pass over encoded_text.
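        # For example, with text == 'αβc', encoded_text has five bytes ('α'
        # and 'β' take two bytes each in UTF-8), so the byte span (2, 5)
        # covering 'βc' decodes to the str span (1, 3).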
        offsets = sorted(set(itertools.chain(*spans)))
        if offsets[0] == -1:
          offsets = offsets[1:]
        # Discard the rest of the items because they are useless now - and we
        # could accumulate one item per str offset in the pathological case!
        decoded_offsets = {last_offset: decoded_offsets[last_offset]}
        for offset in offsets:
          decoded_offsets[offset] = (
              decoded_offsets[last_offset] +
              _re2.BytesToCharLen(encoded_text, last_offset, offset))
          last_offset = offset

        def decode(span):
          if span == _NULL_SPAN:
            return span
          return decoded_offsets[span[0]], decoded_offsets[span[1]]

        decoded_spans = [decode(span) for span in spans]
        yield _Match(self, text, pos, endpos, decoded_spans)
        if encoded_pos == encoded_endpos:
          break
        elif encoded_pos == spans[0][1]:
          # We matched the empty string at encoded_pos and would be stuck, so
          # in order to make forward progress, increment the str offset.
          encoded_pos += _re2.CharLenToBytes(encoded_text, encoded_pos, 1)
        else:
          encoded_pos = spans[0][1]
    else:
      while True:
        spans = self._regexp.Match(anchor, text, pos, endpos)
        if spans[0] == _NULL_SPAN:
          break
        yield _Match(self, text, pos, endpos, spans)
        if pos == endpos:
          break
        elif pos == spans[0][1]:
          # We matched the empty string at pos and would be stuck, so in order
          # to make forward progress, increment the bytes offset.
          pos += 1
        else:
          pos = spans[0][1]

  def search(self, text, pos=None, endpos=None):
    return next(self._match(_Anchor.UNANCHORED, text, pos, endpos), None)

  def match(self, text, pos=None, endpos=None):
    return next(self._match(_Anchor.ANCHOR_START, text, pos, endpos), None)

  def fullmatch(self, text, pos=None, endpos=None):
    return next(self._match(_Anchor.ANCHOR_BOTH, text, pos, endpos), None)

  def finditer(self, text, pos=None, endpos=None):
    return self._match(_Anchor.UNANCHORED, text, pos, endpos)

  def findall(self, text, pos=None, endpos=None):
    empty = type(text)()
    items = []
    for match in self.finditer(text, pos, endpos):
      if not self.groups:
        item = match.group()
      elif self.groups == 1:
        item = match.groups(default=empty)[0]
      else:
        item = match.groups(default=empty)
      items.append(item)
    return items

  def _split(self, cb, text, maxsplit=0):
    if maxsplit < 0:
      return [text], 0
    elif maxsplit > 0:
      matchiter = itertools.islice(self.finditer(text), maxsplit)
    else:
      matchiter = self.finditer(text)
    pieces = []
    end = 0
    numsplit = 0
    for match in matchiter:
      pieces.append(text[end:match.start()])
      pieces.extend(cb(match))
      end = match.end()
      numsplit += 1
    pieces.append(text[end:])
    return pieces, numsplit

  def split(self, text, maxsplit=0):
    cb = lambda match: [match[group] for group in range(1, self.groups + 1)]
    pieces, _ = self._split(cb, text, maxsplit)
    return pieces

  def subn(self, repl, text, count=0):
    cb = lambda match: [repl(match) if callable(repl) else match.expand(repl)]
    empty = type(text)()
    pieces, numsplit = self._split(cb, text, count)
    joined_pieces = empty.join(pieces)
    return joined_pieces, numsplit

  def sub(self, repl, text, count=0):
    joined_pieces, _ = self.subn(repl, text, count)
    return joined_pieces

  @property
  def pattern(self):
    return self._pattern

  @property
  def options(self):
    return self._regexp.options()

  @property
  def groups(self):
    return self._regexp.NumberOfCapturingGroups()

  @property
  def groupindex(self):
    groups = self._regexp.NamedCapturingGroups()
    if isinstance(self._pattern, str):
      decoded_groups = [(_decode(group), index) for group, index in groups]
      return dict(decoded_groups)
    else:
      return dict(groups)

  @property
  def programsize(self):
    return self._regexp.ProgramSize()

  @property
  def reverseprogramsize(self):
    return self._regexp.ReverseProgramSize()

  @property
  def programfanout(self):
    return self._regexp.ProgramFanout()

  @property
  def reverseprogramfanout(self):
    return self._regexp.ReverseProgramFanout()

  def possiblematchrange(self, maxlen):
    ok, min, max = self._regexp.PossibleMatchRange(maxlen)
    if not ok:
      raise error('failed to compute match range')
    return min, max


class _Match(object):

  __slots__ = ('_regexp', '_text', '_pos', '_endpos', '_spans')

  def __init__(self, regexp, text, pos, endpos, spans):
    self._regexp = regexp
    self._text = text
    self._pos = pos
    self._endpos = endpos
    self._spans = spans

  # Python prioritises three-digit octal numbers over group escapes.
  # For example, \100 should not be handled the same way as \g<10>0.
  _OCTAL_RE = compile('\\\\[0-7][0-7][0-7]')

  # Python supports \1 through \99 (inclusive) and \g<...> syntax.
  _GROUP_RE = compile('\\\\[1-9][0-9]?|\\\\g<\\w+>')
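
  # For example, the template piece r'\100' matches _OCTAL_RE, so it is left
  # in place and later unescaped to '@' (chr(0o100)), whereas in r'\g<10>0',
  # _GROUP_RE matches r'\g<10>': a reference to group 10 followed by a
  # literal '0'.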

  @classmethod
  @functools.lru_cache(typed=True)
  def _split(cls, template):
    if isinstance(template, str):
      backslash = '\\'
    else:
      backslash = b'\\'
    empty = type(template)()
    pieces = [empty]
    index = template.find(backslash)
    while index != -1:
      piece, template = template[:index], template[index:]
      pieces[-1] += piece
      octal_match = cls._OCTAL_RE.match(template)
      group_match = cls._GROUP_RE.match(template)
      if (not octal_match) and group_match:
        index = group_match.end()
        piece, template = template[:index], template[index:]
        pieces.extend((piece, empty))
      else:
        # 2 isn't enough for \o, \x, \N, \u and \U escapes, but none of those
        # should contain backslashes, so break them here and then fix them at
        # the beginning of the next loop iteration or right before returning.
        index = 2
        piece, template = template[:index], template[index:]
        pieces[-1] += piece
      index = template.find(backslash)
    pieces[-1] += template
    return pieces

  def expand(self, template):
    if isinstance(template, str):
      unescape = codecs.unicode_escape_decode
    else:
      unescape = codecs.escape_decode
    empty = type(template)()
    # Make a copy so that we don't clobber the cached pieces!
    pieces = list(self._split(template))
    for index, piece in enumerate(pieces):
      if not index % 2:
        pieces[index], _ = unescape(piece)
      else:
        if len(piece) <= 3:  # \1 through \99 (inclusive)
          group = int(piece[1:])
        else:  # \g<...>
          group = piece[3:-1]
          try:
            group = int(group)
          except ValueError:
            pass
        pieces[index] = self.__getitem__(group) or empty
    joined_pieces = empty.join(pieces)
    return joined_pieces

  def __getitem__(self, group):
    if not isinstance(group, int):
      try:
        group = self._regexp.groupindex[group]
      except KeyError:
        raise IndexError('bad group name')
    if not 0 <= group <= self._regexp.groups:
      raise IndexError('bad group index')
    span = self._spans[group]
    if span == _NULL_SPAN:
      return None
    return self._text[span[0]:span[1]]

  def group(self, *groups):
    if not groups:
      groups = (0,)
    items = (self.__getitem__(group) for group in groups)
    return next(items) if len(groups) == 1 else tuple(items)

  def groups(self, default=None):
    items = []
    for group in range(1, self._regexp.groups + 1):
      item = self.__getitem__(group)
      items.append(default if item is None else item)
    return tuple(items)

  def groupdict(self, default=None):
    items = []
    for group, index in self._regexp.groupindex.items():
      item = self.__getitem__(index)
      items.append((group, default) if item is None else (group, item))
    return dict(items)

  def start(self, group=0):
    if not 0 <= group <= self._regexp.groups:
      raise IndexError('bad group index')
    return self._spans[group][0]

  def end(self, group=0):
    if not 0 <= group <= self._regexp.groups:
      raise IndexError('bad group index')
    return self._spans[group][1]

  def span(self, group=0):
    if not 0 <= group <= self._regexp.groups:
      raise IndexError('bad group index')
    return self._spans[group]

  @property
  def re(self):
    return self._regexp

  @property
  def string(self):
    return self._text

  @property
  def pos(self):
    return self._pos

  @property
  def endpos(self):
    return self._endpos

  @property
  def lastindex(self):
    max_end = -1
    max_group = None
    # We look for the rightmost right parenthesis by keeping the first group
    # that ends at max_end because that is the leftmost/outermost group when
    # there are nested groups!
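    # For example, matching '(a(b))' against 'ab', both groups end at offset
    # 2, but group 1 was seen first, so lastindex is 1 rather than 2.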
    for group in range(1, self._regexp.groups + 1):
      end = self._spans[group][1]
      if max_end < end:
        max_end = end
        max_group = group
    return max_group

  @property
  def lastgroup(self):
    max_group = self.lastindex
    if not max_group:
      return None
    for group, index in self._regexp.groupindex.items():
      if max_group == index:
        return group
    return None


class Set(object):
  """A Pythonic wrapper around RE2::Set."""

  __slots__ = ('_set')

  def __init__(self, anchor, options=None):
    options = options or Options()
    self._set = _re2.Set(anchor, options)

  @classmethod
  def SearchSet(cls, options=None):
    return cls(_Anchor.UNANCHORED, options=options)

  @classmethod
  def MatchSet(cls, options=None):
    return cls(_Anchor.ANCHOR_START, options=options)

  @classmethod
  def FullMatchSet(cls, options=None):
    return cls(_Anchor.ANCHOR_BOTH, options=options)

  def Add(self, pattern):
    if isinstance(pattern, str):
      encoded_pattern = _encode(pattern)
      index = self._set.Add(encoded_pattern)
    else:
      index = self._set.Add(pattern)
    if index == -1:
      raise error('failed to add %r to Set' % pattern)
    return index

  def Compile(self):
    if not self._set.Compile():
      raise error('failed to compile Set')

  def Match(self, text):
    if isinstance(text, str):
      encoded_text = _encode(text)
      matches = self._set.Match(encoded_text)
    else:
      matches = self._set.Match(text)
    return matches or None
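

# A usage sketch for Set (illustrative only): add patterns, compile, then
# match to learn which of the patterns matched somewhere in the text.
#
#   s = Set.SearchSet()
#   s.Add('foo')  # index 0
#   s.Add('bar')  # index 1
#   s.Compile()
#   s.Match('barfly')  # [1]: only the 'bar' pattern matched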


class Filter(object):
  """A Pythonic wrapper around FilteredRE2."""

  __slots__ = ('_filter', '_patterns')

  def __init__(self):
    self._filter = _re2.Filter()
    self._patterns = []

  def Add(self, pattern, options=None):
    options = options or Options()
    if isinstance(pattern, str):
      encoded_pattern = _encode(pattern)
      index = self._filter.Add(encoded_pattern, options)
    else:
      index = self._filter.Add(pattern, options)
    if index == -1:
      raise error('failed to add %r to Filter' % pattern)
    self._patterns.append(pattern)
    return index

  def Compile(self):
    if not self._filter.Compile():
      raise error('failed to compile Filter')

  def Match(self, text, potential=False):
    if isinstance(text, str):
      encoded_text = _encode(text)
      matches = self._filter.Match(encoded_text, potential)
    else:
      matches = self._filter.Match(text, potential)
    return matches or None

  def re(self, index):
    if not 0 <= index < len(self._patterns):
      raise IndexError('bad index')
    proxy = object.__new__(_Regexp)
    proxy._pattern = self._patterns[index]
    proxy._regexp = self._filter.GetRE2(index)
    return proxy
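

# A usage sketch for Filter (illustrative only): similar to Set, but backed by
# FilteredRE2, so Match() can also report merely potential matches when
# potential=True, and re(index) recovers a _Regexp proxy for an added pattern.
#
#   f = Filter()
#   f.Add('abc')  # index 0
#   f.Add('def')  # index 1
#   f.Compile()
#   f.Match('abcdef')  # both patterns match: [0, 1]
#   f.re(0).pattern    # 'abc'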