// scanner.odin — buffered token scanner for core:bufio
  1. package bufio
  2. import "core:bytes"
  3. import "core:io"
  4. import "core:mem"
  5. import "core:unicode/utf8"
  6. import "core:intrinsics"
// Extra errors returned by the scanning procedures, in addition to io.Error.
Scanner_Extra_Error :: enum i32 {
	Negative_Advance, // the split procedure returned a negative advance count
	Advanced_Too_Far, // the split procedure advanced past the end of the available input
	Bad_Read_Count,   // the underlying reader returned an impossible byte count
	Too_Long,         // the token does not fit within max_token_size
	Too_Short,        // the buffer could not grow and the split procedure made no progress
}
// Scanner_Error is either an io.Error from the underlying reader
// or a Scanner_Extra_Error raised by the scanning machinery itself.
Scanner_Error :: union {
	io.Error,
	Scanner_Extra_Error,
}
// Split_Proc is the signature of the split procedure used to tokenize the input.
// `data` is the currently buffered, unprocessed input; `at_eof` is true once no
// more data can be read. It returns:
//   advance     - number of input bytes consumed (must be >= 0 and <= len(data))
//   token       - the next token, or nil to request that more data be read
//   err         - a non-nil error aborts scanning
//   final_token - marks `token` as the last token; scanning stops after it
Split_Proc :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool)
// Scanner provides buffered, token-at-a-time reading from an io.Reader,
// driven by a replaceable Split_Proc (scanner_init defaults it to scan_lines).
Scanner :: struct {
	r: io.Reader,          // source of input
	split: Split_Proc,     // tokenization procedure
	buf: [dynamic]byte,    // internal buffer holding unprocessed input
	max_token_size: int,   // cap on buffer growth; values <= 0 are treated as DEFAULT_MAX_SCAN_TOKEN_SIZE
	start: int,            // index of the first unprocessed byte in buf
	end: int,              // index one past the last valid byte in buf
	token: []byte,         // most recent token returned by the split procedure
	_err: Scanner_Error,   // sticky error; non-EOF errors are never overwritten
	max_consecutive_empty_reads: int,  // values <= 0 are treated as DEFAULT_MAX_CONSECUTIVE_EMPTY_READS
	successive_empty_token_count: int, // consecutive empty tokens seen without progress
	scan_called: bool,     // scanner_scan has been called at least once
	done: bool,            // scanning finished after a final token was returned
}
// Maximum token size used when Scanner.max_token_size is unset (64 KiB).
DEFAULT_MAX_SCAN_TOKEN_SIZE :: 1<<16
// Size the internal buffer is first grown to on demand.
@(private)
_INIT_BUF_SIZE :: 4096
  38. scanner_init :: proc(s: ^Scanner, r: io.Reader, buf_allocator := context.allocator) -> ^Scanner {
  39. s.r = r
  40. s.split = scan_lines
  41. s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE
  42. s.buf.allocator = buf_allocator
  43. return s
  44. }
  45. scanner_init_with_buffer :: proc(s: ^Scanner, r: io.Reader, buf: []byte) -> ^Scanner {
  46. s.r = r
  47. s.split = scan_lines
  48. s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE
  49. s.buf = mem.buffer_from_slice(buf)
  50. resize(&s.buf, cap(s.buf))
  51. return s
  52. }
// scanner_destroy deletes the scanner's internal dynamic buffer.
// NOTE(review): if the scanner was set up with scanner_init_with_buffer, buf
// wraps a caller-provided slice — confirm the delete is safe in that case.
scanner_destroy :: proc(s: ^Scanner) {
	delete(s.buf)
}
// Returns the first non-EOF error that was encountered by the scanner.
// EOF and .None are treated as "no error" and reported as nil.
scanner_error :: proc(s: ^Scanner) -> Scanner_Error {
	switch s._err {
	case .EOF, .None:
		return nil
	}
	return s._err
}
// Returns the most recent token created by scanner_scan.
// The underlying array may point to data that may be overwritten
// by another call to scanner_scan.
// Treat the returned value as if it is immutable.
scanner_bytes :: proc(s: ^Scanner) -> []byte {
	return s.token
}
// Returns the most recent token created by scanner_scan, as a string.
// The underlying array may point to data that may be overwritten
// by another call to scanner_scan.
// Treat the returned value as if it is immutable.
scanner_text :: proc(s: ^Scanner) -> string {
	return string(s.token)
}
// scanner_scan advances the scanner to the next token, which will then be
// available through scanner_bytes/scanner_text.
// Returns true when a token was produced; returns false when scanning stops,
// either by reaching the end of the input or on an error.
scanner_scan :: proc(s: ^Scanner) -> bool {
	// Records err, but never overwrites a previously stored non-EOF error.
	// A .None value is normalized to nil first.
	set_err :: proc(s: ^Scanner, err: Scanner_Error) {
		err := err
		if err == .None {
			err = nil
		}
		switch s._err {
		case nil, .EOF:
			s._err = err
		}
	}
	if s.done {
		return false
	}
	s.scan_called = true
	for {
		// Check if a token is possible with what is available
		// Allow the split procedure to recover if it fails
		if s.start < s.end || s._err != nil {
			advance, token, err, final_token := s.split(s.buf[s.start:s.end], s._err != nil)
			if final_token {
				s.token = token
				s.done = true
				return true
			}
			if err != nil {
				set_err(s, err)
				return false
			}
			// Do advance; validate the split procedure's advance count first.
			if advance < 0 {
				set_err(s, .Negative_Advance)
				return false
			}
			if advance > s.end-s.start {
				set_err(s, .Advanced_Too_Far)
				return false
			}
			s.start += advance
			s.token = token
			if s.token != nil {
				if s._err == nil || advance > 0 {
					s.successive_empty_token_count = 0
				} else {
					// An empty token without consuming any input: guard
					// against an endless stream of them.
					s.successive_empty_token_count += 1
					if s.max_consecutive_empty_reads <= 0 {
						s.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS
					}
					if s.successive_empty_token_count > s.max_consecutive_empty_reads {
						set_err(s, .No_Progress)
						return false
					}
				}
				return true
			}
		}
		// If an error is hit, no token can be created
		if s._err != nil {
			s.start = 0
			s.end = 0
			return false
		}
		// More data must be required to be read.
		// First shift unprocessed data to the front of the buffer when the
		// buffer is full or more than half of it has already been consumed.
		if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
			copy(s.buf[:], s.buf[s.start:s.end])
			s.end -= s.start
			s.start = 0
		}
		could_be_too_short := false
		// Resize the buffer if full
		if s.end == len(s.buf) {
			if s.max_token_size <= 0 {
				s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE
			}
			if len(s.buf) >= s.max_token_size {
				set_err(s, .Too_Long)
				return false
			}
			// overflow check on doubling the buffer size
			new_size := _INIT_BUF_SIZE
			if len(s.buf) > 0 {
				overflowed: bool
				if new_size, overflowed = intrinsics.overflow_mul(len(s.buf), 2); overflowed {
					set_err(s, .Too_Long)
					return false
				}
			}
			old_size := len(s.buf)
			new_size = min(new_size, s.max_token_size)
			resize(&s.buf, new_size)
			s.end -= s.start
			s.start = 0
			// If the buffer did not actually grow, later lack of progress may
			// be because the token cannot fit, not because input ran out.
			could_be_too_short = old_size >= len(s.buf)
		}
		// Read data into the buffer, tolerating a bounded number of empty reads.
		loop := 0
		for {
			n, err := io.read(s.r, s.buf[s.end:len(s.buf)])
			if n < 0 || len(s.buf)-s.end < n {
				// The reader reported a byte count outside [0, space available].
				set_err(s, .Bad_Read_Count)
				break
			}
			s.end += n
			if err != nil {
				set_err(s, err)
				break
			}
			if n > 0 {
				s.successive_empty_token_count = 0
				break
			}
			loop += 1
			if s.max_consecutive_empty_reads <= 0 {
				s.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS
			}
			if loop > s.max_consecutive_empty_reads {
				if could_be_too_short {
					set_err(s, .Too_Short)
				} else {
					set_err(s, .No_Progress)
				}
				break
			}
		}
	}
}
  205. scan_bytes :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
  206. if at_eof && len(data) == 0 {
  207. return
  208. }
  209. return 1, data[0:1], nil, false
  210. }
  211. scan_runes :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
  212. if at_eof && len(data) == 0 {
  213. return
  214. }
  215. if data[0] < utf8.RUNE_SELF {
  216. advance = 1
  217. token = data[0:1]
  218. return
  219. }
  220. _, width := utf8.decode_rune(data)
  221. if width > 1 {
  222. advance = width
  223. token = data[0:width]
  224. return
  225. }
  226. if !at_eof && !utf8.full_rune(data) {
  227. return
  228. }
  229. @thread_local ERROR_RUNE := []byte{0xef, 0xbf, 0xbd}
  230. advance = 1
  231. token = ERROR_RUNE
  232. return
  233. }
  234. scan_words :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
  235. is_space :: proc "contextless" (r: rune) -> bool {
  236. switch r {
  237. // lower ones
  238. case ' ', '\t', '\n', '\v', '\f', '\r':
  239. return true
  240. case '\u0085', '\u00a0':
  241. return true
  242. // higher ones
  243. case '\u2000' ..= '\u200a':
  244. return true
  245. case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
  246. return true
  247. }
  248. return false
  249. }
  250. // skip spaces at the beginning
  251. start := 0
  252. for width := 0; start < len(data); start += width {
  253. r: rune
  254. r, width = utf8.decode_rune(data[start:])
  255. if !is_space(r) {
  256. break
  257. }
  258. }
  259. for width, i := 0, start; i < len(data); i += width {
  260. r: rune
  261. r, width = utf8.decode_rune(data[i:])
  262. if is_space(r) {
  263. advance = i+width
  264. token = data[start:i]
  265. return
  266. }
  267. }
  268. if at_eof && len(data) > start {
  269. advance = len(data)
  270. token = data[start:]
  271. return
  272. }
  273. advance = start
  274. return
  275. }
  276. scan_lines :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
  277. trim_carriage_return :: proc "contextless" (data: []byte) -> []byte {
  278. if len(data) > 0 && data[len(data)-1] == '\r' {
  279. return data[0:len(data)-1]
  280. }
  281. return data
  282. }
  283. if at_eof && len(data) == 0 {
  284. return
  285. }
  286. if i := bytes.index_byte(data, '\n'); i >= 0 {
  287. advance = i+1
  288. token = trim_carriage_return(data[0:i])
  289. return
  290. }
  291. if at_eof {
  292. advance = len(data)
  293. token = trim_carriage_return(data)
  294. }
  295. return
  296. }