// tokenizer.odin — tokenizer for package encoding_json
  1. package encoding_json
  2. import "core:unicode/utf8"
// Pos is a position within the input being tokenized.
Pos :: struct {
	offset: int, // byte offset into Tokenizer.data
	line:   int, // line number, starting at 1
	column: int, // byte offset from the start of the current line (see next_rune)
}
// Token is a single lexical element produced by get_token.
Token :: struct {
	using pos: Pos,        // position of the token's first byte
	kind:      Token_Kind, // classification of the token
	// The token's source text. Normally a slice of the input; an MJSON
	// comma inserted at a newline uses the literal "," instead.
	text:      string,
}
// Token_Kind classifies the tokens produced by get_token.
Token_Kind :: enum {
	Invalid,
	EOF,

	// Keyword-like literals.
	Null,
	False,
	True,
	Infinity, // non-JSON specifications only
	NaN,      // non-JSON specifications only

	Ident,

	// Value literals.
	Integer, // only emitted when Tokenizer.parse_integers is set
	Float,
	String,

	// Punctuation.
	Colon,
	Comma,
	Open_Brace,
	Close_Brace,
	Open_Bracket,
	Close_Bracket,
}
// Tokenizer holds the scanning state over an input string.
Tokenizer :: struct {
	using pos:        Pos,    // current reading position
	data:             string, // the input being tokenized
	r:                rune,   // current rune
	w:                int,    // current rune width in bytes
	curr_line_offset: int,    // byte offset at which the current line starts
	spec:             Specification, // which JSON dialect to accept
	parse_integers:   bool,   // emit .Integer instead of .Float for integer literals
	insert_comma:     bool,   // MJSON: treat the next newline as an implicit comma
}
  42. make_tokenizer :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false) -> Tokenizer {
  43. t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers}
  44. next_rune(&t)
  45. if t.r == utf8.RUNE_BOM {
  46. next_rune(&t)
  47. }
  48. return t
  49. }
  50. next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check {
  51. if t.offset >= len(t.data) {
  52. t.r = utf8.RUNE_EOF
  53. } else {
  54. t.offset += t.w
  55. t.r, t.w = utf8.decode_rune_in_string(t.data[t.offset:])
  56. t.pos.column = t.offset - t.curr_line_offset
  57. if t.offset >= len(t.data) {
  58. t.r = utf8.RUNE_EOF
  59. }
  60. }
  61. return t.r
  62. }
// get_token scans and returns the next token from the input.
//
// The returned token carries its position, kind, and the slice of input
// it covers. err is set for .EOF, illegal characters, malformed numbers
// or strings, and unterminated constructs.
//
// MJSON comma insertion: when t.insert_comma is set, skip_whitespace
// stops at a newline and get_token returns a synthetic .Comma token
// whose text is the literal ",".
get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
	// Consume a run of ASCII decimal digits starting at the current rune.
	skip_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			if '0' <= t.r && t.r <= '9' {
				// Okay
			} else {
				return
			}
			next_rune(t)
		}
	}
	// Consume a run of hexadecimal digits. Note this advances BEFORE
	// checking, so the rune current on entry is consumed unconditionally.
	skip_hex_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			next_rune(t)
			switch t.r {
			case '0'..='9', 'a'..='f', 'A'..='F':
				// Okay
			case:
				return
			}
		}
	}
	// Validate the escape sequence following a backslash; the backslash
	// itself has already been consumed by the caller.
	// NOTE(review): name looks like a typo of scan_escape — kept as-is
	// since it is a local identifier.
	scan_espace :: proc(t: ^Tokenizer) -> bool {
		switch t.r {
		case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
			next_rune(t)
			return true
		case 'u':
			// Expect 4 hexadecimal digits
			for i := 0; i < 4; i += 1 {
				r := next_rune(t)
				switch r {
				case '0'..='9', 'a'..='f', 'A'..='F':
					// Okay
				case:
					return false
				}
			}
			return true
		case:
			// Ignore the next rune regardless
			next_rune(t)
		}
		return false
	}
	// Skip whitespace, tracking line/column. When on_newline is true the
	// scan stops AT a newline so the caller can emit an implicit comma.
	skip_whitespace :: proc(t: ^Tokenizer, on_newline: bool) -> rune {
		loop: for t.offset < len(t.data) {
			switch t.r {
			case ' ', '\t', '\v', '\f', '\r':
				next_rune(t)
			case '\n':
				if on_newline {
					break loop
				}
				t.line += 1
				t.curr_line_offset = t.offset
				t.pos.column = 1
				next_rune(t)
			case:
				// Non-strict dialects also treat LS, PS, and a stray BOM
				// as whitespace.
				if t.spec != .JSON {
					switch t.r {
					case 0x2028, 0x2029, 0xFEFF:
						next_rune(t)
						continue loop
					}
				}
				break loop
			}
		}
		return t.r
	}
	// Consume everything up to and including the next '\n' (for // comments).
	skip_to_next_line :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			r := next_rune(t)
			if r == '\n' {
				return
			}
		}
	}
	// Consume a run of identifier characters [A-Za-z0-9_].
	skip_alphanum :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			switch t.r {
			case 'A'..='Z', 'a'..='z', '0'..='9', '_':
				next_rune(t)
				continue
			}
			return
		}
	}

	skip_whitespace(t, t.insert_comma)
	token.pos = t.pos
	token.kind = .Invalid

	curr_rune := t.r
	next_rune(t)

	block: switch curr_rune {
	case utf8.RUNE_ERROR:
		err = .Illegal_Character
	case utf8.RUNE_EOF, '\x00':
		token.kind = .EOF
		err = .EOF
	case '\n':
		// Only reachable when insert_comma stopped skip_whitespace at a
		// newline: synthesize an MJSON comma.
		t.insert_comma = false
		token.text = ","
		token.kind = .Comma
		return
	case 'A'..='Z', 'a'..='z', '_':
		token.kind = .Ident
		skip_alphanum(t)
		switch str := string(t.data[token.offset:t.offset]); str {
		case "null":  token.kind = .Null
		case "false": token.kind = .False
		case "true":  token.kind = .True
		case:
			if t.spec != .JSON {
				switch str {
				case "Infinity": token.kind = .Infinity
				case "NaN":      token.kind = .NaN
				}
			}
		}
	case '+':
		// '+' is only legal outside strict JSON; otherwise bail out of
		// the switch. On fallthrough it shares the '-' handling below.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough
	case '-':
		switch t.r {
		case '0'..='9':
			// Okay
		case:
			// Illegal use of +/-
			err = .Illegal_Character
			if t.spec != .JSON {
				// Allow -Infinity / -NaN in non-strict dialects.
				if t.r == 'I' || t.r == 'N' {
					skip_alphanum(t)
				}
				switch string(t.data[token.offset:t.offset]) {
				case "-Infinity": token.kind = .Infinity
				case "-NaN":      token.kind = .NaN
				}
			}
			break block
		}
		fallthrough
	case '0'..='9':
		token.kind = t.parse_integers ? .Integer : .Float
		if t.spec != .JSON { // Hexadecimal Numbers
			if curr_rune == '0' && (t.r == 'x' || t.r == 'X') {
				next_rune(t)
				skip_hex_digits(t)
				// Hex literals skip the is_valid_number check below.
				break
			}
		}
		skip_digits(t)
		if t.r == '.' {
			token.kind = .Float
			next_rune(t)
			skip_digits(t)
		}
		if t.r == 'e' || t.r == 'E' {
			token.kind = .Float
			switch r := next_rune(t); r {
			case '+', '-':
				next_rune(t)
			}
			skip_digits(t)
		}
		str := string(t.data[token.offset:t.offset])
		if !is_valid_number(str, t.spec) {
			err = .Invalid_Number
		}
	case '.':
		err = .Illegal_Character
		if t.spec != .JSON { // Allow leading decimal point
			skip_digits(t)
			if t.r == 'e' || t.r == 'E' {
				switch r := next_rune(t); r {
				case '+', '-':
					next_rune(t)
				}
				skip_digits(t)
			}
			str := string(t.data[token.offset:t.offset])
			if !is_valid_number(str, t.spec) {
				err = .Invalid_Number
			}
		}
	case '\'':
		// Single-quoted strings only outside strict JSON; on fallthrough
		// the '"' case handles both quote styles via `quote`.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough
	case '"':
		token.kind = .String
		quote := curr_rune
		for t.offset < len(t.data) {
			r := t.r
			if r == '\n' || r < 0 {
				err = .String_Not_Terminated
				break
			}
			next_rune(t)
			if r == quote {
				break
			}
			if r == '\\' {
				scan_espace(t)
			}
		}
		str := string(t.data[token.offset : t.offset])
		if !is_valid_string_literal(str, t.spec) {
			err = .Invalid_String
		}
	case ',':
		token.kind = .Comma
		t.insert_comma = false
	case ':': token.kind = .Colon
	case '{': token.kind = .Open_Brace
	case '}': token.kind = .Close_Brace
	case '[': token.kind = .Open_Bracket
	case ']': token.kind = .Close_Bracket
	case '=':
		// MJSON allows '=' as a key/value separator.
		if t.spec == .MJSON {
			token.kind = .Colon
		} else {
			err = .Illegal_Character
		}
	case '/':
		err = .Illegal_Character
		if t.spec != .JSON {
			switch t.r {
			case '/':
				// Single-line comments
				skip_to_next_line(t)
				return get_token(t)
			case '*':
				// None-nested multi-line comments
				for t.offset < len(t.data) {
					next_rune(t)
					if t.r == '*' {
						next_rune(t)
						if t.r == '/' {
							next_rune(t)
							return get_token(t)
						}
					}
				}
				// Comment ran off the end of the input.
				err = .EOF
			}
		}
	case: err = .Illegal_Character
	}

	token.text = string(t.data[token.offset : t.offset])

	// MJSON: decide whether the NEXT newline should become an implicit
	// comma, based on what kind of token we just produced.
	if t.spec == .MJSON {
		switch token.kind {
		case .Invalid:
			// preserve insert_comma info
		case .EOF:
			t.insert_comma = false
		case .Colon, .Comma, .Open_Brace, .Open_Bracket:
			t.insert_comma = false
		case .Null, .False, .True, .Infinity, .NaN,
		     .Ident, .Integer, .Float, .String,
		     .Close_Brace, .Close_Bracket:
			t.insert_comma = true
		}
	}
	return
}
  334. is_valid_number :: proc(str: string, spec: Specification) -> bool {
  335. s := str
  336. if s == "" {
  337. return false
  338. }
  339. if s[0] == '-' {
  340. s = s[1:]
  341. if s == "" {
  342. return false
  343. }
  344. } else if spec != .JSON {
  345. if s[0] == '+' { // Allow positive sign
  346. s = s[1:]
  347. if s == "" {
  348. return false
  349. }
  350. }
  351. }
  352. if spec != .JSON && len(s) >= 2 {
  353. // Allow for hexadecimal strings
  354. if s[:2] == "0x" || s[:2] == "0X" {
  355. s = s[2:]
  356. if len(s) == 0 {
  357. return false
  358. }
  359. hexadecimal_loop: for len(s) > 0 {
  360. switch s[0] {
  361. case '0'..='9', 'A'..='Z', 'a'..='z':
  362. s = s[1:]
  363. case:
  364. break hexadecimal_loop
  365. }
  366. }
  367. return len(s) == 0
  368. }
  369. }
  370. switch s[0] {
  371. case '0':
  372. s = s[1:]
  373. case '1'..='9':
  374. s = s[1:]
  375. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  376. s = s[1:]
  377. }
  378. case '.':
  379. if spec != .JSON { // Allow leading decimal point
  380. s = s[1:]
  381. } else {
  382. return false
  383. }
  384. case:
  385. return false
  386. }
  387. if spec != .JSON {
  388. if len(s) == 1 && s[0] == '.' { // Allow trailing decimal point
  389. return true
  390. }
  391. }
  392. if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
  393. s = s[2:]
  394. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  395. s = s[1:]
  396. }
  397. }
  398. if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
  399. s = s[1:]
  400. switch s[0] {
  401. case '+', '-':
  402. s = s[1:]
  403. if s == "" {
  404. return false
  405. }
  406. }
  407. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  408. s = s[1:]
  409. }
  410. }
  411. // The string should be empty now to be valid
  412. return s == ""
  413. }
  414. is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
  415. s := str
  416. if len(s) < 2 {
  417. return false
  418. }
  419. quote := s[0]
  420. if s[0] != s[len(s)-1] {
  421. return false
  422. }
  423. switch quote {
  424. case '"':
  425. // okay
  426. case '\'':
  427. if spec == .JSON {
  428. return false
  429. }
  430. // okay
  431. case:
  432. return false
  433. }
  434. s = s[1 : len(s)-1]
  435. i := 0
  436. for i < len(s) {
  437. c := s[i]
  438. switch {
  439. case c == '\\':
  440. i += 1
  441. if i >= len(s) {
  442. return false
  443. }
  444. switch s[i] {
  445. case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
  446. i += 1
  447. case '\r':
  448. if spec != .JSON && i+1 < len(s) && s[i+1] == '\n' {
  449. i += 2
  450. } else {
  451. return false
  452. }
  453. case '\n':
  454. if spec != .JSON {
  455. i += 1
  456. } else {
  457. return false
  458. }
  459. case 'u':
  460. if i >= len(s) {
  461. return false
  462. }
  463. hex := s[i+1:]
  464. if len(hex) < 4 {
  465. return false
  466. }
  467. hex = hex[:4]
  468. i += 5
  469. for j := 0; j < 4; j += 1 {
  470. c2 := hex[j]
  471. switch c2 {
  472. case '0'..='9', 'a'..='z', 'A'..='Z':
  473. // Okay
  474. case:
  475. return false
  476. }
  477. }
  478. case: return false
  479. }
  480. case c == quote, c < ' ':
  481. return false
  482. case c < utf8.RUNE_SELF:
  483. i += 1
  484. case:
  485. r, width := utf8.decode_rune_in_string(s[i:])
  486. if r == utf8.RUNE_ERROR && width == 1 {
  487. return false
  488. }
  489. i += width
  490. }
  491. }
  492. if i == len(s) {
  493. return true
  494. }
  495. return true
  496. }