// tokenizer.odin
  1. package encoding_json
  2. import "core:unicode/utf8"
// Pos is a cursor position within the input data.
Pos :: struct {
	offset: int, // byte offset from the start of the data
	line:   int, // line number, starting at 1 (see make_tokenizer)
	column: int, // byte column within the current line (offset - curr_line_offset)
}
// Token is a single lexical element produced by get_token.
Token :: struct {
	using pos: Pos,   // position of the token's first byte in the input
	kind: Token_Kind, // classification of the token
	text: string,     // slice of the input covering the token; a literal "," for commas inserted at newlines (MJSON)
}
// Token_Kind enumerates every lexical element the tokenizer can produce.
Token_Kind :: enum {
	Invalid, // unrecognized or erroneous input

	EOF, // end of input

	// Keyword-like literals
	Null,
	False,
	True,
	Infinity, // extension; only produced when spec != .JSON
	NaN,      // extension; only produced when spec != .JSON

	Ident, // bare identifier that is not one of the keywords above

	// Value literals
	Integer, // integral number; only produced when parse_integers is set
	Float,
	String,

	// Punctuation
	Colon, // ':' (also '=' under MJSON)
	Comma,
	Open_Brace,
	Close_Brace,
	Open_Bracket,
	Close_Bracket,
}
// Tokenizer holds the scanning state over a single string of input data.
Tokenizer :: struct {
	using pos: Pos,        // current reading position (offset/line/column)
	data: string,          // the full input being tokenized
	r: rune,               // current rune
	w: int,                // current rune width in bytes
	curr_line_offset: int, // byte offset at which the current line begins
	spec: Specification,   // which JSON dialect to accept (.JSON is the strict subset)
	parse_integers: bool,  // when set, integral literals are emitted as .Integer instead of .Float
	insert_comma: bool,    // MJSON: when set, a newline is turned into an implicit .Comma token
}
  42. make_tokenizer :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false) -> Tokenizer {
  43. t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers}
  44. next_rune(&t)
  45. if t.r == utf8.RUNE_BOM {
  46. next_rune(&t)
  47. }
  48. return t
  49. }
  50. next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check {
  51. if t.offset >= len(t.data) {
  52. t.r = utf8.RUNE_EOF
  53. } else {
  54. t.offset += t.w
  55. t.r, t.w = utf8.decode_rune_in_string(t.data[t.offset:])
  56. t.pos.column = t.offset - t.curr_line_offset
  57. if t.offset >= len(t.data) {
  58. t.r = utf8.RUNE_EOF
  59. }
  60. }
  61. return t.r
  62. }
// get_token scans and returns the next token from the input.
// On end of input it returns a token of kind .EOF together with err == .EOF.
// Under MJSON, a newline may be synthesized into an implicit .Comma token
// (controlled by t.insert_comma, which is updated at the bottom of this proc).
get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
	// Consume a run of decimal digits, leaving t.r on the first non-digit.
	skip_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			if '0' <= t.r && t.r <= '9' {
				// Okay
			} else {
				return
			}
			next_rune(t)
		}
	}
	// Consume a run of hexadecimal digits (0-9a-fA-F).
	skip_hex_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			next_rune(t)
			switch t.r {
			case '0'..='9', 'a'..='f', 'A'..='F':
				// Okay
			case:
				return
			}
		}
	}
	// Consume one escape sequence following a backslash; returns whether it
	// was a recognized escape. (Name is a long-standing typo for "scan_escape".)
	scan_espace :: proc(t: ^Tokenizer) -> bool {
		switch t.r {
		case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
			next_rune(t)
			return true
		case 'u':
			// Expect 4 hexadecimal digits
			for i := 0; i < 4; i += 1 {
				r := next_rune(t)
				switch r {
				case '0'..='9', 'a'..='f', 'A'..='F':
					// Okay
				case:
					return false
				}
			}
			return true
		case:
			// Ignore the next rune regardless
			next_rune(t)
		}
		return false
	}
	// Skip whitespace, maintaining line/column bookkeeping. When on_newline
	// is set (MJSON comma insertion pending), stop AT a newline instead of
	// consuming it so the caller can turn it into a comma.
	skip_whitespace :: proc(t: ^Tokenizer, on_newline: bool) -> rune {
		loop: for t.offset < len(t.data) {
			switch t.r {
			case ' ', '\t', '\v', '\f', '\r':
				next_rune(t)
			case '\n':
				if on_newline {
					break loop
				}
				t.line += 1
				t.curr_line_offset = t.offset
				t.pos.column = 1
				next_rune(t)
			case:
				// Extended specs also treat LS, PS and a stray BOM as whitespace.
				if t.spec != .JSON {
					switch t.r {
					case 0x2028, 0x2029, 0xFEFF:
						next_rune(t)
						continue loop
					}
				}
				break loop
			}
		}
		return t.r
	}
	// Skip the remainder of the current line (used for // comments).
	skip_to_next_line :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			r := next_rune(t)
			if r == '\n' {
				return
			}
		}
	}
	// Consume a run of [A-Za-z0-9_] runes.
	skip_alphanum :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			switch t.r {
			case 'A'..='Z', 'a'..='z', '0'..='9', '_':
				next_rune(t)
				continue
			}
			return
		}
	}

	skip_whitespace(t, t.insert_comma)
	token.pos = t.pos
	token.kind = .Invalid

	curr_rune := t.r
	next_rune(t)

	block: switch curr_rune {
	case utf8.RUNE_ERROR:
		err = .Illegal_Character
	case utf8.RUNE_EOF, '\x00':
		token.kind = .EOF
		err = .EOF
	case '\n':
		// Only reachable when skip_whitespace stopped at a newline with
		// insert_comma set: synthesize an MJSON implicit comma.
		t.insert_comma = false
		token.text = ","
		token.kind = .Comma
		return
	case 'A'..='Z', 'a'..='z', '_':
		token.kind = .Ident
		skip_alphanum(t)
		// Keyword recognition on the scanned identifier text.
		switch str := string(t.data[token.offset:t.offset]); str {
		case "null":  token.kind = .Null
		case "false": token.kind = .False
		case "true":  token.kind = .True
		case:
			if t.spec != .JSON {
				switch str {
				case "Infinity": token.kind = .Infinity
				case "NaN":      token.kind = .NaN
				}
			}
		}
	case '+':
		// NOTE(review): err is set before the spec check and does not appear to
		// be cleared on the fallthrough path for extended specs — confirm
		// against the parser whether "+1" is meant to surface this error.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough
	case '-':
		switch t.r {
		case '0'..='9':
			// Okay — a signed number; fall through to the digit case below.
		case:
			// Illegal use of +/-
			err = .Illegal_Character
			if t.spec != .JSON {
				// Allow "-Infinity" / "-NaN" in extended specs.
				if t.r == 'I' || t.r == 'N' {
					skip_alphanum(t)
				}
				switch string(t.data[token.offset:t.offset]) {
				case "-Infinity": token.kind = .Infinity
				case "-NaN":      token.kind = .NaN
				}
			}
			break block
		}
		fallthrough
	case '0'..='9':
		token.kind = t.parse_integers ? .Integer : .Float
		if t.spec != .JSON { // Hexadecimal Numbers
			if curr_rune == '0' && (t.r == 'x' || t.r == 'X') {
				next_rune(t)
				skip_hex_digits(t)
				break
			}
		}
		// Integer part, optional fraction, optional exponent.
		skip_digits(t)
		if t.r == '.' {
			token.kind = .Float
			next_rune(t)
			skip_digits(t)
		}
		if t.r == 'e' || t.r == 'E' {
			switch r := next_rune(t); r {
			case '+', '-':
				next_rune(t)
			}
			skip_digits(t)
		}
		// Validate the full scanned text against the active spec.
		str := string(t.data[token.offset:t.offset])
		if !is_valid_number(str, t.spec) {
			err = .Invalid_Number
		}
	case '.':
		err = .Illegal_Character
		if t.spec != .JSON { // Allow leading decimal point
			skip_digits(t)
			if t.r == 'e' || t.r == 'E' {
				switch r := next_rune(t); r {
				case '+', '-':
					next_rune(t)
				}
				skip_digits(t)
			}
			str := string(t.data[token.offset:t.offset])
			if !is_valid_number(str, t.spec) {
				err = .Invalid_Number
			}
		}
	case '\'':
		// NOTE(review): as with '+', err set here is not visibly cleared on the
		// fallthrough path for extended specs — TODO confirm intended behavior.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough
	case '"':
		token.kind = .String
		quote := curr_rune
		for t.offset < len(t.data) {
			r := t.r
			if r == '\n' || r < 0 {
				err = .String_Not_Terminated
				break
			}
			next_rune(t)
			if r == quote {
				break
			}
			if r == '\\' {
				scan_espace(t)
			}
		}
		// Validate the full literal, including its surrounding quotes.
		str := string(t.data[token.offset : t.offset])
		if !is_valid_string_literal(str, t.spec) {
			err = .Invalid_String
		}
	case ',':
		token.kind = .Comma
		t.insert_comma = false
	case ':': token.kind = .Colon
	case '{': token.kind = .Open_Brace
	case '}': token.kind = .Close_Brace
	case '[': token.kind = .Open_Bracket
	case ']': token.kind = .Close_Bracket
	case '=':
		// MJSON allows '=' as a key/value separator.
		if t.spec == .MJSON {
			token.kind = .Colon
		} else {
			err = .Illegal_Character
		}
	case '/':
		err = .Illegal_Character
		if t.spec != .JSON {
			switch t.r {
			case '/':
				// Single-line comments
				skip_to_next_line(t)
				return get_token(t)
			case '*':
				// Non-nested multi-line comments
				for t.offset < len(t.data) {
					next_rune(t)
					if t.r == '*' {
						next_rune(t)
						if t.r == '/' {
							next_rune(t)
							return get_token(t)
						}
					}
				}
				// Comment ran to the end of the input unterminated.
				err = .EOF
			}
		}
	case: err = .Illegal_Character
	}

	token.text = string(t.data[token.offset : t.offset])

	// MJSON: decide whether a following newline should become an implicit comma.
	// Value-ending tokens turn it on; separators/openers turn it off.
	if t.spec == .MJSON {
		switch token.kind {
		case .Invalid:
			// preserve insert_comma info
		case .EOF:
			t.insert_comma = false
		case .Colon, .Comma, .Open_Brace, .Open_Bracket:
			t.insert_comma = false
		case .Null, .False, .True, .Infinity, .NaN,
		     .Ident, .Integer, .Float, .String,
		     .Close_Brace, .Close_Bracket:
			t.insert_comma = true
		}
	}
	return
}
  333. is_valid_number :: proc(str: string, spec: Specification) -> bool {
  334. s := str
  335. if s == "" {
  336. return false
  337. }
  338. if s[0] == '-' {
  339. s = s[1:]
  340. if s == "" {
  341. return false
  342. }
  343. } else if spec != .JSON {
  344. if s[0] == '+' { // Allow positive sign
  345. s = s[1:]
  346. if s == "" {
  347. return false
  348. }
  349. }
  350. }
  351. if spec != .JSON && len(s) >= 2 {
  352. // Allow for hexadecimal strings
  353. if s[:2] == "0x" || s[:2] == "0X" {
  354. s = s[2:]
  355. if len(s) == 0 {
  356. return false
  357. }
  358. hexadecimal_loop: for len(s) > 0 {
  359. switch s[0] {
  360. case '0'..='9', 'A'..='Z', 'a'..='z':
  361. s = s[1:]
  362. case:
  363. break hexadecimal_loop
  364. }
  365. }
  366. return len(s) == 0
  367. }
  368. }
  369. switch s[0] {
  370. case '0':
  371. s = s[1:]
  372. case '1'..='9':
  373. s = s[1:]
  374. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  375. s = s[1:]
  376. }
  377. case '.':
  378. if spec != .JSON { // Allow leading decimal point
  379. s = s[1:]
  380. } else {
  381. return false
  382. }
  383. case:
  384. return false
  385. }
  386. if spec != .JSON {
  387. if len(s) == 1 && s[0] == '.' { // Allow trailing decimal point
  388. return true
  389. }
  390. }
  391. if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
  392. s = s[2:]
  393. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  394. s = s[1:]
  395. }
  396. }
  397. if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
  398. s = s[1:]
  399. switch s[0] {
  400. case '+', '-':
  401. s = s[1:]
  402. if s == "" {
  403. return false
  404. }
  405. }
  406. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  407. s = s[1:]
  408. }
  409. }
  410. // The string should be empty now to be valid
  411. return s == ""
  412. }
  413. is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
  414. s := str
  415. if len(s) < 2 {
  416. return false
  417. }
  418. quote := s[0]
  419. if s[0] != s[len(s)-1] {
  420. return false
  421. }
  422. switch quote {
  423. case '"':
  424. // okay
  425. case '\'':
  426. if spec != .JSON {
  427. return false
  428. }
  429. // okay
  430. case:
  431. return false
  432. }
  433. s = s[1 : len(s)-1]
  434. i := 0
  435. for i < len(s) {
  436. c := s[i]
  437. switch {
  438. case c == '\\':
  439. i += 1
  440. if i >= len(s) {
  441. return false
  442. }
  443. switch s[i] {
  444. case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
  445. i += 1
  446. case '\r':
  447. if spec != .JSON && i+1 < len(s) && s[i+1] == '\n' {
  448. i += 2
  449. } else {
  450. return false
  451. }
  452. case '\n':
  453. if spec != .JSON {
  454. i += 1
  455. } else {
  456. return false
  457. }
  458. case 'u':
  459. if i >= len(s) {
  460. return false
  461. }
  462. hex := s[i+1:]
  463. if len(hex) < 4 {
  464. return false
  465. }
  466. hex = hex[:4]
  467. i += 5
  468. for j := 0; j < 4; j += 1 {
  469. c2 := hex[j]
  470. switch c2 {
  471. case '0'..='9', 'a'..='z', 'A'..='Z':
  472. // Okay
  473. case:
  474. return false
  475. }
  476. }
  477. case: return false
  478. }
  479. case c == quote, c < ' ':
  480. return false
  481. case c < utf8.RUNE_SELF:
  482. i += 1
  483. case:
  484. r, width := utf8.decode_rune_in_string(s[i:])
  485. if r == utf8.RUNE_ERROR && width == 1 {
  486. return false
  487. }
  488. i += width
  489. }
  490. }
  491. if i == len(s) {
  492. return true
  493. }
  494. return true
  495. }