tokenizer.odin 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563
  1. package json
  2. import "core:unicode/utf8"
// Pos is a byte-precise location within the input data.
Pos :: struct {
	offset: int, // byte offset from the start of the input
	line:   int, // line number, starting at 1 (see make_tokenizer)
	column: int, // byte-based offset from the start of the current line
}
// Token is a single lexical element together with its source position.
Token :: struct {
	using pos: Pos,        // position of the token's first byte
	kind:      Token_Kind, // classification of the token
	text:      string,     // raw slice of the input covered by the token
}
// Token_Kind enumerates every lexical token the tokenizer can produce.
Token_Kind :: enum {
	Invalid,
	EOF,

	// Keyword literals (Infinity/NaN are only produced outside strict JSON).
	Null,
	False,
	True,
	Infinity,
	NaN,

	// Value-carrying tokens.
	Ident,
	Integer,
	Float,
	String,

	// Punctuation.
	Colon,
	Comma,
	Open_Brace,
	Close_Brace,
	Open_Bracket,
	Close_Bracket,
}
// Tokenizer holds the scanning state for one input string.
Tokenizer :: struct {
	using pos:        Pos,    // current position (offset/line/column)
	data:             string, // the full input being scanned
	r:                rune, // current rune
	w:                int, // current rune width in bytes
	curr_line_offset: int,  // byte offset at which the current line starts
	spec:             Specification, // which JSON dialect to accept
	parse_integers:   bool, // emit .Integer instead of .Float for whole numbers
	insert_comma:     bool, // MJSON: the next newline should act as a comma
}
  42. make_tokenizer :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false) -> Tokenizer {
  43. t := Tokenizer{pos = {line=1}, data = data, spec = spec, parse_integers = parse_integers}
  44. next_rune(&t)
  45. if t.r == utf8.RUNE_BOM {
  46. next_rune(&t)
  47. }
  48. return t
  49. }
// next_rune advances the tokenizer by one rune and returns it.
//
// Note the advance order: t.offset is moved forward by the width of the
// PREVIOUS rune (t.w) before the next rune is decoded, so after the call
// t.offset is the byte offset of the rune now held in t.r. The column is
// maintained as a byte offset into the current line, not a rune count.
// At end of input t.r becomes utf8.RUNE_EOF and the state stops advancing.
next_rune :: proc(t: ^Tokenizer) -> rune #no_bounds_check {
	if t.offset >= len(t.data) {
		t.r = utf8.RUNE_EOF
	} else {
		t.offset += t.w
		t.r, t.w = utf8.decode_rune_in_string(t.data[t.offset:])
		t.pos.column = t.offset - t.curr_line_offset
		// The advance above may land exactly on the end of the data.
		if t.offset >= len(t.data) {
			t.r = utf8.RUNE_EOF
		}
	}
	return t.r
}
// get_token scans and returns the next token from t, reporting any lexical
// error encountered.
//
// Outside strict JSON (.JSON) it also recognises dialect extensions:
// identifiers, Infinity/NaN, hexadecimal numbers, a leading '.' in numbers,
// single-quoted strings, '=' in place of ':' (MJSON), and // and /* */
// comments. When t.insert_comma is set (MJSON), a newline is returned as a
// synthetic Comma token.
get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
	// Consume a run of decimal digits, leaving t.r on the first non-digit.
	skip_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			if '0' <= t.r && t.r <= '9' {
				// Okay
			} else {
				return
			}
			next_rune(t)
		}
	}
	// Consume hexadecimal digits. NOTE(review): this advances BEFORE testing,
	// so the rune that is current on entry is skipped without being checked.
	skip_hex_digits :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			next_rune(t)
			switch t.r {
			case '0'..='9', 'a'..='f', 'A'..='F':
				// Okay
			case:
				return
			}
		}
	}
	// Consume one escape sequence following a backslash, reporting whether it
	// was well-formed. (sic: the historical "espace" spelling is kept.)
	scan_espace :: proc(t: ^Tokenizer) -> bool {
		switch t.r {
		case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
			next_rune(t)
			return true
		case 'u':
			// Expect 4 hexadecimal digits
			for i := 0; i < 4; i += 1 {
				r := next_rune(t)
				switch r {
				case '0'..='9', 'a'..='f', 'A'..='F':
					// Okay
				case:
					return false
				}
			}
			return true
		case:
			// Ignore the next rune regardless
			next_rune(t)
		}
		return false
	}
	// Skip whitespace, maintaining line/column bookkeeping. When on_newline is
	// set, stop AT a '\n' so the caller can emit an MJSON comma for it.
	skip_whitespace :: proc(t: ^Tokenizer, on_newline: bool) -> rune {
		loop: for t.offset < len(t.data) {
			switch t.r {
			case ' ', '\t', '\v', '\f', '\r':
				next_rune(t)
			case '\n':
				if on_newline {
					break loop
				}
				t.line += 1
				t.curr_line_offset = t.offset
				t.pos.column = 1
				next_rune(t)
			case:
				if t.spec != .JSON {
					// Non-strict dialects also treat LINE SEPARATOR, PARAGRAPH
					// SEPARATOR, and a stray BOM as whitespace.
					switch t.r {
					case 0x2028, 0x2029, 0xFEFF:
						next_rune(t)
						continue loop
					}
				}
				break loop
			}
		}
		return t.r
	}
	// Advance to just past the next '\n' (used for single-line comments).
	skip_to_next_line :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			r := next_rune(t)
			if r == '\n' {
				return
			}
		}
	}
	// Consume trailing identifier characters [A-Za-z0-9_].
	skip_alphanum :: proc(t: ^Tokenizer) {
		for t.offset < len(t.data) {
			switch next_rune(t) {
			case 'A'..='Z', 'a'..='z', '0'..='9', '_':
				continue
			}
			return
		}
	}

	skip_whitespace(t, t.insert_comma)
	token.pos = t.pos

	token.kind = .Invalid

	curr_rune := t.r
	next_rune(t)

	block: switch curr_rune {
	case utf8.RUNE_ERROR:
		err = .Illegal_Character
	case utf8.RUNE_EOF, '\x00':
		token.kind = .EOF
		err = .EOF

	case '\n':
		// Only reachable when insert_comma halted skip_whitespace at a newline:
		// synthesise the comma that MJSON allows to be elided at line ends.
		t.insert_comma = false
		token.text = ","
		token.kind = .Comma
		return

	case 'A'..='Z', 'a'..='z', '_':
		token.kind = .Ident
		skip_alphanum(t)

		// Keyword literals take precedence over plain identifiers.
		switch str := string(t.data[token.offset:t.offset]); str {
		case "null":  token.kind = .Null
		case "false": token.kind = .False
		case "true":  token.kind = .True
		case:
			if t.spec != .JSON {
				switch str {
				case "Infinity": token.kind = .Infinity
				case "NaN":      token.kind = .NaN
				}
			}
		}

	case '+':
		// A leading '+' sign is an extension; strict JSON rejects it outright.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough

	case '-':
		switch t.r {
		case '0'..='9':
			// Okay
		case:
			// Illegal use of +/-
			err = .Illegal_Character
			if t.spec != .JSON {
				// Allow signed Infinity/NaN outside strict JSON.
				if t.r == 'I' || t.r == 'N' {
					skip_alphanum(t)
				}
				// NOTE(review): only the '-' spellings are matched, so
				// "+Infinity"/"+NaN" stay .Invalid with err set — confirm intended.
				switch string(t.data[token.offset:t.offset]) {
				case "-Infinity": token.kind = .Infinity
				case "-NaN":      token.kind = .NaN
				}
			}
			break block
		}
		fallthrough

	case '0'..='9':
		token.kind = t.parse_integers ? .Integer : .Float
		if t.spec != .JSON { // Hexadecimal Numbers
			if curr_rune == '0' && (t.r == 'x' || t.r == 'X') {
				next_rune(t)
				skip_hex_digits(t)
				break
			}
		}

		skip_digits(t)
		if t.r == '.' { // fractional part
			token.kind = .Float
			next_rune(t)
			skip_digits(t)
		}
		if t.r == 'e' || t.r == 'E' { // exponent, with optional sign
			switch r := next_rune(t); r {
			case '+', '-':
				next_rune(t)
			}
			skip_digits(t)
		}

		str := string(t.data[token.offset:t.offset])
		if !is_valid_number(str, t.spec) {
			err = .Invalid_Number
		}

	case '.':
		err = .Illegal_Character
		if t.spec != .JSON { // Allow leading decimal point
			// NOTE(review): token.kind stays .Invalid and err remains
			// .Illegal_Character here even when valid digits follow — confirm
			// this is intended for the leading-decimal-point extension.
			skip_digits(t)
			if t.r == 'e' || t.r == 'E' {
				switch r := next_rune(t); r {
				case '+', '-':
					next_rune(t)
				}
				skip_digits(t)
			}
			str := string(t.data[token.offset:t.offset])
			if !is_valid_number(str, t.spec) {
				err = .Invalid_Number
			}
		}

	case '\'':
		// Single-quoted strings are an extension; strict JSON rejects them.
		err = .Illegal_Character
		if t.spec == .JSON {
			break
		}
		fallthrough
	case '"':
		token.kind = .String
		quote := curr_rune
		for t.offset < len(t.data) {
			r := t.r
			if r == '\n' || r < 0 {
				err = .String_Not_Terminated
				break
			}
			next_rune(t)
			if r == quote {
				break
			}
			if r == '\\' {
				scan_espace(t)
			}
		}

		// Validate the whole literal (including quotes) after scanning it.
		str := string(t.data[token.offset : t.offset])
		if !is_valid_string_literal(str, t.spec) {
			err = .Invalid_String
		}

	case ',':
		token.kind = .Comma
		t.insert_comma = false
	case ':': token.kind = .Colon
	case '{': token.kind = .Open_Brace
	case '}': token.kind = .Close_Brace
	case '[': token.kind = .Open_Bracket
	case ']': token.kind = .Close_Bracket

	case '=':
		// MJSON allows '=' in place of ':'.
		if t.spec == .MJSON {
			token.kind = .Colon
		} else {
			err = .Illegal_Character
		}

	case '/':
		err = .Illegal_Character
		if t.spec != .JSON {
			switch t.r {
			case '/':
				// Single-line comments
				skip_to_next_line(t)
				return get_token(t)
			case '*':
				// None-nested multi-line comments
				for t.offset < len(t.data) {
					next_rune(t)
					if t.r == '*' {
						next_rune(t)
						if t.r == '/' {
							next_rune(t)
							return get_token(t)
						}
					}
				}
				// Comment ran past the end of the input.
				err = .EOF
			}
		}

	case: err = .Illegal_Character
	}

	token.text = string(t.data[token.offset : t.offset])

	// MJSON: record whether a following newline should act as a comma.
	if t.spec == .MJSON {
		switch token.kind {
		case .Invalid:
			// preserve insert_comma info
		case .EOF:
			t.insert_comma = false
		case .Colon, .Comma, .Open_Brace, .Open_Bracket:
			t.insert_comma = false
		case .Null, .False, .True, .Infinity, .NaN,
		     .Ident, .Integer, .Float, .String,
		     .Close_Brace, .Close_Bracket:
			t.insert_comma = true
		}
	}
	return
}
  332. is_valid_number :: proc(str: string, spec: Specification) -> bool {
  333. s := str
  334. if s == "" {
  335. return false
  336. }
  337. if s[0] == '-' {
  338. s = s[1:]
  339. if s == "" {
  340. return false
  341. }
  342. } else if spec != .JSON {
  343. if s[0] == '+' { // Allow positive sign
  344. s = s[1:]
  345. if s == "" {
  346. return false
  347. }
  348. }
  349. }
  350. if spec != .JSON && len(s) >= 2 {
  351. // Allow for hexadecimal strings
  352. if s[:2] == "0x" || s[:2] == "0X" {
  353. s = s[2:]
  354. if len(s) == 0 {
  355. return false
  356. }
  357. hexadecimal_loop: for len(s) > 0 {
  358. switch s[0] {
  359. case '0'..='9', 'A'..='Z', 'a'..='z':
  360. s = s[1:]
  361. case:
  362. break hexadecimal_loop
  363. }
  364. }
  365. return len(s) == 0
  366. }
  367. }
  368. switch s[0] {
  369. case '0':
  370. s = s[1:]
  371. case '1'..='9':
  372. s = s[1:]
  373. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  374. s = s[1:]
  375. }
  376. case '.':
  377. if spec != .JSON { // Allow leading decimal point
  378. s = s[1:]
  379. } else {
  380. return false
  381. }
  382. case:
  383. return false
  384. }
  385. if spec != .JSON {
  386. if len(s) == 1 && s[0] == '.' { // Allow trailing decimal point
  387. return true
  388. }
  389. }
  390. if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
  391. s = s[2:]
  392. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  393. s = s[1:]
  394. }
  395. }
  396. if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
  397. s = s[1:]
  398. switch s[0] {
  399. case '+', '-':
  400. s = s[1:]
  401. if s == "" {
  402. return false
  403. }
  404. }
  405. for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
  406. s = s[1:]
  407. }
  408. }
  409. // The string should be empty now to be valid
  410. return s == ""
  411. }
  412. is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
  413. s := str
  414. if len(s) < 2 {
  415. return false
  416. }
  417. quote := s[0]
  418. if s[0] != s[len(s)-1] {
  419. return false
  420. }
  421. switch quote {
  422. case '"':
  423. // okay
  424. case '\'':
  425. if spec != .JSON {
  426. return false
  427. }
  428. // okay
  429. case:
  430. return false
  431. }
  432. s = s[1 : len(s)-1]
  433. i := 0
  434. for i < len(s) {
  435. c := s[i]
  436. switch {
  437. case c == '\\':
  438. i += 1
  439. if i >= len(s) {
  440. return false
  441. }
  442. switch s[i] {
  443. case '"', '\'', '\\', '/', 'b', 'n', 'r', 't', 'f':
  444. i += 1
  445. case '\r':
  446. if spec != .JSON && i+1 < len(s) && s[i+1] == '\n' {
  447. i += 2
  448. } else {
  449. return false
  450. }
  451. case '\n':
  452. if spec != .JSON {
  453. i += 1
  454. } else {
  455. return false
  456. }
  457. case 'u':
  458. if i >= len(s) {
  459. return false
  460. }
  461. hex := s[i+1:]
  462. if len(hex) < 4 {
  463. return false
  464. }
  465. hex = hex[:4]
  466. i += 5
  467. for j := 0; j < 4; j += 1 {
  468. c2 := hex[j]
  469. switch c2 {
  470. case '0'..='9', 'a'..='z', 'A'..='Z':
  471. // Okay
  472. case:
  473. return false
  474. }
  475. }
  476. case: return false
  477. }
  478. case c == quote, c < ' ':
  479. return false
  480. case c < utf8.RUNE_SELF:
  481. i += 1
  482. case:
  483. r, width := utf8.decode_rune_in_string(s[i:])
  484. if r == utf8.RUNE_ERROR && width == 1 {
  485. return false
  486. }
  487. i += width
  488. }
  489. }
  490. if i == len(s) {
  491. return true
  492. }
  493. return true
  494. }