xml_reader.odin

/*
	2021-2022 Jeroen van Rijn <[email protected]>.
	available under Odin's BSD-3 license.

	List of contributors:
	- Jeroen van Rijn: Initial implementation.
*/
package encoding_xml

// An XML 1.0 / 1.1 parser

import "core:bytes"
import "core:encoding/entity"
import "base:intrinsics"
import "core:mem"
import "core:os"
import "core:strings"
import "base:runtime"

likely :: intrinsics.expect

DEFAULT_OPTIONS :: Options{
	flags            = {.Ignore_Unsupported},
	expected_doctype = "",
}

Option_Flag :: enum {
	// If the caller says that input may be modified, we can perform in-situ parsing.
	// If this flag isn't provided, the XML parser first duplicates the input so that it can.
	Input_May_Be_Modified,

	// Document MUST start with `<?xml` prologue.
	Must_Have_Prolog,

	// Document MUST have a `<!DOCTYPE`.
	Must_Have_DocType,

	// By default we skip comments. Use this option to intern a comment on a parented Element.
	Intern_Comments,

	// How to handle unsupported parts of the specification, like <! other than <!DOCTYPE and <![CDATA[
	Error_on_Unsupported,
	Ignore_Unsupported,

	// By default CDATA tags are passed-through as-is.
	// This option unwraps them when encountered.
	Unbox_CDATA,

	// By default SGML entities like `&gt;`, `&#32;` and `&#x20;` are passed-through as-is.
	// This option decodes them when encountered.
	Decode_SGML_Entities,

	// If a tag body has a comment, it will be stripped unless this option is given.
	Keep_Tag_Body_Comments,
}
Option_Flags :: bit_set[Option_Flag; u16]

Document :: struct {
	elements:      [dynamic]Element,
	element_count: Element_ID,

	prologue: Attributes,
	encoding: Encoding,

	doctype: struct {
		// We only scan the <!DOCTYPE IDENT part and skip the rest.
		ident: string,
		rest:  string,
	},

	// If we encounter comments before the root node, and the option to intern comments is given, this is where they'll live.
	// Otherwise they'll be in the element tree.
	comments: [dynamic]string,

	// Internal
	tokenizer: ^Tokenizer,
	allocator: mem.Allocator,

	// Input. Either the original buffer, or a copy if `.Input_May_Be_Modified` isn't specified.
	input:           []u8,
	strings_to_free: [dynamic]string,
}

Element :: struct {
	ident:   string,
	value:   [dynamic]Value,
	attribs: Attributes,

	kind: enum {
		Element = 0,
		Comment,
	},
	parent: Element_ID,
}

Value :: union {
	string,
	Element_ID,
}

Attribute :: struct {
	key: string,
	val: string,
}
Attributes :: [dynamic]Attribute

Options :: struct {
	flags:            Option_Flags,
	expected_doctype: string,
}
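/*
	Example (illustrative sketch, not part of the parser itself): composing `Options`
	and passing them to `parse_string`. The flag choices and input string below are
	placeholders picked for the example, not defaults.

		opts := Options{
			flags            = {.Unbox_CDATA, .Decode_SGML_Entities},
			expected_doctype = "",
		}
		doc, err := parse_string(`<?xml version="1.0"?><root></root>`, opts)
*/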
Encoding :: enum {
	Unknown,

	UTF_8,
	ISO_8859_1,

	// Aliases
	LATIN_1 = ISO_8859_1,
}

Error :: enum {
	// General return values.
	None = 0,
	General_Error,
	Unexpected_Token,
	Invalid_Token,

	// Couldn't find, open or read file.
	File_Error,

	// File too short.
	Premature_EOF,

	// XML-specific errors.
	No_Prolog,
	Invalid_Prolog,
	Too_Many_Prologs,

	No_DocType,
	Too_Many_DocTypes,
	DocType_Must_Preceed_Elements,

	// If a DOCTYPE is present _or_ the caller
	// asked for a specific DOCTYPE and the DOCTYPE
	// and root tag don't match, we return `.Invalid_DocType`.
	Invalid_DocType,

	Invalid_Tag_Value,
	Mismatched_Closing_Tag,

	Unclosed_Comment,
	Comment_Before_Root_Element,
	Invalid_Sequence_In_Comment,

	Unsupported_Version,
	Unsupported_Encoding,

	// <!FOO are usually skipped.
	Unhandled_Bang,

	Duplicate_Attribute,
	Conflicting_Options,
}

parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	data := data
	context.allocator = allocator

	opts := validate_options(options) or_return

	// If `.Input_May_Be_Modified` is not specified, we duplicate the input so that we can modify it in-place.
	if .Input_May_Be_Modified not_in opts.flags {
		data = bytes.clone(data)
	}

	t := &Tokenizer{}
	init(t, string(data), path, error_handler)

	doc = new(Document)
	doc.allocator = allocator
	doc.tokenizer = t
	doc.input     = data
	doc.elements  = make([dynamic]Element, 1024, 1024, allocator)

	err = .Unexpected_Token

	element, parent: Element_ID
	open: Token

	// If a DOCTYPE is present, the root tag has to match.
	// If an expected DOCTYPE is given in options (i.e. it's non-empty), the DOCTYPE (if present) and root tag have to match.
	expected_doctype := options.expected_doctype

	loop: for {
		skip_whitespace(t)

		// NOTE(Jeroen): This is faster as a switch.
		switch t.ch {
		case '<':
			// Consume peeked `<`
			advance_rune(t)
			open = scan(t)

			// NOTE(Jeroen): We're not using a switch because this if-else chain ordered by likelihood is 2.5% faster at -o:size and -o:speed.
			if likely(open.kind, Token_Kind.Ident) == .Ident {
				// e.g. <odin - Start of new element.
				element = new_element(doc)

				if element == 0 { // First Element
					parent = element
				} else {
					append(&doc.elements[parent].value, element)
				}

				doc.elements[element].parent = parent
				doc.elements[element].ident  = open.text

				parse_attributes(doc, &doc.elements[element].attribs) or_return
				// If a DOCTYPE is present _or_ the caller
				// asked for a specific DOCTYPE and the DOCTYPE
				// and root tag don't match, we return `.Invalid_DocType`.
				if element == 0 { // Root tag?
					if len(expected_doctype) > 0 && expected_doctype != open.text {
						error(t, t.offset, "Root Tag doesn't match DOCTYPE. Expected: %v, got: %v\n", expected_doctype, open.text)
						return doc, .Invalid_DocType
					}
				}

				// One of these should follow:
				// - `>`, which means we've just opened this tag and expect a later element to close it.
				// - `/>`, which means this is an 'empty' or self-closing tag.
				end_token := scan(t)
				#partial switch end_token.kind {
				case .Gt:
					// We're now the new parent.
					parent = element

				case .Slash:
					// Empty tag. Close it.
					expect(t, .Gt) or_return
					parent  = doc.elements[element].parent
					element = parent

				case:
					error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
					return
				}

			} else if open.kind == .Slash {
				// Close tag.
				ident := expect(t, .Ident) or_return
				_ = expect(t, .Gt) or_return

				if doc.elements[element].ident != ident.text {
					error(t, t.offset, "Mismatched Closing Tag. Expected %v, got %v\n", doc.elements[element].ident, ident.text)
					return doc, .Mismatched_Closing_Tag
				}

				parent  = doc.elements[element].parent
				element = parent

			} else if open.kind == .Exclaim {
				// <!
				next := scan(t)
				#partial switch next.kind {
				case .Ident:
					switch next.text {
					case "DOCTYPE":
						if len(doc.doctype.ident) > 0 {
							return doc, .Too_Many_DocTypes
						}
						if doc.element_count > 0 {
							return doc, .DocType_Must_Preceed_Elements
						}
						parse_doctype(doc) or_return

						if len(expected_doctype) > 0 && expected_doctype != doc.doctype.ident {
							error(t, t.offset, "Invalid DOCTYPE. Expected: %v, got: %v\n", expected_doctype, doc.doctype.ident)
							return doc, .Invalid_DocType
						}
						expected_doctype = doc.doctype.ident

					case:
						if .Error_on_Unsupported in opts.flags {
							error(t, t.offset, "Unhandled: <!%v\n", next.text)
							return doc, .Unhandled_Bang
						}
						skip_element(t) or_return
					}

				case .Dash:
					// Comment: <!-- -->.
					// The grammar does not allow a comment to end in --->
					expect(t, .Dash) or_return
					comment := scan_comment(t) or_return

					if .Intern_Comments in opts.flags {
						if len(doc.elements) == 0 {
							append(&doc.comments, comment)
						} else {
							el := new_element(doc)
							doc.elements[el].parent = element
							doc.elements[el].kind   = .Comment
							append(&doc.elements[el].value, comment)
							append(&doc.elements[element].value, el)
						}
					}

				case:
					error(t, t.offset, "Invalid Token after <!. Expected .Ident, got %#v\n", next)
					return
				}

			} else if open.kind == .Question {
				// <?xml
				next := scan(t)
				#partial switch next.kind {
				case .Ident:
					if len(next.text) == 3 && strings.equal_fold(next.text, "xml") {
						parse_prologue(doc) or_return
					} else if len(doc.prologue) > 0 {
						// We've already seen a prologue.
						return doc, .Too_Many_Prologs
					} else {
						// Could be `<?xml-stylesheet`, etc. Ignore it.
						skip_element(t) or_return
					}

				case:
					error(t, t.offset, "Expected \"<?xml\", got \"<?%v\".", next.text)
					return
				}

			} else {
				error(t, t.offset, "Invalid Token after <: %#v\n", open)
				return
			}

		case -1:
			// End of file.
			break loop

		case:
			// This should be a tag's body text.
			body_text := scan_string(t, t.offset) or_return

			needs_processing := .Unbox_CDATA in opts.flags
			needs_processing |= .Decode_SGML_Entities in opts.flags

			if !needs_processing {
				append(&doc.elements[element].value, body_text)
				continue
			}

			decode_opts := entity.XML_Decode_Options{}
			if .Keep_Tag_Body_Comments not_in opts.flags {
				decode_opts += { .Comment_Strip }
			}

			if .Decode_SGML_Entities not_in opts.flags {
				decode_opts += { .No_Entity_Decode }
			}

			if .Unbox_CDATA in opts.flags {
				decode_opts += { .Unbox_CDATA }
				if .Decode_SGML_Entities in opts.flags {
					decode_opts += { .Decode_CDATA }
				}
			}

			decoded, decode_err := entity.decode_xml(body_text, decode_opts)
			if decode_err == .None {
				append(&doc.elements[element].value, decoded)
				append(&doc.strings_to_free, decoded)
			} else {
				append(&doc.elements[element].value, body_text)
			}
		}
	}

	if .Must_Have_Prolog in opts.flags && len(doc.prologue) == 0 {
		return doc, .No_Prolog
	}

	if .Must_Have_DocType in opts.flags && len(doc.doctype.ident) == 0 {
		return doc, .No_DocType
	}

	resize(&doc.elements, int(doc.element_count))
	return doc, .None
}

parse_string :: proc(data: string, options := DEFAULT_OPTIONS, path := "", error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	_data := transmute([]u8)data
	return parse_bytes(_data, options, path, error_handler, allocator)
}

parse :: proc { parse_string, parse_bytes }
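/*
	Example (illustrative sketch, not part of the parser itself): parsing a document
	and walking the resulting element tree. `print_element` is a hypothetical helper
	written for this example; it only relies on `Document`, `Element_ID` and `Value`
	as declared above, plus `core:fmt`.

		import "core:fmt"

		print_element :: proc(doc: ^Document, id: Element_ID, depth := 0) {
			el := doc.elements[id]
			for _ in 0 ..< depth { fmt.print("  ") }
			fmt.println(el.ident)

			for v in el.value {
				#partial switch body in v {
				case string:
					// Tag body text (or an interned comment).
					for _ in 0 ..< (depth + 1) { fmt.print("  ") }
					fmt.println(body)
				case Element_ID:
					// Nested child element.
					print_element(doc, body, depth + 1)
				}
			}
		}

		doc, err := parse(`<root><child>body</child></root>`)
		if err == .None {
			print_element(doc, 0) // Element 0 is the root element.
		}
*/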
// Load an XML file.
load_from_file :: proc(filename: string, options := DEFAULT_OPTIONS, error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	context.allocator = allocator
	options := options

	data, data_ok := os.read_entire_file(filename)
	if !data_ok { return {}, .File_Error }

	options.flags += { .Input_May_Be_Modified }
	return parse_bytes(data, options, filename, error_handler, allocator)
}

destroy :: proc(doc: ^Document) {
	if doc == nil { return }

	for el in doc.elements {
		delete(el.attribs)
		delete(el.value)
	}
	delete(doc.elements)
	delete(doc.prologue)
	delete(doc.comments)
	delete(doc.input)

	for s in doc.strings_to_free {
		delete(s)
	}
	delete(doc.strings_to_free)

	free(doc)
}
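/*
	Example (illustrative sketch, not part of the parser itself): loading a document
	from disk in user code and releasing it again. The import alias and filename are
	placeholders.

		import xml "core:encoding/xml"

		load_example :: proc() {
			doc, err := xml.load_from_file("example.xml")
			if err != .None {
				return
			}
			defer xml.destroy(doc)

			// Use `doc` here; `destroy` frees the document, its elements and its buffers.
		}
*/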
/*
	Helpers.
*/

validate_options :: proc(options: Options) -> (validated: Options, err: Error) {
	validated = options

	if .Error_on_Unsupported in validated.flags && .Ignore_Unsupported in validated.flags {
		return options, .Conflicting_Options
	}
	return validated, .None
}

expect :: proc(t: ^Tokenizer, kind: Token_Kind, multiline_string := false) -> (tok: Token, err: Error) {
	tok = scan(t, multiline_string=multiline_string)
	if tok.kind == kind { return tok, .None }

	error(t, t.offset, "Expected \"%v\", got \"%v\".", kind, tok.kind)
	return tok, .Unexpected_Token
}

parse_attribute :: proc(doc: ^Document) -> (attr: Attribute, offset: int, err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	key := expect(t, .Ident) or_return
	offset = t.offset - len(key.text)
	_ = expect(t, .Eq) or_return
	value := expect(t, .String, multiline_string=true) or_return

	normalized, normalize_err := entity.decode_xml(value.text, {.Normalize_Whitespace}, doc.allocator)
	if normalize_err == .None {
		append(&doc.strings_to_free, normalized)
		value.text = normalized
	}

	attr.key = key.text
	attr.val = value.text

	err = .None
	return
}

check_duplicate_attributes :: proc(t: ^Tokenizer, attribs: Attributes, attr: Attribute, offset: int) -> (err: Error) {
	for a in attribs {
		if attr.key == a.key {
			error(t, offset, "Duplicate attribute: %v\n", attr.key)
			return .Duplicate_Attribute
		}
	}
	return .None
}

parse_attributes :: proc(doc: ^Document, attribs: ^Attributes) -> (err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	for peek(t).kind == .Ident {
		attr, offset := parse_attribute(doc) or_return
		check_duplicate_attributes(t, attribs^, attr, offset) or_return
		append(attribs, attr)
	}
	skip_whitespace(t)
	return .None
}

parse_prologue :: proc(doc: ^Document) -> (err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	offset := t.offset
	parse_attributes(doc, &doc.prologue) or_return

	for attr in doc.prologue {
		switch attr.key {
		case "version":
			switch attr.val {
			case "1.0", "1.1":
			case:
				error(t, offset, "[parse_prologue] Warning: Unhandled XML version: %v\n", attr.val)
			}

		case "encoding":
			runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()
			switch strings.to_lower(attr.val, context.temp_allocator) {
			case "utf-8", "utf8":
				doc.encoding = .UTF_8

			case "latin-1", "latin1", "iso-8859-1":
				doc.encoding = .LATIN_1

			case:
				// Unrecognized encoding, assume UTF-8.
				error(t, offset, "[parse_prologue] Warning: Unrecognized encoding: %v\n", attr.val)
			}

		case:
			// Ignored.
		}
	}

	_ = expect(t, .Question) or_return
	_ = expect(t, .Gt) or_return
	return .None
}

skip_element :: proc(t: ^Tokenizer) -> (err: Error) {
	close := 1

	loop: for {
		tok := scan(t)
		#partial switch tok.kind {
		case .EOF:
			error(t, t.offset, "[skip_element] Premature EOF\n")
			return .Premature_EOF

		case .Lt:
			close += 1

		case .Gt:
			close -= 1
			if close == 0 {
				break loop
			}

		case:
		}
	}
	return .None
}

parse_doctype :: proc(doc: ^Document) -> (err: Error) {
	/*
		<!DOCTYPE greeting SYSTEM "hello.dtd">

		<!DOCTYPE greeting [
			<!ELEMENT greeting (#PCDATA)>
		]>
	*/
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	tok := expect(t, .Ident) or_return
	doc.doctype.ident = tok.text

	skip_whitespace(t)
	offset := t.offset
	skip_element(t) or_return

	// -1 because the current offset is that of the closing tag, so the rest of the DOCTYPE tag ends just before it.
	doc.doctype.rest = string(t.src[offset : t.offset - 1])
	return .None
}

Element_ID :: u32

new_element :: proc(doc: ^Document) -> (id: Element_ID) {
	element_space := len(doc.elements)

	// Need to resize
	if int(doc.element_count) + 1 > element_space {
		if element_space < 65536 {
			element_space *= 2
		} else {
			element_space += 65536
		}
		resize(&doc.elements, element_space)
	}

	cur := doc.element_count
	doc.element_count += 1
	return cur
}