package xml

/*
	An XML 1.0 / 1.1 parser.

	Copyright 2021 Jeroen van Rijn. Made available under Odin's BSD-3 license.

	A from-scratch XML implementation, loosely modelled on the
	[spec](https://www.w3.org/TR/2006/REC-xml11-20060816).

	Features:
	- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents
	  in common current usage.
	- Simple to understand and use. Small.

	Caveats:
	- We do NOT support HTML in this package, as that may or may not be valid XML.
	  If it works, great. If it doesn't, that's not considered a bug.
	- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it
	  to UTF-8 first. Also, our condolences.
	- `<!ELEMENT` and `<!ATTLIST` are not supported, and will be either ignored or
	  return an error depending on the parser options.

	TODO:
	- Optional CDATA unboxing.
	- Optional `&gt;`, `&#32;`, `&#x20;` and other escape substitution in tag bodies.

	MAYBE:
	- XML writer?
	- Serialize/deserialize Odin types?

	List of contributors:
		Jeroen van Rijn: Initial implementation.
*/

import "core:strings"
import "core:mem"
import "core:os"

/*
	NOTE(review): This file reached review with spans of text missing (everything
	between stray `<`...`>` pairs was stripped by an earlier tool). The type
	declarations and the branches of `parse_from_slice` marked NOTE(review) below
	were reconstructed from their visible usage in this file — confirm member
	names and order against the original before merging.
*/

Option_Flag :: enum {
	// Document MUST start with an `<?xml` prolog to be considered valid.
	Must_Have_Prolog,
	// Document MUST contain a `<!DOCTYPE`.
	Must_Have_DocType,
	// Return an error when an unsupported construct (e.g. `<!ELEMENT`) is seen.
	Error_on_Unsupported,
	// Silently skip unsupported constructs. Mutually exclusive with `Error_on_Unsupported`.
	Ignore_Unsupported,
	// Keep comments (on the document before the root, on the parent element after).
	Intern_Comments,
	// Unwrap `<![CDATA[ ... ]]>` sections. Currently unimplemented; see `validate_options`.
	Unbox_CDATA,
	// Decode SGML entities like `&gt;`. Currently unimplemented; see `validate_options`.
	Decode_SGML_Entities,
}
Option_Flags :: bit_set[Option_Flag]

Options :: struct {
	flags:            Option_Flags,
	// If non-empty, the DOCTYPE (if present) and the root tag must match this.
	expected_doctype: string,
}

DEFAULT_Options :: Options{
	flags = {
		.Ignore_Unsupported,
	},
	expected_doctype = "",
}

Encoding :: enum {
	Unknown,
	UTF_8,
	LATIN_1,
}

Error :: enum {
	// General return values.
	None = 0,
	General_Error,
	Unexpected_Token,
	Invalid_Token,

	// Couldn't find, open or read file.
	File_Error,

	// Document ended while a tag was still open.
	Premature_EOF,

	// XML-specific errors.
	No_Prolog,
	Too_Many_Prologs,
	No_DocType,
	Too_Many_DocTypes,
	DocType_Must_Proceed_Elements,
	Invalid_DocType,
	Mismatched_Closing_Tag,
	// `<!FOO` other than `<!DOCTYPE` / `<!--`, with `.Error_on_Unsupported` set.
	Unhandled_Bang,
	Duplicate_Attribute,
	Conflicting_Options,
	Unhandled_CDATA_Unboxing,
	Unhandled_SGML_Entity_Decoding,
}

Attr :: struct {
	key: string,
	val: string,
}
Attributes :: [dynamic]Attr

Element :: struct {
	ident:   string,
	value:   string,
	attribs: Attributes,
	kind: enum {
		Element = 0,
		Comment,
	},
	parent:   ^Element,
	children: [dynamic]^Element,
}

Document :: struct {
	root:     ^Element,
	prolog:   Attributes,
	encoding: Encoding,
	doctype: struct {
		// We only scan the `<!DOCTYPE IDENT` part; the remainder is kept raw in `rest`.
		ident: string,
		rest:  string,
	},
	comments: [dynamic]string,

	// Internal bookkeeping.
	tokenizer: ^Tokenizer,
	allocator: mem.Allocator,
	intern:    strings.Intern,
}

/*
	Parses an XML document from a byte slice.

	Inputs:
	- data:          the document bytes (UTF-8).
	- options:       parser flags and optional expected DOCTYPE.
	- path:          used in error messages only.
	- error_handler: callback invoked with position and message on errors.
	- allocator:     all document memory is allocated from this.

	Returns the (possibly partial) `Document` and an `Error` (`.None` on success).
	Call `destroy` on the returned document when done with it.
*/
parse_from_slice :: proc(data: []u8, options := DEFAULT_Options, path := "", error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	context.allocator = allocator
	opts := validate_options(options) or_return

	t := &Tokenizer{}
	init(t, string(data), path, error_handler)

	doc = new(Document)
	doc.allocator = allocator
	doc.tokenizer = t
	strings.intern_init(&doc.intern, allocator, allocator)

	// Default error for the bare `return`s in the loop below.
	err = .Unexpected_Token

	element, parent: ^Element
	tag_is_open := false

	/*
		If a DOCTYPE is present, the root tag has to match.
		If an expected DOCTYPE is given in options (i.e. it's non-empty),
		the DOCTYPE (if present) and root tag have to match.
	*/
	expected_doctype := options.expected_doctype

	loop: for {
		skip_whitespace(t)

		switch t.ch {
		case '<':
			// Consume peeked `<`.
			advance_rune(t)
			open := scan(t)

			#partial switch open.kind {
			case .Question:
				/*
					`<?`: should be the `<?xml ...?>` prolog.
				*/
				if len(doc.prolog) > 0 {
					// We've already seen a prolog.
					return doc, .Too_Many_Prologs
				} else {
					// NOTE(review): reconstructed — consume the `xml` ident, then
					// hand off to `parse_prolog` for the attributes and the `?>`.
					_ = expect(t, .Ident) or_return
					parse_prolog(doc) or_return
				}

			case .Exclaim:
				/*
					`<!`: DOCTYPE, comment, or an unsupported construct.
					NOTE(review): the dispatch below was reconstructed from the
					surviving branch bodies.
				*/
				next := scan(t)
				#partial switch next.kind {
				case .Ident:
					switch next.text {
					case "DOCTYPE":
						if len(doc.doctype.ident) > 0 {
							return doc, .Too_Many_DocTypes
						}
						if doc.root != nil {
							return doc, .DocType_Must_Proceed_Elements
						}
						parse_doctype(doc) or_return

						if len(expected_doctype) > 0 && expected_doctype != doc.doctype.ident {
							error(t, t.offset, "Invalid DOCTYPE. Expected: %v, got: %v\n", expected_doctype, doc.doctype.ident)
							return doc, .Invalid_DocType
						}
						expected_doctype = doc.doctype.ident

					case:
						// `<!ELEMENT`, `<!ATTLIST`, etc.
						if .Error_on_Unsupported in opts.flags {
							error(t, t.offset, "Unhandled: <!%v\n", next.text)
							return doc, .Unhandled_Bang
						}
						skip_element(t) or_return
					}

				case .Dash:
					/*
						`<!--`: a comment.
						The grammar does not allow a comment to end in --->
					*/
					expect(t, .Dash)
					comment := scan_comment(t) or_return

					if .Intern_Comments in opts.flags {
						comment = strings.intern_get(&doc.intern, comment)
						if doc.root == nil {
							append(&doc.comments, comment)
						} else {
							el := new(Element)
							el.parent = element
							el.kind   = .Comment
							el.value  = comment
							append(&element.children, el)
						}
					}

				case:
					error(t, t.offset, "Invalid Token after <!: %#v\n", next)
					return
				}

			case .Ident:
				/*
					An opening tag: `<ident ...`.
					NOTE(review): element creation / parenting below was reconstructed.
				*/
				element = new(Element)
				tag_is_open = true

				if doc.root == nil {
					doc.root = element
				} else {
					element.parent = parent
					append(&parent.children, element)
				}
				element.ident = strings.intern_get(&doc.intern, open.text)

				parse_attributes(doc, &element.attribs) or_return

				if element == doc.root {
					if len(expected_doctype) > 0 && expected_doctype != open.text {
						error(t, t.offset, "Root Tag doesn't match DOCTYPE. Expected: %v, got: %v\n", expected_doctype, open.text)
						return doc, .Invalid_DocType
					}
				}

				/*
					One of these should follow:
					- `>`,  which means we've just opened this tag and expect a later element to close it.
					- `/>`, which means this is an 'empty' or self-closing tag.
				*/
				end_token := scan(t)
				#partial switch end_token.kind {
				case .Gt:
					// We're now the new parent.
					parent = element

				case .Slash:
					// Empty tag. Close it.
					expect(t, .Gt) or_return
					parent      = element.parent
					element     = parent
					tag_is_open = false

				case:
					error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
					return
				}

			case .Slash:
				// `</ident>`: a closing tag. It must match the element being closed.
				ident := expect(t, .Ident) or_return
				_ = expect(t, .Gt) or_return

				if element.ident != ident.text {
					error(t, t.offset, "Mismatched Closing Tag. Expected %v, got %v\n", element.ident, ident.text)
					return doc, .Mismatched_Closing_Tag
				}
				parent      = element.parent
				element     = parent
				tag_is_open = false

			case:
				error(t, t.offset, "Invalid Token after <: %#v\n", open)
				return
			}

		case -1:
			// End of file.
			if tag_is_open {
				return doc, .Premature_EOF
			}
			break loop

		case:
			// This should be a tag's body text.
			body_text := scan_string(t, t.offset) or_return
			element.value = strings.intern_get(&doc.intern, body_text)
		}
	}

	if .Must_Have_Prolog in opts.flags && len(doc.prolog) == 0 {
		return doc, .No_Prolog
	}
	if .Must_Have_DocType in opts.flags && len(doc.doctype.ident) == 0 {
		return doc, .No_DocType
	}
	return doc, .None
}

/*
	Reads `filename` in full and parses it as an XML document.
	The file buffer is released before this returns; all retained strings
	live in the document's intern pool.
*/
parse_from_file :: proc(filename: string, options := DEFAULT_Options, error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	context.allocator = allocator

	data, data_ok := os.read_entire_file(filename)
	defer delete(data)

	if !data_ok { return {}, .File_Error }

	return parse_from_slice(data, options, filename, error_handler, allocator)
}

parse :: proc { parse_from_file, parse_from_slice }

/*
	Releases an element and all of its descendants.
*/
free_element :: proc(element: ^Element) {
	if element == nil { return }

	for child in element.children {
		/*
			NOTE: Recursive. Could be rewritten so it adds them to a list of pointers to free.
		*/
		free_element(child)
	}
	delete(element.attribs)
	delete(element.children)
	free(element)
}

/*
	Releases all memory held by a parsed document.
*/
destroy :: proc(doc: ^Document) {
	if doc == nil { return }

	free_element(doc.root)
	strings.intern_destroy(&doc.intern)
	delete(doc.prolog)
	delete(doc.comments)
	free(doc)
}

/* Helpers.
*/

/*
	Validates an `Options` value.
	Rejects the conflicting `.Error_on_Unsupported` + `.Ignore_Unsupported` pair,
	and the two flags whose features are not yet implemented.
*/
validate_options :: proc(options: Options) -> (validated: Options, err: Error) {
	validated = options

	if .Error_on_Unsupported in validated.flags && .Ignore_Unsupported in validated.flags {
		return options, .Conflicting_Options
	}
	// Advertised but not yet implemented; refuse rather than silently ignore.
	if .Unbox_CDATA in validated.flags {
		return options, .Unhandled_CDATA_Unboxing
	}
	if .Decode_SGML_Entities in validated.flags {
		return options, .Unhandled_SGML_Entity_Decoding
	}
	return validated, .None
}

/*
	Scans the next token and requires it to be of `kind`.
	Reports an error and returns `.Unexpected_Token` otherwise.
*/
expect :: proc(t: ^Tokenizer, kind: Token_Kind) -> (tok: Token, err: Error) {
	tok = scan(t)
	if tok.kind == kind { return tok, .None }

	error(t, t.offset, "Expected \"%v\", got \"%v\".", kind, tok.kind)
	return tok, .Unexpected_Token
}

/*
	Parses a single `key="value"` attribute.
	Returns the interned attribute and the offset of its key, so the caller
	can point duplicate-attribute errors at the right location.
*/
parse_attribute :: proc(doc: ^Document) -> (attr: Attr, offset: int, err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	key := expect(t, .Ident) or_return
	offset = t.offset - len(key.text)
	_ = expect(t, .Eq) or_return
	value := expect(t, .String) or_return

	attr.key = strings.intern_get(&doc.intern, key.text)
	attr.val = strings.intern_get(&doc.intern, value.text)

	err = .None
	return
}

/*
	Reports `.Duplicate_Attribute` if `attr.key` already occurs in `attribs`.
	`offset` is the position of the offending key, used for the error message.
*/
check_duplicate_attributes :: proc(t: ^Tokenizer, attribs: Attributes, attr: Attr, offset: int) -> (err: Error) {
	for a in attribs {
		if attr.key == a.key {
			error(t, offset, "Duplicate attribute: %v\n", attr.key)
			return .Duplicate_Attribute
		}
	}
	return .None
}

/*
	Parses zero or more attributes into `attribs`, rejecting duplicate keys.
	Stops at the first non-identifier token and skips trailing whitespace.
*/
parse_attributes :: proc(doc: ^Document, attribs: ^Attributes) -> (err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	for peek(t).kind == .Ident {
		attr, offset := parse_attribute(doc) or_return
		check_duplicate_attributes(t, attribs^, attr, offset) or_return
		append(attribs, attr)
	}
	skip_whitespace(t)
	return .None
}

/*
	Parses the attributes of an `<?xml ...?>` prolog into `doc.prolog` and
	records the document encoding.
	Unknown versions and encodings only warn (via the error handler) and the
	encoding then defaults to UTF-8; they do not fail the parse.
*/
parse_prolog :: proc(doc: ^Document) -> (err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	offset := t.offset
	parse_attributes(doc, &doc.prolog) or_return

	for attr in doc.prolog {
		switch attr.key {
		case "version":
			switch attr.val {
			case "1.0", "1.1":
				// Supported versions.
			case:
				error(t, offset, "[parse_prolog] Warning: Unhandled XML version: %v\n", attr.val)
			}

		case "encoding":
			switch strings.to_lower(attr.val, context.temp_allocator) {
			case "utf-8", "utf8":
				doc.encoding = .UTF_8
			case "latin-1", "latin1", "iso-8859-1":
				doc.encoding = .LATIN_1
			case:
				// Unrecognized encoding, assume UTF-8.
				error(t, offset, "[parse_prolog] Warning: Unrecognized encoding: %v\n", attr.val)
			}

		case:
			// Ignored.
		}
	}

	// The prolog must close with `?>`.
	_ = expect(t, .Question) or_return
	_ = expect(t, .Gt) or_return
	return .None
}

/*
	Skips forward until the `>` matching the already-consumed `<` of the
	current tag, tracking nesting depth so embedded `<...>` pairs inside
	(e.g. a DOCTYPE's internal subset) are skipped as well.
*/
skip_element :: proc(t: ^Tokenizer) -> (err: Error) {
	close := 1

	loop: for {
		tok := scan(t)
		#partial switch tok.kind {
		case .EOF:
			error(t, t.offset, "[skip_element] Premature EOF\n")
			return .Premature_EOF

		case .Lt:
			close += 1

		case .Gt:
			close -= 1
			if close == 0 {
				break loop
			}

		case:
		}
	}
	return .None
}

/*
	Parses a `<!DOCTYPE ...>` tag, e.g.:

		<!DOCTYPE greeting SYSTEM "hello.dtd">
		<!DOCTYPE greeting [ <!ELEMENT greeting (#PCDATA)> ]>

	Only the identifier is scanned into `doc.doctype.ident`; the remainder is
	stored verbatim in `doc.doctype.rest`.
	(NOTE(review): the example above replaces a comment garbled in extraction —
	confirm against the original.)
*/
parse_doctype :: proc(doc: ^Document) -> (err: Error) {
	assert(doc != nil)
	context.allocator = doc.allocator
	t := doc.tokenizer

	tok := expect(t, .Ident) or_return
	doc.doctype.ident = strings.intern_get(&doc.intern, tok.text)

	skip_whitespace(t)
	offset := t.offset
	skip_element(t) or_return

	/*
		-1 because the current offset is that of the closing tag,
		so the rest of the DOCTYPE tag ends just before it.
	*/
	doc.doctype.rest = strings.intern_get(&doc.intern, string(t.src[offset : t.offset - 1]))
	return .None
}