
Change implicit semicolon rules for record types within procedure bodies; Update `package odin/*`

gingerBill committed 5 years ago
commit 939459b635
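
A minimal sketch of the headline rule change, assuming the post-commit compiler (the package, `Vec2`, `compute`, and `Buffer` names are illustrative; the in-body `Buffer` mirrors the core library fixes further down):

```odin
package example

// At file scope nothing changes: the closing brace of a record type
// still acts as an implicit statement terminator.
Vec2 :: struct {
	x, y: f32,
}

compute :: proc() -> int {
	// Inside a procedure body, a record type declaration now requires an
	// explicit semicolon after its closing brace (previously implicit).
	Buffer :: struct {
		data: []byte,
		n:    int,
	};

	b: Buffer;
	return b.n;
}
```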

+ 3 - 4
core/encoding/json/marshal.odin

@@ -284,11 +284,10 @@ marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
 			t := runtime.type_info_base(ti);
 			switch info in t.variant {
 			case runtime.Type_Info_Integer:
-				using runtime.Type_Info_Endianness;
 				switch info.endianness {
-				case Platform: return false;
-				case Little:   return ODIN_ENDIAN != "little";
-				case Big:      return ODIN_ENDIAN != "big";
+				case .Platform: return false;
+				case .Little:   return ODIN_ENDIAN != "little";
+				case .Big:      return ODIN_ENDIAN != "big";
 				}
 			}
 			return false;
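
This hunk also replaces the `using runtime.Type_Info_Endianness;` statement with implicit selector expressions, where the enum type is inferred from the switch condition. A standalone sketch of the pattern (the local `Endianness` type and procedure name are illustrative, not the runtime's):

```odin
package example

Endianness :: enum { Platform, Little, Big }

is_foreign_endian :: proc(e: Endianness) -> bool {
	switch e {
	// .Platform, .Little, and .Big resolve against Endianness because the
	// switch condition already fixes the enum type; no `using` import is needed.
	case .Platform: return false;
	case .Little:   return ODIN_ENDIAN != "little";
	case .Big:      return ODIN_ENDIAN != "big";
	}
	return false;
}
```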

+ 94 - 94
core/odin/ast/ast.odin

@@ -1,6 +1,6 @@
 package odin_ast
 
-import "core:odin/token"
+import "core:odin/tokenizer"
 
 Proc_Tag :: enum {
 	Bounds_Check,
@@ -33,12 +33,12 @@ Node_State_Flags :: distinct bit_set[Node_State_Flag];
 
 
 Comment_Group :: struct {
-	list: []token.Token,
+	list: []tokenizer.Token,
 }
 
 Node :: struct {
-	pos:         token.Pos,
-	end:         token.Pos,
+	pos:         tokenizer.Pos,
+	end:         tokenizer.Pos,
 	derived:     any,
 	state_flags: Node_State_Flags,
 }
@@ -67,29 +67,29 @@ Ident :: struct {
 
 Implicit :: struct {
 	using node: Expr,
-	tok: token.Token,
+	tok: tokenizer.Token,
 }
 
 
 Undef :: struct {
 	using node: Expr,
-	tok:  token.Kind,
+	tok:  tokenizer.Token_Kind,
 }
 
 Basic_Lit :: struct {
 	using node: Expr,
-	tok: token.Token,
+	tok: tokenizer.Token,
 }
 
 Basic_Directive :: struct {
 	using node: Expr,
-	tok:  token.Token,
+	tok:  tokenizer.Token,
 	name: string,
 }
 
 Ellipsis :: struct {
 	using node: Expr,
-	tok:  token.Kind,
+	tok:  tokenizer.Token_Kind,
 	expr: ^Expr,
 }
 
@@ -99,44 +99,44 @@ Proc_Lit :: struct {
 	body: ^Stmt,
 	tags: Proc_Tags,
 	inlining: Proc_Inlining,
-	where_token: token.Token,
+	where_token: tokenizer.Token,
 	where_clauses: []^Expr,
 }
 
 Comp_Lit :: struct {
 	using node: Expr,
 	type: ^Expr,
-	open: token.Pos,
+	open: tokenizer.Pos,
 	elems: []^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 
 Tag_Expr :: struct {
 	using node: Expr,
-	op:      token.Token,
+	op:      tokenizer.Token,
 	name:    string,
 	expr:    ^Expr,
 }
 
 Unary_Expr :: struct {
 	using node: Expr,
-	op:   token.Token,
+	op:   tokenizer.Token,
 	expr: ^Expr,
 }
 
 Binary_Expr :: struct {
 	using node: Expr,
 	left:  ^Expr,
-	op:    token.Token,
+	op:    tokenizer.Token,
 	right: ^Expr,
 }
 
 Paren_Expr :: struct {
 	using node: Expr,
-	open:  token.Pos,
+	open:  tokenizer.Pos,
 	expr:  ^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 Selector_Expr :: struct {
@@ -153,74 +153,74 @@ Implicit_Selector_Expr :: struct {
 Index_Expr :: struct {
 	using node: Expr,
 	expr:  ^Expr,
-	open:  token.Pos,
+	open:  tokenizer.Pos,
 	index: ^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 Deref_Expr :: struct {
 	using node: Expr,
 	expr: ^Expr,
-	op:   token.Token,
+	op:   tokenizer.Token,
 }
 
 Slice_Expr :: struct {
 	using node: Expr,
 	expr:     ^Expr,
-	open:     token.Pos,
+	open:     tokenizer.Pos,
 	low:      ^Expr,
-	interval: token.Token,
+	interval: tokenizer.Token,
 	high:     ^Expr,
-	close:    token.Pos,
+	close:    tokenizer.Pos,
 }
 
 Call_Expr :: struct {
 	using node: Expr,
 	inlining: Proc_Inlining,
 	expr:     ^Expr,
-	open:     token.Pos,
+	open:     tokenizer.Pos,
 	args:     []^Expr,
-	ellipsis: token.Token,
-	close:    token.Pos,
+	ellipsis: tokenizer.Token,
+	close:    tokenizer.Pos,
 }
 
 Field_Value :: struct {
 	using node: Expr,
 	field: ^Expr,
-	sep:   token.Pos,
+	sep:   tokenizer.Pos,
 	value: ^Expr,
 }
 
 Ternary_Expr :: struct {
 	using node: Expr,
 	cond: ^Expr,
-	op1:  token.Token,
+	op1:  tokenizer.Token,
 	x:    ^Expr,
-	op2:  token.Token,
+	op2:  tokenizer.Token,
 	y:    ^Expr,
 }
 
 Type_Assertion :: struct {
 	using node: Expr,
 	expr:  ^Expr,
-	dot:   token.Pos,
-	open:  token.Pos,
+	dot:   tokenizer.Pos,
+	open:  tokenizer.Pos,
 	type:  ^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 Type_Cast :: struct {
 	using node: Expr,
-	tok:   token.Token,
-	open:  token.Pos,
+	tok:   tokenizer.Token,
+	open:  tokenizer.Pos,
 	type:  ^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 	expr:  ^Expr,
 }
 
 Auto_Cast :: struct {
 	using node: Expr,
-	op:   token.Token,
+	op:   tokenizer.Token,
 	expr: ^Expr,
 }
 
@@ -235,7 +235,7 @@ Bad_Stmt :: struct {
 
 Empty_Stmt :: struct {
 	using node: Stmt,
-	semicolon: token.Pos, // Position of the following ';'
+	semicolon: tokenizer.Pos, // Position of the following ';'
 }
 
 Expr_Stmt :: struct {
@@ -245,7 +245,7 @@ Expr_Stmt :: struct {
 
 Tag_Stmt :: struct {
 	using node: Stmt,
-	op:      token.Token,
+	op:      tokenizer.Token,
 	name:    string,
 	stmt:    ^Stmt,
 }
@@ -253,7 +253,7 @@ Tag_Stmt :: struct {
 Assign_Stmt :: struct {
 	using node: Stmt,
 	lhs:    []^Expr,
-	op:     token.Token,
+	op:     tokenizer.Token,
 	rhs:    []^Expr,
 }
 
@@ -261,15 +261,15 @@ Assign_Stmt :: struct {
 Block_Stmt :: struct {
 	using node: Stmt,
 	label: ^Expr,
-	open:  token.Pos,
+	open:  tokenizer.Pos,
 	stmts: []^Stmt,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 If_Stmt :: struct {
 	using node: Stmt,
 	label:     ^Expr,
-	if_pos:    token.Pos,
+	if_pos:    tokenizer.Pos,
 	init:      ^Stmt,
 	cond:      ^Expr,
 	body:      ^Stmt,
@@ -278,7 +278,7 @@ If_Stmt :: struct {
 
 When_Stmt :: struct {
 	using node: Stmt,
-	when_pos:  token.Pos,
+	when_pos:  tokenizer.Pos,
 	cond:      ^Expr,
 	body:      ^Stmt,
 	else_stmt: ^Stmt,
@@ -297,7 +297,7 @@ Defer_Stmt :: struct {
 For_Stmt :: struct {
 	using node: Stmt,
 	label:     ^Expr,
-	for_pos:   token.Pos,
+	for_pos:   tokenizer.Pos,
 	init:      ^Stmt,
 	cond:      ^Expr,
 	post:      ^Stmt,
@@ -307,10 +307,10 @@ For_Stmt :: struct {
 Range_Stmt :: struct {
 	using node: Stmt,
 	label:     ^Expr,
-	for_pos:   token.Pos,
+	for_pos:   tokenizer.Pos,
 	val0:      ^Expr,
 	val1:      ^Expr,
-	in_pos:    token.Pos,
+	in_pos:    tokenizer.Pos,
 	expr:      ^Expr,
 	body:      ^Stmt,
 }
@@ -318,16 +318,16 @@ Range_Stmt :: struct {
 
 Case_Clause :: struct {
 	using node: Stmt,
-	case_pos:   token.Pos,
+	case_pos:   tokenizer.Pos,
 	list:       []^Expr,
-	terminator: token.Token,
+	terminator: tokenizer.Token,
 	body:       []^Stmt,
 }
 
 Switch_Stmt :: struct {
 	using node: Stmt,
 	label:      ^Expr,
-	switch_pos: token.Pos,
+	switch_pos: tokenizer.Pos,
 	init:       ^Stmt,
 	cond:       ^Expr,
 	body:       ^Stmt,
@@ -337,7 +337,7 @@ Switch_Stmt :: struct {
 Type_Switch_Stmt :: struct {
 	using node: Stmt,
 	label:      ^Expr,
-	switch_pos: token.Pos,
+	switch_pos: tokenizer.Pos,
 	tag:        ^Stmt,
 	expr:       ^Expr,
 	body:       ^Stmt,
@@ -346,7 +346,7 @@ Type_Switch_Stmt :: struct {
 
 Branch_Stmt :: struct {
 	using node: Stmt,
-	tok:   token.Token,
+	tok:   tokenizer.Token,
 	label: ^Ident,
 }
 
@@ -377,7 +377,7 @@ Value_Decl :: struct {
 Package_Decl :: struct {
 	using node: Decl,
 	docs:    ^Comment_Group,
-	token:   token.Token,
+	token:   tokenizer.Token,
 	name:    string,
 	comment: ^Comment_Group,
 }
@@ -386,9 +386,9 @@ Import_Decl :: struct {
 	using node: Decl,
 	docs:       ^Comment_Group,
 	is_using:    bool,
-	import_tok:  token.Token,
-	name:        token.Token,
-	relpath:     token.Token,
+	import_tok:  tokenizer.Token,
+	name:        tokenizer.Token,
+	relpath:     tokenizer.Token,
 	fullpath:    string,
 	comment:     ^Comment_Group,
 }
@@ -397,7 +397,7 @@ Foreign_Block_Decl :: struct {
 	using node: Decl,
 	docs:            ^Comment_Group,
 	attributes:      [dynamic]^Attribute, // dynamic as parsing will add to them lazily
-	tok:             token.Token,
+	tok:             tokenizer.Token,
 	foreign_library: ^Expr,
 	body:            ^Stmt,
 }
@@ -405,8 +405,8 @@ Foreign_Block_Decl :: struct {
 Foreign_Import_Decl :: struct {
 	using node: Decl,
 	docs:            ^Comment_Group,
-	foreign_tok:     token.Token,
-	import_tok:      token.Token,
+	foreign_tok:     tokenizer.Token,
+	import_tok:      tokenizer.Token,
 	name:            ^Ident,
 	collection_name: string,
 	fullpaths:       []string,
@@ -467,18 +467,18 @@ Field_Flags_Signature_Results :: Field_Flags_Signature;
 
 Proc_Group :: struct {
 	using node: Expr,
-	tok:   token.Token,
-	open:  token.Pos,
+	tok:   tokenizer.Token,
+	open:  tokenizer.Pos,
 	args:  []^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 Attribute :: struct {
 	using node: Node,
-	tok:   token.Kind,
-	open:  token.Pos,
+	tok:   tokenizer.Token_Kind,
+	open:  tokenizer.Pos,
 	elems: []^Expr,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 Field :: struct {
@@ -487,57 +487,57 @@ Field :: struct {
 	names:         []^Expr, // Could be polymorphic
 	type:          ^Expr,
 	default_value: ^Expr,
-	tag:           token.Token,
+	tag:           tokenizer.Token,
 	flags:         Field_Flags,
 	comment:       ^Comment_Group,
 }
 
 Field_List :: struct {
 	using node: Node,
-	open:  token.Pos,
+	open:  tokenizer.Pos,
 	list:  []^Field,
-	close: token.Pos,
+	close: tokenizer.Pos,
 }
 
 
 // Types
 Typeid_Type :: struct {
 	using node: Expr,
-	tok:            token.Kind,
+	tok:            tokenizer.Token_Kind,
 	specialization: ^Expr,
 }
 
 Helper_Type :: struct {
 	using node: Expr,
-	tok:  token.Kind,
+	tok:  tokenizer.Token_Kind,
 	type: ^Expr,
 }
 
 Distinct_Type :: struct {
 	using node: Expr,
-	tok:  token.Kind,
+	tok:  tokenizer.Token_Kind,
 	type: ^Expr,
 }
 
 Opaque_Type :: struct {
 	using node: Expr,
-	tok:  token.Kind,
+	tok:  tokenizer.Token_Kind,
 	type: ^Expr,
 }
 
 Poly_Type :: struct {
 	using node: Expr,
-	dollar:         token.Pos,
+	dollar:         tokenizer.Pos,
 	type:           ^Ident,
 	specialization: ^Expr,
 }
 
 Proc_Type :: struct {
 	using node: Expr,
-	tok:       token.Token,
+	tok:       tokenizer.Token,
 	calling_convention: Proc_Calling_Convention,
 	params:    ^Field_List,
-	arrow:     token.Pos,
+	arrow:     tokenizer.Pos,
 	results:   ^Field_List,
 	tags:      Proc_Tags,
 	generic:   bool,
@@ -546,34 +546,34 @@ Proc_Type :: struct {
 
 Pointer_Type :: struct {
 	using node: Expr,
-	pointer: token.Pos,
+	pointer: tokenizer.Pos,
 	elem:    ^Expr,
 }
 
 Array_Type :: struct {
 	using node: Expr,
-	open:  token.Pos,
+	open:  tokenizer.Pos,
 	len:   ^Expr, // Ellipsis node for [?]T array types, nil for slice types
-	close: token.Pos,
+	close: tokenizer.Pos,
 	elem:  ^Expr,
 }
 
 Dynamic_Array_Type :: struct {
 	using node: Expr,
-	open:        token.Pos,
-	dynamic_pos: token.Pos,
-	close:       token.Pos,
+	open:        tokenizer.Pos,
+	dynamic_pos: tokenizer.Pos,
+	close:       tokenizer.Pos,
 	elem:        ^Expr,
 }
 
 Struct_Type :: struct {
 	using node: Expr,
-	tok_pos:       token.Pos,
+	tok_pos:       tokenizer.Pos,
 	poly_params:   ^Field_List,
 	align:         ^Expr,
 	fields:        ^Field_List,
 	name_count:    int,
-	where_token:   token.Token,
+	where_token:   tokenizer.Token,
 	where_clauses: []^Expr,
 	is_packed:     bool,
 	is_raw_union:  bool,
@@ -581,46 +581,46 @@ Struct_Type :: struct {
 
 Union_Type :: struct {
 	using node: Expr,
-	tok_pos:     token.Pos,
+	tok_pos:     tokenizer.Pos,
 	poly_params: ^Field_List,
 	align:       ^Expr,
 	variants:    []^Expr,
-	where_token: token.Token,
+	where_token: tokenizer.Token,
 	where_clauses: []^Expr,
 }
 
 Enum_Type :: struct {
 	using node: Expr,
-	tok_pos:  token.Pos,
+	tok_pos:  tokenizer.Pos,
 	base_type: ^Expr,
-	open:      token.Pos,
+	open:      tokenizer.Pos,
 	fields:    []^Expr,
-	close:     token.Pos,
+	close:     tokenizer.Pos,
 
 	is_using:  bool,
 }
 
 Bit_Field_Type :: struct {
 	using node: Expr,
-	tok_pos: token.Pos,
+	tok_pos: tokenizer.Pos,
 	align:   ^Expr,
-	open:    token.Pos,
+	open:    tokenizer.Pos,
 	fields:  []^Field_Value, // Field_Value with ':' rather than '='
-	close:   token.Pos,
+	close:   tokenizer.Pos,
 }
 
 Bit_Set_Type :: struct {
 	using node: Expr,
-	tok_pos:    token.Pos,
-	open:       token.Pos,
+	tok_pos:    tokenizer.Pos,
+	open:       tokenizer.Pos,
 	elem:       ^Expr,
 	underlying: ^Expr,
-	close:      token.Pos,
+	close:      tokenizer.Pos,
 }
 
 Map_Type :: struct {
 	using node: Expr,
-	tok_pos: token.Pos,
+	tok_pos: tokenizer.Pos,
 	key:     ^Expr,
 	value:   ^Expr,
 }
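
The `core:odin/token` package is folded into `core:odin/tokenizer`, so every `token.Token`, `token.Pos`, and `token.Kind` reference becomes `tokenizer.Token`, `tokenizer.Pos`, and `tokenizer.Token_Kind`. A minimal sketch of how downstream code now refers to the moved types (the `Span` struct is illustrative):

```odin
package example

import "core:odin/tokenizer"

// Types formerly in core:odin/token are now reached through the tokenizer
// package; Kind is renamed Token_Kind.
Span :: struct {
	start: tokenizer.Pos,
	end:   tokenizer.Pos,
	tok:   tokenizer.Token,
	kind:  tokenizer.Token_Kind,
}
```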

+ 2 - 2
core/odin/ast/clone.odin

@@ -2,9 +2,9 @@ package odin_ast
 
 import "core:mem"
 import "core:fmt"
-import "core:odin/token"
+import "core:odin/tokenizer"
 
-new :: proc($T: typeid, pos, end: token.Pos) -> ^T {
+new :: proc($T: typeid, pos, end: tokenizer.Pos) -> ^T {
 	n := mem.new(T);
 	n.pos = pos;
 	n.end = end;

+ 2 - 2
core/odin/ast/file.odin

@@ -1,6 +1,6 @@
 package odin_ast
 
-import "core:odin/token"
+import "core:odin/tokenizer"
 
 Package_Kind :: enum {
 	Normal,
@@ -26,7 +26,7 @@ File :: struct {
 	src:      []byte,
 
 	pkg_decl:  ^Package_Decl,
-	pkg_token: token.Token,
+	pkg_token: tokenizer.Token,
 	pkg_name:  string,
 
 	decls:   [dynamic]^Stmt,

File diff suppressed because it is too large
+ 191 - 191
core/odin/parser/parser.odin


+ 0 - 333
core/odin/token/token.odin

@@ -1,333 +0,0 @@
-package odin_token
-
-import "core:strings"
-
-Token :: struct {
-	kind: Kind,
-	text: string,
-	pos:  Pos,
-}
-
-Pos :: struct {
-	file:   string,
-	offset: int, // starting at 0
-	line:   int, // starting at 1
-	column: int, // starting at 1
-}
-
-pos_compare :: proc(lhs, rhs: Pos) -> int {
-	if lhs.offset != rhs.offset {
-		return (lhs.offset < rhs.offset) ? -1 : +1;
-	}
-	if lhs.line != rhs.line {
-		return (lhs.line < rhs.line) ? -1 : +1;
-	}
-	if lhs.column != rhs.column {
-		return (lhs.column < rhs.column) ? -1 : +1;
-	}
-	return strings.compare(lhs.file, rhs.file);
-}
-
-using Kind :: enum u32 {
-	Invalid,
-	EOF,
-	Comment,
-
-	B_Literal_Begin,
-		Ident,
-		Integer,
-		Float,
-		Imag,
-		Rune,
-		String,
-	B_Literal_End,
-
-	B_Operator_Begin,
-		Eq,
-		Not,
-		Hash,
-		At,
-		Dollar,
-		Pointer,
-		Question,
-		Add,
-		Sub,
-		Mul,
-		Quo,
-		Mod,
-		Mod_Mod,
-		And,
-		Or,
-		Xor,
-		And_Not,
-		Shl,
-		Shr,
-
-		Cmp_And,
-		Cmp_Or,
-
-	B_Assign_Op_Begin,
-		Add_Eq,
-		Sub_Eq,
-		Mul_Eq,
-		Quo_Eq,
-		Mod_Eq,
-		Mod_Mod_Eq,
-		And_Eq,
-		Or_Eq,
-		Xor_Eq,
-		And_Not_Eq,
-		Shl_Eq,
-		Shr_Eq,
-		Cmp_And_Eq,
-		Cmp_Or_Eq,
-	B_Assign_Op_End,
-
-		Arrow_Right,
-		Arrow_Left,
-		Double_Arrow_Right,
-		Undef,
-
-	B_Comparison_Begin,
-		Cmp_Eq,
-		Not_Eq,
-		Lt,
-		Gt,
-		Lt_Eq,
-		Gt_Eq,
-	B_Comparison_End,
-
-		Open_Paren,
-		Close_Paren,
-		Open_Bracket,
-		Close_Bracket,
-		Open_Brace,
-		Close_Brace,
-		Colon,
-		Semicolon,
-		Period,
-		Comma,
-		Ellipsis,
-		Range_Half,
-		Back_Slash,
-	B_Operator_End,
-
-	B_Keyword_Begin,
-		Import,
-		Foreign,
-		Package,
-		Typeid,
-		When,
-		Where,
-		If,
-		Else,
-		For,
-		Switch,
-		In,
-		Notin,
-		Do,
-		Case,
-		Break,
-		Continue,
-		Fallthrough,
-		Defer,
-		Return,
-		Proc,
-		Macro,
-		Struct,
-		Union,
-		Enum,
-		Bit_Field,
-		Bit_Set,
-		Map,
-		Dynamic,
-		Auto_Cast,
-		Cast,
-		Transmute,
-		Distinct,
-		Opaque,
-		Using,
-		Inline,
-		No_Inline,
-		Context,
-		Size_Of,
-		Align_Of,
-		Offset_Of,
-		Type_Of,
-		Const,
-	B_Keyword_End,
-
-	COUNT,
-
-	B_Custom_Keyword_Begin = COUNT+1,
-	// ... Custom keywords
-};
-
-tokens := [Kind.COUNT]string {
-	"Invalid",
-	"EOF",
-	"Comment",
-
-	"",
-	"identifier",
-	"integer",
-	"float",
-	"imaginary",
-	"rune",
-	"string",
-	"",
-
-	"",
-	"=",
-	"!",
-	"#",
-	"@",
-	"$",
-	"^",
-	"?",
-	"+",
-	"-",
-	"*",
-	"/",
-	"%",
-	"%%",
-	"&",
-	"|",
-	"~",
-	"&~",
-	"<<",
-	">>",
-
-	"&&",
-	"||",
-
-	"",
-	"+=",
-	"-=",
-	"*=",
-	"/=",
-	"%=",
-	"%%=",
-	"&=",
-	"|=",
-	"~=",
-	"&~=",
-	"<<=",
-	">>=",
-	"&&=",
-	"||=",
-	"",
-
-	"->",
-	"<-",
-	"=>",
-	"---",
-
-	"",
-	"==",
-	"!=",
-	"<",
-	">",
-	"<=",
-	">=",
-	"",
-
-	"(",
-	")",
-	"[",
-	"]",
-	"{",
-	"}",
-	":",
-	";",
-	".",
-	",",
-	"..",
-	"..<",
-	"\\",
-	"",
-
-	"",
-	"import",
-	"foreign",
-	"package",
-	"typeid",
-	"when",
-	"where",
-	"if",
-	"else",
-	"for",
-	"switch",
-	"in",
-	"notin",
-	"do",
-	"case",
-	"break",
-	"continue",
-	"fallthrough",
-	"defer",
-	"return",
-	"proc",
-	"macro",
-	"struct",
-	"union",
-	"enum",
-	"bit_field",
-	"bit_set",
-	"map",
-	"dynamic",
-	"auto_cast",
-	"cast",
-	"transmute",
-	"distinct",
-	"opaque",
-	"using",
-	"inline",
-	"no_inline",
-	"context",
-	"size_of",
-	"align_of",
-	"offset_of",
-	"type_of",
-	"const",
-	"",
-};
-
-custom_keyword_tokens: []string;
-
-to_string :: proc(kind: Kind) -> string {
-	if Invalid <= kind && kind < COUNT {
-		return tokens[kind];
-	}
-	if B_Custom_Keyword_Begin < kind {
-		n := int(u16(kind)-u16(B_Custom_Keyword_Begin));
-		if n < len(custom_keyword_tokens) {
-			return custom_keyword_tokens[n];
-		}
-	}
-
-	return "Invalid";
-}
-
-is_literal  :: proc(kind: Kind) -> bool { return B_Literal_Begin  < kind && kind < B_Literal_End;  }
-is_operator :: proc(kind: Kind) -> bool {
-	switch kind {
-	case B_Operator_Begin..B_Operator_End:
-		return true;
-	case In, Notin:
-		return true;
-	}
-	return false;
-}
-is_assignment_operator :: proc(kind: Kind) -> bool {
-	return B_Assign_Op_Begin < kind && kind < B_Assign_Op_End || kind == Eq;
-}
-is_keyword :: proc(kind: Kind) -> bool {
-	switch {
-	case B_Keyword_Begin < kind && kind < B_Keyword_End:
-		return true;
-	case B_Custom_Keyword_Begin < kind:
-		return true;
-	}
-	return false;
-}

+ 75 - 76
core/odin/tokenizer/tokenizer.odin

@@ -1,10 +1,9 @@
 package odin_tokenizer
 
 import "core:fmt"
-import "core:odin/token"
 import "core:unicode/utf8"
 
-Error_Handler :: #type proc(pos: token.Pos, fmt: string, args: ..any);
+Error_Handler :: #type proc(pos: Pos, fmt: string, args: ..any);
 
 Tokenizer :: struct {
 	// Immutable data
@@ -41,11 +40,11 @@ init :: proc(t: ^Tokenizer, src: []byte, path: string, err: Error_Handler = defa
 }
 
 @(private)
-offset_to_pos :: proc(t: ^Tokenizer, offset: int) -> token.Pos {
+offset_to_pos :: proc(t: ^Tokenizer, offset: int) -> Pos {
 	line := t.line_count;
 	column := offset - t.line_offset + 1;
 
-	return token.Pos {
+	return Pos {
 		file = t.path,
 		offset = offset,
 		line = line,
@@ -53,7 +52,7 @@ offset_to_pos :: proc(t: ^Tokenizer, offset: int) -> token.Pos {
 	};
 }
 
-default_error_handler :: proc(pos: token.Pos, msg: string, args: ..any) {
+default_error_handler :: proc(pos: Pos, msg: string, args: ..any) {
 	fmt.eprintf("%s(%d:%d) ", pos.file, pos.line, pos.column);
 	fmt.eprintf(msg, ..args);
 	fmt.eprintf("\n");
@@ -322,15 +321,15 @@ scan_rune :: proc(t: ^Tokenizer) -> string {
 	return string(t.src[offset : t.offset]);
 }
 
-scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (token.Kind, string) {
+scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (Token_Kind, string) {
 	scan_mantissa :: proc(t: ^Tokenizer, base: int) {
 		for digit_val(t.ch) < base || t.ch == '_' {
 			advance_rune(t);
 		}
 	}
-	scan_exponent :: proc(t: ^Tokenizer, kind: ^token.Kind) {
+	scan_exponent :: proc(t: ^Tokenizer, kind: ^Token_Kind) {
 		if t.ch == 'e' || t.ch == 'E' {
-			kind^ = token.Float;
+			kind^ = .Float;
 			advance_rune(t);
 			if t.ch == '-' || t.ch == '+' {
 				advance_rune(t);
@@ -345,16 +344,16 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (token.Kind, str
 		// NOTE(bill): This needs to be here for sanity's sake
 		switch t.ch {
 		case 'i', 'j', 'k':
-			kind^ = token.Imag;
+			kind^ = .Imag;
 			advance_rune(t);
 		}
 	}
-	scan_fraction :: proc(t: ^Tokenizer, kind: ^token.Kind) -> (early_exit: bool) {
+	scan_fraction :: proc(t: ^Tokenizer, kind: ^Token_Kind) -> (early_exit: bool) {
 		if t.ch == '.' && peek_byte(t) == '.' {
 			return true;
 		}
 		if t.ch == '.' {
-			kind^ = token.Float;
+			kind^ = .Float;
 			advance_rune(t);
 			scan_mantissa(t, 10);
 		}
@@ -363,22 +362,22 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (token.Kind, str
 
 
 	offset := t.offset;
-	kind := token.Integer;
+	kind := Token_Kind.Integer;
 	seen_point := seen_decimal_point;
 
 	if seen_point {
 		offset -= 1;
-		kind = token.Float;
+		kind = .Float;
 		scan_mantissa(t, 10);
 		scan_exponent(t, &kind);
 	} else {
 		if t.ch == '0' {
-			int_base :: inline proc(t: ^Tokenizer, kind: ^token.Kind, base: int, msg: string) {
+			int_base :: inline proc(t: ^Tokenizer, kind: ^Token_Kind, base: int, msg: string) {
 				prev := t.offset;
 				advance_rune(t);
 				scan_mantissa(t, base);
 				if t.offset - prev <= 1 {
-					kind^ = token.Invalid;
+					kind^ = .Invalid;
 					error(t, t.offset, msg);
 				}
 			}
@@ -395,7 +394,7 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (token.Kind, str
 				advance_rune(t);
 				scan_mantissa(t, 16);
 				if t.offset - prev <= 1 {
-					kind = token.Invalid;
+					kind = .Invalid;
 					error(t, t.offset, "illegal hexadecimal floating-point number");
 				} else {
 					sub := t.src[prev+1 : t.offset];
@@ -440,15 +439,15 @@ scan_number :: proc(t: ^Tokenizer, seen_decimal_point: bool) -> (token.Kind, str
 }
 
 
-scan :: proc(t: ^Tokenizer) -> token.Token {
-	switch2 :: proc(t: ^Tokenizer, tok0, tok1: token.Kind) -> token.Kind {
+scan :: proc(t: ^Tokenizer) -> Token {
+	switch2 :: proc(t: ^Tokenizer, tok0, tok1: Token_Kind) -> Token_Kind {
 		if t.ch == '=' {
 			advance_rune(t);
 			return tok1;
 		}
 		return tok0;
 	}
-	switch3 :: proc(t: ^Tokenizer, tok0, tok1: token.Kind, ch2: rune, tok2: token.Kind) -> token.Kind {
+	switch3 :: proc(t: ^Tokenizer, tok0, tok1: Token_Kind, ch2: rune, tok2: Token_Kind) -> Token_Kind {
 		if t.ch == '=' {
 			advance_rune(t);
 			return tok1;
@@ -459,7 +458,7 @@ scan :: proc(t: ^Tokenizer) -> token.Token {
 		}
 		return tok0;
 	}
-	switch4 :: proc(t: ^Tokenizer, tok0, tok1: token.Kind, ch2: rune, tok2, tok3: token.Kind) -> token.Kind {
+	switch4 :: proc(t: ^Tokenizer, tok0, tok1: Token_Kind, ch2: rune, tok2, tok3: Token_Kind) -> Token_Kind {
 		if t.ch == '=' {
 			advance_rune(t);
 			return tok1;
@@ -480,25 +479,25 @@ scan :: proc(t: ^Tokenizer) -> token.Token {
 
 	offset := t.offset;
 
-	kind: token.Kind;
+	kind: Token_Kind;
 	lit:  string;
 	pos := offset_to_pos(t, offset);
 
 	switch ch := t.ch; true {
 	case is_letter(ch):
 		lit = scan_identifier(t);
-		kind = token.Ident;
+		kind = .Ident;
 		check_keyword: if len(lit) > 1 {
 			// TODO(bill): Maybe have a hash table lookup rather than this linear search
-			for i in token.B_Keyword_Begin .. token.B_Keyword_End {
-				if lit == token.tokens[i] {
-					kind = token.Kind(i);
+			for i in Token_Kind.B_Keyword_Begin .. Token_Kind.B_Keyword_End {
+				if lit == tokens[i] {
+					kind = Token_Kind(i);
 					break check_keyword;
 				}
 			}
-			for keyword, i in token.custom_keyword_tokens {
+			for keyword, i in custom_keyword_tokens {
 				if lit == keyword {
-					kind = token.Kind(i+1)+token.B_Custom_Keyword_Begin;
+					kind = Token_Kind(i+1) + .B_Custom_Keyword_Begin;
 					break check_keyword;
 				}
 			}
@@ -509,115 +508,115 @@ scan :: proc(t: ^Tokenizer) -> token.Token {
 		advance_rune(t);
 		switch ch {
 		case -1:
-			kind = token.EOF;
+			kind = .EOF;
 		case '"':
-			kind = token.String;
+			kind = .String;
 			lit = scan_string(t);
 		case '\'':
-			kind = token.Rune;
+			kind = .Rune;
 			lit = scan_rune(t);
 		case '`':
-			kind = token.String;
+			kind = .String;
 			lit = scan_raw_string(t);
 		case '=':
 			if t.ch == '>' {
 				advance_rune(t);
-				kind = token.Double_Arrow_Right;
+				kind = .Double_Arrow_Right;
 			} else {
-				kind = switch2(t, token.Eq, token.Cmp_Eq);
+				kind = switch2(t, .Eq, .Cmp_Eq);
 			}
-		case '!': kind = switch2(t, token.Not, token.Not_Eq);
+		case '!': kind = switch2(t, .Not, .Not_Eq);
 		case '#':
-			kind = token.Hash;
+			kind = .Hash;
 			if t.ch == '!' {
-				kind = token.Comment;
+				kind = .Comment;
 				lit = scan_comment(t);
 			}
-		case '?': kind = token.Question;
-		case '@': kind = token.At;
-		case '$': kind = token.Dollar;
-		case '^': kind = token.Pointer;
-		case '+': kind = switch2(t, token.Add, token.Add_Eq);
+		case '?': kind = .Question;
+		case '@': kind = .At;
+		case '$': kind = .Dollar;
+		case '^': kind = .Pointer;
+		case '+': kind = switch2(t, .Add, .Add_Eq);
 		case '-':
 			if t.ch == '>' {
 				advance_rune(t);
-				kind = token.Arrow_Right;
+				kind = .Arrow_Right;
 			} else if t.ch == '-' && peek_byte(t) == '-' {
 				advance_rune(t);
 				advance_rune(t);
-				kind = token.Undef;
+				kind = .Undef;
 			} else {
-				kind = switch2(t, token.Sub, token.Sub_Eq);
+				kind = switch2(t, .Sub, .Sub_Eq);
 			}
-		case '*': kind = switch2(t, token.Mul, token.Mul_Eq);
+		case '*': kind = switch2(t, .Mul, .Mul_Eq);
 		case '/':
 			if t.ch == '/' || t.ch == '*' {
-				kind = token.Comment;
+				kind = .Comment;
 				lit = scan_comment(t);
 			} else {
-				kind = switch2(t, token.Quo, token.Quo_Eq);
+				kind = switch2(t, .Quo, .Quo_Eq);
 			}
-		case '%': kind = switch4(t, token.Mod, token.Mod_Eq, '%', token.Mod_Mod, token.Mod_Mod_Eq);
+		case '%': kind = switch4(t, .Mod, .Mod_Eq, '%', .Mod_Mod, .Mod_Mod_Eq);
 		case '&':
 			if t.ch == '~' {
 				advance_rune(t);
-				kind = switch2(t, token.And_Not, token.And_Not_Eq);
+				kind = switch2(t, .And_Not, .And_Not_Eq);
 			} else {
-				kind = switch3(t, token.And, token.And_Eq, '&', token.Cmp_And);
+				kind = switch3(t, .And, .And_Eq, '&', .Cmp_And);
 			}
-		case '|': kind = switch3(t, token.Or, token.Or_Eq, '|', token.Cmp_Or);
-		case '~': kind = token.Xor;
+		case '|': kind = switch3(t, .Or, .Or_Eq, '|', .Cmp_Or);
+		case '~': kind = .Xor;
 		case '<':
 			if t.ch == '-' {
 				advance_rune(t);
-				kind = token.Arrow_Left;
+				kind = .Arrow_Left;
 			} else {
-				kind = switch4(t, token.Lt, token.Lt_Eq, '<', token.Shl, token.Shl_Eq);
+				kind = switch4(t, .Lt, .Lt_Eq, '<', .Shl, .Shl_Eq);
 			}
-		case '>': kind = switch4(t, token.Gt, token.Gt_Eq, '>', token.Shr,token.Shr_Eq);
+		case '>': kind = switch4(t, .Gt, .Gt_Eq, '>', .Shr,.Shr_Eq);
 
-		case '≠': kind = token.Not_Eq;
-		case '≤': kind = token.Lt_Eq;
-		case '≥': kind = token.Gt_Eq;
-		case '∈': kind = token.In;
-		case '∉': kind = token.Notin;
+		case '≠': kind = .Not_Eq;
+		case '≤': kind = .Lt_Eq;
+		case '≥': kind = .Gt_Eq;
+		case '∈': kind = .In;
+		case '∉': kind = .Notin;
 
 		case '.':
 			if '0' <= t.ch && t.ch <= '9' {
 				kind, lit = scan_number(t, true);
 			} else {
-				kind = token.Period;
+				kind = .Period;
 				if t.ch == '.' {
 					advance_rune(t);
-					kind = token.Ellipsis;
+					kind = .Ellipsis;
 					if t.ch == '<' {
 						advance_rune(t);
-						kind = token.Range_Half;
+						kind = .Range_Half;
 					}
 				}
 			}
-		case ':': kind = token.Colon;
-		case ',': kind = token.Comma;
-		case ';': kind = token.Semicolon;
-		case '(': kind = token.Open_Paren;
-		case ')': kind = token.Close_Paren;
-		case '[': kind = token.Open_Bracket;
-		case ']': kind = token.Close_Bracket;
-		case '{': kind = token.Open_Brace;
-		case '}': kind = token.Close_Brace;
-
-		case '\\': kind = token.Back_Slash;
+		case ':': kind = .Colon;
+		case ',': kind = .Comma;
+		case ';': kind = .Semicolon;
+		case '(': kind = .Open_Paren;
+		case ')': kind = .Close_Paren;
+		case '[': kind = .Open_Bracket;
+		case ']': kind = .Close_Bracket;
+		case '{': kind = .Open_Brace;
+		case '}': kind = .Close_Brace;
+
+		case '\\': kind = .Back_Slash;
 
 		case:
 			if ch != utf8.RUNE_BOM {
 				error(t, t.offset, "illegal character '%r': %d", ch, ch);
 			}
-			kind = token.Invalid;
+			kind = .Invalid;
 		}
 	}
 
 	if lit == "" {
 		lit = string(t.src[offset : t.offset]);
 	}
-	return token.Token{kind, lit, pos};
+	return Token{kind, lit, pos};
 }
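
With the merge, scanning no longer round-trips through a separate token package. A hedged usage sketch based on the `init` and `scan` signatures visible in this diff; the `main` procedure, source string, file name, and the `transmute` to `[]byte` are illustrative:

```odin
package example

import "core:fmt"
import "core:odin/tokenizer"

main :: proc() {
	src := "x := 1 + 2;";

	t: tokenizer.Tokenizer;
	// init(^Tokenizer, []byte, string, err: Error_Handler = default_error_handler)
	tokenizer.init(&t, transmute([]byte)src, "example.odin");

	for {
		tok := tokenizer.scan(&t);
		if tok.kind == tokenizer.Token_Kind.EOF {
			break;
		}
		fmt.println(tok.kind, tok.text);
	}
}
```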

+ 1 - 1
core/runtime/core.odin

@@ -814,7 +814,7 @@ __get_map_header :: proc "contextless" (m: ^$T/map[$K]$V) -> Map_Header {
 		key:   Map_Key,
 		next:  int,
 		value: V,
-	}
+	};
 
 	_, is_string := type_info_base(type_info_of(K)).variant.(Type_Info_String);
 	header.is_key_string = is_string;

+ 1 - 1
core/strconv/generic_float.odin

@@ -105,7 +105,7 @@ format_digits :: proc(buf: []byte, shortest: bool, neg: bool, digs: Decimal_Slic
 	Buffer :: struct {
 		b: []byte,
 		n: int,
-	}
+	};
 
 	to_bytes :: proc(b: Buffer) -> []byte do return b.b[:b.n];
 	add_bytes :: proc(buf: ^Buffer, bytes: ..byte) {

+ 2 - 1
src/parser.cpp

@@ -1344,7 +1344,8 @@ bool is_semicolon_optional_for_node(AstFile *f, Ast *s) {
 	case Ast_UnionType:
 	case Ast_EnumType:
 	case Ast_BitFieldType:
-		return true;
+		// Require semicolon within a procedure body
+		return f->curr_proc == false;
 	case Ast_ProcLit:
 		return true;
 

Some files were not shown because too many files changed in this diff