Merge pull request #3895 from laytan/fix-optimization-mode-attribute

remove misleading @(optimization_mode) values and make "none" inhibit optimizations
gingerBill · 1 year ago
commit 34c6868e78
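
After this change the attribute accepts two explicit modes, and both now do what they say. A minimal sketch of the new usage (`checksum` and `checksum_debug` are hypothetical procedures, not part of this PR; the LLVM attribute mapping is taken from the src/llvm_backend_proc.cpp hunk at the bottom):

package example

// "favor_size" asks the optimizer to prefer smaller code; the backend
// attaches LLVM's "optsize" attribute to the procedure.
@(optimization_mode="favor_size")
checksum :: proc "contextless" (data: []byte) -> (sum: u32) {
	for b in data {
		sum = sum*31 + u32(b)
	}
	return
}

// "none" now genuinely inhibits optimization: the backend attaches
// "optnone" and "noinline", and #force_inline on such a procedure is an error.
@(optimization_mode="none")
checksum_debug :: proc "contextless" (data: []byte) -> (sum: u32) {
	for b in data {
		sum = sum*31 + u32(b)
	}
	return
}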

+ 1 - 1
base/runtime/print.odin

@@ -262,7 +262,7 @@ print_typeid :: #force_no_inline proc "contextless" (id: typeid) {
 	}
 }
 
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 print_type :: #force_no_inline proc "contextless" (ti: ^Type_Info) {
 	if ti == nil {
 		print_string("nil")

+ 25 - 25
core/compress/common.odin

@@ -186,7 +186,7 @@ input_size_from_stream :: proc(z: ^Context_Stream_Input) -> (res: i64, err: Erro
 
 input_size :: proc{input_size_from_memory, input_size_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_slice_from_memory :: #force_inline proc(z: ^Context_Memory_Input, size: int) -> (res: []u8, err: io.Error) {
 	#no_bounds_check {
 		if len(z.input_data) >= size {
@@ -203,7 +203,7 @@ read_slice_from_memory :: #force_inline proc(z: ^Context_Memory_Input, size: int
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_slice_from_stream :: #force_inline proc(z: ^Context_Stream_Input, size: int) -> (res: []u8, err: io.Error) {
 	// TODO: REMOVE ALL USE OF context.temp_allocator here
 	// there is literally no need for it
@@ -214,13 +214,13 @@ read_slice_from_stream :: #force_inline proc(z: ^Context_Stream_Input, size: int
 
 read_slice :: proc{read_slice_from_memory, read_slice_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_data :: #force_inline proc(z: ^$C, $T: typeid) -> (res: T, err: io.Error) {
 	b := read_slice(z, size_of(T)) or_return
 	return (^T)(&b[0])^, nil
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_u8_from_memory :: #force_inline proc(z: ^Context_Memory_Input) -> (res: u8, err: io.Error) {
 	#no_bounds_check {
 		if len(z.input_data) >= 1 {
@@ -232,7 +232,7 @@ read_u8_from_memory :: #force_inline proc(z: ^Context_Memory_Input) -> (res: u8,
 	return 0, .EOF
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_u8_from_stream :: #force_inline proc(z: ^Context_Stream_Input) -> (res: u8, err: io.Error) {
 	b := read_slice_from_stream(z, 1) or_return
 	return b[0], nil
@@ -242,7 +242,7 @@ read_u8 :: proc{read_u8_from_memory, read_u8_from_stream}
 
 // You would typically only use this at the end of Inflate, to drain bits from the code buffer
 // preferentially.
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_u8_prefer_code_buffer_lsb :: #force_inline proc(z: ^$C) -> (res: u8, err: io.Error) {
 	if z.num_bits >= 8 {
 		res = u8(read_bits_no_refill_lsb(z, 8))
@@ -257,7 +257,7 @@ read_u8_prefer_code_buffer_lsb :: #force_inline proc(z: ^$C) -> (res: u8, err: i
 	return
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_data_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid) -> (res: T, err: io.Error) {
 	size :: size_of(T)
 
@@ -275,7 +275,7 @@ peek_data_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_data_at_offset_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid, #any_int offset: int) -> (res: T, err: io.Error) {
 	size :: size_of(T)
 
@@ -293,7 +293,7 @@ peek_data_at_offset_from_memory :: #force_inline proc(z: ^Context_Memory_Input,
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_data_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid) -> (res: T, err: io.Error) {
 	size :: size_of(T)
 
@@ -317,7 +317,7 @@ peek_data_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid
 	return res, .None
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_data_at_offset_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid, #any_int offset: int) -> (res: T, err: io.Error) {
 	size :: size_of(T)
 
@@ -352,14 +352,14 @@ peek_data :: proc{peek_data_from_memory, peek_data_from_stream, peek_data_at_off
 
 
 // Sliding window read back
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_back_byte :: #force_inline proc(z: ^$C, offset: i64) -> (res: u8, err: io.Error) {
 	// Look back into the sliding window.
 	return z.output.buf[z.bytes_written - offset], .None
 }
 
 // Generalized bit reader LSB
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width := i8(48)) {
 	refill := u64(width)
 	b      := u64(0)
@@ -385,7 +385,7 @@ refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width :=
 }
 
 // Generalized bit reader LSB
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 refill_lsb_from_stream :: proc(z: ^Context_Stream_Input, width := i8(24)) {
 	refill := u64(width)
 
@@ -414,13 +414,13 @@ refill_lsb_from_stream :: proc(z: ^Context_Stream_Input, width := i8(24)) {
 refill_lsb :: proc{refill_lsb_from_memory, refill_lsb_from_stream}
 
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 consume_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) {
 	z.code_buffer >>= width
 	z.num_bits -= u64(width)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 consume_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) {
 	z.code_buffer >>= width
 	z.num_bits -= u64(width)
@@ -428,7 +428,7 @@ consume_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, wid
 
 consume_bits_lsb :: proc{consume_bits_lsb_from_memory, consume_bits_lsb_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
 	if z.num_bits < u64(width) {
 		refill_lsb(z)
@@ -436,7 +436,7 @@ peek_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width:
 	return u32(z.code_buffer &~ (~u64(0) << width))
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
 	if z.num_bits < u64(width) {
 		refill_lsb(z)
@@ -446,13 +446,13 @@ peek_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width:
 
 peek_bits_lsb :: proc{peek_bits_lsb_from_memory, peek_bits_lsb_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
 	assert(z.num_bits >= u64(width))
 	return u32(z.code_buffer &~ (~u64(0) << width))
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 peek_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
 	assert(z.num_bits >= u64(width))
 	return u32(z.code_buffer &~ (~u64(0) << width))
@@ -460,14 +460,14 @@ peek_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Inp
 
 peek_bits_no_refill_lsb :: proc{peek_bits_no_refill_lsb_from_memory, peek_bits_no_refill_lsb_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
 	k := #force_inline peek_bits_lsb(z, width)
 	#force_inline consume_bits_lsb(z, width)
 	return k
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
 	k := peek_bits_lsb(z, width)
 	consume_bits_lsb(z, width)
@@ -476,14 +476,14 @@ read_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width:
 
 read_bits_lsb :: proc{read_bits_lsb_from_memory, read_bits_lsb_from_stream}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
 	k := #force_inline peek_bits_no_refill_lsb(z, width)
 	#force_inline consume_bits_lsb(z, width)
 	return k
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
 	k := peek_bits_no_refill_lsb(z, width)
 	consume_bits_lsb(z, width)
@@ -493,14 +493,14 @@ read_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Inp
 read_bits_no_refill_lsb :: proc{read_bits_no_refill_lsb_from_memory, read_bits_no_refill_lsb_from_stream}
 
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 discard_to_next_byte_lsb_from_memory :: proc(z: ^Context_Memory_Input) {
 	discard := u8(z.num_bits & 7)
 	#force_inline consume_bits_lsb(z, discard)
 }
 
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 discard_to_next_byte_lsb_from_stream :: proc(z: ^Context_Stream_Input) {
 	discard := u8(z.num_bits & 7)
 	consume_bits_lsb(z, discard)

+ 11 - 11
core/compress/zlib/zlib.odin

@@ -120,7 +120,7 @@ Huffman_Table :: struct {
 }
 
 // Implementation starts here
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
 	assert(bits <= 16)
 	// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
@@ -136,7 +136,7 @@ z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
 }
 
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
 	/*
 		That we get here at all means that we didn't pass an expected output size,
@@ -154,7 +154,7 @@ grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
 	TODO: Make these return compress.Error.
 */
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
 	/*
 		Resize if needed.
@@ -173,7 +173,7 @@ write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_ch
 	return .None
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
 	/*
 		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
@@ -201,7 +201,7 @@ repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check
 	return .None
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
 	/*
 		TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
@@ -234,7 +234,7 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T
 	return new(Huffman_Table, allocator), nil
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	sizes:     [HUFFMAN_MAX_BITS+1]int
 	next_code: [HUFFMAN_MAX_BITS+1]int
@@ -293,7 +293,7 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	return nil
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 	code := u16(compress.peek_bits_lsb(z,16))
 
@@ -324,7 +324,7 @@ decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Erro
 	return r, nil
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 	if z.num_bits < 16 {
 		if z.num_bits > 63 {
@@ -344,7 +344,7 @@ decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bo
 	return decode_huffman_slowpath(z, t)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
 	#no_bounds_check for {
 		value, e := decode_huffman(z, z_repeat)
@@ -413,7 +413,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
 	/*
 		ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.
@@ -486,7 +486,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 
 // TODO: Check alignment of reserve/resize.
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
 	context.allocator = allocator
 	expected_output_size := expected_output_size

+ 1 - 1
core/crypto/crypto.odin

@@ -79,4 +79,4 @@ random_generator :: proc() -> runtime.Random_Generator {
 		},
 		data = nil,
 	}
-}
+}

+ 5 - 5
core/encoding/xml/tokenizer.odin

@@ -126,7 +126,7 @@ error :: proc(t: ^Tokenizer, offset: int, msg: string, args: ..any) {
 	t.error_count += 1
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 advance_rune :: proc(t: ^Tokenizer) {
 	#no_bounds_check {
 		/*
@@ -170,7 +170,7 @@ peek_byte :: proc(t: ^Tokenizer, offset := 0) -> byte {
 	return 0
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 skip_whitespace :: proc(t: ^Tokenizer) {
 	for {
 		switch t.ch {
@@ -182,7 +182,7 @@ skip_whitespace :: proc(t: ^Tokenizer) {
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 is_letter :: proc(r: rune) -> bool {
 	if r < utf8.RUNE_SELF {
 		switch r {
@@ -296,7 +296,7 @@ skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
 	return
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close := false, multiline := true) -> (value: string, err: Error) {
 	err = .None
 
@@ -414,4 +414,4 @@ scan :: proc(t: ^Tokenizer, multiline_string := false) -> Token {
 		lit = string(t.src[offset : t.offset])
 	}
 	return Token{kind, lit, pos}
-}
+}

+ 5 - 5
core/flags/internal_assignment.odin

@@ -12,7 +12,7 @@ import "core:reflect"
 
 // Push a positional argument onto a data struct, checking for specified
 // positionals first before adding it to a fallback field.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 push_positional :: #force_no_inline proc (model: ^$T, parser: ^Parser, arg: string) -> (error: Error) {
 	if bit_array.get(&parser.filled_pos, parser.filled_pos.max_index) {
 		// The max index is set, which means we're out of space.
@@ -74,7 +74,7 @@ register_field :: proc(parser: ^Parser, field: reflect.Struct_Field, index: int)
 }
 
 // Set a `-flag` argument, Odin-style.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 set_odin_flag :: proc(model: ^$T, parser: ^Parser, name: string) -> (error: Error) {
 	// We make a special case for help requests.
 	switch name {
@@ -100,7 +100,7 @@ set_odin_flag :: proc(model: ^$T, parser: ^Parser, name: string) -> (error: Erro
 }
 
 // Set a `-flag` argument, UNIX-style.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 set_unix_flag :: proc(model: ^$T, parser: ^Parser, name: string) -> (future_args: int, error: Error) {
 	// We make a special case for help requests.
 	switch name {
@@ -137,7 +137,7 @@ set_unix_flag :: proc(model: ^$T, parser: ^Parser, name: string) -> (future_args
 }
 
 // Set a `-flag:option` argument.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 set_option :: proc(model: ^$T, parser: ^Parser, name, option: string) -> (error: Error) {
 	field, index := get_field_by_name(model, name) or_return
 
@@ -176,7 +176,7 @@ set_option :: proc(model: ^$T, parser: ^Parser, name, option: string) -> (error:
 }
 
 // Set a `-map:key=value` argument.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 set_key_value :: proc(model: ^$T, parser: ^Parser, name, key, value: string) -> (error: Error) {
 	field, index := get_field_by_name(model, name) or_return
 

+ 4 - 4
core/flags/internal_rtti.odin

@@ -13,7 +13,7 @@ import "core:strings"
 @require import "core:time/datetime"
 import "core:unicode/utf8"
 
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 parse_and_set_pointer_by_base_type :: proc(ptr: rawptr, str: string, type_info: ^runtime.Type_Info) -> bool {
 	bounded_int :: proc(value, min, max: i128) -> (result: i128, ok: bool) {
 		return value, min <= value && value <= max
@@ -202,7 +202,7 @@ parse_and_set_pointer_by_base_type :: proc(ptr: rawptr, str: string, type_info:
 // especially with files.
 //
 // We want to provide as informative as an error as we can.
-@(optimization_mode="size", disabled=NO_CORE_NAMED_TYPES)
+@(optimization_mode="favor_size", disabled=NO_CORE_NAMED_TYPES)
 parse_and_set_pointer_by_named_type :: proc(ptr: rawptr, str: string, data_type: typeid, arg_tag: string, out_error: ^Error) {
 	// Core types currently supported:
 	//
@@ -320,7 +320,7 @@ parse_and_set_pointer_by_named_type :: proc(ptr: rawptr, str: string, data_type:
 	}
 }
 
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 set_unbounded_integer_by_type :: proc(ptr: rawptr, value: $T, data_type: typeid) where intrinsics.type_is_integer(T) {
 	switch data_type {
 	case i8:      (^i8)     (ptr)^ = cast(i8)      value
@@ -367,7 +367,7 @@ set_unbounded_integer_by_type :: proc(ptr: rawptr, value: $T, data_type: typeid)
 	}
 }
 
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 parse_and_set_pointer_by_type :: proc(ptr: rawptr, str: string, type_info: ^runtime.Type_Info, arg_tag: string) -> (error: Error) {
 	#partial switch specific_type_info in type_info.variant {
 	case runtime.Type_Info_Named:

+ 2 - 2
core/flags/internal_validation.odin

@@ -11,7 +11,7 @@ package flags
 @require import "core:strings"
 
 // This proc is used to assert that `T` meets the expectations of the library.
-@(optimization_mode="size", disabled=ODIN_DISABLE_ASSERT)
+@(optimization_mode="favor_size", disabled=ODIN_DISABLE_ASSERT)
 validate_structure :: proc(model_type: $T, style: Parsing_Style, loc := #caller_location) {
 	positionals_assigned_so_far: bit_array.Bit_Array
 	defer bit_array.destroy(&positionals_assigned_so_far)
@@ -162,7 +162,7 @@ validate_structure :: proc(model_type: $T, style: Parsing_Style, loc := #caller_
 
 // Validate that all the required arguments are set and that the set arguments
 // are up to the program's expectations.
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 validate_arguments :: proc(model: ^$T, parser: ^Parser) -> Error {
 	check_fields: for field, index in reflect.struct_fields_zipped(T) {
 		was_set := bit_array.get(&parser.fields_set, index)

+ 1 - 1
core/flags/parsing.odin

@@ -32,7 +32,7 @@ Inputs:
 Returns:
 - error: A union of errors; parsing, file open, a help request, or validation.
 */
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 parse :: proc(
 	model: ^$T,
 	args: []string,

+ 1 - 1
core/flags/usage.odin

@@ -17,7 +17,7 @@ Inputs:
 - program: The name of the program, usually the first argument to `os.args`.
 - style: The argument parsing style, required to show flags in the proper style.
 */
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 write_usage :: proc(out: io.Writer, data_type: typeid, program: string = "", style: Parsing_Style = .Odin) {
 	// All flags get their tags parsed so they can be reasoned about later.
 	Flag :: struct {

+ 2 - 2
core/flags/util.odin

@@ -19,7 +19,7 @@ Inputs:
 - allocator: (default: context.allocator)
 - loc: The caller location for debugging purposes (default: #caller_location)
 */
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 parse_or_exit :: proc(
 	model: ^$T,
 	program_args: []string,
@@ -63,7 +63,7 @@ Inputs:
 - error: The error returned from `parse`.
 - style: The argument parsing style, required to show flags in the proper style, when usage is shown.
 */
-@(optimization_mode="size")
+@(optimization_mode="favor_size")
 print_errors :: proc(data_type: typeid, error: Error, program: string, style: Parsing_Style = .Odin) {
 	stderr := os.stream_from_handle(os.stderr)
 	stdout := os.stream_from_handle(os.stdout)

+ 4 - 4
core/hash/crc.odin

@@ -1,6 +1,6 @@
 package hash
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 crc64_ecma_182 :: proc "contextless" (data: []byte, seed := u64(0)) -> (result: u64) #no_bounds_check {
 	result = seed
 	#no_bounds_check for b in data {
@@ -14,7 +14,7 @@ crc64_ecma_182 :: proc "contextless" (data: []byte, seed := u64(0)) -> (result:
 	bit-reversed, with one's complement pre and post processing.
 	Based on Mark Adler's v1.4 implementation in C under the ZLIB license.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 crc64_xz :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_check {
 	data := data
 	result := ~u64le(seed)
@@ -52,7 +52,7 @@ crc64_xz :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_
 /*
 	Generator polynomial: x^64 + x^4 + x^3 + x + 1
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 crc64_iso_3306 :: proc "contextless" (data: []byte, seed := u64(0)) -> u64 #no_bounds_check {
 
 	result := seed
@@ -738,4 +738,4 @@ crc64_iso_3306_inverse :: proc "contextless" (data: []byte, seed := u64(0)) -> u
 	0x9fc0, 0x9e70, 0x9ca0, 0x9d10, 
 	0x9480, 0x9530, 0x97e0, 0x9650, 
 	0x9240, 0x93f0, 0x9120, 0x9090,
-}
+}

+ 1 - 1
core/hash/crc32.odin

@@ -2,7 +2,7 @@ package hash
 
 import "base:intrinsics"
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 crc32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 #no_bounds_check {
 	crc := ~seed
 	buffer := raw_data(data)

+ 11 - 11
core/hash/hash.odin

@@ -3,7 +3,7 @@ package hash
 import "core:mem"
 import "base:intrinsics"
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 adler32 :: proc "contextless" (data: []byte, seed := u32(1)) -> u32 #no_bounds_check {
 
 	ADLER_CONST :: 65521
@@ -46,7 +46,7 @@ adler32 :: proc "contextless" (data: []byte, seed := u32(1)) -> u32 #no_bounds_c
 	return (u32(b) << 16) | u32(a)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 djb2 :: proc "contextless" (data: []byte, seed := u32(5381)) -> u32 {
 	hash: u32 = seed
 	for b in data {
@@ -73,7 +73,7 @@ djbx33a :: proc "contextless" (data: []byte, seed := u32(5381)) -> (result: [16]
 }
 
 // If you have a choice, prefer fnv32a
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fnv32_no_a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
 	h: u32 = seed
 	for b in data {
@@ -86,7 +86,7 @@ fnv32 :: fnv32_no_a // NOTE(bill): Not a fan of these aliases but seems necessar
 fnv64 :: fnv64_no_a // NOTE(bill): Not a fan of these aliases but seems necessary
 
 // If you have a choice, prefer fnv64a
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fnv64_no_a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) -> u64 {
 	h: u64 = seed
 	for b in data {
@@ -94,7 +94,7 @@ fnv64_no_a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325))
 	}
 	return h
 }
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fnv32a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
 	h: u32 = seed
 	for b in data {
@@ -103,7 +103,7 @@ fnv32a :: proc "contextless" (data: []byte, seed := u32(0x811c9dc5)) -> u32 {
 	return h
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fnv64a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) -> u64 {
 	h: u64 = seed
 	for b in data {
@@ -112,7 +112,7 @@ fnv64a :: proc "contextless" (data: []byte, seed := u64(0xcbf29ce484222325)) ->
 	return h
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 jenkins :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
 	hash: u32 = seed
 	for b in data {
@@ -126,7 +126,7 @@ jenkins :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
 	return hash
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 murmur32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
 	c1_32: u32 : 0xcc9e2d51
 	c2_32: u32 : 0x1b873593
@@ -177,7 +177,7 @@ murmur32 :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
 }
 
 // See https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp#L96
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 murmur64a :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
 	m :: 0xc6a4a7935bd1e995
 	r :: 47
@@ -218,7 +218,7 @@ murmur64a :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
 }
 
 // See https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp#L140
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 murmur64b :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
 	m :: 0x5bd1e995
 	r :: 24
@@ -286,7 +286,7 @@ murmur64b :: proc "contextless" (data: []byte, seed := u64(0x9747b28c)) -> u64 {
 	return u64(h1)<<32 | u64(h2)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 sdbm :: proc "contextless" (data: []byte, seed := u32(0)) -> u32 {
 	hash: u32 = seed
 	for b in data {

+ 5 - 5
core/hash/xxhash/common.odin

@@ -67,17 +67,17 @@ when !XXH_DISABLE_PREFETCH {
 }
 
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH_rotl32 :: #force_inline proc(x, r: u32) -> (res: u32) {
 	return ((x << r) | (x >> (32 - r)))
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH_rotl64 :: #force_inline proc(x, r: u64) -> (res: u64) {
 	return ((x << r) | (x >> (64 - r)))
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u32) {
 	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
 		#no_bounds_check b := (^u32le)(&buf[0])^
@@ -89,7 +89,7 @@ XXH32_read32 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned)
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned) -> (res: u64) {
 	if XXH_FORCE_MEMORY_ACCESS == 2 || alignment == .Aligned {
 		#no_bounds_check b := (^u64le)(&buf[0])^
@@ -99,4 +99,4 @@ XXH64_read64 :: #force_inline proc(buf: []u8, alignment := Alignment.Unaligned)
 		mem_copy(&b, raw_data(buf[:]), 8)
 		return u64(b)
 	}
-}
+}

+ 45 - 45
core/hash/xxhash/xxhash_3.odin

@@ -111,13 +111,13 @@ XXH128_canonical :: struct {
 	@param lhs, rhs The 64-bit integers to multiply
 	@return The low 64 bits of the product XOR'd by the high 64 bits.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH_mul_64_to_128_fold_64 :: #force_inline proc(lhs, rhs: xxh_u64) -> (res: xxh_u64) {
 	t := u128(lhs) * u128(rhs)
 	return u64(t & 0xFFFFFFFFFFFFFFFF) ~ u64(t >> 64)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH_xorshift_64 :: #force_inline proc(v: xxh_u64, #any_int shift: uint) -> (res: xxh_u64) {
 	return v ~ (v >> shift)
 }
@@ -125,7 +125,7 @@ XXH_xorshift_64 :: #force_inline proc(v: xxh_u64, #any_int shift: uint) -> (res:
 /*
 	This is a fast avalanche stage, suitable when input bits are already partially mixed
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_avalanche :: #force_inline proc(h64: xxh_u64) -> (res: xxh_u64) {
 	res = XXH_xorshift_64(h64, 37)
 	res *= 0x165667919E3779F9
@@ -137,7 +137,7 @@ XXH3_avalanche :: #force_inline proc(h64: xxh_u64) -> (res: xxh_u64) {
 	This is a stronger avalanche, inspired by Pelle Evensen's rrmxmx
 	preferable when input has not been previously mixed
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_rrmxmx :: #force_inline proc(h64, length: xxh_u64) -> (res: xxh_u64) {
 	/* this mix is inspired by Pelle Evensen's rrmxmx */
 	res = h64
@@ -166,7 +166,7 @@ XXH3_rrmxmx :: #force_inline proc(h64, length: xxh_u64) -> (res: xxh_u64) {
 	fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_1to3_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	/* A doubled version of 1to3_64b with different constants. */
 	length := len(input)
@@ -190,7 +190,7 @@ XXH3_len_1to3_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_4to8_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	length := len(input)
 	seed   := seed
@@ -219,7 +219,7 @@ XXH3_len_4to8_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_9to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	length := len(input)
 
@@ -261,7 +261,7 @@ XXH3_len_9to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u
 /*
 	Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_0to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	length := len(input)
 
@@ -279,7 +279,7 @@ XXH3_len_0to16_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u
 /*
 	A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH128_mix32B :: #force_inline proc(acc: xxh_u128, input_1: []u8, input_2: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	acc128 := XXH128_hash_t{
 		h = acc,
@@ -293,7 +293,7 @@ XXH128_mix32B :: #force_inline proc(acc: xxh_u128, input_1: []u8, input_2: []u8,
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_17to128_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	length := len(input)
 
@@ -323,7 +323,7 @@ XXH3_len_17to128_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh
 	unreachable()
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_129to240_128b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u128) {
 	length := len(input)
 
@@ -379,7 +379,7 @@ XXH3_INIT_ACC :: [XXH_ACC_NB]xxh_u64{
 
 XXH_SECRET_MERGEACCS_START :: 11
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_128b_internal :: #force_inline proc(
 			input: []u8,
 			secret: []u8,
@@ -407,7 +407,7 @@ XXH3_hashLong_128b_internal :: #force_inline proc(
 /*
  * It's important for performance that XXH3_hashLong is not inlined.
  */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_128b_default :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
 	return XXH3_hashLong_128b_internal(input, XXH3_kSecret[:], XXH3_accumulate_512, XXH3_scramble_accumulator)
 }
@@ -415,12 +415,12 @@ XXH3_hashLong_128b_default :: #force_no_inline proc(input: []u8, seed: xxh_u64,
 /*
  * It's important for performance that XXH3_hashLong is not inlined.
  */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_128b_withSecret :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
 	return XXH3_hashLong_128b_internal(input, secret, XXH3_accumulate_512, XXH3_scramble_accumulator)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_128b_withSeed_internal :: #force_inline proc(
 								input: []u8, seed: xxh_u64, secret: []u8,
 								f_acc512: XXH3_accumulate_512_f,
@@ -441,14 +441,14 @@ XXH3_hashLong_128b_withSeed_internal :: #force_inline proc(
 /*
  * It's important for performance that XXH3_hashLong is not inlined.
  */
- @(optimization_mode="speed")
+ @(optimization_mode="favor_size")
 XXH3_hashLong_128b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (res: XXH3_128_hash) {
 	return XXH3_hashLong_128b_withSeed_internal(input, seed, secret, XXH3_accumulate_512, XXH3_scramble_accumulator , XXH3_init_custom_secret)
 }
 
 XXH3_hashLong128_f :: #type proc(input: []u8, seed: xxh_u64, secret: []u8)  -> (res: XXH3_128_hash)
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_128bits_internal :: #force_inline proc(
 	input: []u8, seed: xxh_u64, secret: []u8, f_hl128: XXH3_hashLong128_f) -> (res: XXH3_128_hash) {
 
@@ -474,17 +474,17 @@ XXH3_128bits_internal :: #force_inline proc(
 }
 
 /* ===   Public XXH128 API   === */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_128_default :: proc(input: []u8) -> (hash: XXH3_128_hash) {
 	return XXH3_128bits_internal(input, 0, XXH3_kSecret[:], XXH3_hashLong_128b_withSeed)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_128_with_seed :: proc(input: []u8, seed: xxh_u64) -> (hash: XXH3_128_hash) {
 	return XXH3_128bits_internal(input, seed, XXH3_kSecret[:], XXH3_hashLong_128b_withSeed)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_128_with_secret :: proc(input: []u8, secret: []u8) -> (hash: XXH3_128_hash) {
 	return XXH3_128bits_internal(input, 0, secret, XXH3_hashLong_128b_withSecret)
 }
@@ -519,7 +519,7 @@ XXH3_128 :: proc { XXH3_128_default, XXH3_128_with_seed, XXH3_128_with_secret }
 	The XOR mixing hides individual parts of the secret and increases entropy.
 	This adds an extra layer of strength for custom secrets.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_1to3_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	length := u32(len(input))
 	assert(input != nil)
@@ -542,7 +542,7 @@ XXH3_len_1to3_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_4to8_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	length := u32(len(input))
 	assert(input != nil)
@@ -562,7 +562,7 @@ XXH3_len_4to8_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_9to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	length := u64(len(input))
 	assert(input != nil)
@@ -579,7 +579,7 @@ XXH3_len_9to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_0to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	length := u64(len(input))
 	assert(input != nil)
@@ -621,7 +621,7 @@ XXH3_len_0to16_64b :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u6
 	by this, although it is always a good idea to use a proper seed if you care
 	about strength.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_mix16B :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	input_lo := XXH64_read64(input[0:])
 	input_hi := XXH64_read64(input[8:])
@@ -632,7 +632,7 @@ XXH3_mix16B :: #force_inline proc(input: []u8, secret: []u8, seed: xxh_u64) -> (
 }
 
 /* For mid range keys, XXH3 uses a Mum-hash variant. */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_17to128_64b :: proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
 	length := len(input)
@@ -665,7 +665,7 @@ XXH3_MIDSIZE_MAX         :: 240
 XXH3_MIDSIZE_STARTOFFSET :: 3
 XXH3_MIDSIZE_LASTOFFSET  :: 17
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_len_129to240_64b :: proc(input: []u8, secret: []u8, seed: xxh_u64) -> (res: xxh_u64) {
 	assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
 	length := len(input)
@@ -699,7 +699,7 @@ XXH_SECRET_CONSUME_RATE  :: 8 /* nb of secret bytes consumed at each accumulatio
 XXH_ACC_NB               :: (XXH_STRIPE_LEN / size_of(xxh_u64))
 XXH_SECRET_LASTACC_START :: 7 /* not aligned on 8, last secret is different from acc & scrambler */
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH_writeLE64 :: #force_inline proc(dst: []u8, v64: u64le) {
 	v := v64
 	mem_copy(raw_data(dst), &v, size_of(v64))
@@ -737,7 +737,7 @@ XXH3_scramble_accumulator : XXH3_scramble_accumulator_f = XXH3_scramble_accumula
 XXH3_init_custom_secret   : XXH3_init_custom_secret_f   = XXH3_init_custom_secret_scalar
 
 /* scalar variants - universal */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_accumulate_512_scalar :: #force_inline proc(acc: []xxh_u64, input: []u8, secret: []u8) {
 	xacc    := acc     /* presumed aligned */
 	xinput  := input   /* no alignment restriction */
@@ -754,7 +754,7 @@ XXH3_accumulate_512_scalar :: #force_inline proc(acc: []xxh_u64, input: []u8, se
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_scramble_accumulator_scalar :: #force_inline proc(acc: []xxh_u64, secret: []u8) {
 	xacc    := acc     /* presumed aligned */
 	xsecret := secret  /* no alignment restriction */
@@ -771,7 +771,7 @@ XXH3_scramble_accumulator_scalar :: #force_inline proc(acc: []xxh_u64, secret: [
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_init_custom_secret_scalar :: #force_inline proc(custom_secret: []u8, seed64: xxh_u64) {
 	#assert((XXH_SECRET_DEFAULT_SIZE & 15) == 0)
 
@@ -791,7 +791,7 @@ XXH_PREFETCH_DIST :: 320
  * Loops over XXH3_accumulate_512().
  * Assumption: nbStripes will not overflow the secret size
  */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_accumulate :: #force_inline proc(
 	acc: []xxh_u64, input: []u8, secret: []u8, nbStripes: uint, f_acc512: XXH3_accumulate_512_f) {
 
@@ -804,7 +804,7 @@ XXH3_accumulate :: #force_inline proc(
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_internal_loop :: #force_inline proc(acc: []xxh_u64, input: []u8, secret: []u8,
 	f_acc512: XXH3_accumulate_512_f, f_scramble: XXH3_scramble_accumulator_f) {
 
@@ -833,14 +833,14 @@ XXH3_hashLong_internal_loop :: #force_inline proc(acc: []xxh_u64, input: []u8, s
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_mix2Accs :: #force_inline proc(acc: []xxh_u64, secret: []u8) -> (res: xxh_u64) {
 	return XXH_mul_64_to_128_fold_64(
 		acc[0] ~ XXH64_read64(secret),
 		acc[1] ~ XXH64_read64(secret[8:]))
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_mergeAccs :: #force_inline proc(acc: []xxh_u64, secret: []u8, start: xxh_u64) -> (res: xxh_u64) {
 	result64 := start
 	#no_bounds_check for i := 0; i < 4; i += 1 {
@@ -849,7 +849,7 @@ XXH3_mergeAccs :: #force_inline proc(acc: []xxh_u64, secret: []u8, start: xxh_u6
 	return XXH3_avalanche(result64)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_64b_internal :: #force_inline proc(input: []u8, secret: []u8,
 			f_acc512: XXH3_accumulate_512_f, f_scramble: XXH3_scramble_accumulator_f) -> (hash: xxh_u64) {
 
@@ -868,7 +868,7 @@ XXH3_hashLong_64b_internal :: #force_inline proc(input: []u8, secret: []u8,
 /*
 	It's important for performance that XXH3_hashLong is not inlined.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_64b_withSecret :: #force_no_inline proc(input: []u8, seed64: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
 	return XXH3_hashLong_64b_internal(input, secret, XXH3_accumulate_512, XXH3_scramble_accumulator)
 }
@@ -880,7 +880,7 @@ XXH3_hashLong_64b_withSecret :: #force_no_inline proc(input: []u8, seed64: xxh_u
 	This variant enforces that the compiler can detect that,
 	and uses this opportunity to streamline the generated code for better performance.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_64b_default :: #force_no_inline proc(input: []u8, seed64: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
 	return XXH3_hashLong_64b_internal(input, XXH3_kSecret[:], XXH3_accumulate_512, XXH3_scramble_accumulator)
 }
@@ -896,7 +896,7 @@ XXH3_hashLong_64b_default :: #force_no_inline proc(input: []u8, seed64: xxh_u64,
 	It's important for performance that XXH3_hashLong is not inlined. Not sure
 	why (uop cache maybe?), but the difference is large and easily measurable.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_64b_withSeed_internal :: #force_no_inline proc(
 	input:       []u8,
 	seed:        xxh_u64,
@@ -916,7 +916,7 @@ XXH3_hashLong_64b_withSeed_internal :: #force_no_inline proc(
 /*
 	It's important for performance that XXH3_hashLong is not inlined.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_hashLong_64b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64, secret: []u8) -> (hash: xxh_u64) {
 	return XXH3_hashLong_64b_withSeed_internal(input, seed, XXH3_accumulate_512, XXH3_scramble_accumulator, XXH3_init_custom_secret)
 }
@@ -924,7 +924,7 @@ XXH3_hashLong_64b_withSeed :: #force_no_inline proc(input: []u8, seed: xxh_u64,
 
 XXH3_hashLong64_f :: #type proc(input: []u8, seed: xxh_u64, secret: []u8)  -> (res: xxh_u64)
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_64bits_internal :: proc(input: []u8, seed: xxh_u64, secret: []u8, f_hashLong: XXH3_hashLong64_f) -> (hash: xxh_u64) {
 	assert(len(secret) >= XXH3_SECRET_SIZE_MIN)
 	/*
@@ -944,19 +944,19 @@ XXH3_64bits_internal :: proc(input: []u8, seed: xxh_u64, secret: []u8, f_hashLon
 }
 
 /* ===   Public entry point   === */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_64_default :: proc(input: []u8) -> (hash: xxh_u64) {
 	return XXH3_64bits_internal(input, 0, XXH3_kSecret[:], XXH3_hashLong_64b_default)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_64_with_seed :: proc(input: []u8, seed: xxh_u64) -> (hash: xxh_u64) {
 	return XXH3_64bits_internal(input, seed, XXH3_kSecret[:], XXH3_hashLong_64b_withSeed)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH3_64_with_secret :: proc(input, secret: []u8) -> (hash: xxh_u64) {
 	return XXH3_64bits_internal(input, 0, secret, XXH3_hashLong_64b_withSecret)
 }
 
-XXH3_64 :: proc { XXH3_64_default, XXH3_64_with_seed, XXH3_64_with_secret }
+XXH3_64 :: proc { XXH3_64_default, XXH3_64_with_seed, XXH3_64_with_secret }

+ 5 - 5
core/hash/xxhash/xxhash_32.odin

@@ -40,7 +40,7 @@ XXH_PRIME32_3 :: 0xC2B2AE3D     /*!< 0b11000010101100101010111000111101 */
 XXH_PRIME32_4 :: 0x27D4EB2F     /*!< 0b00100111110101001110101100101111 */
 XXH_PRIME32_5 :: 0x165667B1     /*!< 0b00010110010101100110011110110001 */
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH32_round :: #force_inline proc(seed, input: XXH32_hash) -> (res: XXH32_hash) {
 	seed := seed
 
@@ -53,7 +53,7 @@ XXH32_round :: #force_inline proc(seed, input: XXH32_hash) -> (res: XXH32_hash)
 /*
 	Mix all bits
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH32_avalanche :: #force_inline proc(h32: u32) -> (res: u32) {
 	h32 := h32
 
@@ -65,7 +65,7 @@ XXH32_avalanche :: #force_inline proc(h32: u32) -> (res: u32) {
 	return h32
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH32_finalize :: #force_inline proc(h32: u32, buf: []u8, alignment: Alignment) -> (res: u32) {
 	process_1 :: #force_inline proc(h32: u32, buf: []u8) -> (h32_res: u32, buf_res: []u8) {
 		#no_bounds_check b := u32(buf[0])
@@ -143,7 +143,7 @@ XXH32_finalize :: #force_inline proc(h32: u32, buf: []u8, alignment: Alignment)
 	unreachable()
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH32_endian_align :: #force_inline proc(input: []u8, seed := XXH32_DEFAULT_SEED, alignment: Alignment) -> (res: XXH32_hash) {
 	buf := input
 	length := len(input)
@@ -318,4 +318,4 @@ XXH32_canonical_from_hash :: proc(hash: XXH32_hash) -> (canonical: XXH32_canonic
 XXH32_hash_from_canonical :: proc(canonical: ^XXH32_canonical) -> (hash: XXH32_hash) {
 	h := (^u32be)(&canonical.digest)^
 	return XXH32_hash(h)
-}
+}

+ 8 - 8
core/hash/xxhash/xxhash_64.odin

@@ -40,7 +40,7 @@ XXH_PRIME64_3 :: 0x165667B19E3779F9 /*!< 0b0001011001010110011001111011000110011
 XXH_PRIME64_4 :: 0x85EBCA77C2B2AE63 /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
 XXH_PRIME64_5 :: 0x27D4EB2F165667C5 /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_round :: proc(acc, input: xxh_u64) -> (res: xxh_u64) {
 	acc := acc
 
@@ -50,14 +50,14 @@ XXH64_round :: proc(acc, input: xxh_u64) -> (res: xxh_u64) {
 	return acc
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_mergeRound :: proc(acc, val: xxh_u64) -> (res: xxh_u64) {
 	res  = acc ~ XXH64_round(0, val)
 	res  = res * XXH_PRIME64_1 + XXH_PRIME64_4
 	return res
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_avalanche :: proc(h64: xxh_u64) -> (res: xxh_u64) {
 	res = h64
 	res ~= res >> 33
@@ -68,7 +68,7 @@ XXH64_avalanche :: proc(h64: xxh_u64) -> (res: xxh_u64) {
 	return res
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_finalize :: proc(h64: xxh_u64, buf: []u8, alignment: Alignment) -> (res: xxh_u64) {
 	buf := buf
 	length := len(buf) & 31
@@ -100,7 +100,7 @@ XXH64_finalize :: proc(h64: xxh_u64, buf: []u8, alignment: Alignment) -> (res: x
 	return XXH64_avalanche(res)
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_endian_align :: proc(input: []u8, seed := XXH64_DEFAULT_SEED, alignment := Alignment.Unaligned) -> (res: xxh_u64) {
 	buf    := input
 	length := len(buf)
@@ -191,7 +191,7 @@ XXH64_reset_state :: proc(state_ptr: ^XXH64_state, seed := XXH64_DEFAULT_SEED) -
 	return .None
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_update :: proc(state: ^XXH64_state, input: []u8) -> (err: Error) {
 	buf    := input
 	length := len(buf)
@@ -245,7 +245,7 @@ XXH64_update :: proc(state: ^XXH64_state, input: []u8) -> (err: Error) {
 	return .None
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 XXH64_digest :: proc(state: ^XXH64_state) -> (res: XXH64_hash) {
 	if state.total_len >= 32 {
 		v1 := state.v1
@@ -292,4 +292,4 @@ XXH64_canonical_from_hash :: proc(hash: XXH64_hash) -> (canonical: XXH64_canonic
 XXH64_hash_from_canonical :: proc(canonical: ^XXH64_canonical) -> (hash: XXH64_hash) {
 	h := (^u64be)(&canonical.digest)^
 	return XXH64_hash(h)
-}
+}

+ 2 - 2
core/image/bmp/bmp.odin

@@ -122,7 +122,7 @@ load_from_bytes :: proc(data: []byte, options := Options{}, allocator := context
 	return img, err
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
 	context.allocator = allocator
 	options := options
@@ -743,4 +743,4 @@ destroy :: proc(img: ^Image) {
 @(init, private)
 _register :: proc() {
 	image.register(.BMP, load_from_bytes, destroy)
-}
+}

+ 3 - 3
core/image/common.odin

@@ -1376,7 +1376,7 @@ expand_grayscale :: proc(img: ^Image, allocator := context.allocator) -> (ok: bo
 /*
 	Helper functions to read and write data from/to a Context, etc.
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_data :: proc(z: $C, $T: typeid) -> (res: T, err: compress.General_Error) {
 	if r, e := compress.read_data(z, T); e != .None {
 		return {}, .Stream_Too_Short
@@ -1385,7 +1385,7 @@ read_data :: proc(z: $C, $T: typeid) -> (res: T, err: compress.General_Error) {
 	}
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 read_u8 :: proc(z: $C) -> (res: u8, err: compress.General_Error) {
 	if r, e := compress.read_u8(z); e != .None {
 		return {}, .Stream_Too_Short
@@ -1405,4 +1405,4 @@ write_bytes :: proc(buf: ^bytes.Buffer, data: []u8) -> (err: compress.General_Er
 		return .Resize_Failed
 	}
 	return nil
-}
+}

+ 2 - 2
core/image/qoi/qoi.odin

@@ -170,7 +170,7 @@ load_from_bytes :: proc(data: []byte, options := Options{}, allocator := context
 	return img, err
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
 	context.allocator = allocator
 	options := options
@@ -373,4 +373,4 @@ qoi_hash :: #force_inline proc(pixel: RGBA_Pixel) -> (index: u8) {
 @(init, private)
 _register :: proc() {
 	image.register(.QOI, load_from_bytes, destroy)
-}
+}

+ 6 - 6
core/math/noise/internal.odin

@@ -688,7 +688,7 @@ _internal_noise_4d_unskewed_base :: proc(seed: i64, coord: Vec4) -> (value: f32)
 /*
 	Utility functions
 */
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 grad_2d :: proc(seed: i64, svp: [2]i64, delta: [2]f32) -> (value: f32) {
 	hash := seed ~ svp.x ~ svp.y
 	hash *= HASH_MULTIPLIER
@@ -698,7 +698,7 @@ grad_2d :: proc(seed: i64, svp: [2]i64, delta: [2]f32) -> (value: f32) {
 	return GRADIENTS_2D[gi] * delta.x + GRADIENTS_2D[gi | 1] * delta.y
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 grad_3d :: proc(seed: i64, rvp: [3]i64, delta: [3]f32) -> (value: f32) {
 	hash := (seed ~ rvp.x) ~ (rvp.y ~ rvp.z)
 	hash *= HASH_MULTIPLIER
@@ -708,7 +708,7 @@ grad_3d :: proc(seed: i64, rvp: [3]i64, delta: [3]f32) -> (value: f32) {
 	return GRADIENTS_3D[gi] * delta.x + GRADIENTS_3D[gi | 1] * delta.y + GRADIENTS_3D[gi | 2] * delta.z
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 grad_4d :: proc(seed: i64, svp: [4]i64, delta: [4]f32) -> (value: f32) {
 	hash := seed ~ (svp.x ~ svp.y) ~ (svp.z ~ svp.w)
 	hash *= HASH_MULTIPLIER
@@ -720,13 +720,13 @@ grad_4d :: proc(seed: i64, svp: [4]i64, delta: [4]f32) -> (value: f32) {
 
 grad :: proc {grad_2d, grad_3d, grad_4d}
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fast_floor :: proc(x: f64) -> (floored: i64) {
 	xi := i64(x)
 	return x < f64(xi) ? xi - 1 : xi
 }
 
-@(optimization_mode="speed")
+@(optimization_mode="favor_size")
 fast_round :: proc(x: f64) -> (rounded: i64) {
 	return x < 0 ? i64(x - 0.5) : i64(x + 0.5)
-}
+}

+ 3 - 3
core/mem/tlsf/tlsf_internal.odin

@@ -284,7 +284,7 @@ adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err:
 // TLSF utility functions. In most cases these are direct translations of
 // the documentation in the research paper.
 
-@(optimization_mode="speed", require_results)
+@(optimization_mode="favor_size", require_results)
 mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	if size < SMALL_BLOCK_SIZE {
 		// Store small blocks in first list.
@@ -297,7 +297,7 @@ mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
 	return
 }
 
-@(optimization_mode="speed", require_results)
+@(optimization_mode="favor_size", require_results)
 mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 	rounded = size
 	if size >= SMALL_BLOCK_SIZE {
@@ -308,7 +308,7 @@ mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
 }
 
 // This version rounds up to the next block size (for allocations)
-@(optimization_mode="speed", require_results)
+@(optimization_mode="favor_size", require_results)
 mapping_search :: proc(size: uint) -> (fl, sl: i32) {
 	return mapping_insert(mapping_round(size))
 }

+ 0 - 1
src/check_decl.cpp

@@ -1030,7 +1030,6 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
 
 	switch (e->Procedure.optimization_mode) {
 	case ProcedureOptimizationMode_None:
-	case ProcedureOptimizationMode_Minimal:
 		if (pl->inlining == ProcInlining_inline) {
 			error(e->token, "#force_inline cannot be used in conjunction with the attribute 'optimization_mode' with neither \"none\" nor \"minimal\"");
 		}
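
With `ProcedureOptimizationMode_Minimal` gone, only `"none"` reaches this check. A sketch of the combination it rejects (`g` is a hypothetical procedure; the message text is quoted from the context line above):

@(optimization_mode="none")
g :: #force_inline proc() {}
// Error: #force_inline cannot be used in conjunction with the attribute
// 'optimization_mode' with neither "none" nor "minimal"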

+ 6 - 6
src/checker.cpp

@@ -3544,19 +3544,19 @@ gb_internal DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
 			String mode = ev.value_string;
 			if (mode == "none") {
 				ac->optimization_mode = ProcedureOptimizationMode_None;
+			} else if (mode == "favor_size") {
+				ac->optimization_mode = ProcedureOptimizationMode_FavorSize;
 			} else if (mode == "minimal") {
-				ac->optimization_mode = ProcedureOptimizationMode_Minimal;
+				error(elem, "Invalid optimization_mode 'minimal' for '%.*s', mode has been removed due to confusion, but 'none' has the same behaviour", LIT(name));
 			} else if (mode == "size") {
-				ac->optimization_mode = ProcedureOptimizationMode_Size;
+				error(elem, "Invalid optimization_mode 'size' for '%.*s', mode has been removed due to confusion, but 'favor_size' has the same behaviour", LIT(name));
 			} else if (mode == "speed") {
-				ac->optimization_mode = ProcedureOptimizationMode_Speed;
+				error(elem, "Invalid optimization_mode 'speed' for '%.*s', mode has been removed due to confusion, but 'favor_size' has the same behaviour", LIT(name));
 			} else {
 				ERROR_BLOCK();
 				error(elem, "Invalid optimization_mode for '%.*s'. Valid modes:", LIT(name));
 				error_line("\tnone\n");
-				error_line("\tminimal\n");
-				error_line("\tsize\n");
-				error_line("\tspeed\n");
+				error_line("\tfavor_size\n");
 			}
 		} else {
 			error(elem, "Expected a string for '%.*s'", LIT(name));
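
The removed spellings are still recognized so the checker can point at the replacement instead of falling through to the generic error. A sketch of what a user now sees (`f` is a hypothetical procedure; the message is quoted from the hunk above):

@(optimization_mode="speed")   // previously valid, now rejected
f :: proc() {}
// Error: Invalid optimization_mode 'speed' for 'optimization_mode',
// mode has been removed due to confusion, but 'favor_size' has the same behaviour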

+ 1 - 3
src/entity.cpp

@@ -133,9 +133,7 @@ enum EntityConstantFlags : u32 {
 enum ProcedureOptimizationMode : u8 {
 	ProcedureOptimizationMode_Default,
 	ProcedureOptimizationMode_None,
-	ProcedureOptimizationMode_Minimal,
-	ProcedureOptimizationMode_Size,
-	ProcedureOptimizationMode_Speed,
+	ProcedureOptimizationMode_FavorSize,
 };
 
 

+ 5 - 12
src/llvm_backend.cpp

@@ -1486,10 +1486,6 @@ gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_per_module) {
 		lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default],                false, build_context.optimization_level);
 		lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default_without_memcpy], true,  build_context.optimization_level);
 		lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_none],      -1);
-		lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_minimal],    0);
-		lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_size],       1);
-		lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_speed],      2);
-		lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_aggressive], 3);
 
 		for (i32 i = 0; i < lbFunctionPassManager_COUNT; i++) {
 			LLVMFinalizeFunctionPassManager(m->function_pass_managers[i]);
@@ -1513,15 +1509,12 @@ gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_per_module) {
 				if (p->entity && p->entity->kind == Entity_Procedure) {
 					switch (p->entity->Procedure.optimization_mode) {
 					case ProcedureOptimizationMode_None:
-					case ProcedureOptimizationMode_Minimal:
-						pass_manager_kind = lbFunctionPassManager_minimal;
+						pass_manager_kind = lbFunctionPassManager_none;
+						GB_ASSERT(lb_proc_has_attribute(p->module, p->value, "optnone"));
+						GB_ASSERT(lb_proc_has_attribute(p->module, p->value, "noinline"));
 						break;
-					case ProcedureOptimizationMode_Size:
-						pass_manager_kind = lbFunctionPassManager_size;
-						lb_add_attribute_to_proc(p->module, p->value, "optsize");
-						break;
-					case ProcedureOptimizationMode_Speed:
-						pass_manager_kind = lbFunctionPassManager_speed;
+					case ProcedureOptimizationMode_FavorSize:
+						GB_ASSERT(lb_proc_has_attribute(p->module, p->value, "optsize"));
 						break;
 					}
 				}

+ 0 - 5
src/llvm_backend.hpp

@@ -134,11 +134,6 @@ enum lbFunctionPassManagerKind {
 	lbFunctionPassManager_default,
 	lbFunctionPassManager_default_without_memcpy,
 	lbFunctionPassManager_none,
-	lbFunctionPassManager_minimal,
-	lbFunctionPassManager_size,
-	lbFunctionPassManager_speed,
-	lbFunctionPassManager_aggressive,
-
 	lbFunctionPassManager_COUNT
 };
 

+ 6 - 0
src/llvm_backend_general.cpp

@@ -2523,6 +2523,12 @@ gb_internal void lb_add_proc_attribute_at_index(lbProcedure *p, isize index, cha
 gb_internal void lb_add_attribute_to_proc(lbModule *m, LLVMValueRef proc_value, char const *name, u64 value=0) {
 	LLVMAddAttributeAtIndex(proc_value, LLVMAttributeIndex_FunctionIndex, lb_create_enum_attribute(m->ctx, name, value));
 }
+
+gb_internal bool lb_proc_has_attribute(lbModule *m, LLVMValueRef proc_value, char const *name) {
+	LLVMAttributeRef ref = LLVMGetEnumAttributeAtIndex(proc_value, LLVMAttributeIndex_FunctionIndex, LLVMGetEnumAttributeKindForName(name, gb_strlen(name)));
+	return ref != nullptr;
+}
+
 gb_internal void lb_add_attribute_to_proc_with_string(lbModule *m, LLVMValueRef proc_value, String const &name, String const &value) {
 	LLVMAttributeRef attr = lb_create_string_attribute(m->ctx, name, value);
 	LLVMAddAttributeAtIndex(proc_value, LLVMAttributeIndex_FunctionIndex, attr);

+ 1 - 7
src/llvm_backend_proc.cpp

@@ -163,16 +163,10 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
 
 	switch (entity->Procedure.optimization_mode) {
 	case ProcedureOptimizationMode_None:
-		break;
-	case ProcedureOptimizationMode_Minimal:
 		lb_add_attribute_to_proc(m, p->value, "optnone");
 		lb_add_attribute_to_proc(m, p->value, "noinline");
 		break;
-	case ProcedureOptimizationMode_Size:
-		lb_add_attribute_to_proc(m, p->value, "optsize");
-		break;
-	case ProcedureOptimizationMode_Speed:
-		// TODO(bill): handle this correctly
+	case ProcedureOptimizationMode_FavorSize:
 		lb_add_attribute_to_proc(m, p->value, "optsize");
 		break;
 	}