@@ -25,13 +25,20 @@ vmValue vm_make_value_ptr(void *ptr) {
 	return v;
 }
 
+vmValue vm_make_value_int(i64 i) {
+	vmValue v = {};
+	v.val_int = i;
+	return v;
+}
+
+
 
 struct vmFrame {
 	VirtualMachine *  vm;
 	vmFrame *         caller;
 	ssaProcedure *    curr_proc;
 	ssaBlock *        curr_block;
-	isize             instr_index;
+	isize             instr_index; // For the current block
 
 	Map<vmValue>      values; // Key: ssaValue *
 	gbTempArenaMemory temp_arena_memory;
@@ -46,11 +53,14 @@ struct VirtualMachine {
 	gbAllocator    stack_allocator;
 	gbAllocator    heap_allocator;
 	Array<vmFrame> frame_stack;
-	Map<vmValue>   globals; // Key: ssaValue *
+	Map<vmValue>   globals;             // Key: ssaValue *
+	Map<vmValue>   const_compound_lits; // Key: ssaValue *
 	vmValue        exit_value;
 };
 
-void vm_exec_instr(VirtualMachine *vm, ssaValue *value);
+void    vm_exec_instr   (VirtualMachine *vm, ssaValue *value);
+vmValue vm_operand_value(VirtualMachine *vm, ssaValue *value);
+void    vm_store        (VirtualMachine *vm, void *dst, vmValue val, Type *type);
 
 vmFrame *vm_back_frame(VirtualMachine *vm) {
 	if (vm->frame_stack.count > 0) {
@@ -59,6 +69,15 @@ vmFrame *vm_back_frame(VirtualMachine *vm) {
 	return NULL;
 }
 
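+// Layout queries, forwarded to the checker's sizing routines using the
+// module's target sizes and the VM's heap allocator.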
+i64 vm_type_size_of(VirtualMachine *vm, Type *type) {
+	return type_size_of(vm->module->sizes, vm->heap_allocator, type);
+}
+i64 vm_type_align_of(VirtualMachine *vm, Type *type) {
+	return type_align_of(vm->module->sizes, vm->heap_allocator, type);
+}
+i64 vm_type_offset_of(VirtualMachine *vm, Type *type, i64 index) {
+	return type_offset_of(vm->module->sizes, vm->heap_allocator, type, index);
+}
 
 
 void vm_init(VirtualMachine *vm, ssaModule *module) {
@@ -69,10 +88,31 @@ void vm_init(VirtualMachine *vm, ssaModule *module) {
 	vm->heap_allocator = heap_allocator();
 	array_init(&vm->frame_stack, vm->heap_allocator);
 	map_init(&vm->globals, vm->heap_allocator);
+	map_init(&vm->const_compound_lits, vm->heap_allocator);
+
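+	// Allocate backing memory for every global up front; constant initialisers
+	// are converted via vm_operand_value and copied in with vm_store.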
+	for_array(i, vm->module->values.entries) {
+		ssaValue *v = vm->module->values.entries[i].value;
+		switch (v->kind) {
+		case ssaValue_Global: {
+			Type *t = ssa_type(v);
+			i64 size  = vm_type_size_of(vm, t);
+			i64 align = vm_type_align_of(vm, t);
+			void *mem = gb_alloc_align(vm->heap_allocator, size, align);
+			vmValue init = vm_make_value_ptr(mem);
+			if (v->Global.value != NULL && v->Global.value->kind == ssaValue_Constant) {
+				vmValue *address = cast(vmValue *)init.val_ptr;
+				vm_store(vm, address, vm_operand_value(vm, v->Global.value), type_deref(t));
+			}
+			map_set(&vm->globals, hash_pointer(v), init);
+		} break;
+		}
+	}
+
 }
 void vm_destroy(VirtualMachine *vm) {
 	array_free(&vm->frame_stack);
 	map_destroy(&vm->globals);
+	map_destroy(&vm->const_compound_lits);
 	gb_arena_free(&vm->stack_arena);
 }
 
@@ -80,18 +120,11 @@ void vm_destroy(VirtualMachine *vm) {
 
 
-i64 vm_type_size_of(VirtualMachine *vm, Type *type) {
-	return type_size_of(vm->module->sizes, vm->heap_allocator, type);
-}
-i64 vm_type_align_of(VirtualMachine *vm, Type *type) {
-	return type_align_of(vm->module->sizes, vm->heap_allocator, type);
-}
-i64 vm_type_offset_of(VirtualMachine *vm, Type *type, i64 offset) {
-	return type_offset_of(vm->module->sizes, vm->heap_allocator, type, offset);
-}
 
 void vm_set_value(vmFrame *f, ssaValue *v, vmValue val) {
-	map_set(&f->values, hash_pointer(v), val);
+	if (v != NULL) {
+		map_set(&f->values, hash_pointer(v), val);
+	}
 }
 
 
@@ -125,8 +158,12 @@ void vm_pop_frame(VirtualMachine *vm) {
 }
 
 vmValue vm_call_procedure(VirtualMachine *vm, ssaProcedure *proc, Array<vmValue> values) {
-	GB_ASSERT_MSG(proc->params.count == values.count,
-	              "Incorrect number of arguments passed into procedure call!");
+	Type *type = base_type(proc->type);
+	GB_ASSERT_MSG(type->Proc.param_count == values.count,
+	              "Incorrect number of arguments passed into procedure call!\n"
+	              "%.*s -> %td vs %td",
+	              LIT(proc->name),
+	              type->Proc.param_count, values.count);
 
 	vmValue result = {};
@@ -147,51 +184,150 @@ vmValue vm_call_procedure(VirtualMachine *vm, ssaProcedure *proc, Array<vmValue>
 		vm_exec_instr(vm, curr_instr);
 	}
 
-	if (base_type(proc->type)->Proc.result_count > 0) {
+	Type *proc_type = base_type(proc->type);
+	if (proc_type->Proc.result_count > 0) {
 		result = f->result;
+
+		Type *rt = base_type(proc_type->Proc.results);
+		GB_ASSERT(is_type_tuple(rt));
+
+		if (rt->Tuple.variable_count == 1) {
+			rt = base_type(rt->Tuple.variables[0]->type);
+		}
+
+		if (is_type_string(rt)) {
+			vmValue data  = result.val_comp[0];
+			vmValue count = result.val_comp[1];
+			gb_printf("String: %.*s\n", cast(isize)count.val_int, cast(u8 *)data.val_ptr);
+		} else if (is_type_integer(rt)) {
+			gb_printf("Integer: %lld\n", cast(i64)result.val_int);
+		}
+		// gb_printf("%lld\n", cast(i64)result.val_int);
 	}
+
 	vm_pop_frame(vm);
 	return result;
 }
 
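+// Convert a checked constant (ExactValue) into a vmValue. `ptr` is the
+// ssaValue that owns the constant (may be NULL); converted compound literals
+// are cached in vm->const_compound_lits keyed by that pointer so repeated
+// uses share a single value.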
+vmValue vm_exact_value(VirtualMachine *vm, ssaValue *ptr, ExactValue value, Type *t) {
+	vmValue result = {};
+	Type *original_type = t;
+	t = base_type(get_enum_base_type(t));
+	// i64 size = vm_type_size_of(vm, t);
+	if (is_type_boolean(t)) {
+		result.val_int = value.value_bool != 0;
+	} else if (is_type_integer(t)) {
+		result.val_int = value.value_integer;
+	} else if (is_type_float(t)) {
+		if (t->Basic.kind == Basic_f32) {
+			result.val_f32 = cast(f32)value.value_float;
+		} else if (t->Basic.kind == Basic_f64) {
+			result.val_f64 = cast(f64)value.value_float;
+		}
+	} else if (is_type_pointer(t)) {
+		result.val_ptr = cast(void *)cast(intptr)value.value_pointer;
+	} else if (is_type_string(t)) {
+		array_init(&result.val_comp, vm->heap_allocator, 2);
+
+		String str = value.value_string;
+		i64 len = str.len;
+		u8 *text = gb_alloc_array(vm->heap_allocator, u8, len);
+		gb_memcopy(text, str.text, len);
+
+		vmValue data = {};
+		vmValue count = {};
+		data.val_ptr = text;
+		count.val_int = len;
+		array_add(&result.val_comp, data);
+		array_add(&result.val_comp, count);
+	} else if (value.kind == ExactValue_Compound) {
+		if (ptr != NULL) {
+			vmValue *found = map_get(&vm->const_compound_lits, hash_pointer(ptr));
+			if (found != NULL) {
+				return *found;
+			}
+		}
+
+		ast_node(cl, CompoundLit, value.value_compound);
+
+		if (is_type_array(t)) {
+			vmValue result = {};
+
+			isize elem_count = cl->elems.count;
+			if (elem_count == 0) {
+				if (ptr != NULL) {
+					map_set(&vm->const_compound_lits, hash_pointer(ptr), result);
+				}
+				return result;
+			}
+
+			Type *type = base_type(t);
+			array_init(&result.val_comp, vm->heap_allocator, type->Array.count);
+			array_resize(&result.val_comp, type->Array.count);
+			for (isize i = 0; i < elem_count; i++) {
+				TypeAndValue *tav = type_and_value_of_expression(vm->module->info, cl->elems[i]);
+				vmValue elem = vm_exact_value(vm, NULL, tav->value, tav->type);
+				result.val_comp[i] = elem;
+			}
+
+			if (ptr != NULL) {
+				map_set(&vm->const_compound_lits, hash_pointer(ptr), result);
+			}
+
+			return result;
+		} else if (is_type_struct(t)) {
+			ast_node(cl, CompoundLit, value.value_compound);
+
+			if (cl->elems.count == 0) {
+				return result;
+			}
+
+			isize value_count = t->Record.field_count;
+			array_init(&result.val_comp, vm->heap_allocator, value_count);
+			array_resize(&result.val_comp, value_count);
+
+			if (cl->elems[0]->kind == AstNode_FieldValue) {
+				isize elem_count = cl->elems.count;
+				for (isize i = 0; i < elem_count; i++) {
+					ast_node(fv, FieldValue, cl->elems[i]);
+					String name = fv->field->Ident.string;
+
+					TypeAndValue *tav = type_and_value_of_expression(vm->module->info, fv->value);
+					GB_ASSERT(tav != NULL);
+
+					Selection sel = lookup_field(vm->heap_allocator, t, name, false);
+					Entity *f = t->Record.fields[sel.index[0]];
+
+					result.val_comp[f->Variable.field_index] = vm_exact_value(vm, NULL, tav->value, f->type);
+				}
+			} else {
+				for (isize i = 0; i < value_count; i++) {
+					TypeAndValue *tav = type_and_value_of_expression(vm->module->info, cl->elems[i]);
+					GB_ASSERT(tav != NULL);
+					Entity *f = t->Record.fields_in_src_order[i];
+					result.val_comp[f->Variable.field_index] = vm_exact_value(vm, NULL, tav->value, f->type);
+				}
+			}
+		} else {
+			GB_PANIC("TODO(bill): Other compound types\n");
+		}
+
+	} else if (value.kind == ExactValue_Invalid) {
+		// NOTE(bill): "zero value"
+	} else {
+		gb_printf_err("TODO(bill): Other constant types: %s\n", type_to_string(original_type));
+	}
+
+	return result;
+}
+
 vmValue vm_operand_value(VirtualMachine *vm, ssaValue *value) {
 	vmFrame *f = vm_back_frame(vm);
 	vmValue v = {};
 	switch (value->kind) {
 	case ssaValue_Constant: {
-		auto *c = &value->Constant;
-		Type *t = base_type(c->type);
-		// i64 size = vm_type_size_of(vm, t);
-		if (is_type_boolean(t)) {
-			v.val_int = c->value.value_bool != 0;
-		} else if (is_type_integer(t)) {
-			v.val_int = c->value.value_integer;
-		} else if (is_type_float(t)) {
-			if (t->Basic.kind == Basic_f32) {
-				v.val_f32 = cast(f32)c->value.value_float;
-			} else if (t->Basic.kind == Basic_f64) {
-				v.val_f64 = cast(f64)c->value.value_float;
-			}
-		} else if (is_type_pointer(t)) {
-			v.val_ptr = cast(void *)cast(intptr)c->value.value_pointer;
-		} else if (is_type_string(t)) {
-			array_init(&v.val_comp, vm->heap_allocator, 2);
-
-			String str = c->value.value_string;
-			i64 len = str.len;
-			u8 *text = gb_alloc_array(vm->heap_allocator, u8, len);
-			gb_memcopy(text, str.text, len);
-
-			vmValue data = {};
-			vmValue count = {};
-			data.val_ptr = text;
-			count.val_int = len;
-			array_add(&v.val_comp, data);
-			array_add(&v.val_comp, count);
-		} else {
-			GB_PANIC("TODO(bill): Other constant types: %s", type_to_string(c->type));
-		}
+		v = vm_exact_value(vm, value, value->Constant.value, value->Constant.type);
 	} break;
 	case ssaValue_ConstantSlice: {
 		array_init(&v.val_comp, vm->heap_allocator, 3);
@@ -235,16 +371,15 @@ vmValue vm_operand_value(VirtualMachine *vm, ssaValue *value) {
 	return v;
 }
 
-void vm_store_integer(VirtualMachine *vm, vmValue *dst, vmValue val, i64 store_bytes) {
+void vm_store_integer(VirtualMachine *vm, void *dst, vmValue val, i64 store_bytes) {
 	// TODO(bill): I assume little endian here
 	GB_ASSERT(dst != NULL);
-	gb_memcopy(&dst->val_int, &val.val_int, store_bytes);
-
+	gb_memcopy(dst, &val.val_int, store_bytes);
 }
 
-void vm_store(VirtualMachine *vm, vmValue *dst, vmValue val, Type *type) {
+void vm_store(VirtualMachine *vm, void *dst, vmValue val, Type *type) {
 	i64 size = vm_type_size_of(vm, type);
-	type = base_type(type);
+	type = base_type(get_enum_base_type(type));
 
 	// TODO(bill): I assume little endian here
 
@@ -265,39 +400,95 @@ void vm_store(VirtualMachine *vm, vmValue *dst, vmValue val, Type *type) {
 			vm_store_integer(vm, dst, val, size);
 			break;
 		case Basic_f32:
-			dst->val_f32 = val.val_f32;
+			*cast(f32 *)dst = val.val_f32;
 			break;
 		case Basic_f64:
-			dst->val_f64 = val.val_f64;
+			*cast(f64 *)dst = val.val_f64;
 			break;
 		case Basic_rawptr:
-			dst->val_ptr = val.val_ptr;
+			*cast(void **)dst = val.val_ptr;
 			break;
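+		// `string` and `any` are stored inline as two word-sized fields:
+		// {data pointer, count} and {type_info pointer, data pointer}.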
+		case Basic_string: {
+			u8 *data = cast(u8 *)val.val_comp[0].val_ptr;
+			i64 word_size = vm_type_size_of(vm, t_int);
+
+			u8 *mem = cast(u8 *)dst;
+			gb_memcopy(mem, &data, word_size); // Store the data pointer itself, then the count
+			vm_store_integer(vm, mem+word_size, val.val_comp[1], word_size);
+		} break;
+		case Basic_any: {
+			void *type_info = val.val_comp[0].val_ptr;
+			void *data      = val.val_comp[1].val_ptr;
+			i64 word_size = vm_type_size_of(vm, t_int);
+
+			u8 *mem = cast(u8 *)dst;
+			gb_memcopy(mem, &type_info, word_size);
+			gb_memcopy(mem+word_size, &data, word_size);
+		} break;
 		default:
-			GB_PANIC("TODO(bill): other basic types for `vm_store`");
+			gb_printf_err("TODO(bill): other basic types for `vm_store` %s\n", type_to_string(type));
 			break;
 		}
 		break;
 
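+	// Composite stores recurse per member: struct fields go to their
+	// checker-computed offsets, array elements at elem_size strides.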
+	case Type_Record: {
+		if (is_type_struct(type)) {
+			u8 *mem = cast(u8 *)dst;
+
+			GB_ASSERT_MSG(type->Record.field_count >= val.val_comp.count,
+			              "%td vs %td",
+			              type->Record.field_count, val.val_comp.count);
+
+			isize field_count = gb_min(val.val_comp.count, type->Record.field_count);
+
+			for (isize i = 0; i < field_count; i++) {
+				Entity *f = type->Record.fields[i];
+				i64 offset = vm_type_offset_of(vm, type, i);
+				void *ptr = mem+offset;
+				vmValue member = val.val_comp[i];
+				vm_store(vm, ptr, member, f->type);
+			}
+		} else {
+			gb_printf_err("TODO(bill): records for `vm_store` %s\n", type_to_string(type));
+		}
+	} break;
+
+	case Type_Array: {
+		Type *elem_type = type->Array.elem;
+		u8 *mem = cast(u8 *)dst;
+		i64 elem_size = vm_type_size_of(vm, elem_type);
+		i64 elem_count = gb_min(val.val_comp.count, type->Array.count);
+
+		for (i64 i = 0; i < elem_count; i++) {
+			void *ptr = mem + (elem_size*i);
+			vmValue member = val.val_comp[i];
+			vm_store(vm, ptr, member, elem_type);
+		}
+	} break;
+
 	default:
-		GB_PANIC("TODO(bill): other types for `vm_store`");
+		gb_printf_err("TODO(bill): other types for `vm_store` %s\n", type_to_string(type));
 		break;
 	}
 }
 
-vmValue vm_load_integer(VirtualMachine *vm, vmValue *ptr, i64 store_bytes) {
+vmValue vm_load_integer(VirtualMachine *vm, void *ptr, i64 store_bytes) {
 	// TODO(bill): I assume little endian here
 	vmValue v = {};
 	// NOTE(bill): Only load the needed amount
-	gb_memcopy(&v.val_int, ptr->val_ptr, store_bytes);
+	gb_memcopy(&v.val_int, ptr, store_bytes);
 	return v;
 }
 
-vmValue vm_load(VirtualMachine *vm, vmValue *ptr, Type *type) {
+vmValue vm_load(VirtualMachine *vm, void *ptr, Type *type) {
 	i64 size = vm_type_size_of(vm, type);
-	type = base_type(type);
+	type = base_type(get_enum_base_type(type));
 
-	vmValue v = {};
+	vmValue result = {};
 	switch (type->kind) {
 	case Type_Basic:
@@ -313,29 +504,66 @@ vmValue vm_load(VirtualMachine *vm, vmValue *ptr, Type *type) {
 		case Basic_u64:
 		case Basic_int:
 		case Basic_uint:
-			v = vm_load_integer(vm, ptr, size);
+			result = vm_load_integer(vm, ptr, size);
 			break;
 		case Basic_f32:
-			v.val_f32 = *cast(f32 *)ptr;
+			result.val_f32 = *cast(f32 *)ptr;
 			break;
 		case Basic_f64:
-			v.val_f64 = *cast(f64 *)ptr;
+			result.val_f64 = *cast(f64 *)ptr;
 			break;
 		case Basic_rawptr:
-			v.val_ptr = *cast(void **)ptr;
+			result.val_ptr = *cast(void **)ptr;
 			break;
+
+		case Basic_string: {
+			i64 word_size = vm_type_size_of(vm, t_int);
+			u8 *mem = cast(u8 *)ptr;
+			array_init(&result.val_comp, vm->heap_allocator, 2);
+
+			// Read back the {data pointer, count} pair written by vm_store
+			i64 count = 0;
+			u8 *data = *cast(u8 **)(mem + 0*word_size);
+			u8 *count_data = mem + 1*word_size;
+			switch (word_size) {
+			case 4: count = *cast(i32 *)count_data; break;
+			case 8: count = *cast(i64 *)count_data; break;
+			default: GB_PANIC("Unknown int size"); break;
+			}
+
+			array_add(&result.val_comp, vm_make_value_ptr(data));
+			array_add(&result.val_comp, vm_make_value_int(count));
+		} break;
+
 		default:
-			GB_PANIC("TODO(bill): other basic types for `vm_load`");
+			GB_PANIC("TODO(bill): other basic types for `vm_load` %s", type_to_string(type));
 			break;
 		}
 		break;
 
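+	// Structs are loaded field by field into result.val_comp, mirroring the
+	// store path above.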
+	case Type_Record: {
+		if (is_type_struct(type)) {
+			isize field_count = type->Record.field_count;
+
+			array_init(&result.val_comp, vm->heap_allocator, field_count);
+			array_resize(&result.val_comp, field_count);
+
+			u8 *mem = cast(u8 *)ptr;
+			for (isize i = 0; i < field_count; i++) {
+				Entity *f = type->Record.fields[i];
+				i64 offset = vm_type_offset_of(vm, type, i);
+				vmValue val = vm_load(vm, mem+offset, f->type);
+				result.val_comp[i] = val;
+			}
+		}
+	} break;
+
 	default:
-		GB_PANIC("TODO(bill): other types for `vm_load`");
+		GB_PANIC("TODO(bill): other types for `vm_load` %s", type_to_string(type));
 		break;
 	}
 
-	return v;
+	return result;
 }
 
@@ -376,20 +604,23 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 	} break;
 
 	case ssaInstr_ZeroInit: {
-
+		Type *t = type_deref(ssa_type(instr->ZeroInit.address));
+		vmValue addr = vm_operand_value(vm, instr->ZeroInit.address);
+		void *data = addr.val_ptr;
+		i64 size = vm_type_size_of(vm, t);
+		gb_zero_size(data, size);
 	} break;
 
 	case ssaInstr_Store: {
 		vmValue addr = vm_operand_value(vm, instr->Store.address);
 		vmValue val = vm_operand_value(vm, instr->Store.value);
-		vmValue *address = cast(vmValue *)addr.val_ptr;
 		Type *t = ssa_type(instr->Store.value);
-		vm_store(vm, address, val, t);
+		vm_store(vm, addr.val_ptr, val, t);
 	} break;
 
 	case ssaInstr_Load: {
 		vmValue addr = vm_operand_value(vm, instr->Load.address);
-		vmValue v = vm_load(vm, &addr, ssa_type(value));
+		vmValue v = vm_load(vm, addr.val_ptr, ssa_type(value));
 		vm_set_value(f, value, v);
 	} break;
 
@@ -404,8 +635,8 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 	} break;
 
 	case ssaInstr_StructElementPtr: {
-		vmValue address = vm_operand_value(vm, instr->StructElementPtr.address);
-		i32 elem_index = instr->StructElementPtr.elem_index;
+		vmValue address    = vm_operand_value(vm, instr->StructElementPtr.address);
+		i32     elem_index = instr->StructElementPtr.elem_index;
 		Type *t = ssa_type(instr->StructElementPtr.address);
 		i64 offset_in_bytes = vm_type_offset_of(vm, type_deref(t), elem_index);
@@ -424,7 +655,7 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 	} break;
 
 	case ssaInstr_Phi: {
-
+		GB_PANIC("TODO(bill): ssaInstr_Phi");
 	} break;
 
 	case ssaInstr_ArrayExtractValue: {
@@ -444,7 +675,7 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 		f->instr_index = 0;
 	} break;
 
-	case ssaInstr_If: {;
+	case ssaInstr_If: {
 		vmValue cond = vm_operand_value(vm, instr->If.cond);
 		if (cond.val_int != 0) {
 			f->curr_block = instr->If.true_block;
@@ -469,7 +700,79 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 	} break;
 
 	case ssaInstr_Conv: {
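+		// Integer conversions copy the smaller byte width into a zero-initialised
+		// destination; float<->int conversions go through explicit casts.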
+		// TODO(bill): Assuming little endian
+		vmValue dst = {};
+		vmValue src = vm_operand_value(vm, instr->Conv.value);
+		i64 from_size = vm_type_size_of(vm, instr->Conv.from);
+		i64 to_size = vm_type_size_of(vm, instr->Conv.to);
+		switch (instr->Conv.kind) {
+		case ssaConv_trunc:
+			gb_memcopy(&dst, &src, to_size);
+			break;
+		case ssaConv_zext:
+			gb_memcopy(&dst, &src, from_size);
+			break;
+		case ssaConv_fptrunc: {
+			GB_ASSERT(from_size > to_size);
+			GB_ASSERT(base_type(instr->Conv.from) == t_f64);
+			GB_ASSERT(base_type(instr->Conv.to) == t_f32);
+			dst.val_f32 = cast(f32)src.val_f64;
+		} break;
+		case ssaConv_fpext: {
+			GB_ASSERT(from_size < to_size);
+			GB_ASSERT(base_type(instr->Conv.from) == t_f32);
+			GB_ASSERT(base_type(instr->Conv.to) == t_f64);
+			dst.val_f64 = cast(f64)src.val_f32;
+		} break;
+		case ssaConv_fptoui: {
+			Type *from = base_type(instr->Conv.from);
+			if (from == t_f64) {
+				u64 u = cast(u64)src.val_f64;
+				gb_memcopy(&dst, &u, to_size);
+			} else {
+				u64 u = cast(u64)src.val_f32;
+				gb_memcopy(&dst, &u, to_size);
+			}
+		} break;
+		case ssaConv_fptosi: {
+			Type *from = base_type(instr->Conv.from);
+			if (from == t_f64) {
+				i64 i = cast(i64)src.val_f64;
+				gb_memcopy(&dst, &i, to_size);
+			} else {
+				i64 i = cast(i64)src.val_f32;
+				gb_memcopy(&dst, &i, to_size);
+			}
+		} break;
+		case ssaConv_uitofp: {
+			Type *to = base_type(instr->Conv.to);
+			if (to == t_f64) {
+				dst.val_f64 = cast(f64)cast(u64)src.val_int;
+			} else {
+				dst.val_f32 = cast(f32)cast(u64)src.val_int;
+			}
+		} break;
+		case ssaConv_sitofp: {
+			Type *to = base_type(instr->Conv.to);
+			if (to == t_f64) {
+				dst.val_f64 = cast(f64)cast(i64)src.val_int;
+			} else {
+				dst.val_f32 = cast(f32)cast(i64)src.val_int;
+			}
+		} break;
+
+		case ssaConv_ptrtoint:
+			dst.val_int = cast(i64)src.val_ptr;
+			break;
+		case ssaConv_inttoptr:
+			dst.val_ptr = cast(void *)src.val_int;
+			break;
+		case ssaConv_bitcast:
+			dst = src;
+			break;
+		}
+
+		vm_set_value(f, value, dst);
 	} break;
 
 	case ssaInstr_Unreachable: {
@@ -493,15 +796,15 @@ void vm_exec_instr(VirtualMachine *vm, ssaValue *value) {
 
 		if (is_type_integer(t)) {
 			switch (bo->op) {
-			case Token_Add: v.val_int = l.val_int + r.val_int; break;
-			case Token_Sub: v.val_int = l.val_int - r.val_int; break;
-			case Token_And: v.val_int = l.val_int & r.val_int; break;
-			case Token_Or:  v.val_int = l.val_int | r.val_int; break;
-			case Token_Xor: v.val_int = l.val_int ^ r.val_int; break;
+			case Token_Add: v.val_int = l.val_int +  r.val_int; break;
+			case Token_Sub: v.val_int = l.val_int -  r.val_int; break;
+			case Token_And: v.val_int = l.val_int &  r.val_int; break;
+			case Token_Or:  v.val_int = l.val_int |  r.val_int; break;
+			case Token_Xor: v.val_int = l.val_int ^  r.val_int; break;
 			case Token_Shl: v.val_int = l.val_int << r.val_int; break;
 			case Token_Shr: v.val_int = l.val_int >> r.val_int; break;
-			case Token_Mul: v.val_int = l.val_int * r.val_int; break;
-			case Token_Not: v.val_int = l.val_int ^ r.val_int; break;
+			case Token_Mul: v.val_int = l.val_int *  r.val_int; break;
+			case Token_Not: v.val_int = l.val_int ^  r.val_int; break;
 			case Token_AndNot: v.val_int = l.val_int & (~r.val_int); break;