@@ -137,6 +137,7 @@ static const int GC_SIZES[GC_PARTITIONS] = {4,8,12,16,20, 8,64,1<<14,1<<22};
 #define GC_PROFILE 1
 #define GC_DUMP_MEM 2
 #define GC_TRACK 4
+#define GC_NO_THREADS 8
 
 static int gc_flags = 0;
 static gc_pheader *gc_pages[GC_ALL_PAGES] = {NULL};
@@ -146,6 +147,15 @@ static gc_pheader *gc_level1_null[1<<GC_LEVEL1_BITS] = {NULL};
 static gc_pheader **hl_gc_page_map[1<<GC_LEVEL0_BITS] = {NULL};
 static void (*gc_track_callback)(hl_type *,int,int,void*) = NULL;
 
+static struct {
+	int count;
+	bool stopping_world;
+	hl_thread_info **threads;
+	hl_mutex *global_lock;
+} gc_threads;
+
+HL_THREAD_STATIC_VAR hl_thread_info *current_thread;
+
 static struct {
 	int64 total_requested;
 	int64 total_allocated;
@@ -180,11 +190,40 @@ static void ***gc_roots = NULL;
 static int gc_roots_count = 0;
 static int gc_roots_max = 0;
 
+HL_API hl_thread_info *hl_get_thread() {
+	return current_thread;
+}
+
+static void gc_save_context( hl_thread_info *t ) {
+	setjmp(t->gc_regs);
+	t->stack_cur = &t;
+}
+
+#ifndef HL_THREADS
+#	define gc_global_lock(_)
+#else
+static void gc_global_lock( bool lock ) {
+	hl_thread_info *t = current_thread;
+	bool mt = (gc_flags & GC_NO_THREADS) == 0;
+	if( lock ) {
+		if( t->gc_blocking )
+			hl_fatal("Can't lock GC in hl_blocking section");
+		if( mt ) gc_save_context(t);
+		t->gc_blocking++;
+		if( mt ) hl_mutex_acquire(gc_threads.global_lock);
+	} else {
+		t->gc_blocking--;
+		if( mt ) hl_mutex_release(gc_threads.global_lock);
+	}
+}
+#endif
+
 HL_PRIM void hl_gc_set_track( void *f ) {
 	gc_track_callback = f;
 }
 
 HL_PRIM void hl_add_root( void *r ) {
+	gc_global_lock(true);
 	if( gc_roots_count == gc_roots_max ) {
 		int nroots = gc_roots_max ? (gc_roots_max << 1) : 16;
 		void ***roots = (void***)malloc(sizeof(void*)*nroots);
@@ -194,20 +233,19 @@ HL_PRIM void hl_add_root( void *r ) {
 		gc_roots_max = nroots;
 	}
 	gc_roots[gc_roots_count++] = (void**)r;
-}
-
-HL_PRIM void hl_pop_root() {
-	gc_roots_count--;
+	gc_global_lock(false);
 }
 
 HL_PRIM void hl_remove_root( void *v ) {
 	int i;
-	for(i=0;i<gc_roots_count;i++)
+	gc_global_lock(true);
+	for(i=gc_roots_count-1;i>=0;i--)
 		if( gc_roots[i] == (void**)v ) {
 			gc_roots_count--;
 			memmove(gc_roots + i, gc_roots + (i+1), (gc_roots_count - i) * sizeof(void*));
 			break;
 		}
+	gc_global_lock(false);
 }
 
 HL_PRIM gc_pheader *hl_gc_get_page( void *v ) {
@@ -219,6 +257,72 @@ HL_PRIM gc_pheader *hl_gc_get_page( void *v ) {
 	return page;
 }
 
+// ------------------------- THREADS ----------------------------------------------------------
+
+HL_API int hl_thread_id();
+
+HL_API void hl_register_thread( void *stack_top ) {
+	if( hl_get_thread() )
+		hl_fatal("Thread already registered");
+
+	hl_thread_info *t = (hl_thread_info*)malloc(sizeof(hl_thread_info));
+	memset(t, 0, sizeof(hl_thread_info));
+	t->thread_id = hl_thread_id();
+	t->stack_top = stack_top;
+	current_thread = t;
+	hl_add_root(&t->exc_value);
+	hl_add_root(&t->exc_handler);
+
+	gc_global_lock(true);
+	hl_thread_info **all = (hl_thread_info**)malloc(sizeof(void*) * (gc_threads.count + 1));
+	memcpy(all,gc_threads.threads,sizeof(void*)*gc_threads.count);
+	gc_threads.threads = all;
+	all[gc_threads.count++] = t;
+	gc_global_lock(false);
+}
+
+HL_API void hl_unregister_thread() {
+	int i;
+	hl_thread_info *t = hl_get_thread();
+	if( !t )
+		hl_fatal("Thread not registered");
+	hl_remove_root(&t->exc_value);
+	hl_remove_root(&t->exc_handler);
+	gc_global_lock(true);
+	for(i=0;i<gc_threads.count;i++)
+		if( gc_threads.threads[i] == t ) {
+			memmove(gc_threads.threads + i, gc_threads.threads + i + 1, sizeof(void*) * (gc_threads.count - i - 1));
+			gc_threads.count--;
+			break;
+		}
+	free(t);
+	current_thread = NULL;
+	// don't use gc_global_lock(false)
+	hl_mutex_release(gc_threads.global_lock);
+}
+
+HL_API void *hl_gc_threads_info() {
+	return &gc_threads;
+}
+
+static void gc_stop_world( bool b ) {
+#	ifdef HL_THREADS
+	if( b ) {
+		int i;
+		gc_threads.stopping_world = true;
+		for(i=0;i<gc_threads.count;i++) {
+			hl_thread_info *t = gc_threads.threads[i];
+			while( t->gc_blocking == 0 ) {}; // spinwait
+		}
+	} else {
+		// releasing global lock will release all threads
+		gc_threads.stopping_world = false;
+	}
+#	else
+	if( b ) gc_save_context(current_thread);
+#	endif
+}
+
 // ------------------------- ALLOCATOR ----------------------------------------------------------
 
 static void *gc_alloc_page_memory( int size );
@@ -271,6 +375,7 @@ static int PAGE_ID = 0;
 #endif
 
 HL_API void hl_gc_dump_memory( const char *filename );
+static void gc_major( void );
 
 static gc_pheader *gc_alloc_new_page( int pid, int block, int size, int kind, bool varsize ) {
 	int m, i;
@@ -298,14 +403,16 @@ retry:
 	p = (gc_pheader*)base;
 	if( !base ) {
 		int pages = gc_stats.pages_allocated;
-		hl_gc_major();
+		gc_major();
 		if( pages != gc_stats.pages_allocated ) {
 			size = old_size;
 			goto retry;
 		}
 		// big block : report stack trace - we should manage to handle it
-		if( size >= (8 << 20) )
+		if( size >= (8 << 20) ) {
+			gc_global_lock(false);
 			hl_error_msg(USTR("Failed to alloc %d KB"),size>>10);
+		}
 		if( gc_flags & GC_DUMP_MEM ) hl_gc_dump_memory("hlmemory.dump");
 		out_of_memory("pages");
 	}
@@ -567,6 +674,7 @@ static void *gc_alloc_gen( int size, int flags, int *allocated ) {
 			return ptr;
 		}
 	}
+	gc_global_lock(false);
 	hl_error("Required memory allocation too big");
 	return NULL;
 }
@@ -577,6 +685,7 @@ void *hl_gc_alloc_gen( hl_type *t, int size, int flags ) {
 	void *ptr;
 	int time = 0;
 	int allocated = 0;
+	gc_global_lock(true);
 	gc_check_mark();
 #	ifdef GC_MEMCHK
 	size += HL_WSIZE;
@@ -591,6 +700,7 @@ void *hl_gc_alloc_gen( hl_type *t, int size, int flags ) {
 		MZERO(ptr,allocated);
 	else if( MEM_HAS_PTR(flags) && allocated != size )
 		MZERO((char*)ptr+size,allocated-size); // erase possible pointers after data
+	gc_global_lock(false);
 	if( (gc_flags & GC_TRACK) && gc_track_callback )
 		((void (*)(hl_type *,int,int,void*))gc_track_callback)(t,size,flags,ptr);
 #	ifdef GC_MEMCHK
@@ -602,7 +712,6 @@ void *hl_gc_alloc_gen( hl_type *t, int size, int flags ) {
 // ------------------------- MARKING ----------------------------------------------------------
 
 static float gc_mark_threshold = 0.2f;
-static void *gc_stack_top = NULL;
 static int mark_size = 0;
 static unsigned char *mark_data = NULL;
 static void **cur_mark_stack = NULL;
@@ -766,16 +875,35 @@ static void gc_call_finalizers(){
 	}
 }
 
+static void gc_mark_stack( void *start, void *end ) {
+	void **mark_stack = cur_mark_stack;
+	void **stack_head = (void**)start;
+	while( stack_head < (void**)end ) {
+		void *p = *stack_head++;
+		gc_pheader *page = GC_GET_PAGE(p);
+		int bid;
+		if( !page || (((unsigned char*)p - (unsigned char*)page)%page->block_size) != 0 ) continue;
+#	ifdef HL_64
+		if( !INPAGE(p,page) ) continue;
+#	endif
+		bid = (int)((unsigned char*)p - (unsigned char*)page) / page->block_size;
+		if( page->sizes ) {
+			if( page->sizes[bid] == 0 ) continue;
+		} else if( bid < page->first_block )
+			continue;
+		if( (page->bmp[bid>>3] & (1<<(bid&7))) == 0 ) {
+			page->bmp[bid>>3] |= 1<<(bid&7);
+			GC_PUSH_GEN(p,page,bid);
+		}
+	}
+	cur_mark_stack = mark_stack;
+}
+
 static void gc_mark() {
-	jmp_buf regs;
-	void **stack_head;
-	void **stack_top = (void**)gc_stack_top;
 	void **mark_stack = cur_mark_stack;
 	int mark_bytes = gc_stats.mark_bytes;
 	int pid, i;
 	unsigned char *mark_cur;
-	// save registers
-	setjmp(regs);
 	// prepare mark bits
 	if( mark_bytes > mark_size ) {
 		gc_free_page_memory(mark_data, mark_size);
@@ -814,27 +942,16 @@ static void gc_mark() {
 			GC_PUSH_GEN(p,page,bid);
 		}
 	}
-	// scan stack
-	stack_head = (void**)&stack_head;
-	if( stack_head > (void**)&regs ) stack_head = (void**)&regs; // fix for compilers that might inverse variables
-	while( stack_head <= stack_top ) {
-		void *p = *stack_head++;
-		gc_pheader *page = GC_GET_PAGE(p);
-		int bid;
-		if( !page || (((unsigned char*)p - (unsigned char*)page)%page->block_size) != 0 ) continue;
-#	ifdef HL_64
-		if( !INPAGE(p,page) ) continue;
-#	endif
-		bid = (int)((unsigned char*)p - (unsigned char*)page) / page->block_size;
-		if( page->sizes ) {
-			if( page->sizes[bid] == 0 ) continue;
-		} else if( bid < page->first_block )
-			continue;
-		if( (page->bmp[bid>>3] & (1<<(bid&7))) == 0 ) {
-			page->bmp[bid>>3] |= 1<<(bid&7);
-			GC_PUSH_GEN(p,page,bid);
-		}
+
+	// scan threads stacks & registers
+	for(i=0;i<gc_threads.count;i++) {
+		hl_thread_info *t = gc_threads.threads[i];
+		cur_mark_stack = mark_stack;
+		gc_mark_stack(t->stack_cur,t->stack_top);
+		gc_mark_stack(&t->gc_regs,(void**)&t->gc_regs + (sizeof(jmp_buf) / sizeof(void*) - 1));
+		mark_stack = cur_mark_stack;
 	}
+
 	cur_mark_stack = mark_stack;
 	if( mark_stack ) gc_flush_mark();
 	gc_call_finalizers();
@@ -844,11 +961,13 @@ static void gc_mark() {
 	gc_flush_empty_pages();
 }
 
-HL_API void hl_gc_major() {
+static void gc_major() {
 	int time = TIMESTAMP(), dt;
 	gc_stats.last_mark = gc_stats.total_allocated;
 	gc_stats.last_mark_allocs = gc_stats.allocation_count;
+	gc_stop_world(true);
 	gc_mark();
+	gc_stop_world(false);
 	dt = TIMESTAMP() - time;
 	gc_stats.mark_count++;
 	gc_stats.mark_time += dt;
@@ -868,6 +987,12 @@ HL_API void hl_gc_major() {
 	}
 }
 
+HL_API void hl_gc_major() {
+	gc_global_lock(true);
+	gc_major();
+	gc_global_lock(false);
+}
+
 HL_API bool hl_is_gc_ptr( void *ptr ) {
 	gc_pheader *page = GC_GET_PAGE(ptr);
 	int bid;
@@ -887,12 +1012,11 @@ static void gc_check_mark() {
 	int64 m = gc_stats.total_allocated - gc_stats.last_mark;
 	int64 b = gc_stats.allocation_count - gc_stats.last_mark_allocs;
 	if( (m > gc_stats.pages_total_memory * gc_mark_threshold || b > gc_stats.pages_blocks * gc_mark_threshold) && gc_is_active )
-		hl_gc_major();
+		gc_major();
 }
 
-static void hl_gc_init( void *stack_top ) {
+static void hl_gc_init() {
 	int i;
-	gc_stack_top = stack_top;
 	for(i=0;i<1<<GC_LEVEL0_BITS;i++)
 		hl_gc_page_map[i] = gc_level1_null;
 	if( TRAILING_ONES(0x080003FF) != 10 || TRAILING_ONES(0) != 0 || TRAILING_ONES(0xFFFFFFFF) != 32 )
@@ -905,25 +1029,50 @@ static void hl_gc_init( void *stack_top ) {
 	if( getenv("HL_DUMP_MEMORY") )
 		gc_flags |= GC_DUMP_MEM;
 #	endif
+	memset(&gc_threads,0,sizeof(gc_threads));
+	gc_threads.global_lock = hl_mutex_alloc();
 }
 
 // ---- UTILITIES ----------------------
 
-static bool is_blocking = false; // TODO : use TLS for multithread
-
 HL_API bool hl_is_blocking() {
-	return is_blocking;
-}
-
-HL_API void hl_blocking( bool b) {
-	is_blocking = b;
+	hl_thread_info *t = current_thread;
+	// when called from a non GC thread, tells if the main thread is blocking
+	if( t == NULL ) {
+		if( gc_threads.count == 0 )
+			return false;
+		t = gc_threads.threads[0];
+	}
+	return t->gc_blocking > 0;
 }
 
-void hl_global_init( void *stack_top ) {
-	hl_gc_init(stack_top);
+HL_API void hl_blocking( bool b ) {
+	hl_thread_info *t = current_thread;
+	if( !t ) hl_error("Unregistered thread");
+	if( b ) {
+#	ifdef HL_THREADS
+		if( t->gc_blocking == 0 )
+			gc_save_context(t);
+#	endif
+		t->gc_blocking++;
+	} else if( t->gc_blocking == 0 )
+		hl_error("Unblocked thread");
+	else {
+		t->gc_blocking--;
+		if( t->gc_blocking == 0 && gc_threads.stopping_world ) {
+			gc_global_lock(true);
+			gc_global_lock(false);
+		}
+	}
 }
 
 void hl_cache_free();
+void hl_cache_init();
+
+void hl_global_init() {
+	hl_gc_init();
+	hl_cache_init();
+}
 
 void hl_global_free() {
 	hl_cache_free();
@@ -1069,11 +1218,11 @@ vdynamic *hl_alloc_dynamic( hl_type *t ) {
 #	define DYN_PAD
 #endif
 
-static vdynamic vdyn_true = { &hlt_bool, DYN_PAD {true} };
-static vdynamic vdyn_false = { &hlt_bool, DYN_PAD {false} };
+static const vdynamic vdyn_true = { &hlt_bool, DYN_PAD {true} };
+static const vdynamic vdyn_false = { &hlt_bool, DYN_PAD {false} };
 
 vdynamic *hl_alloc_dynbool( bool b ) {
-	return b ? &vdyn_true : &vdyn_false;
+	return (vdynamic*)(b ? &vdyn_true : &vdyn_false);
 }
 
@@ -1157,6 +1306,8 @@ HL_API void hl_gc_set_dump_types( hl_types_dump tdump ) {
 
 HL_API void hl_gc_dump_memory( const char *filename ) {
 	int i;
+	gc_global_lock(true);
+	gc_stop_world(true);
 	gc_mark();
 	fdump = fopen(filename,"wb");
 	// header
@@ -1187,13 +1338,13 @@ HL_API void hl_gc_dump_memory( const char *filename ) {
 	for(i=0;i<gc_roots_count;i++)
 		fdump_p(*gc_roots[i]);
 	// stacks
-	fdump_i(1);
-	fdump_p(gc_stack_top);
-	{
-		void **stack_head = (void**)&stack_head;
-		int size = (int)((void**)gc_stack_top - stack_head);
+	fdump_i(gc_threads.count);
+	for(i=0;i<gc_threads.count;i++) {
+		hl_thread_info *t = gc_threads.threads[i];
+		fdump_p(t->stack_top);
+		int size = (int)((void**)t->stack_top - (void**)t->stack_cur);
 		fdump_i(size);
-		fdump_d(stack_head,size*sizeof(void*));
+		fdump_d(t->stack_cur,size*sizeof(void*));
 	}
 	// types
 #	define fdump_t(t) fdump_i(t.kind); fdump_p(&t);
@@ -1209,6 +1360,8 @@ HL_API void hl_gc_dump_memory( const char *filename ) {
 	if( gc_types_dump ) gc_types_dump(fdump_d);
 	fclose(fdump);
 	fdump = NULL;
+	gc_stop_world(false);
+	gc_global_lock(false);
 }
 
 HL_API vdynamic *hl_debug_call( int mode, vdynamic *v ) {
@@ -1223,4 +1376,4 @@ DEFINE_PRIM(_VOID, gc_dump_memory, _BYTES);
 DEFINE_PRIM(_I32, gc_get_flags, _NO_ARG);
 DEFINE_PRIM(_VOID, gc_set_flags, _I32);
 DEFINE_PRIM(_DYN, debug_call, _I32 _DYN);
-
+DEFINE_PRIM(_VOID, blocking, _BOOL);