@@ -64,9 +64,55 @@ static const int GC_SIZES[GC_PARTITIONS] = {4,8,12,16,20, 8,64,1<<13,0};
 
 #define GC_ALIGN (1 << GC_ALIGN_BITS)
 
 static gc_pheader *gc_pages[GC_ALL_PAGES] = {NULL};
-static int gc_free_blocks[GC_ALL_PAGES] = {0};
 static gc_pheader *gc_free_pages[GC_ALL_PAGES] = {NULL};
+static gc_freelist *cached_fl[32] = {NULL};
+static int free_lists_size = 0;
+
+static gc_freelist *alloc_freelist( int size ) {
+	gc_freelist *fl = cached_fl[size];
+	if( !fl ) fl = cached_fl[size+1];
+	if( fl ) {
+		cached_fl[fl->size_bits] = fl->cached_next;
+		fl->count = 0;
+		fl->current = 0;
+		fl->cached_next = NULL;
+		return fl;
+	}
+	int bytes = sizeof(gc_freelist) + sizeof(gc_fl) * ((1<<size)-1);
+	free_lists_size += bytes;
+	fl = (gc_freelist*)malloc(bytes);
+	fl->count = 0;
+	fl->current = 0;
+	fl->size_bits = size;
+	fl->cached_next = NULL;
+	return fl;
+}
+
+static void free_freelist( gc_freelist *fl ) {
+	fl->cached_next = cached_fl[fl->size_bits];
+	cached_fl[fl->size_bits] = fl;
+}
+
+
+#define GET_FL(fl,pos) ((&(fl)->first) + (pos))
+
+static void freelist_append( gc_freelist **flref, int pos, int count ) {
+	gc_freelist *fl = *flref;
+	if( fl->count == 1<<fl->size_bits ) {
+#		ifdef GC_DEBUG
+		if( fl->current ) hl_fatal("assert");
+#		endif
+		gc_freelist *fl2 = alloc_freelist(fl->size_bits + 1);
+		memcpy(GET_FL(fl2,0),GET_FL(fl,0),sizeof(int)*fl->count*2);
+		fl2->count = fl->count;
+		free_freelist(fl);
+		*flref = fl = fl2;
+	}
+	gc_fl *p = GET_FL(fl,fl->count++);
+	p->pos = pos;
+	p->count = count;
+}
 
 static gc_pheader *gc_allocator_new_page( int pid, int block, int size, int kind, bool varsize ) {
 	// increase size based on previously allocated pages
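
The helpers above rely on the gc_fl and gc_freelist types, whose declarations are outside this hunk. From the way they are used here (GET_FL indexes from &fl->first, alloc_freelist sizes its malloc as sizeof(gc_freelist) + sizeof(gc_fl) * ((1<<size)-1), and freelist_append's grow path copies two ints per entry), the layout is roughly the sketch below; field order and exact types are assumptions, not the patch's actual declarations.

typedef struct {
	int pos;    /* first block id of a free range */
	int count;  /* number of consecutive free blocks in that range */
} gc_fl;

typedef struct _gc_freelist gc_freelist;
struct _gc_freelist {
	int count;                /* entries currently stored */
	int current;              /* index of the next entry to allocate from */
	int size_bits;            /* capacity is 1 << size_bits entries */
	gc_freelist *cached_next; /* link used by the cached_fl[] recycling pool */
	gc_fl first;              /* first entry; the remaining (1<<size_bits)-1 entries follow in the same malloc block */
};
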
@@ -106,8 +152,9 @@ static gc_pheader *gc_allocator_new_page( int pid, int block, int size, int kind
 	int m = start_pos % block;
 	if( m ) start_pos += block - m;
 	p->first_block = start_pos / block;
-	p->next_block = p->first_block;
-	p->free_blocks = p->max_blocks - p->first_block;
+	p->free = alloc_freelist(8);
+	freelist_append(&p->free,p->first_block, p->max_blocks - p->first_block);
+	p->need_flush = false;
 
 	ph->next_page = gc_pages[pid];
 	gc_pages[pid] = ph;
@@ -115,53 +162,109 @@ static gc_pheader *gc_allocator_new_page( int pid, int block, int size, int kind
 	return ph;
 }
 
+
+static void flush_free_list( gc_pheader *ph ) {
+	gc_allocator_page_data *p = &ph->alloc;
+
+	int bid = p->first_block;
+	int last = p->max_blocks;
+	gc_freelist *new_fl = alloc_freelist(p->free->size_bits);
+	gc_freelist *old_fl = p->free;
+	gc_fl *cur_pos = NULL;
+	int reuse_index = old_fl->current;
+	gc_fl *reuse = reuse_index < old_fl->count ? GET_FL(old_fl,reuse_index++) : NULL;
+	int next_bid = reuse ? reuse->pos : -1;
+	unsigned char *bmp = ph->bmp;
+
+	while( bid < last ) {
+		if( bid == next_bid ) {
+			if( cur_pos && cur_pos->pos + cur_pos->count == bid ) {
+				cur_pos->count += reuse->count;
+			} else {
+				freelist_append(&new_fl,reuse->pos,reuse->count);
+				cur_pos = GET_FL(new_fl,new_fl->count - 1);
+			}
+			reuse = reuse_index < old_fl->count ? GET_FL(old_fl,reuse_index++) : NULL;
+			bid = cur_pos->count + cur_pos->pos;
+			next_bid = reuse ? reuse->pos : -1;
+			continue;
+		}
+		int count;
+		if( p->sizes ) {
+			count = p->sizes[bid];
+			if( !count ) count = 1;
+		} else
+			count = 1;
+		if( (bmp[bid>>3] & (1<<(bid&7))) == 0 ) {
+			if( p->sizes ) p->sizes[bid] = 0;
+			if( cur_pos && cur_pos->pos + cur_pos->count == bid )
+				cur_pos->count += count;
+			else {
+				freelist_append(&new_fl,bid,count);
+				cur_pos = GET_FL(new_fl,new_fl->count - 1);
+			}
+		}
+		bid += count;
+	}
+	free_freelist(old_fl);
+	p->free = new_fl;
+	p->need_flush = false;
+#ifdef __GC_DEBUG
+	if( reuse ) hl_fatal("assert");
+	int bid;
+	gc_freelist *fl = p->free;
+	for(bid=p->first_block;bid<p->max_blocks;bid++) {
+		while( fl && fl->pos + fl->count <= bid ) fl = fl->next;
+		bool is_free = fl && bid >= fl->pos;
+		bool is_marked = ((ph->bmp[bid>>3] & (1<<(bid&7))) != 0);
+		if( is_free == is_marked ) hl_fatal("assert");
+		if( p->sizes && !is_free )
+			bid += p->sizes[bid]-1;
+	}
+#endif
+}
+
 static void *gc_alloc_fixed( int part, int kind ) {
 	int pid = (part << PAGE_KIND_BITS) | kind;
 	gc_pheader *ph = gc_free_pages[pid];
-	gc_allocator_page_data *p;
-	unsigned char *ptr;
+	gc_allocator_page_data *p = NULL;
+	int bid = -1;
 	while( ph ) {
 		p = &ph->alloc;
-		if( ph->bmp ) {
-			int next = p->next_block;
-			while( true ) {
-				unsigned int fetch_bits = ((unsigned int*)ph->bmp)[next >> 5];
-				int ones = TRAILING_ONES(fetch_bits >> (next&31));
-				next += ones;
-				if( (next&31) == 0 && ones ) {
-					if( next >= p->max_blocks ) {
-						p->next_block = next;
-						break;
-					}
-					continue;
-				}
-				p->next_block = next;
-				if( next >= p->max_blocks )
-					break;
-				goto alloc_fixed;
-			}
-		} else if( p->next_block < p->max_blocks )
+		if( p->need_flush )
+			flush_free_list(ph);
+		gc_freelist *fl = p->free;
+		if( fl->current < fl->count ) {
+			gc_fl *c = GET_FL(fl,fl->current);
+			bid = c->pos++;
+			c->count--;
+#			ifdef GC_DEBUG
+			if( c->count < 0 ) hl_fatal("assert");
+#			endif
+			if( !c->count ) fl->current++;
 			break;
+		}
 		ph = ph->next_page;
 	}
-	if( ph == NULL )
+	if( ph == NULL ) {
 		ph = gc_allocator_new_page(pid, GC_SIZES[part], GC_PAGE_SIZE, kind, false);
-alloc_fixed:
-	p = &ph->alloc;
-	ptr = ph->base + p->next_block * p->block_size;
+		p = &ph->alloc;
+		bid = p->free->first.pos++;
+		p->free->first.count--;
+	}
+	unsigned char *ptr = ph->base + bid * p->block_size;
 #	ifdef GC_DEBUG
 	{
 		int i;
-		if( p->next_block < p->first_block || p->next_block >= p->max_blocks )
+		if( bid < p->first_block || bid >= p->max_blocks )
 			hl_fatal("assert");
-		if( ph->bmp && (ph->bmp[p->next_block>>3]&(1<<(p->next_block&7))) != 0 )
+		if( ph->bmp && (ph->bmp[bid>>3]&(1<<(bid&7))) != 0 )
 			hl_fatal("Alloc on marked bit");
 		for(i=0;i<p->block_size;i++)
 			if( ptr[i] != 0xDD )
 				hl_fatal("assert");
 	}
 #	endif
-	p->next_block++;
 	gc_free_pages[pid] = ph;
 	return ptr;
 }
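
gc_alloc_fixed above now pulls the next block id straight off the current (pos,count) range, and gc_alloc_var in the following hunk scans the ranges for one that can hold nblocks contiguous blocks. A self-contained mock of that consumption logic (not HashLink code; range, take_one and take_run are names invented for this sketch):

#include <stdio.h>

typedef struct { int pos; int count; } range;

/* fixed-size path: take one block id from the first non-empty range */
static int take_one( range *r, int n, int *current ) {
	if( *current >= n ) return -1; /* caller would allocate a new page */
	int bid = r[*current].pos++;
	if( --r[*current].count == 0 ) (*current)++;
	return bid;
}

/* var-size path: take nblocks contiguous ids from the first range big enough */
static int take_run( range *r, int n, int current, int nblocks ) {
	int k;
	for(k=current;k<n;k++)
		if( r[k].count >= nblocks ) {
			int bid = r[k].pos;
			r[k].pos += nblocks;
			r[k].count -= nblocks;
			return bid;
		}
	return -1; /* no free run large enough on this page */
}

int main() {
	range free_ranges[] = { {4,3}, {10,8} }; /* blocks 4-6 and 10-17 are free */
	int current = 0, i;
	for(i=0;i<4;i++)
		printf("fixed alloc -> block %d\n", take_one(free_ranges,2,&current)); /* 4, 5, 6, 10 */
	printf("var alloc of 5 blocks -> block %d\n", take_run(free_ranges,2,current,5)); /* 11 */
	return 0;
}
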
@@ -169,88 +272,47 @@ alloc_fixed:
 static void *gc_alloc_var( int part, int size, int kind ) {
 	int pid = (part << PAGE_KIND_BITS) | kind;
 	gc_pheader *ph = gc_free_pages[pid];
-	gc_allocator_page_data *p;
+	gc_allocator_page_data *p = NULL;
 	unsigned char *ptr;
 	int nblocks = size >> GC_SBITS[part];
-	int max_free = gc_free_blocks[pid];
-loop:
+	int bid = -1;
 	while( ph ) {
 		p = &ph->alloc;
-		if( ph->bmp ) {
-			int next, avail = 0;
-			if( p->free_blocks >= nblocks ) {
-				p->next_block = p->first_block;
-				p->free_blocks = 0;
-			}
-			next = p->next_block;
-			if( next + nblocks > p->max_blocks )
-				goto skip;
-			while( true ) {
-				int fid = next >> 5;
-				unsigned int fetch_bits = ((unsigned int*)ph->bmp)[fid];
-				int bits;
-resume:
-				bits = TRAILING_ONES(fetch_bits >> (next&31));
-				if( bits ) {
-					if( avail > p->free_blocks ) p->free_blocks = avail;
-					avail = 0;
-					next += bits - 1;
-					if( next >= p->max_blocks ) {
-						p->next_block = next;
-						ph = ph->next_page;
-						goto loop;
-					}
-					if( p->sizes[next] == 0 ) hl_fatal("assert");
-					next += p->sizes[next];
-					if( next + nblocks > p->max_blocks ) {
-						p->next_block = next;
-						ph = ph->next_page;
-						goto loop;
-					}
-					if( (next>>5) != fid )
-						continue;
-					goto resume;
-				}
-				bits = TRAILING_ZEROES( (next & 31) ? (fetch_bits >> (next&31)) | (1<<(32-(next&31))) : fetch_bits );
-				avail += bits;
-				next += bits;
-				if( next > p->max_blocks ) {
-					avail -= next - p->max_blocks;
-					next = p->max_blocks;
-					if( avail < nblocks ) break;
-				}
-				if( avail >= nblocks ) {
-					p->next_block = next - avail;
-					goto alloc_var;
-				}
-				if( next & 31 ) goto resume;
+		if( p->need_flush )
+			flush_free_list(ph);
+		gc_freelist *fl = p->free;
+		int k;
+		for(k=fl->current;k<fl->count;k++) {
+			gc_fl *c = GET_FL(fl,k);
+			if( c->count >= nblocks ) {
+				bid = c->pos;
+				c->pos += nblocks;
+				c->count -= nblocks;
+#				ifdef GC_DEBUG
+				if( c->count < 0 ) hl_fatal("assert");
+#				endif
+				if( c->count == 0 ) fl->current++;
+				goto alloc_var;
 			}
 		}
-			if( avail > p->free_blocks ) p->free_blocks = avail;
-			p->next_block = next;
-		} else if( p->next_block + nblocks <= p->max_blocks )
-			break;
-skip:
-		if( p->free_blocks > max_free )
-			max_free = p->free_blocks;
-		ph = ph->next_page;
-		if( ph == NULL && max_free >= nblocks ) {
-			max_free = 0;
-			ph = gc_pages[pid];
-		}
+		ph = ph->next_page;
 	}
 	if( ph == NULL ) {
 		int psize = GC_PAGE_SIZE;
 		while( psize < size + 1024 )
 			psize <<= 1;
 		ph = gc_allocator_new_page(pid, GC_SIZES[part], psize, kind, true);
+		p = &ph->alloc;
+		bid = p->first_block;
+		p->free->first.pos += nblocks;
+		p->free->first.count -= nblocks;
 	}
 alloc_var:
-	p = &ph->alloc;
-	ptr = ph->base + p->next_block * p->block_size;
+	ptr = ph->base + bid * p->block_size;
 #	ifdef GC_DEBUG
 	{
 		int i;
-		if( p->next_block < p->first_block || p->next_block + nblocks > p->max_blocks )
+		if( bid < p->first_block || bid + nblocks > p->max_blocks )
 			hl_fatal("assert");
 		for(i=0;i<size;i++)
 			if( ptr[i] != 0xDD )
@@ -258,24 +320,18 @@ alloc_var:
 	}
 #	endif
 	if( ph->bmp ) {
-		int bid = p->next_block;
 #		ifdef GC_DEBUG
 		int i;
 		for(i=0;i<nblocks;i++) {
-			if( (ph->bmp[bid>>3]&(1<<(bid&7))) != 0 ) hl_fatal("Alloc on marked block");
-			bid++;
+			int b = bid + i;
+			if( (ph->bmp[b>>3]&(1<<(b&7))) != 0 ) hl_fatal("Alloc on marked block");
 		}
-		bid = p->next_block;
 #		endif
 		ph->bmp[bid>>3] |= 1<<(bid&7);
-	} else {
-		p->free_blocks = p->max_blocks - (p->next_block + nblocks);
 	}
-	if( nblocks > 1 ) MZERO(p->sizes + p->next_block, nblocks);
-	p->sizes[p->next_block] = (unsigned char)nblocks;
-	p->next_block += nblocks;
+	if( nblocks > 1 ) MZERO(p->sizes + bid, nblocks);
+	p->sizes[bid] = (unsigned char)nblocks;
 	gc_free_pages[pid] = ph;
-	gc_free_blocks[pid] = max_free;
 	return ptr;
 }
 
@@ -332,6 +388,7 @@ static void gc_flush_empty_pages() {
 					gc_pages[i] = next;
 				if( gc_free_pages[i] == ph )
 					gc_free_pages[i] = next;
+				free_freelist(p->free);
 				gc_free_page(ph, p->max_blocks);
 			} else
 				prev = ph;
@@ -404,11 +461,9 @@ static void gc_allocator_before_mark( unsigned char *mark_cur ) {
 	for(pid=0;pid<GC_ALL_PAGES;pid++) {
 		gc_pheader *p = gc_pages[pid];
 		gc_free_pages[pid] = p;
-		gc_free_blocks[pid] = 0;
 		while( p ) {
 			p->bmp = mark_cur;
-			p->alloc.next_block = p->alloc.first_block;
-			p->alloc.free_blocks = 0;
+			p->alloc.need_flush = true;
 			mark_cur += (p->alloc.max_blocks + 7) >> 3;
 			p = p->next_page;
 		}
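
Overall, the patch replaces the per-page next_block/free_blocks cursors with a per-page list of (pos,count) free ranges: gc_allocator_new_page seeds the list with a single range covering every block after first_block, gc_allocator_before_mark merely flags each page with need_flush instead of rewinding a cursor, and the next allocation on a flagged page calls flush_free_list to rebuild the ranges lazily from the mark bitmap, folding in whatever the old list had left unconsumed.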