@@ -62,7 +62,7 @@ const
     current thread. Since the allocations of oschunks are added together for
     all blocksizes, this is only a fuzzy indication of when the size will be
     doubled rather than a hard and fast boundary. }
-  fixedallocthreshold = (maxblocksize shr blockshift) * 2;
+  fixedallocthreshold = (maxblocksize shr blockshift) * 8;
   { maximum size to which locgrowheapsizesmall can grow }
   maxgrowheapsizesmall = 256*1024;
 
@@ -177,17 +177,9 @@ type
     oslist : poschunk; { os chunks free, available for use }
     fixedlists : tfixedfreelists;
     oscount : dword; { number of os chunks on oslist }
-    { we gradually grow the size of the blocks used for fixed allocations in
-      case many of them are allocated. However, don't take successive
-      allocate/free cases into account, since that mean the block's size is
-      fine and that the program simply exhibits a cyclic behaviour (in which
-      case increasing the blocksize could even slow things down due to the
-      subdividing overhead) and MaxKeptOSChunks should probably be increased
-      instead. }
-    lastfixedopwasalloc: boolean;
-    { how many oschunks have been successively allocated in this thread since
+    { how many oschunks have been allocated in this thread since
       the last time we doubled the locgrowheapsizesmall size }
-    fixedallocated: byte;
+    fixedallocated: dword;
     { the size of oschunks allocated for fixed allocations in this thread;
       initialised on thread creation with the global growheapsizesmall setting }
     locgrowheapsizesmall: ptruint;
@@ -498,7 +490,6 @@ procedure free_oschunk(loc_freelists: pfreelists; poc: poschunk);
 var
   pocsize: ptruint;
 begin
-  loc_freelists^.lastfixedopwasalloc:=false;
   remove_freed_fixed_chunks(poc);
   if assigned(poc^.prev_any) then
     poc^.prev_any^.next_any := poc^.next_any
@@ -506,6 +497,8 @@ begin
     loc_freelists^.oslist_all := poc^.next_any;
   if assigned(poc^.next_any) then
     poc^.next_any^.prev_any := poc^.prev_any;
+  if poc^.used >= 0 then
+    dec(loc_freelists^.fixedallocated);
   pocsize := poc^.size and sizemask;
   dec(loc_freelists^.internal_status.currheapsize, pocsize);
   SysOSFree(poc, pocsize);
@@ -884,20 +877,16 @@ begin
         pmc_next^.prev_fixed := pmc;
       loc_freelists^.fixedlists[chunkindex] := pmemchunk_fixed(result);
       { check whether we should increase the size of the fixed freelist blocks }
-      if (loc_freelists^.lastfixedopwasalloc) then
+      inc(loc_freelists^.fixedallocated);
+      if loc_freelists^.fixedallocated > fixedallocthreshold then
         begin
-          inc(loc_freelists^.fixedallocated);
-          if (loc_freelists^.fixedallocated > fixedallocthreshold) then
-            begin
-              if (loc_freelists^.locgrowheapsizesmall < maxgrowheapsizesmall) then
-                inc(loc_freelists^.locgrowheapsizesmall,loc_freelists^.locgrowheapsizesmall);
-              { also set to zero in case we did not grow the blocksize to
-                prevent oveflows of this counter in case the rtl is compiled
-                range/overflow checking }
-              loc_freelists^.fixedallocated:=0;
-            end;
+          if loc_freelists^.locgrowheapsizesmall < maxgrowheapsizesmall then
+            inc(loc_freelists^.locgrowheapsizesmall, loc_freelists^.locgrowheapsizesmall);
+          { also set to zero in case we did not grow the blocksize to
+            prevent oveflows of this counter in case the rtl is compiled
+            range/overflow checking }
+          loc_freelists^.fixedallocated := 0;
         end;
-      loc_freelists^.lastfixedopwasalloc:=true;
     end
   else
     begin