
Fixed large allocation issues

Brian Fiete 5 years ago
parent commit c531ade968

+ 29 - 5
BeefLibs/corlib/src/Collections/Generic/List.bf

@@ -28,8 +28,13 @@ namespace System.Collections.Generic
 	{
 		private const int_cosize cDefaultCapacity = 4;
 
+#if BF_LARGE_COLLECTIONS
+		const int_cosize SizeFlags = 0x7FFFFFFF'FFFFFFFF;
+		const int_cosize DynAllocFlag = (int_cosize)0x80000000'00000000;
+#else
 		const int_cosize SizeFlags = 0x7FFFFFFF;
 		const int_cosize DynAllocFlag = (int_cosize)0x80000000;
+#endif
 
 #if BF_ENABLE_REALTIME_LEAK_CHECK
 		static DbgRawAllocData sRawAllocData;
@@ -393,11 +398,13 @@ namespace System.Collections.Generic
 			int allocSize = AllocSize;
 			if (allocSize < min)
 			{
-				int_cosize newCapacity = (int_cosize)(allocSize == 0 ? cDefaultCapacity : allocSize * 2);
-				// Allow the list to grow to maximum possible capacity (~2G elements) before encountering overflow.
-				// Note that this check works even when mItems.Length overflowed thanks to the (uint) cast
-				//if ((uint)newCapacity > Array.MaxArrayLength) newCapacity = Array.MaxArrayLength;
-				if (newCapacity < min) newCapacity = (int_cosize)min;
+				int newCapacity = allocSize == 0 ? cDefaultCapacity : allocSize + allocSize / 2;
 +				// If we overflow, try to set to max. The "< min" check after will still bump us up
+				// if necessary
+				if (newCapacity > SizeFlags) 
+					newCapacity = SizeFlags;
+				if (newCapacity < min)
+					newCapacity = min;
 				Capacity = newCapacity;
 			}
 		}
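
The hunk above replaces the doubling strategy with 1.5x growth and clamps through SizeFlags instead of the commented-out Array.MaxArrayLength check. A minimal C++ sketch of the new policy, assuming the default (non-BF_LARGE_COLLECTIONS) layout where int_cosize is 32-bit and Beef's native int is pointer-sized, so the intermediate cannot overflow:

```cpp
#include <cstdint>

const int64_t kSizeFlags = 0x7FFFFFFF; // 32-bit int_cosize default

// Grow by 1.5x; clamp to the flag-masked maximum; then bump to the
// requested minimum. The 64-bit intermediate means "> kSizeFlags" also
// catches products that would have wrapped a 32-bit int_cosize.
int64_t GrowCapacity(int64_t allocSize, int64_t min)
{
    int64_t newCapacity = (allocSize == 0) ? 4 : allocSize + allocSize / 2;
    if (newCapacity > kSizeFlags)
        newCapacity = kSizeFlags;
    if (newCapacity < min)
        newCapacity = min;
    return newCapacity;
}
```

The switch from 2x to 1.5x growth presumably eases allocator pressure for very large lists, which fits the commit's theme.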
@@ -459,6 +466,23 @@ namespace System.Collections.Generic
 #endif
 		}
 
+		public void Insert(int index, Span<T> items)
+		{
+			if (items.Length == 0)
+				return;
+			int addCount = items.Length;
+			if (mSize + addCount > AllocSize) EnsureCapacity(mSize + addCount);
+			if (index < mSize)
+			{
+				Internal.MemCpy(mItems + index + addCount, mItems + index, (mSize - index) * strideof(T), alignof(T));
+			}
+			Internal.MemCpy(mItems + index, items.Ptr, addCount * strideof(T));
+			mSize += (int_cosize)addCount;
+#if VERSION_LIST
+			mVersion++;
+#endif
+		}
+
 		public void RemoveAt(int index)
 		{
 			Debug.Assert((uint)index < (uint)mSize);

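The new Insert overload block-copies a whole Span in one shift instead of repeated single-element inserts. A C++ analogue for trivially copyable elements (hypothetical helper; the diff relies on Beef's Internal.MemCpy for the overlapping tail shift, while ISO C++ requires memmove there):

```cpp
#include <cstring>
#include <cstddef>

// Shift the tail up by `count`, then copy the incoming items into the gap.
// Capacity is assumed to have been ensured beforehand, as EnsureCapacity does.
template <typename T>
void InsertSpan(T* items, size_t& size, size_t index, const T* src, size_t count)
{
    if (count == 0)
        return;
    if (index < size)
        std::memmove(items + index + count, items + index,
                     (size - index) * sizeof(T)); // regions overlap
    std::memcpy(items + index, src, count * sizeof(T)); // disjoint copy
    size += count;
}
```
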
+ 2 - 2
BeefLibs/corlib/src/String.bf

@@ -685,7 +685,7 @@ namespace System
 		void Realloc(int newSize)
 		{
 			Debug.Assert(AllocSize > 0, "String has been frozen");
-			Debug.Assert((uint_strsize)newSize < 0x40000000);
+			Debug.Assert((uint_strsize)newSize <= cSizeFlags);
 			char8* newPtr = new:this char8[newSize]*;
 			Internal.MemCpy(newPtr, Ptr, mLength);
 			if (IsDynAlloc)
@@ -703,7 +703,7 @@ namespace System
 		void Realloc(char8* newPtr, int newSize)
 		{
 			Debug.Assert(AllocSize > 0, "String has been frozen");
-			Debug.Assert((uint_strsize)newSize < 0x40000000);
+			Debug.Assert((uint_strsize)newSize <= cSizeFlags);
 			Internal.MemCpy(newPtr, Ptr, mLength);
 			if (IsDynAlloc)
 				delete:this mPtr;

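Both Realloc asserts previously hard-coded 0x40000000, which matches cSizeFlags only in the 32-bit string layout; using the named constant keeps the bound correct when the size type widens. A sketch of the flag-packed size word the assert protects (the flag assignments here are assumptions, not taken from the diff):

```cpp
#include <cstdint>
#include <cassert>

const uint32_t kSizeFlags    = 0x3FFFFFFF; // low 30 bits: allocation size
const uint32_t kDynAllocFlag = 0x40000000; // assumed: heap-allocated bit
const uint32_t kStrPtrFlag   = 0x80000000; // assumed: out-of-line pointer bit

// Pack a size plus flags; "< 0x40000000" and "<= kSizeFlags" agree here,
// but only the named bound survives a move to a 64-bit size word.
uint32_t PackAllocSize(uint32_t size, uint32_t flags)
{
    assert(size <= kSizeFlags);
    return size | flags;
}
```
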
+ 2 - 2
BeefRT/dbg/DbgInternal.cpp

@@ -268,7 +268,7 @@ void* Internal::Dbg_GetMetadata(bf::System::Object* obj)
 
 intptr Internal::Dbg_PrepareStackTrace(intptr baseAllocSize, intptr maxStackTraceDepth)
 {
-	int allocSize = 0;
+	intptr allocSize = 0;
 	if (maxStackTraceDepth > 1)
 	{
 		int capturedTraceCount = BF_CAPTURE_STACK(1, (intptr*)gPendingAllocState.mStackTrace, min((int)maxStackTraceDepth, 1024));
@@ -292,7 +292,7 @@ bf::System::Object* Internal::Dbg_ObjectAlloc(bf::System::Reflection::TypeInstan
 {	
 	BF_ASSERT((BFRTFLAGS & BfRtFlags_ObjectHasDebugFlags) != 0);
 	Object* result;	
-	int allocSize = BF_ALIGN(size, typeInst->mInstAlign);
+	intptr allocSize = BF_ALIGN(size, typeInst->mInstAlign);
 	uint8* allocBytes = (uint8*)BfObjectAllocate(allocSize, typeInst->_GetType());
 // 	int dataOffset = (int)(sizeof(intptr) * 2);
 // 	memset(allocBytes + dataOffset, 0, size - dataOffset);

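Both Dbg_* fixes widen the aligned allocation size from int to intptr. Assuming BF_ALIGN is the usual round-up-to-power-of-two idiom (its definition isn't in this diff), the point is that the rounded result must live in a pointer-sized type:

```cpp
#include <cstdint>

// Assumed BF_ALIGN-style idiom: round size up to a power-of-two alignment.
static inline intptr_t AlignUp(intptr_t size, intptr_t align)
{
    return (size + align - 1) & ~(align - 1);
}
// AlignUp(3'000'000'005, 16) == 3'000'000'016 in 64-bit arithmetic; the
// same expression stored into a 32-bit int would wrap negative.
```
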
+ 6 - 6
BeefRT/dbg/gc.cpp

@@ -900,14 +900,14 @@ void BFGC::SweepSpan(tcmalloc_obj::Span* span, int expectedStartPage)
 	}
 
 	intptr pageSize = (intptr)1<<kPageShift;
-	int spanSize = pageSize * span->length;
+	intptr spanSize = pageSize * span->length;
 	void* spanStart = (void*)((intptr)span->start << kPageShift);
 	void* spanEnd = (void*)((intptr)spanStart + spanSize);
 	void* spanPtr = spanStart;
 
     BF_LOGASSERT((spanStart >= tcmalloc_obj::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_obj::PageHeap::sAddressEnd));
     
-	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
+	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
 	if (elementSize == 0)
 		elementSize = spanSize;
 	BF_LOGASSERT(elementSize >= sizeof(bf::System::Object));
@@ -2040,14 +2040,14 @@ void BFGC::ObjReportHandleSpan(tcmalloc_obj::Span* span, int expectedStartPage,
 	}
 
 	intptr pageSize = (intptr)1<<kPageShift;
-	int spanSize = pageSize * span->length;
+	intptr spanSize = pageSize * span->length;
 	void* spanStart = (void*)((intptr)span->start << kPageShift);
 	void* spanEnd = (void*)((intptr)spanStart + spanSize);
 	void* spanPtr = spanStart;
 
 	BF_LOGASSERT((spanStart >= tcmalloc_obj::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_obj::PageHeap::sAddressEnd));
 
-	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
+	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
 	if (elementSize == 0)
 		elementSize = spanSize;
 	BF_LOGASSERT(elementSize >= sizeof(bf::System::Object));
@@ -2519,7 +2519,7 @@ void BFGC::MarkFromGCThread(bf::System::Object* obj)
 		return;
 		
 	intptr pageSize = (intptr) 1 << kPageShift;
-	int spanSize = pageSize * span->length;
+	intptr spanSize = pageSize * span->length;
 	void* spanStart = (void*)((intptr)span->start << kPageShift);
 	void* spanEnd = (void*)((intptr)spanStart + spanSize);
 		
@@ -2546,7 +2546,7 @@ void BFGC::MarkFromGCThread(bf::System::Object* obj)
 		}
 	}
 
-	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
+	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
 	// Large alloc
 	if (elementSize == 0)
 	{

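All four gc.cpp hunks fix the same truncation: spanSize and elementSize were computed in int, so any span past 2 GB wrapped. A small demonstration (kPageShift of 13, i.e. 8 KB tcmalloc pages, is an assumption):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    intptr_t pageSize = (intptr_t)1 << 13;        // assumed kPageShift == 13
    intptr_t length   = 1 << 19;                  // 512K pages => 4 GB span
    int      narrow   = (int)(pageSize * length); // wraps to 0 on LP64 targets
    intptr_t wide     = pageSize * length;        // 4294967296, as intended
    printf("int: %d  intptr: %lld\n", narrow, (long long)wide);
    return 0;
}
```
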
+ 20 - 20
BeefRT/dbg/gc_raw.cpp

@@ -55,13 +55,13 @@ using namespace Beefy;
 struct DeferredFreeEntry
 {
 	bf::System::Object* mObject;
-	int mAllocSize;
+	intptr mAllocSize;
 };
 
 static Beefy::Deque<DeferredFreeEntry> gDeferredFrees;
-static int gRawAllocSize = 0;
-static int gMaxRawAllocSize = 0;
-static int gDeferredObjectFreeSize = 0;
+static intptr gRawAllocSize = 0;
+static intptr gMaxRawAllocSize = 0;
+static intptr gDeferredObjectFreeSize = 0;
 
 void BFGC::RawInit()
 {
@@ -97,14 +97,14 @@ void BFGC::RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage)
 	}
 
 	intptr pageSize = (intptr)1 << kPageShift;
-	int spanSize = pageSize * span->length;
+	intptr spanSize = pageSize * span->length;
 	void* spanStart = (void*)((intptr)span->start << kPageShift);
 	void* spanEnd = (void*)((intptr)spanStart + spanSize);
 	void* spanPtr = spanStart;
 
 	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));
 
-	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
+	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
 	if (elementSize == 0)
 		elementSize = spanSize;
 	BF_LOGASSERT(elementSize >= sizeof(bf::System::Object));
@@ -116,14 +116,14 @@ void BFGC::RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage)
 		{	
 			if (rawAllocData->mMarkFunc != NULL)
 			{	
-				int extraDataSize = sizeof(intptr);
+				intptr extraDataSize = sizeof(intptr);
 				if (rawAllocData->mMaxStackTrace == 1)
 				{
 					extraDataSize += sizeof(intptr);					
 				}
 				else if (rawAllocData->mMaxStackTrace > 1)
 				{
-					int stackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));					
+					intptr stackTraceCount = *(intptr*)((uint8*)spanPtr + elementSize - sizeof(intptr) - sizeof(intptr));					
 					extraDataSize += (1 + stackTraceCount) * sizeof(intptr);
 				}
 
@@ -132,10 +132,10 @@ void BFGC::RawMarkSpan(tcmalloc_raw::Span* span, int expectedStartPage)
 				 
 				// It's possible we can overestimate elemCount, particularly for large allocations. This doesn't cause a problem
 				//  because we can safely mark on complete random memory -- pointer values are always validated before being followed
-				int elemStride = BF_ALIGN(rawAllocData->mType->mSize, rawAllocData->mType->mAlign);
-				int dataSize = elementSize - extraDataSize;
-				int elemCount = dataSize / elemStride;
-				for (int elemIdx = 0; elemIdx < elemCount; elemIdx++)
+				intptr elemStride = BF_ALIGN(rawAllocData->mType->mSize, rawAllocData->mType->mAlign);
+				intptr dataSize = elementSize - extraDataSize;
+				intptr elemCount = dataSize / elemStride;
+				for (intptr elemIdx = 0; elemIdx < elemCount; elemIdx++)
 				{
 				 	markFunc((uint8*)spanPtr + elemIdx * elemStride);
 				}
@@ -161,7 +161,7 @@ void BFGC::RawMarkAll()
 	if (pageHeap == NULL)
 		return;
 
-	int leafCheckCount = 0;
+	intptr leafCheckCount = 0;
 
 #ifdef BF32
 	for (int rootIdx = 0; rootIdx < PageHeap::PageMap::ROOT_LENGTH; rootIdx++)
@@ -228,14 +228,14 @@ void BFGC::RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage,
 	}
 
 	intptr pageSize = (intptr)1 << kPageShift;
-	int spanSize = pageSize * span->length;
+	intptr spanSize = pageSize * span->length;
 	void* spanStart = (void*)((intptr)span->start << kPageShift);
 	void* spanEnd = (void*)((intptr)spanStart + spanSize);
 	void* spanPtr = spanStart;
 
 	BF_LOGASSERT((spanStart >= tcmalloc_raw::PageHeap::sAddressStart) && (spanEnd <= tcmalloc_raw::PageHeap::sAddressEnd));
 
-	int elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
+	intptr elementSize = Static::sizemap()->ByteSizeForClass(span->sizeclass);
 	if (elementSize == 0)
 		elementSize = spanSize;		
 
@@ -250,7 +250,7 @@ void BFGC::RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage,
 
 			if (sizeMap == NULL)
 			{
-				int extraDataSize = sizeof(intptr);
+				intptr extraDataSize = sizeof(intptr);
 
 				RawLeakInfo rawLeakInfo;
 				rawLeakInfo.mRawAllocData = rawAllocData;
@@ -276,7 +276,7 @@ void BFGC::RawReportHandleSpan(tcmalloc_raw::Span* span, int expectedStartPage,
 				
 				if (rawAllocData->mType != NULL)
 				{
-					int typeSize;
+					intptr typeSize;
 					if ((gBfRtDbgFlags & BfRtFlags_ObjectHasDebugFlags) != 0)
 						typeSize = rawAllocData->mType->mSize;
 					else
@@ -374,7 +374,7 @@ void BFGC::RawReport(String& msg, intptr& freeSize, std::multimap<AllocInfo, bf:
 	allocIdSet.clear();
 #endif
 
-	int leafCheckCount = 0;
+	intptr leafCheckCount = 0;
 	bool overflowed = false;
 
 	Beefy::Dictionary<bf::System::Type*, AllocInfo> sizeMap;
@@ -556,7 +556,7 @@ void BfRawFree(void* ptr)
 {	
 	const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;	
 	size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
-	int allocSize = 0;
+	intptr allocSize = 0;
 	if (cl == 0)
 	{		
 		auto span = Static::pageheap()->GetDescriptor(p);
@@ -602,7 +602,7 @@ void BfRawFree(void* ptr)
 			entry.mAllocSize = allocSize;
 			gDeferredFrees.Add(entry);
 
-			int maxDeferredSize = gMaxRawAllocSize * gBFGC.mMaxRawDeferredObjectFreePercentage / 100;
+			intptr maxDeferredSize = gMaxRawAllocSize * gBFGC.mMaxRawDeferredObjectFreePercentage / 100;
 			while (gDeferredObjectFreeSize > maxDeferredSize)
 			{
 				DeferredFreeEntry entry = gDeferredFrees.PopBack();
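
The same widening matters for the deferred-free bookkeeping at the end of the diff: the budget multiplies the peak raw allocation size by a percentage, so 32-bit math wraps long before the peak itself reaches 2 GB. A small check (the 30% figure is a stand-in; mMaxRawDeferredObjectFreePercentage's value isn't shown):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    int64_t peak    = 100LL * 1024 * 1024;    // 100 MB peak raw alloc size
    int32_t wrapped = (int32_t)(peak * 30);   // 32-bit product wraps negative
    int32_t bad     = wrapped / 100;          // bogus negative budget
    int64_t good    = peak * 30 / 100;        // ~31.4 MB, as intended
    printf("int: %d  intptr: %lld\n", bad, (long long)good);
    return 0;
}
```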