Forráskód Böngészése

Merge remote-tracking branch 'origin/master' into file-tags-without-comments

Karl Zylinski 11 hónapja
szülő
commit
86e291235d

+ 5 - 5
.github/workflows/ci.yml

@@ -257,16 +257,16 @@ jobs:
         run: sudo apt-get install -y qemu-user qemu-user-static gcc-12-riscv64-linux-gnu libc6-riscv64-cross
 
       - name: Odin run
-        run: ./odin run examples/demo -vet -strict-style -disallow-do -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static"
+        run: ./odin run examples/demo -vet -strict-style -disallow-do -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static" -no-rpath
 
       - name: Odin run -debug
-        run: ./odin run examples/demo -debug -vet -strict-style -disallow-do -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static"
+        run: ./odin run examples/demo -debug -vet -strict-style -disallow-do -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static" -no-rpath
 
       - name: Normal Core library tests
-        run: ./odin test tests/core/normal.odin -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static"
+        run: ./odin test tests/core/normal.odin -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static" -no-rpath
 
       - name: Optimized Core library tests
-        run: ./odin test tests/core/speed.odin -o:speed -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static"
+        run: ./odin test tests/core/speed.odin -o:speed -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static" -no-rpath
 
       - name: Internals tests
-        run: ./odin test tests/internal -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static"
+        run: ./odin test tests/internal -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -target:linux_riscv64 -extra-linker-flags:"-fuse-ld=/usr/bin/riscv64-linux-gnu-gcc-12 -static -Wl,-static" -no-rpath

+ 86 - 59
.github/workflows/nightly.yml

@@ -36,43 +36,54 @@ jobs:
           cp -r bin dist
           cp -r examples dist
       - name: Upload artifact
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: windows_artifacts
           path: dist
-  build_ubuntu:
-    name: Ubuntu Build
+  build_linux:
+    name: Linux Build
     if: github.repository == 'odin-lang/Odin'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+      - uses: jirutka/setup-alpine@v1
+        with:
+          branch: v3.20
       - name: (Linux) Download LLVM
         run: |
-          wget https://apt.llvm.org/llvm.sh
-          chmod +x llvm.sh
-          sudo ./llvm.sh 18
-          echo "/usr/lib/llvm-18/bin" >> $GITHUB_PATH
+          apk add --no-cache \
+          musl-dev llvm18-dev clang18 git mold lz4 \
+          libxml2-static llvm18-static zlib-static zstd-static \
+          make
+        shell: alpine.sh --root {0}
       - name: build odin
-        run: make nightly
+        # NOTE: this build has slow compile times because of musl
+        run: ci/build_linux_static.sh
+        shell: alpine.sh {0}
       - name: Odin run
         run: ./odin run examples/demo
       - name: Copy artifacts
         run: |
-          mkdir dist
-          cp odin dist
-          cp LICENSE dist
-          cp -r shared dist
-          cp -r base dist
-          cp -r core dist
-          cp -r vendor dist
-          cp -r examples dist
-          # Zipping so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
-          zip -r dist.zip dist
+          FILE="odin-linux-amd64-nightly+$(date -I)"
+          mkdir $FILE
+          cp odin $FILE
+          cp LICENSE $FILE
+          cp -r shared $FILE
+          cp -r base $FILE
+          cp -r core $FILE
+          cp -r vendor $FILE
+          cp -r examples $FILE
+          # Creating a tarball so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
+          tar -czvf dist.tar.gz $FILE
+      - name: Odin run
+        run: |
+          FILE="odin-linux-amd64-nightly+$(date -I)"
+          $FILE/odin run examples/demo
       - name: Upload artifact
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
-          name: ubuntu_artifacts
-          path: dist.zip
+          name: linux_artifacts
+          path: dist.tar.gz
   build_macos:
     name: MacOS Build
     if: github.repository == 'odin-lang/Odin'
@@ -89,24 +100,27 @@ jobs:
         run: CXXFLAGS="-L/usr/lib/system -L/usr/lib" make nightly
       - name: Bundle
         run: |
-          mkdir dist
-          cp odin dist
-          cp LICENSE dist
-          cp -r shared dist
-          cp -r base dist
-          cp -r core dist
-          cp -r vendor dist
-          cp -r examples dist
-          dylibbundler -b -x dist/odin -d dist/libs -od -p @executable_path/libs
-          # Zipping so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
-          zip -r dist.zip dist
+          FILE="odin-macos-amd64-nightly+$(date -I)"
+          mkdir $FILE
+          cp odin $FILE
+          cp LICENSE $FILE
+          cp -r shared $FILE
+          cp -r base $FILE
+          cp -r core $FILE
+          cp -r vendor $FILE
+          cp -r examples $FILE
+          dylibbundler -b -x $FILE/odin -d $FILE/libs -od -p @executable_path/libs
+          # Creating a tarball so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
+          tar -czvf dist.tar.gz $FILE
       - name: Odin run
-        run: ./dist/odin run examples/demo
+        run: |
+          FILE="odin-macos-amd64-nightly+$(date -I)"
+          $FILE/odin run examples/demo
       - name: Upload artifact
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: macos_artifacts
-          path: dist.zip
+          path: dist.tar.gz
   build_macos_arm:
     name: MacOS ARM Build
     if: github.repository == 'odin-lang/Odin'
@@ -123,30 +137,33 @@ jobs:
         run: CXXFLAGS="-L/usr/lib/system -L/usr/lib" make nightly
       - name: Bundle
         run: |
-          mkdir dist
-          cp odin dist
-          cp LICENSE dist
-          cp -r shared dist
-          cp -r base dist
-          cp -r core dist
-          cp -r vendor dist
-          cp -r examples dist
-          dylibbundler -b -x dist/odin -d dist/libs -od -p @executable_path/libs
-          # Zipping so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
-          zip -r dist.zip dist
+          FILE="odin-macos-arm64-nightly+$(date -I)"
+          mkdir $FILE
+          cp odin $FILE
+          cp LICENSE $FILE
+          cp -r shared $FILE
+          cp -r base $FILE
+          cp -r core $FILE
+          cp -r vendor $FILE
+          cp -r examples $FILE
+          dylibbundler -b -x $FILE/odin -d $FILE/libs -od -p @executable_path/libs
+          # Creating a tarball so executable permissions are retained, see https://github.com/actions/upload-artifact/issues/38
+          tar -czvf dist.tar.gz $FILE
       - name: Odin run
-        run: ./dist/odin run examples/demo
+        run: |
+          FILE="odin-macos-arm64-nightly+$(date -I)"
+          $FILE/odin run examples/demo
       - name: Upload artifact
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: macos_arm_artifacts
-          path: dist.zip
+          path: dist.tar.gz
   upload_b2:
     runs-on: [ubuntu-latest]
-    needs: [build_windows, build_macos, build_macos_arm, build_ubuntu]
+    needs: [build_windows, build_macos, build_macos_arm, build_linux]
     steps:
       - uses: actions/checkout@v4
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v5
         with:
           python-version: '3.8.x'
 
@@ -160,24 +177,33 @@ jobs:
         run: python -c "import sys; print(sys.version)"
 
       - name: Download Windows artifacts
-        uses: actions/download-artifact@v1
+        uses: actions/download-artifact@v4.1.7
         with:
           name: windows_artifacts
+          path: windows_artifacts
 
       - name: Download Ubuntu artifacts
-        uses: actions/download-artifact@v1
+        uses: actions/download-artifact@v4.1.7
         with:
-          name: ubuntu_artifacts
+          name: linux_artifacts
+          path: linux_artifacts
 
       - name: Download macOS artifacts
-        uses: actions/download-artifact@v1
+        uses: actions/download-artifact@v4.1.7
         with:
           name: macos_artifacts
+          path: macos_artifacts
 
       - name: Download macOS arm artifacts
-        uses: actions/download-artifact@v1
+        uses: actions/download-artifact@v4.1.7
         with:
           name: macos_arm_artifacts
+          path: macos_arm_artifacts
+
+      - name: Debug
+        run: |
+          tree -L 2
 
       - name: Create archives and upload
         shell: bash
@@ -187,9 +213,10 @@ jobs:
           BUCKET: ${{ secrets.B2_BUCKET }}
           DAYS_TO_KEEP: ${{ secrets.B2_DAYS_TO_KEEP }}
         run: |
+          file linux_artifacts/dist.tar.gz
           python3 ci/nightly.py artifact windows-amd64 windows_artifacts/
-          python3 ci/nightly.py artifact ubuntu-amd64 ubuntu_artifacts/dist.zip
-          python3 ci/nightly.py artifact macos-amd64 macos_artifacts/dist.zip
-          python3 ci/nightly.py artifact macos-arm64 macos_arm_artifacts/dist.zip
+          python3 ci/nightly.py artifact linux-amd64 linux_artifacts/dist.tar.gz
+          python3 ci/nightly.py artifact macos-amd64 macos_artifacts/dist.tar.gz
+          python3 ci/nightly.py artifact macos-arm64 macos_arm_artifacts/dist.tar.gz
           python3 ci/nightly.py prune
           python3 ci/nightly.py json

+ 19 - 0
ci/build_linux_static.sh

@@ -0,0 +1,19 @@
+#!/usr/bin/env sh
+# Intended for use in Alpine containers, see the "nightly" Github action for a list of dependencies
+
+CXX="clang++-18"
+LLVM_CONFIG="llvm-config-18"
+
+DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
+
+CPPFLAGS="-DODIN_VERSION_RAW=\"dev-$(date +"%Y-%m")\""
+CXXFLAGS="-std=c++14 $($LLVM_CONFIG --cxxflags --ldflags)"
+
+LDFLAGS="-static -lm -lzstd -lz -lffi -pthread -ldl -fuse-ld=mold"
+LDFLAGS="$LDFLAGS $($LLVM_CONFIG --link-static --ldflags --libs --system-libs --libfiles)"
+LDFLAGS="$LDFLAGS -Wl,-rpath=\$ORIGIN"
+
+EXTRAFLAGS="-DNIGHTLY -O3"
+
+set -x
+$CXX src/main.cpp src/libtommath.cpp $DISABLED_WARNINGS $CPPFLAGS $CXXFLAGS $EXTRAFLAGS $LDFLAGS -o odin

+ 24 - 19
ci/nightly.py

@@ -2,7 +2,7 @@ import os
 import sys
 from zipfile  import ZipFile, ZIP_DEFLATED
 from b2sdk.v2 import InMemoryAccountInfo, B2Api
-from datetime import datetime
+from datetime import datetime, timezone
 import json
 
 UPLOAD_FOLDER = "nightly/"
@@ -22,7 +22,7 @@ def auth() -> bool:
 		pass        # Not yet authenticated
 
 	err = b2_api.authorize_account("production", application_key_id, application_key)
-	return err == None
+	return err is None
 
 def get_bucket():
 	if not auth(): sys.exit(1)
@@ -32,30 +32,35 @@ def remove_prefix(text: str, prefix: str) -> str:
 	return text[text.startswith(prefix) and len(prefix):]
 
 def create_and_upload_artifact_zip(platform: str, artifact: str) -> int:
-	now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
-	destination_zip_name = "odin-{}-nightly+{}.zip".format(platform, now.strftime("%Y-%m-%d"))
+	now = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
 
-	source_zip_name = artifact
-	if not artifact.endswith(".zip"):
-		print(f"Creating archive {destination_zip_name} from {artifact} and uploading to {bucket_name}")
+	source_archive: str
+	destination_name = f'odin-{platform}-nightly+{now.strftime("%Y-%m-%d")}'
 
-		source_zip_name = destination_zip_name
-		with ZipFile(source_zip_name, mode='w', compression=ZIP_DEFLATED, compresslevel=9) as z:
+	if platform.startswith("linux") or platform.startswith("macos"):
+		destination_name += ".tar.gz"
+		source_archive = artifact
+	else:
+		destination_name += ".zip"
+		source_archive = destination_name
+
+		print(f"Creating archive {destination_name} from {artifact} and uploading to {bucket_name}")
+		with ZipFile(source_archive, mode='w', compression=ZIP_DEFLATED, compresslevel=9) as z:
 			for root, directory, filenames in os.walk(artifact):
 				for file in filenames:
 					file_path = os.path.join(root, file)
 					zip_path  = os.path.join("dist", os.path.relpath(file_path, artifact))
 					z.write(file_path, zip_path)
 
-		if not os.path.exists(source_zip_name):
-			print(f"Error: Newly created ZIP archive {source_zip_name} not found.")
-			return 1
+	if not os.path.exists(source_archive):
+		print(f"Error: archive {source_archive} not found.")
+		return 1
 
-	print("Uploading {} to {}".format(source_zip_name, UPLOAD_FOLDER + destination_zip_name))
+	print("Uploading {} to {}".format(source_archive, UPLOAD_FOLDER + destination_name))
 	bucket = get_bucket()
 	res = bucket.upload_local_file(
-		source_zip_name,                   # Local file to upload
-		"nightly/" + destination_zip_name, # B2 destination path
+		source_archive,               # Local file to upload
+		"nightly/" + destination_name, # B2 destination path
 	)
 	return 0
 
@@ -65,8 +70,8 @@ def prune_artifacts():
 	bucket = get_bucket()
 	for file, _ in bucket.ls(UPLOAD_FOLDER, latest_only=False):
 		# Timestamp is in milliseconds
-		date  = datetime.fromtimestamp(file.upload_timestamp / 1_000.0).replace(hour=0, minute=0, second=0, microsecond=0)
-		now   = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
+		date  = datetime.fromtimestamp(file.upload_timestamp / 1_000.0, tz=timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
+		now   = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
 		delta = now - date
 
 		if delta.days > int(days_to_keep):
@@ -100,7 +105,7 @@ def update_nightly_json():
 			'sizeInBytes': size,
 		})
 
-	now = datetime.utcnow().isoformat()
+	now = datetime.now(timezone.utc).isoformat()
 
 	nightly = json.dumps({
 		'last_updated' : now,
@@ -137,4 +142,4 @@ if __name__ == "__main__":
 
 		elif command == "json":
 			res = update_nightly_json()
-			sys.exit(res)
+			sys.exit(res)

+ 11 - 27
core/encoding/cbor/unmarshal.odin

@@ -675,10 +675,6 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
 		return
 
 	case reflect.Type_Info_Map:
-		if !reflect.is_string(t.key) {
-			return _unsupported(v, hdr)
-		}
-
 		raw_map := (^mem.Raw_Map)(v.data)
 		if raw_map.allocator.procedure == nil {
 			raw_map.allocator = context.allocator
@@ -695,43 +691,31 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
 			new_len := uintptr(min(scap, runtime.map_len(raw_map^)+length))
 			runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
 		}
-		
-		// Temporary memory to unmarshal keys into before inserting them into the map.
+
+		// Temporary memory to unmarshal values into before inserting them into the map.
 		elem_backing := mem.alloc_bytes_non_zeroed(t.value.size, t.value.align, context.temp_allocator) or_return
 		defer delete(elem_backing, context.temp_allocator)
-
 		map_backing_value := any{raw_data(elem_backing), t.value.id}
 
-		for idx := 0; unknown || idx < length; idx += 1 {
-			// Decode key, keys can only be strings.
-			key: string
-			if keyv, kerr := decode_key(d, v); unknown && kerr == .Break {
-				break
-			} else if kerr != nil {
-				err = kerr
-				return
-			} else {
-				key = keyv
-			}
+		// Temporary memory to unmarshal keys into.
+		key_backing := mem.alloc_bytes_non_zeroed(t.key.size, t.key.align, context.temp_allocator) or_return
+		defer delete(key_backing, context.temp_allocator)
+		key_backing_value := any{raw_data(key_backing), t.key.id}
 
+		for idx := 0; unknown || idx < length; idx += 1 {
 			if unknown || idx > scap {
 				// Reserve space for new element so we can return allocator errors.
 				new_len := uintptr(runtime.map_len(raw_map^)+1)
 				runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
 			}
 
+			mem.zero_slice(key_backing)
+			_unmarshal_value(d, key_backing_value, _decode_header(r) or_return) or_return
+
 			mem.zero_slice(elem_backing)
 			_unmarshal_value(d, map_backing_value, _decode_header(r) or_return) or_return
 
-			key_ptr := rawptr(&key)
-			key_cstr: cstring
-			if reflect.is_cstring(t.key) {
-				assert_safe_for_cstring(key)
-				key_cstr = cstring(raw_data(key))
-				key_ptr = &key_cstr
-			}
-
-			set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_ptr, map_backing_value.data)
+			set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_backing_value.data, map_backing_value.data)
 			// We already reserved space for it, so this shouldn't fail.
 			assert(set_ptr != nil)
 		}

+ 3 - 3
core/math/math.odin

@@ -444,11 +444,11 @@ bias :: proc "contextless" (t, b: $T) -> T where intrinsics.type_is_numeric(T) {
 	return t / (((1/b) - 2) * (1 - t) + 1)
 }
 @(require_results)
-gain :: proc "contextless" (t, g: $T) -> T where intrinsics.type_is_numeric(T) {
+gain :: proc "contextless" (t, g: $T) -> T where intrinsics.type_is_float(T) {
 	if t < 0.5 {
-		return bias(t*2, g)*0.5
+		return bias(t*2, g) * 0.5
 	}
-	return bias(t*2 - 1, 1 - g)*0.5 + 0.5
+	return bias(t*2 - 1, 1 - g) * 0.5 + 0.5
 }
 
 

+ 35 - 38
core/os/os2/heap_linux.odin

@@ -1,17 +1,10 @@
 //+private
 package os2
 
-import "base:runtime"
-
 import "core:sys/linux"
 import "core:sync"
 import "core:mem"
 
-// Use the experimental custom heap allocator (over calling `malloc` etc.).
-// This is a switch because there are thread-safety problems that need to be fixed.
-// See: https://github.com/odin-lang/Odin/issues/4161
-USE_EXPERIMENTAL_ALLOCATOR :: #config(OS2_LINUX_USE_EXPERIMENTAL_ALLOCATOR, false)
-
 // NOTEs
 //
 // All allocations below DIRECT_MMAP_THRESHOLD exist inside of memory "Regions." A region
@@ -146,8 +139,6 @@ Region :: struct {
 	memory: [BLOCKS_PER_REGION]Allocation_Header,
 }
 
-when USE_EXPERIMENTAL_ALLOCATOR {
-
 _heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, mem.Allocator_Error) {
@@ -228,10 +219,6 @@ _heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
 	return nil, nil
 }
 
-} else {
-	_heap_allocator_proc :: runtime.heap_allocator_proc
-}
-
 heap_alloc :: proc(size: int) -> rawptr {
 	if size >= DIRECT_MMAP_THRESHOLD {
 		return _direct_mmap_alloc(size)
@@ -293,7 +280,8 @@ heap_alloc :: proc(size: int) -> rawptr {
 		_local_region, back_idx = _region_retrieve_with_space(blocks_needed, local_region_idx, back_idx)
 	}
 	user_ptr, used := _region_get_block(_local_region, idx, blocks_needed)
-	_local_region.hdr.free_blocks -= (used + 1)
+
+	sync.atomic_sub_explicit(&_local_region.hdr.free_blocks, used + 1, .Release)
 
 	// If this memory was ever used before, it now needs to be zero'd.
 	if idx < _local_region.hdr.last_used {
@@ -320,7 +308,7 @@ heap_resize :: proc(old_memory: rawptr, new_size: int) -> rawptr #no_bounds_chec
 
 heap_free :: proc(memory: rawptr) {
 	alloc := _get_allocation_header(memory)
-	if alloc.requested & IS_DIRECT_MMAP == IS_DIRECT_MMAP {
+	if sync.atomic_load(&alloc.requested) & IS_DIRECT_MMAP == IS_DIRECT_MMAP {
 		_direct_mmap_free(alloc)
 		return
 	}
@@ -475,25 +463,31 @@ _region_local_free :: proc(alloc: ^Allocation_Header) #no_bounds_check {
 	alloc := alloc
 	add_to_free_list := true
 
-	_local_region.hdr.free_blocks += _get_block_count(alloc^) + 1
+	idx := sync.atomic_load(&alloc.idx)
+	prev := sync.atomic_load(&alloc.prev)
+	next := sync.atomic_load(&alloc.next)
+	block_count := next - idx - 1
+	free_blocks := sync.atomic_load(&_local_region.hdr.free_blocks) + block_count + 1
+	sync.atomic_store_explicit(&_local_region.hdr.free_blocks, free_blocks, .Release)
 
 	// try to merge with prev
-	if alloc.idx > 0 && _local_region.memory[alloc.prev].free_idx != NOT_FREE {
-		_local_region.memory[alloc.prev].next = alloc.next
-		_local_region.memory[alloc.next].prev = alloc.prev
-		alloc = &_local_region.memory[alloc.prev]
+	if idx > 0 && sync.atomic_load(&_local_region.memory[prev].free_idx) != NOT_FREE {
+		sync.atomic_store_explicit(&_local_region.memory[prev].next, next, .Release)
+		_local_region.memory[next].prev = prev
+		alloc = &_local_region.memory[prev]
 		add_to_free_list = false
 	}
 
 	// try to merge with next
-	if alloc.next < BLOCKS_PER_REGION - 1 && _local_region.memory[alloc.next].free_idx != NOT_FREE {
-		old_next := alloc.next
-		alloc.next = _local_region.memory[old_next].next
-		_local_region.memory[alloc.next].prev = alloc.idx
+	if next < BLOCKS_PER_REGION - 1 && sync.atomic_load(&_local_region.memory[next].free_idx) != NOT_FREE {
+		old_next := next
+		sync.atomic_store_explicit(&alloc.next, sync.atomic_load(&_local_region.memory[old_next].next), .Release)
+
+		sync.atomic_store_explicit(&_local_region.memory[next].prev, idx, .Release)
 
 		if add_to_free_list {
-			_local_region.hdr.free_list[_local_region.memory[old_next].free_idx] = alloc.idx
-			alloc.free_idx = _local_region.memory[old_next].free_idx
+		        sync.atomic_store_explicit(&_local_region.hdr.free_list[_local_region.memory[old_next].free_idx], idx, .Release)
+		        sync.atomic_store_explicit(&alloc.free_idx, _local_region.memory[old_next].free_idx, .Release)
 		} else {
 			// NOTE: We have already merged with prev, and now merged with next.
 			//       Now, we are actually going to remove from the free_list.
@@ -505,10 +499,11 @@ _region_local_free :: proc(alloc: ^Allocation_Header) #no_bounds_check {
 	// This is the only place where anything is appended to the free list.
 	if add_to_free_list {
 		fl := _local_region.hdr.free_list
-		alloc.free_idx = _local_region.hdr.free_list_len
-		fl[alloc.free_idx] = alloc.idx
-		_local_region.hdr.free_list_len += 1
-		if int(_local_region.hdr.free_list_len) == len(fl) {
+		fl_len := sync.atomic_load(&_local_region.hdr.free_list_len)
+		sync.atomic_store_explicit(&alloc.free_idx, fl_len, .Release)
+		fl[alloc.free_idx] = idx
+		sync.atomic_store_explicit(&_local_region.hdr.free_list_len, fl_len + 1, .Release)
+		if int(fl_len + 1) == len(fl) {
 			free_alloc := _get_allocation_header(mem.raw_data(_local_region.hdr.free_list))
 			_region_resize(free_alloc, len(fl) * 2 * size_of(fl[0]), true)
 		}
@@ -525,8 +520,8 @@ _region_assign_free_list :: proc(region: ^Region, memory: rawptr, blocks: u16) {
 _region_retrieve_with_space :: proc(blocks: u16, local_idx: int = -1, back_idx: int = -1) -> (^Region, int) {
 	r: ^Region
 	idx: int
-	for r = global_regions; r != nil; r = r.hdr.next_region {
-		if idx == local_idx || idx < back_idx || r.hdr.free_blocks < blocks {
+	for r = sync.atomic_load(&global_regions); r != nil; r = r.hdr.next_region {
+		if idx == local_idx || idx < back_idx || sync.atomic_load(&r.hdr.free_blocks) < blocks {
 			idx += 1
 			continue
 		}
@@ -594,7 +589,7 @@ _region_segment :: proc(region: ^Region, alloc: ^Allocation_Header, blocks, new_
 
 _region_get_local_idx :: proc() -> int {
 	idx: int
-	for r := global_regions; r != nil; r = r.hdr.next_region {
+	for r := sync.atomic_load(&global_regions); r != nil; r = r.hdr.next_region {
 		if r == _local_region {
 			return idx
 		}
@@ -610,9 +605,10 @@ _region_find_and_assign_local :: proc(alloc: ^Allocation_Header) {
 		_local_region = _region_retrieve_from_addr(alloc)
 	}
 
-	// At this point, _local_region is set correctly. Spin until acquired
-	res: ^^Region
-	for res != &_local_region {
+	// At this point, _local_region is set correctly. Spin until acquired
+	res := CURRENTLY_ACTIVE
+
+	for res == CURRENTLY_ACTIVE {
 		res = sync.atomic_compare_exchange_strong_explicit(
 			&_local_region.hdr.local_addr,
 			&_local_region,
@@ -634,9 +630,9 @@ _region_contains_mem :: proc(r: ^Region, memory: rawptr) -> bool #no_bounds_chec
 _region_free_list_remove :: proc(region: ^Region, free_idx: u16) #no_bounds_check {
 	// pop, swap and update allocation hdr
 	if n := region.hdr.free_list_len - 1; free_idx != n {
-		region.hdr.free_list[free_idx] = region.hdr.free_list[n]
+		region.hdr.free_list[free_idx] = sync.atomic_load(&region.hdr.free_list[n]) 
 		alloc_idx := region.hdr.free_list[free_idx]
-		region.memory[alloc_idx].free_idx = free_idx
+		sync.atomic_store_explicit(&region.memory[alloc_idx].free_idx, free_idx, .Release)
 	}
 	region.hdr.free_list_len -= 1
 }
@@ -727,3 +723,4 @@ _get_allocation_header :: #force_inline proc(raw_mem: rawptr) -> ^Allocation_Hea
 _round_up_to_nearest :: #force_inline proc(size, round: int) -> int {
 	return (size-1) + round - (size-1) % round
 }
+

+ 3 - 4
core/sync/chan/chan.odin

@@ -421,21 +421,20 @@ raw_queue_pop :: proc "contextless" (q: ^Raw_Queue) -> (data: rawptr) {
 
 @(require_results)
 can_recv :: proc "contextless" (c: ^Raw_Chan) -> bool {
+	sync.guard(&c.mutex)
 	if is_buffered(c) {
 		return len(c) > 0
 	}
-	sync.guard(&c.mutex)
 	return sync.atomic_load(&c.w_waiting) > 0
 }
 
 
 @(require_results)
 can_send :: proc "contextless" (c: ^Raw_Chan) -> bool {
+	sync.guard(&c.mutex)
 	if is_buffered(c) {
-		sync.guard(&c.mutex)
-		return len(c) < cap(c)
+		return c.queue.len < c.queue.cap
 	}
-	sync.guard(&c.mutex)
 	return sync.atomic_load(&c.r_waiting) > 0
 }
 

+ 2 - 2
core/sys/darwin/Foundation/NSEvent.odin

@@ -5,8 +5,8 @@ Event :: struct {using _: Object}
 
 
 
-EventMask :: distinct bit_set[EventType; UInteger]
-EventMaskAny :: ~EventMask{}
+EventMask    :: distinct bit_set[EventType; UInteger]
+EventMaskAny :: transmute(EventMask)(max(UInteger))
 
 when size_of(UInteger) == 4 {
 	// We don't support a 32-bit darwin system but this is mostly to shut up the type checker for the time being

+ 2 - 0
src/build_settings.cpp

@@ -411,6 +411,7 @@ struct BuildContext {
 	bool   no_dynamic_literals;
 	bool   no_output_files;
 	bool   no_crt;
+	bool   no_rpath;
 	bool   no_entry_point;
 	bool   no_thread_local;
 	bool   use_lld;
@@ -430,6 +431,7 @@ struct BuildContext {
 	bool   json_errors;
 	bool   has_ansi_terminal_colours;
 
+	bool   fast_isel;
 	bool   ignore_lazy;
 	bool   ignore_llvm_build;
 	bool   ignore_panic;

+ 24 - 19
src/check_expr.cpp

@@ -3612,10 +3612,11 @@ gb_internal bool check_transmute(CheckerContext *c, Ast *node, Operand *o, Type
 		if (are_types_identical(src_bt, dst_bt)) {
 			return true;
 		}
-		if (is_type_integer(src_t) && is_type_integer(dst_t)) {
+		if ((is_type_integer(src_t) && is_type_integer(dst_t)) ||
+		    is_type_integer(src_t) && is_type_bit_set(dst_t)) {
 			if (types_have_same_internal_endian(src_t, dst_t)) {
 				ExactValue src_v = exact_value_to_integer(o->value);
-				GB_ASSERT(src_v.kind == ExactValue_Integer);
+				GB_ASSERT(src_v.kind == ExactValue_Integer || src_v.kind == ExactValue_Invalid);
 				BigInt v = src_v.value_integer;
 
 				BigInt smax = {};
@@ -4602,7 +4603,7 @@ gb_internal void convert_to_typed(CheckerContext *c, Operand *operand, Type *tar
 				    (operand->value.kind == ExactValue_Integer ||
 				     operand->value.kind == ExactValue_Float)) {
 					operand->mode = Addressing_Value;
-					target_type = t_untyped_nil;
+					// target_type = t_untyped_nil;
 				     	operand->value = empty_exact_value;
 					update_untyped_expr_value(c, operand->expr, operand->value);
 					break;
@@ -6203,22 +6204,6 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
 
 					Entity *vt = pt->params->Tuple.variables[pt->variadic_index];
 					o.type = vt->type;
-
-					// NOTE(bill, 2024-07-14): minimize the stack usage for variadic parameters with the backing array
-					if (c->decl) {
-						bool found = false;
-						for (auto &vr : c->decl->variadic_reuses) {
-							if (are_types_identical(vt->type, vr.slice_type)) {
-								vr.max_count = gb_max(vr.max_count, variadic_operands.count);
-								found = true;
-								break;
-							}
-						}
-						if (!found) {
-							array_add(&c->decl->variadic_reuses, VariadicReuseData{vt->type, variadic_operands.count});
-						}
-					}
-
 				} else {
 					dummy_argument_count += 1;
 					o.type = t_untyped_nil;
@@ -6412,6 +6397,23 @@ gb_internal CallArgumentError check_call_arguments_internal(CheckerContext *c, A
 			}
 			score += eval_param_and_score(c, o, t, err, true, var_entity, show_error);
 		}
+
+		if (!vari_expand && variadic_operands.count != 0) {
+			// NOTE(bill, 2024-07-14): minimize the stack usage for variadic parameters with the backing array
+			if (c->decl) {
+				bool found = false;
+				for (auto &vr : c->decl->variadic_reuses) {
+					if (are_types_identical(slice, vr.slice_type)) {
+						vr.max_count = gb_max(vr.max_count, variadic_operands.count);
+						found = true;
+						break;
+					}
+				}
+				if (!found) {
+					array_add(&c->decl->variadic_reuses, VariadicReuseData{slice, variadic_operands.count});
+				}
+			}
+		}
 	}
 
 	if (data) {
@@ -8085,7 +8087,10 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
 					GB_ASSERT(c->curr_proc_decl->entity->type->kind == Type_Proc);
 					String scope_features = c->curr_proc_decl->entity->type->Proc.enable_target_feature;
 					if (!check_target_feature_is_superset_of(scope_features, pt->Proc.enable_target_feature, &invalid)) {
+						ERROR_BLOCK();
 						error(call, "Inlined procedure enables target feature '%.*s', this requires the calling procedure to at least enable the same feature", LIT(invalid));
+
+						error_line("\tSuggested Example: @(enable_target_feature=\"%.*s\")\n", LIT(invalid));
 					}
 				}
 			}

+ 11 - 7
src/linker.cpp

@@ -548,14 +548,8 @@ gb_internal i32 linker_stage(LinkerData *gen) {
 							//                available at runtime wherever the executable is run, so we make require those to be
 							//                local to the executable (unless the system collection is used, in which case we search
 							//                the system library paths for the library file).
-							if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o"))) {
-								// static libs and object files, absolute full path relative to the file in which the lib was imported from
+							if (string_ends_with(lib, str_lit(".a")) || string_ends_with(lib, str_lit(".o")) || string_ends_with(lib, str_lit(".so")) || string_contains_string(lib, str_lit(".so."))) {
 								lib_str = gb_string_append_fmt(lib_str, " -l:\"%.*s\" ", LIT(lib));
-							} else if (string_ends_with(lib, str_lit(".so")) || string_contains_string(lib, str_lit(".so."))) {
-								// dynamic lib, relative path to executable
-								// NOTE(vassvik): it is the user's responsibility to make sure the shared library files are visible
-								//                at runtime to the executable
-								lib_str = gb_string_append_fmt(lib_str, " -l:\"%s/%.*s\" ", cwd, LIT(lib));
 							} else {
 								// dynamic or static system lib, just link regularly searching system library paths
 								lib_str = gb_string_append_fmt(lib_str, " -l%.*s ", LIT(lib));
@@ -643,6 +637,16 @@ gb_internal i32 linker_stage(LinkerData *gen) {
 				}
 			}
 
+			if (!build_context.no_rpath) {
+				// Set the rpath to the $ORIGIN/@loader_path (the path of the executable),
+				// so that dynamic libraries are looked for at that path.
+				if (build_context.metrics.os == TargetOs_darwin) {
+					link_settings = gb_string_appendc(link_settings, "-Wl,-rpath,@loader_path ");
+				} else {
+					link_settings = gb_string_appendc(link_settings, "-Wl,-rpath,\\$ORIGIN ");
+				}
+			}
+
 			if (!build_context.no_crt) {
 				platform_lib_str = gb_string_appendc(platform_lib_str, "-lm ");
 				if (build_context.metrics.os == TargetOs_darwin) {

+ 7 - 0
src/llvm_backend.cpp

@@ -3081,6 +3081,13 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
 		lbModule *m = entry.value;
 		m->target_machine = target_machine;
 		LLVMSetModuleDataLayout(m->mod, LLVMCreateTargetDataLayout(target_machine));
+
+	#if LLVM_VERSION_MAJOR >= 18
+		if (build_context.fast_isel) {
+			LLVMSetTargetMachineFastISel(m->target_machine, true);
+		}
+	#endif
+
 		array_add(&target_machines, target_machine);
 	}
 

+ 2 - 2
src/llvm_backend_const.cpp

@@ -154,7 +154,7 @@ gb_internal LLVMValueRef llvm_const_named_struct(lbModule *m, Type *t, LLVMValue
 	GB_ASSERT(value_count_ == bt->Struct.fields.count);
 	
 	auto field_remapping = lb_get_struct_remapping(m, t);
-	unsigned values_with_padding_count = LLVMCountStructElementTypes(struct_type);
+	unsigned values_with_padding_count = elem_count;
 	
 	LLVMValueRef *values_with_padding = gb_alloc_array(permanent_allocator(), LLVMValueRef, values_with_padding_count);
 	for (unsigned i = 0; i < value_count; i++) {
@@ -722,7 +722,7 @@ gb_internal lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bo
 		}
 
 	case ExactValue_Integer:
-		if (is_type_pointer(type) || is_type_multi_pointer(type)) {
+		if (is_type_pointer(type) || is_type_multi_pointer(type) || is_type_proc(type)) {
 			LLVMTypeRef t = lb_type(m, original_type);
 			LLVMValueRef i = lb_big_int_to_llvm(m, t_uintptr, &value.value_integer);
 			res.value = LLVMConstIntToPtr(i, t);

+ 7 - 1
src/llvm_backend_expr.cpp

@@ -3451,8 +3451,14 @@ gb_internal lbValue lb_build_expr_internal(lbProcedure *p, Ast *expr) {
 
 	switch (expr->kind) {
 	case_ast_node(bl, BasicLit, expr);
+		if (type != nullptr && type->Named.name == "Error") {
+			Entity *e = type->Named.type_name;
+			if (e->pkg && e->pkg->name == "os") {
+				return lb_const_nil(p->module, type);
+			}
+		}
 		TokenPos pos = bl->token.pos;
-		GB_PANIC("Non-constant basic literal %s - %.*s", token_pos_to_string(pos), LIT(token_strings[bl->token.kind]));
+		GB_PANIC("Non-constant basic literal %s - %.*s (%s)", token_pos_to_string(pos), LIT(token_strings[bl->token.kind]), type_to_string(type));
 	case_end;
 
 	case_ast_node(bd, BasicDirective, expr);

+ 1 - 1
src/llvm_backend_type.cpp

@@ -826,7 +826,7 @@ gb_internal void lb_setup_type_info_data_giant_array(lbModule *m, i64 global_typ
 
 
 				if (t->Struct.soa_kind != StructSoa_None) {
-					Type *kind_type = get_struct_field_type(tag_type, 10);
+					Type *kind_type = get_struct_field_type(tag_type, 7);
 
 					lbValue soa_kind = lb_const_value(m, kind_type, exact_value_i64(t->Struct.soa_kind));
 					LLVMValueRef soa_type = get_type_info_ptr(m, t->Struct.soa_elem);

+ 21 - 1
src/main.cpp

@@ -325,6 +325,7 @@ enum BuildFlagKind {
 	BuildFlag_NoTypeAssert,
 	BuildFlag_NoDynamicLiterals,
 	BuildFlag_NoCRT,
+	BuildFlag_NoRPath,
 	BuildFlag_NoEntryPoint,
 	BuildFlag_UseLLD,
 	BuildFlag_UseSeparateModules,
@@ -389,6 +390,7 @@ enum BuildFlagKind {
 	BuildFlag_PrintLinkerFlags,
 
 	// internal use only
+	BuildFlag_InternalFastISel,
 	BuildFlag_InternalIgnoreLazy,
 	BuildFlag_InternalIgnoreLLVMBuild,
 	BuildFlag_InternalIgnorePanic,
@@ -507,7 +509,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
 	auto build_flags = array_make<BuildFlag>(heap_allocator(), 0, BuildFlag_COUNT);
 	add_flag(&build_flags, BuildFlag_Help,                    str_lit("help"),                      BuildFlagParam_None,    Command_all);
 	add_flag(&build_flags, BuildFlag_SingleFile,              str_lit("file"),                      BuildFlagParam_None,    Command__does_build | Command__does_check);
-	add_flag(&build_flags, BuildFlag_OutFile,                 str_lit("out"),                       BuildFlagParam_String,  Command__does_build | Command_test);
+	add_flag(&build_flags, BuildFlag_OutFile,                 str_lit("out"),                       BuildFlagParam_String,  Command__does_build | Command_test | Command_doc);
 	add_flag(&build_flags, BuildFlag_OptimizationMode,        str_lit("o"),                         BuildFlagParam_String,  Command__does_build);
 	add_flag(&build_flags, BuildFlag_ShowTimings,             str_lit("show-timings"),              BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_ShowMoreTimings,         str_lit("show-more-timings"),         BuildFlagParam_None,    Command__does_check);
@@ -532,6 +534,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
 	add_flag(&build_flags, BuildFlag_NoThreadLocal,           str_lit("no-thread-local"),           BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_NoDynamicLiterals,       str_lit("no-dynamic-literals"),       BuildFlagParam_None,    Command__does_check);
 	add_flag(&build_flags, BuildFlag_NoCRT,                   str_lit("no-crt"),                    BuildFlagParam_None,    Command__does_build);
+	add_flag(&build_flags, BuildFlag_NoRPath,                 str_lit("no-rpath"),                  BuildFlagParam_None,    Command__does_build);
 	add_flag(&build_flags, BuildFlag_NoEntryPoint,            str_lit("no-entry-point"),            BuildFlagParam_None,    Command__does_check &~ Command_test);
 	add_flag(&build_flags, BuildFlag_UseLLD,                  str_lit("lld"),                       BuildFlagParam_None,    Command__does_build);
 	add_flag(&build_flags, BuildFlag_UseSeparateModules,      str_lit("use-separate-modules"),      BuildFlagParam_None,    Command__does_build);
@@ -594,6 +597,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
 
 	add_flag(&build_flags, BuildFlag_PrintLinkerFlags,        str_lit("print-linker-flags"),        BuildFlagParam_None,    Command_build);
 
+	add_flag(&build_flags, BuildFlag_InternalFastISel,        str_lit("internal-fast-isel"),        BuildFlagParam_None,    Command_all);
 	add_flag(&build_flags, BuildFlag_InternalIgnoreLazy,      str_lit("internal-ignore-lazy"),      BuildFlagParam_None,    Command_all);
 	add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None,    Command_all);
 	add_flag(&build_flags, BuildFlag_InternalIgnorePanic,     str_lit("internal-ignore-panic"),     BuildFlagParam_None,    Command_all);
@@ -1181,6 +1185,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
 						case BuildFlag_NoCRT:
 							build_context.no_crt = true;
 							break;
+						case BuildFlag_NoRPath:
+							build_context.no_rpath = true;
+							break;
 						case BuildFlag_NoEntryPoint:
 							build_context.no_entry_point = true;
 							break;
@@ -1408,6 +1415,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
 							build_context.print_linker_flags = true;
 							break;
 
+						case BuildFlag_InternalFastISel:
+							build_context.fast_isel = true;
+							break;
 						case BuildFlag_InternalIgnoreLazy:
 							build_context.ignore_lazy = true;
 							break;
@@ -2154,6 +2164,12 @@ gb_internal void print_show_help(String const arg0, String const &command) {
 		print_usage_line(1, "-doc-format");
 		print_usage_line(2, "Generates documentation as the .odin-doc format (useful for external tooling).");
 		print_usage_line(0, "");
+
+		print_usage_line(1, "-out:<filepath>");
+		print_usage_line(2, "Sets the base name of the resultig .odin-doc file.");
+		print_usage_line(2, "The extension can be optionally included; the resulting file will always have an extension of '.odin-doc'.");
+		print_usage_line(2, "Example: -out:foo");
+		print_usage_line(0, "");
 	}
 
 	if (run_or_build) {
@@ -2310,6 +2326,10 @@ gb_internal void print_show_help(String const arg0, String const &command) {
 		print_usage_line(2, "Disables automatic linking with the C Run Time.");
 		print_usage_line(0, "");
 
+		print_usage_line(1, "-no-rpath");
+		print_usage_line(2, "Disables automatic addition of an rpath linked to the executable directory.");
+		print_usage_line(0, "");
+
 		print_usage_line(1, "-no-thread-local");
 		print_usage_line(2, "Ignores @thread_local attribute, effectively treating the program as if it is single-threaded.");
 		print_usage_line(0, "");

+ 0 - 14
src/parser.cpp

@@ -5938,20 +5938,6 @@ gb_internal bool determine_path_from_string(BlockingMutex *file_mutex, Ast *node
 			do_error(node, "Unknown library collection: '%.*s'", LIT(collection_name));
 			return false;
 		}
-	} else {
-#if !defined(GB_SYSTEM_WINDOWS)
-		// @NOTE(vassvik): foreign imports of shared libraries that are not in the system collection on
-		//                 linux/mac have to be local to the executable for consistency with shared libraries.
-		//                 Unix does not have a concept of "import library" for shared/dynamic libraries,
-		//                 so we need to pass the relative path to the linker, and add the current
-		//                 working directory of the exe to the library search paths.
-		//                 Static libraries can be linked directly with the full pathname
-		//
-		if (node->kind == Ast_ForeignImportDecl && (string_ends_with(file_str, str_lit(".so")) || string_contains_string(file_str, str_lit(".so.")))) {
-			*path = file_str;
-			return true;
-		}
-#endif
 	}
 
 	if (is_package_name_reserved(file_str)) {

+ 7 - 7
vendor/box2d/build_box2d.sh

@@ -11,7 +11,7 @@ tar -xzvf "v$VERSION.tar.gz"
 
 cd "box2d-$VERSION"
 
-DISABLE_FLAGS="-DBOX2D_SAMPLES=OFF -DBOX2D_VALIDATE=OFF -DBOX2D_UNIT_TESTS=OFF"
+FLAGS="-DCMAKE_BUILD_TYPE=Release -DBOX2D_SAMPLES=OFF -DBOX2D_VALIDATE=OFF -DBOX2D_UNIT_TESTS=OFF"
 
 case "$(uname -s)" in
 Darwin)
@@ -21,20 +21,20 @@ Darwin)
 	"x86_64" | "amd64")
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DBOX2D_AVX2=ON -DCMAKE_OSX_ARCHITECTURES=x86_64 -S . -B build
+		cmake $FLAGS -DBOX2D_AVX2=ON -DCMAKE_OSX_ARCHITECTURES=x86_64 -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_darwin_amd64_avx2.a
 
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DBOX2D_AVX2=OFF -DCMAKE_OSX_ARCHITECTURES=x86_64 -S . -B build
+		cmake $FLAGS -DBOX2D_AVX2=OFF -DCMAKE_OSX_ARCHITECTURES=x86_64 -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_darwin_amd64_sse2.a
 		;;
 	*)
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DCMAKE_OSX_ARCHITECTURES=arm64 -S . -B build
+		cmake $FLAGS -DCMAKE_OSX_ARCHITECTURES=arm64 -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_darwin_arm64.a
 		;;
@@ -45,20 +45,20 @@ Darwin)
 	"x86_64" | "amd64")
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DBOX2D_AVX2=ON -S . -B build
+		cmake $FLAGS -DBOX2D_AVX2=ON -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_other_amd64_avx2.a
 
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DBOX2D_AVX2=OFF -S . -B build
+		cmake $FLAGS -DBOX2D_AVX2=OFF -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_other_amd64_sse2.a
 		;;
 	*)
 		rm -rf build
 		mkdir build
-		cmake $DISABLE_FLAGS -DCMAKE_OSX_ARCHITECTURES=arm64 -S . -B build
+		cmake $FLAGS -DCMAKE_OSX_ARCHITECTURES=arm64 -S . -B build
 		cmake --build build
 		cp build/src/libbox2d.a ../lib/box2d_other.a
 		;;

BIN
vendor/box2d/lib/box2d_darwin_amd64_avx2.a


BIN
vendor/box2d/lib/box2d_darwin_amd64_sse2.a


BIN
vendor/box2d/lib/box2d_darwin_arm64.a


BIN
vendor/stb/lib/darwin/stb_truetype.a


BIN
vendor/stb/lib/stb_truetype.lib


BIN
vendor/stb/lib/stb_truetype_wasm.o


+ 1 - 1
vendor/stb/src/Makefile

@@ -8,7 +8,7 @@ endif
 
 wasm:
 	mkdir -p ../lib
-	clang -c -Os --target=wasm32 -nostdlib stb_truetype_wasm.c -o ../lib/stb_truetype_wasm.o
+	$(CC) -c -Os --target=wasm32 -nostdlib stb_truetype_wasm.c -o ../lib/stb_truetype_wasm.o
 
 unix:
 	mkdir -p ../lib

+ 0 - 3
vendor/stb/src/stb_truetype.c

@@ -1,5 +1,2 @@
-#define STB_RECT_PACK_IMPLEMENTATION
-#include "stb_rect_pack.h"
-
 #define STB_TRUETYPE_IMPLEMENTATION
 #include "stb_truetype.h"