Update core to use new atomic intrinsics

gingerBill committed 3 years ago
commit ba1930eb01
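
The gist of the change: the old suffix-named intrinsics (atomic_load_acq, atomic_store_rel, atomic_xchg, atomic_cxchg, ...) are replaced by per-operation _explicit intrinsics that take an Atomic_Memory_Order argument. A minimal before/after sketch, using a hypothetical counter global that is not part of the commit:

package example

import "core:intrinsics"

counter: int

bump :: proc() {
	// Old style (removed by this commit): the memory order was baked
	// into the intrinsic's name.
	//   v := intrinsics.atomic_load_acq(&counter)
	//   intrinsics.atomic_store_rel(&counter, v + 1)

	// New style: one intrinsic per operation, the order passed as a value.
	v := intrinsics.atomic_load_explicit(&counter, .acquire)
	intrinsics.atomic_store_explicit(&counter, v + 1, .release)
}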

+ 110 - 145
core/c/libc/stdatomic.odin

@@ -47,24 +47,25 @@ kill_dependency :: #force_inline proc(value: $T) -> T {
 
 // 7.17.4 Fences
 atomic_thread_fence :: #force_inline proc(order: memory_order) {
-	switch (order) {
-	case .relaxed:
-		return
-	case .consume:
-		intrinsics.atomic_fence_acq()
-	case .acquire:
-		intrinsics.atomic_fence_acq()
-	case .release:
-		intrinsics.atomic_fence_rel()
-	case .acq_rel:
-		intrinsics.atomic_fence_acqrel()
-	case .seq_cst:
-		intrinsics.atomic_fence_acqrel()
+	switch order {
+	case .relaxed: intrinsics.atomic_thread_fence(.relaxed)
+	case .consume: intrinsics.atomic_thread_fence(.consume)
+	case .acquire: intrinsics.atomic_thread_fence(.acquire)
+	case .release: intrinsics.atomic_thread_fence(.release)
+	case .acq_rel: intrinsics.atomic_thread_fence(.acq_rel)
+	case .seq_cst: intrinsics.atomic_thread_fence(.seq_cst)
 	}
 }
 
 atomic_signal_fence :: #force_inline proc(order: memory_order) {
-	atomic_thread_fence(order)
+	switch order {
+	case .relaxed: intrinsics.atomic_signal_fence(.relaxed)
+	case .consume: intrinsics.atomic_signal_fence(.consume)
+	case .acquire: intrinsics.atomic_signal_fence(.acquire)
+	case .release: intrinsics.atomic_signal_fence(.release)
+	case .acq_rel: intrinsics.atomic_signal_fence(.acq_rel)
+	case .seq_cst: intrinsics.atomic_signal_fence(.seq_cst)
+	}
 }
 
 // 7.17.5 Lock-free property
@@ -121,13 +122,10 @@ atomic_store_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) {
 	assert(order != .acquire)
 	assert(order != .acq_rel)
 
-	#partial switch (order) {
-	case .relaxed:
-		intrinsics.atomic_store_relaxed(object, desired)
-	case .release:
-		intrinsics.atomic_store_rel(object, desired)
-	case .seq_cst:
-		intrinsics.atomic_store(object, desired)
+	#partial switch order {
+	case .relaxed: intrinsics.atomic_store_explicit(object, desired, .relaxed)
+	case .release: intrinsics.atomic_store_explicit(object, desired, .release)
+	case .seq_cst: intrinsics.atomic_store_explicit(object, desired, .seq_cst)
 	}
 }
 
@@ -139,36 +137,26 @@ atomic_load_explicit :: #force_inline proc(object: ^$T, order: memory_order) -> T {
 	assert(order != .release)
 	assert(order != .acq_rel)
 
-	#partial switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_load_relaxed(object)
-	case .consume:
-		return intrinsics.atomic_load_acq(object)
-	case .acquire:
-		return intrinsics.atomic_load_acq(object)
-	case .seq_cst:
-		return intrinsics.atomic_load(object)
+	#partial switch order {
+	case .relaxed: return intrinsics.atomic_load_explicit(object, .relaxed)
+	case .consume: return intrinsics.atomic_load_explicit(object, .consume)
+	case .acquire: return intrinsics.atomic_load_explicit(object, .acquire)
+	case .seq_cst: return intrinsics.atomic_load_explicit(object, .seq_cst)
 	}
 }
 
 atomic_exchange :: #force_inline proc(object: ^$T, desired: T) -> T {
-	return intrinsics.atomic_xchg(object, desired)
+	return intrinsics.atomic_exchange(object, desired)
 }
 
 atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xchg_relaxed(object, desired)
-	case .consume:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .acquire:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .release:
-		return intrinsics.atomic_xchg_rel(object, desired)
-	case .acq_rel:
-		return intrinsics.atomic_xchg_acqrel(object, desired)
-	case .seq_cst:
-		return intrinsics.atomic_xchg(object, desired)
+	switch order {
+	case .relaxed: return intrinsics.atomic_exchange_explicit(object, desired, .relaxed)
+	case .consume: return intrinsics.atomic_exchange_explicit(object, desired, .consume)
+	case .acquire: return intrinsics.atomic_exchange_explicit(object, desired, .acquire)
+	case .release: return intrinsics.atomic_exchange_explicit(object, desired, .release)
+	case .acq_rel: return intrinsics.atomic_exchange_explicit(object, desired, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_exchange_explicit(object, desired, .seq_cst)
 	}
-	return false
 }
@@ -189,102 +177,104 @@ atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
 // 	[success = seq_cst, failure = acquire] => failacq
 // 	[success = acquire, failure = relaxed] => acq_failrelaxed
 // 	[success = acq_rel, failure = relaxed] => acqrel_failrelaxed
-atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_strong(object, expected^, desired)
 	if !ok { expected^ = value } 
 	return ok
 }
 
-atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) {
+atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)
 
 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .seq_cst, .seq_cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .acquire, .seq_cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .consume, .seq_cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchg_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .release, .seq_cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .acq_rel, .seq_cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchg_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .relaxed, .relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .seq_cst, .relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .acquire, .relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .consume, .relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .acq_rel, .relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .seq_cst, .consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchg_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .seq_cst, .acquire)
 
 	}
 	if !ok { expected^ = value }
 	return ok
 }
 
-atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_weak(object, expected^, desired)
 	if !ok { expected^ = value }
 	return ok
 }
 
-atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desited: T, success, failure: memory_order) {
+atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)
 
 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .seq_cst, .seq_cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .acquire, .seq_cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .consume, .seq_cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchgweak_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .release, .seq_cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .acq_rel, .seq_cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchgweak_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .relaxed, .relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .seq_cst, .relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .acquire, .relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .consume, .relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .acq_rel, .relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .seq_cst, .consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchgweak_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .seq_cst, .acquire)
 
 	}
 	if !ok { expected^ = value }
@@ -297,19 +287,14 @@ atomic_fetch_add :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_add_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_add_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_add_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_add_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_add(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_add_explicit(object, operand, .relaxed)
+	case .consume: return intrinsics.atomic_add_explicit(object, operand, .consume)
+	case .acquire: return intrinsics.atomic_add_explicit(object, operand, .acquire)
+	case .release: return intrinsics.atomic_add_explicit(object, operand, .release)
+	case .acq_rel: return intrinsics.atomic_add_explicit(object, operand, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_add_explicit(object, operand, .seq_cst)
 	}
 }
 
@@ -318,19 +303,14 @@ atomic_fetch_sub :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_sub_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_sub_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_sub_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_sub_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_sub(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_sub_explicit(object, operand, .relaxed)
+	case .consume: return intrinsics.atomic_sub_explicit(object, operand, .consume)
+	case .acquire: return intrinsics.atomic_sub_explicit(object, operand, .acquire)
+	case .release: return intrinsics.atomic_sub_explicit(object, operand, .release)
+	case .acq_rel: return intrinsics.atomic_sub_explicit(object, operand, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_sub_explicit(object, operand, .seq_cst)
 	}
 }
 
@@ -339,19 +319,14 @@ atomic_fetch_or :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_or_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_or_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_or_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_or_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_or(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_or_explicit(object, operand, .relaxed)
+	case .consume: return intrinsics.atomic_or_explicit(object, operand, .consume)
+	case .acquire: return intrinsics.atomic_or_explicit(object, operand, .acquire)
+	case .release: return intrinsics.atomic_or_explicit(object, operand, .release)
+	case .acq_rel: return intrinsics.atomic_or_explicit(object, operand, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_or_explicit(object, operand, .seq_cst)
 	}
 }
 
@@ -360,19 +335,14 @@ atomic_fetch_xor :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_xor_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xor_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_xor_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_xor_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_xor(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_xor_explicit(object, operand, .relaxed)
+	case .consume: return intrinsics.atomic_xor_explicit(object, operand, .consume)
+	case .acquire: return intrinsics.atomic_xor_explicit(object, operand, .acquire)
+	case .release: return intrinsics.atomic_xor_explicit(object, operand, .release)
+	case .acq_rel: return intrinsics.atomic_xor_explicit(object, operand, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_xor_explicit(object, operand, .seq_cst)
 	}
 }
 
@@ -380,19 +350,14 @@ atomic_fetch_and :: #force_inline proc(object: ^$T, operand: T) -> T {
 	return intrinsics.atomic_and(object, operand)
 }
 atomic_fetch_and_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_and_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_and_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_and_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_and(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_and_explicit(object, operand, .relaxed)
+	case .consume: return intrinsics.atomic_and_explicit(object, operand, .consume)
+	case .acquire: return intrinsics.atomic_and_explicit(object, operand, .acquire)
+	case .release: return intrinsics.atomic_and_explicit(object, operand, .release)
+	case .acq_rel: return intrinsics.atomic_and_explicit(object, operand, .acq_rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_and_explicit(object, operand, .seq_cst)
 	}
 }
 

+ 1 - 1
core/mem/mem.odin

@@ -16,7 +16,7 @@ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
 	// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
 	// memset_s call.
 	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
-	intrinsics.atomic_fence() // Prevent reordering
+	intrinsics.atomic_thread_fence(.seq_cst) // Prevent reordering
 	return data
 }
 zero_item :: proc "contextless" (item: $P/^$T) {
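
Callers of zero_explicit are unaffected by this change; for illustration, a typical use, where wipe_key and key are hypothetical names:

package example

import "core:mem"

wipe_key :: proc(key: []byte) {
	// Zeroes the buffer; the volatile zero plus the fence keeps the
	// compiler from optimising the wipe away.
	mem.zero_explicit(raw_data(key), len(key))
}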

+ 37 - 71
core/sync/atomic.odin

@@ -4,76 +4,42 @@ import "core:intrinsics"
 
 cpu_relax :: intrinsics.cpu_relax
 
-atomic_fence         :: intrinsics.atomic_fence
-atomic_fence_acquire :: intrinsics.atomic_fence_acq
-atomic_fence_release :: intrinsics.atomic_fence_rel
-atomic_fence_acqrel  :: intrinsics.atomic_fence_acqrel
-
-atomic_store           :: intrinsics.atomic_store
-atomic_store_release   :: intrinsics.atomic_store_rel
-atomic_store_relaxed   :: intrinsics.atomic_store_relaxed
-atomic_store_unordered :: intrinsics.atomic_store_unordered
-
-atomic_load           :: intrinsics.atomic_load
-atomic_load_acquire   :: intrinsics.atomic_load_acq
-atomic_load_relaxed   :: intrinsics.atomic_load_relaxed
-atomic_load_unordered :: intrinsics.atomic_load_unordered
-
-atomic_add          :: intrinsics.atomic_add
-atomic_add_acquire  :: intrinsics.atomic_add_acq
-atomic_add_release  :: intrinsics.atomic_add_rel
-atomic_add_acqrel   :: intrinsics.atomic_add_acqrel
-atomic_add_relaxed  :: intrinsics.atomic_add_relaxed
-atomic_sub          :: intrinsics.atomic_sub
-atomic_sub_acquire  :: intrinsics.atomic_sub_acq
-atomic_sub_release  :: intrinsics.atomic_sub_rel
-atomic_sub_acqrel   :: intrinsics.atomic_sub_acqrel
-atomic_sub_relaxed  :: intrinsics.atomic_sub_relaxed
-atomic_and          :: intrinsics.atomic_and
-atomic_and_acquire  :: intrinsics.atomic_and_acq
-atomic_and_release  :: intrinsics.atomic_and_rel
-atomic_and_acqrel   :: intrinsics.atomic_and_acqrel
-atomic_and_relaxed  :: intrinsics.atomic_and_relaxed
-atomic_nand         :: intrinsics.atomic_nand
-atomic_nand_acquire :: intrinsics.atomic_nand_acq
-atomic_nand_release :: intrinsics.atomic_nand_rel
-atomic_nand_acqrel  :: intrinsics.atomic_nand_acqrel
-atomic_nand_relaxed :: intrinsics.atomic_nand_relaxed
-atomic_or           :: intrinsics.atomic_or
-atomic_or_acquire   :: intrinsics.atomic_or_acq
-atomic_or_release   :: intrinsics.atomic_or_rel
-atomic_or_acqrel    :: intrinsics.atomic_or_acqrel
-atomic_or_relaxed   :: intrinsics.atomic_or_relaxed
-atomic_xor          :: intrinsics.atomic_xor
-atomic_xor_acquire  :: intrinsics.atomic_xor_acq
-atomic_xor_release  :: intrinsics.atomic_xor_rel
-atomic_xor_acqrel   :: intrinsics.atomic_xor_acqrel
-atomic_xor_relaxed  :: intrinsics.atomic_xor_relaxed
-
-atomic_exchange         :: intrinsics.atomic_xchg
-atomic_exchange_acquire :: intrinsics.atomic_xchg_acq
-atomic_exchange_release :: intrinsics.atomic_xchg_rel
-atomic_exchange_acqrel  :: intrinsics.atomic_xchg_acqrel
-atomic_exchange_relaxed :: intrinsics.atomic_xchg_relaxed
-
-// Returns value and optional ok boolean
-atomic_compare_exchange_strong                     :: intrinsics.atomic_cxchg
-atomic_compare_exchange_strong_acquire             :: intrinsics.atomic_cxchg_acq
-atomic_compare_exchange_strong_release             :: intrinsics.atomic_cxchg_rel
-atomic_compare_exchange_strong_acqrel              :: intrinsics.atomic_cxchg_acqrel
-atomic_compare_exchange_strong_relaxed             :: intrinsics.atomic_cxchg_relaxed
-atomic_compare_exchange_strong_failrelaxed         :: intrinsics.atomic_cxchg_failrelaxed
-atomic_compare_exchange_strong_failacquire         :: intrinsics.atomic_cxchg_failacq
-atomic_compare_exchange_strong_acquire_failrelaxed :: intrinsics.atomic_cxchg_acq_failrelaxed
-atomic_compare_exchange_strong_acqrel_failrelaxed  :: intrinsics.atomic_cxchg_acqrel_failrelaxed
+/*
+Atomic_Memory_Order :: enum {
+	relaxed = 0,
+	consume = 1,
+	acquire = 2,
+	release = 3,
+	acq_rel = 4,
+	seq_cst = 5,
+}
+*/
+Atomic_Memory_Order :: intrinsics.Atomic_Memory_Order
+
+
+atomic_thread_fence                     :: intrinsics.atomic_thread_fence
+atomic_signal_fence                     :: intrinsics.atomic_signal_fence
+atomic_store                            :: intrinsics.atomic_store
+atomic_store_explicit                   :: intrinsics.atomic_store_explicit
+atomic_load                             :: intrinsics.atomic_load
+atomic_load_explicit                    :: intrinsics.atomic_load_explicit
+atomic_add                              :: intrinsics.atomic_add
+atomic_add_explicit                     :: intrinsics.atomic_add_explicit
+atomic_sub                              :: intrinsics.atomic_sub
+atomic_sub_explicit                     :: intrinsics.atomic_sub_explicit
+atomic_and                              :: intrinsics.atomic_and
+atomic_and_explicit                     :: intrinsics.atomic_and_explicit
+atomic_nand                             :: intrinsics.atomic_nand
+atomic_nand_explicit                    :: intrinsics.atomic_nand_explicit
+atomic_or                               :: intrinsics.atomic_or
+atomic_or_explicit                      :: intrinsics.atomic_or_explicit
+atomic_xor                              :: intrinsics.atomic_xor
+atomic_xor_explicit                     :: intrinsics.atomic_xor_explicit
+atomic_exchange                         :: intrinsics.atomic_exchange
+atomic_exchange_explicit                :: intrinsics.atomic_exchange_explicit
 
 // Returns value and optional ok boolean
-atomic_compare_exchange_weak                     :: intrinsics.atomic_cxchgweak
-atomic_compare_exchange_weak_acquire             :: intrinsics.atomic_cxchgweak_acq
-atomic_compare_exchange_weak_release             :: intrinsics.atomic_cxchgweak_rel
-atomic_compare_exchange_weak_acqrel              :: intrinsics.atomic_cxchgweak_acqrel
-atomic_compare_exchange_weak_relaxed             :: intrinsics.atomic_cxchgweak_relaxed
-atomic_compare_exchange_weak_failrelaxed         :: intrinsics.atomic_cxchgweak_failrelaxed
-atomic_compare_exchange_weak_failacquire         :: intrinsics.atomic_cxchgweak_failacq
-atomic_compare_exchange_weak_acquire_failrelaxed :: intrinsics.atomic_cxchgweak_acq_failrelaxed
-atomic_compare_exchange_weak_acqrel_failrelaxed  :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed
+atomic_compare_exchange_strong          :: intrinsics.atomic_compare_exchange_strong
+atomic_compare_exchange_strong_explicit :: intrinsics.atomic_compare_exchange_strong_explicit
+atomic_compare_exchange_weak            :: intrinsics.atomic_compare_exchange_weak
+atomic_compare_exchange_weak_explicit   :: intrinsics.atomic_compare_exchange_weak_explicit
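
As a usage sketch of the renamed aliases, a CAS-increment loop; count and increment are illustrative, not part of the commit, and the (value, ok) return shape follows the comment above:

package example

import "core:sync"

count: int

increment :: proc() {
	for {
		old := sync.atomic_load_explicit(&count, .relaxed)
		// The weak CAS may fail spuriously, hence the retry loop.
		if _, ok := sync.atomic_compare_exchange_weak_explicit(&count, old, old + 1, .release, .relaxed); ok {
			return
		}
		sync.cpu_relax()
	}
}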

+ 15 - 15
core/sync/extended.odin

@@ -146,10 +146,10 @@ Auto_Reset_Event :: struct {
 }
 
 auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
-	old_status := atomic_load_relaxed(&e.status)
+	old_status := atomic_load_explicit(&e.status, .seq_cst)
 	for {
 		new_status := old_status + 1 if old_status < 1 else 1
-		if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
+		if _, ok := atomic_compare_exchange_weak_explicit(&e.status, old_status, new_status, .seq_cst, .seq_cst); ok {
 			break
 		}
 
@@ -160,7 +160,7 @@ auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
 }
 
 auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
-	old_status := atomic_sub_acquire(&e.status, 1)
+	old_status := atomic_sub_explicit(&e.status, 1, .acquire)
 	if old_status < 1 {
 		sema_wait(&e.sema)
 	}
@@ -174,14 +174,14 @@ Ticket_Mutex :: struct {
 }
 
 ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
-	ticket := atomic_add_relaxed(&m.ticket, 1)
-	for ticket != atomic_load_acquire(&m.serving) {
+	ticket := atomic_add_explicit(&m.ticket, 1, .relaxed)
+	for ticket != atomic_load_explicit(&m.serving, .acquire) {
 		cpu_relax()
 	}
 }
 
 ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
-	atomic_add_relaxed(&m.serving, 1)
+	atomic_add_explicit(&m.serving, 1, .relaxed)
 }
 @(deferred_in=ticket_mutex_unlock)
 ticket_mutex_guard :: proc(m: ^Ticket_Mutex) -> bool {
@@ -196,18 +196,18 @@ Benaphore :: struct {
 }
 
 benaphore_lock :: proc(b: ^Benaphore) {
-	if atomic_add_acquire(&b.counter, 1) > 1 {
+	if atomic_add_explicit(&b.counter, 1, .acquire) > 1 {
 		sema_wait(&b.sema)
 	}
 }
 
 benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
-	v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0)
+	v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 1, 0, .acquire, .acquire)
 	return v == 0
 }
 
 benaphore_unlock :: proc(b: ^Benaphore) {
-	if atomic_sub_release(&b.counter, 1) > 0 {
+	if atomic_sub_explicit(&b.counter, 1, .release) > 0 {
 		sema_post(&b.sema)
 	}
 }
@@ -227,7 +227,7 @@ Recursive_Benaphore :: struct {
 
 recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
 	tid := current_thread_id()
-	if atomic_add_acquire(&b.counter, 1) > 1 {
+	if atomic_add_explicit(&b.counter, 1, .acquire) > 1 {
 		if tid != b.owner {
 			sema_wait(&b.sema)
 		}
@@ -240,10 +240,10 @@ recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
 recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
 	tid := current_thread_id()
 	if b.owner == tid {
-		atomic_add_acquire(&b.counter, 1)
+		atomic_add_explicit(&b.counter, 1, .acquire)
 	}
 
-	if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
+	if v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 1, 0, .acquire, .acquire); v != 0 {
 		return false
 	}
 	// inside the lock
@@ -260,7 +260,7 @@ recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
 	if recursion == 0 {
 		b.owner = 0
 	}
-	if atomic_sub_release(&b.counter, 1) > 0 {
+	if atomic_sub_explicit(&b.counter, 1, .release) > 0 {
 		if recursion == 0 {
 			sema_post(&b.sema)
 		}
@@ -293,12 +293,12 @@ once_do :: proc(o: ^Once, fn: proc()) {
 		defer mutex_unlock(&o.m)
 		if !o.done {
 			fn()
-			atomic_store_release(&o.done, true)
+			atomic_store_explicit(&o.done, true, .release)
 		}
 	}
 
 	
-	if atomic_load_acquire(&o.done) == false {
+	if atomic_load_explicit(&o.done, .acquire) == false {
 		do_slow(o, fn)
 	}
 }
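
The once_do fast path above is the usual double-checked pattern: the .release store of done publishes everything fn wrote, and the .acquire load on the fast path synchronizes with it. The same pairing in isolation, with hypothetical data and ready globals:

package example

import "core:sync"

data:  int
ready: bool

publish :: proc() {
	data = 42 // plain write, made visible by the release store below
	sync.atomic_store_explicit(&ready, true, .release)
}

try_consume :: proc() -> (value: int, ok: bool) {
	// An acquire load that observes true synchronizes with the release
	// store above, so the write to data is guaranteed to be visible.
	if sync.atomic_load_explicit(&ready, .acquire) {
		return data, true
	}
	return 0, false
}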

+ 8 - 8
core/sync/primitives_atomic.odin

@@ -24,7 +24,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
 		new_state := curr_state // Make a copy of it
 
 		spin_lock: for spin in 0..<i32(100) {
-			state, ok := atomic_compare_exchange_weak_acquire(&m.state, .Unlocked, new_state)
+			state, ok := atomic_compare_exchange_weak_explicit(&m.state, .Unlocked, new_state, .acquire, .consume)
 			if ok {
 				return
 			}
@@ -42,7 +42,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
 		new_state = .Waiting
 
 		for {
-			if atomic_exchange_acquire(&m.state, .Waiting) == .Unlocked {
+			if atomic_exchange_explicit(&m.state, .Waiting, .acquire) == .Unlocked {
 				return
 			}
 			
@@ -52,7 +52,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
 	}
 
 
-	if v := atomic_exchange_acquire(&m.state, .Locked); v != .Unlocked {
+	if v := atomic_exchange_explicit(&m.state, .Locked, .acquire); v != .Unlocked {
 		lock_slow(m, v)
 	}
 }
@@ -65,7 +65,7 @@ atomic_mutex_unlock :: proc(m: ^Atomic_Mutex) {
 	}
 
 
-	switch atomic_exchange_release(&m.state, .Unlocked) {
+	switch atomic_exchange_explicit(&m.state, .Unlocked, .release) {
 	case .Unlocked:
 		unreachable()
 	case .Locked:
@@ -77,7 +77,7 @@ atomic_mutex_unlock :: proc(m: ^Atomic_Mutex) {
 
 // atomic_mutex_try_lock tries to lock m, will return true on success, and false on failure
 atomic_mutex_try_lock :: proc(m: ^Atomic_Mutex) -> bool {
-	_, ok := atomic_compare_exchange_strong_acquire(&m.state, .Unlocked, .Locked)
+	_, ok := atomic_compare_exchange_strong_explicit(&m.state, .Unlocked, .Locked, .acquire, .acquire)
 	return ok
 }
 
@@ -290,7 +290,7 @@ Queue_Item :: struct {
 
 @(private="file")
 queue_item_wait :: proc(item: ^Queue_Item) {
-	for atomic_load_acquire(&item.futex) == 0 {
+	for atomic_load_explicit(&item.futex, .acquire) == 0 {
 		futex_wait(&item.futex, 0)
 		cpu_relax()
 	}
@@ -298,7 +298,7 @@ queue_item_wait :: proc(item: ^Queue_Item) {
 @(private="file")
 queue_item_wait_with_timeout :: proc(item: ^Queue_Item, duration: time.Duration) -> bool {
 	start := time.tick_now()
-	for atomic_load_acquire(&item.futex) == 0 {
+	for atomic_load_explicit(&item.futex, .acquire) == 0 {
 		remaining := duration - time.tick_since(start)
 		if remaining < 0 {
 			return false
@@ -312,7 +312,7 @@ queue_item_wait_with_timeout :: proc(item: ^Queue_Item, duration: time.Duration)
 }
 @(private="file")
 queue_item_signal :: proc(item: ^Queue_Item) {
-	atomic_store_release(&item.futex, 1)
+	atomic_store_explicit(&item.futex, 1, .release)
 	futex_signal(&item.futex)
 }
 

+ 3 - 3
core/sync/primitives_internal.odin

@@ -10,7 +10,7 @@ when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
 	_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
 		tid := Futex(current_thread_id())
 		for {
-			prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
+			prev_owner := atomic_compare_exchange_strong_explicit(&m.impl.owner, tid, 0, .acquire, .acquire)
 			switch prev_owner {
 			case 0, tid:
 				m.impl.recursion += 1
@@ -27,7 +27,7 @@ when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
 		if m.impl.recursion != 0 {
 			return
 		}
-		atomic_exchange_release(&m.impl.owner, 0)
+		atomic_exchange_explicit(&m.impl.owner, 0, .release)
 		
 		futex_signal(&m.impl.owner)
 		// outside the lock
@@ -36,7 +36,7 @@ when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
 
 	_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
 		tid := Futex(current_thread_id())
-		prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
+		prev_owner := atomic_compare_exchange_strong_explicit(&m.impl.owner, tid, 0, .acquire, .acquire)
 		switch prev_owner {
 		case 0, tid:
 			m.impl.recursion += 1

+ 2 - 2
core/testing/runner_windows.odin

@@ -21,7 +21,7 @@ sema_wait :: proc "contextless" (s: ^Sema) {
 			win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), win32.INFINITE)
 			original_count = s.count
 		}
-		if original_count == intrinsics.atomic_cxchg(&s.count, original_count-1, original_count) {
+		if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
 			return
 		}
 	}
@@ -46,7 +46,7 @@ sema_wait_with_timeout :: proc "contextless" (s: ^Sema, duration: time.Duration)
 			}
 			original_count = s.count
 		}
-		if original_count == intrinsics.atomic_cxchg(&s.count, original_count-1, original_count) {
+		if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
 			return true
 		}
 	}