
`@(require_target_feature=<string>)` `@(enable_target_feature=<string>)`

require_target_feature - the listed features must already be provided by the target microarchitecture, otherwise compilation fails
enable_target_feature  - the listed features are enabled during code generation for the annotated procedure only
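
A minimal sketch of how the two attributes are meant to be used (the package and procedure names are illustrative, not part of this commit):

```odin
package example

import x86 "core:simd/x86"

// The feature is enabled while generating code for this one
// procedure; the rest of the program keeps its baseline
// microarchitecture.
@(enable_target_feature="sse2")
add_bytes :: proc "c" (a, b: x86.__m128i) -> x86.__m128i {
	return x86._mm_add_epi8(a, b)
}

// Compilation is rejected unless the selected target already
// guarantees the feature (sse2 is baseline on amd64).
@(require_target_feature="sse2")
sub_bytes :: proc "c" (a, b: x86.__m128i) -> x86.__m128i {
	return x86._mm_sub_epi8(a, b)
}
```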
gingerBill committed 3 years ago
commit f3aefbc443

+ 4 - 0
core/simd/x86/fxsr.odin

@@ -1,17 +1,21 @@
 //+build i386, amd64
 package simd_x86
 
+@(enable_target_feature="fxsr")
 _fxsave :: #force_inline proc "c" (mem_addr: rawptr) {
 	fxsave(mem_addr)
 }
+@(enable_target_feature="fxsr")
 _fxrstor :: #force_inline proc "c" (mem_addr: rawptr) {
 	fxrstor(mem_addr)
 }
 
 when ODIN_ARCH == .amd64 {
+	@(enable_target_feature="fxsr")
 	_fxsave64 :: #force_inline proc "c" (mem_addr: rawptr) {
 		fxsave64(mem_addr)
 	}
+	@(enable_target_feature="fxsr")
 	_fxrstor64 :: #force_inline proc "c" (mem_addr: rawptr) {
 		fxrstor64(mem_addr)
 	}
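
As a hedged usage sketch: the `FXState` struct and its 16-byte alignment below are my assumptions about what FXSAVE/FXRSTOR require (a 512-byte save area), not something this commit defines.

```odin
package example

import x86 "core:simd/x86"

// Assumed layout: a 512-byte, 16-byte-aligned save area.
FXState :: struct #align(16) {
	data: [512]u8,
}

save_and_restore :: proc "c" () {
	state: FXState
	x86._fxsave(&state)  // snapshot the x87/MMX/SSE state
	x86._fxrstor(&state) // restore it
}
```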

+ 1 - 0
core/simd/x86/pclmulqdq.odin

@@ -1,6 +1,7 @@
 //+build i386, amd64
 package simd_x86
 
+@(enable_target_feature="pclmulqdq")
 _mm_clmulepi64_si128 :: #force_inline proc "c" (a, b: __m128i, $IMM8: u8) -> __m128i {
 	return pclmulqdq(a, b, u8(IMM8))
 }
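
A sketch of the wrapper in use; a procedure that needs several features can pass a comma-separated list, which matches how the compiler side below splits the attribute value (the helper name is hypothetical):

```odin
package example

import x86 "core:simd/x86"

// Carry-less (GF(2)[x]) product of the low 64-bit lanes of a and b;
// IMM8 = 0x00 selects lane 0 of each operand.
@(enable_target_feature="sse2,pclmulqdq")
clmul_lo :: proc "c" (a, b: x86.__m128i) -> x86.__m128i {
	return x86._mm_clmulepi64_si128(a, b, 0x00)
}
```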

+ 7 - 0
core/simd/x86/sha.odin

@@ -1,24 +1,31 @@
 //+build i386, amd64
 package simd_x86
 
+@(enable_target_feature="sha")
 _mm_sha1msg1_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)sha1msg1(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sha")
 _mm_sha1msg2_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)sha1msg2(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sha")
 _mm_sha1nexte_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)sha1nexte(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sha")
 _mm_sha1rnds4_epu32 :: #force_inline proc "c" (a, b: __m128i, $FUNC: u32) -> __m128i where 0 <= FUNC, FUNC <= 3 {
 	return transmute(__m128i)sha1rnds4(transmute(i32x4)a, transmute(i32x4)b, u8(FUNC & 0xff))
 }
+@(enable_target_feature="sha")
 _mm_sha256msg1_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)sha256msg1(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sha")
 _mm_sha256msg2_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)sha256msg2(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sha")
 _mm_sha256rnds2_epu32 :: #force_inline proc "c" (a, b, k: __m128i) -> __m128i {
 	return transmute(__m128i)sha256rnds2(transmute(i32x4)a, transmute(i32x4)b, transmute(i32x4)k)
 }
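
A sketch of one of the SHA wrappers in a caller (the helper and the state-layout comment follow Intel's SHA extension convention and are illustrative only):

```odin
package example

import x86 "core:simd/x86"

// Two SHA-256 rounds: state01/state23 hold the eight working
// variables split across two registers, wk holds message words with
// the round constants already added.
@(enable_target_feature="sse2,sha")
sha256_two_rounds :: proc "c" (state01, state23, wk: x86.__m128i) -> x86.__m128i {
	return x86._mm_sha256rnds2_epu32(state01, state23, wk)
}
```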

+ 102 - 0
core/simd/x86/sse.odin

@@ -43,232 +43,299 @@ _MM_FLUSH_ZERO_ON     :: 0x8000
 _MM_FLUSH_ZERO_OFF    :: 0x0000
 
 
+@(enable_target_feature="sse")
 _mm_add_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return addss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_add_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.add(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_sub_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return subss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_sub_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.sub(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_mul_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return mulss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_mul_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.mul(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_div_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return divss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_div_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.div(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_sqrt_ss :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return sqrtss(a)
 }
+@(enable_target_feature="sse")
 _mm_sqrt_ps :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return sqrtps(a)
 }
 
+@(enable_target_feature="sse")
 _mm_rcp_ss :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return rcpss(a)
 }
+@(enable_target_feature="sse")
 _mm_rcp_ps :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return rcpps(a)
 }
 
+@(enable_target_feature="sse")
 _mm_rsqrt_ss :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return rsqrtss(a)
 }
+@(enable_target_feature="sse")
 _mm_rsqrt_ps :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return rsqrtps(a)
 }
 
+@(enable_target_feature="sse")
 _mm_min_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return minss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_min_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return minps(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_max_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return maxss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_max_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return maxps(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_and_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return transmute(__m128)simd.and(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse")
 _mm_andnot_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return transmute(__m128)simd.and_not(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse")
 _mm_or_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return transmute(__m128)simd.or(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse")
 _mm_xor_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return transmute(__m128)simd.xor(transmute(__m128i)a, transmute(__m128i)b)
 }
 
 
+@(enable_target_feature="sse")
 _mm_cmpeq_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 0)
 }
+@(enable_target_feature="sse")
 _mm_cmplt_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 1)
 }
+@(enable_target_feature="sse")
 _mm_cmple_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 2)
 }
+@(enable_target_feature="sse")
 _mm_cmpgt_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, cmpss(b, a, 1), 4, 1, 2, 3)
 }
+@(enable_target_feature="sse")
 _mm_cmpge_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, cmpss(b, a, 2), 4, 1, 2, 3)
 }
+@(enable_target_feature="sse")
 _mm_cmpneq_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 4)
 }
+@(enable_target_feature="sse")
 _mm_cmpnlt_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 5)
 }
+@(enable_target_feature="sse")
 _mm_cmpnle_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 6)
 }
+@(enable_target_feature="sse")
 _mm_cmpngt_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, cmpss(b, a, 5), 4, 1, 2, 3)
 }
+@(enable_target_feature="sse")
 _mm_cmpnge_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, cmpss(b, a, 6), 4, 1, 2, 3)
 }
+@(enable_target_feature="sse")
 _mm_cmpord_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 7)
 }
+@(enable_target_feature="sse")
 _mm_cmpunord_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpss(a, b, 3)
 }
 
 
+@(enable_target_feature="sse")
 _mm_cmpeq_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 0)
 }
+@(enable_target_feature="sse")
 _mm_cmplt_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 1)
 }
+@(enable_target_feature="sse")
 _mm_cmple_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 2)
 }
+@(enable_target_feature="sse")
 _mm_cmpgt_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 1)
 }
+@(enable_target_feature="sse")
 _mm_cmpge_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 2)
 }
+@(enable_target_feature="sse")
 _mm_cmpneq_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 4)
 }
+@(enable_target_feature="sse")
 _mm_cmpnlt_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 5)
 }
+@(enable_target_feature="sse")
 _mm_cmpnle_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(a, b, 6)
 }
+@(enable_target_feature="sse")
 _mm_cmpngt_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 5)
 }
+@(enable_target_feature="sse")
 _mm_cmpnge_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 6)
 }
+@(enable_target_feature="sse")
 _mm_cmpord_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 7)
 }
+@(enable_target_feature="sse")
 _mm_cmpunord_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return cmpps(b, a, 3)
 }
 
 
+@(enable_target_feature="sse")
 _mm_comieq_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comieq_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_comilt_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comilt_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_comile_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comile_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_comigt_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comigt_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_comige_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comige_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_comineq_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return comineq_ss(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_ucomieq_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomieq_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_ucomilt_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomilt_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_ucomile_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomile_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_ucomigt_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomigt_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_ucomige_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomige_ss(a, b)
 }
+@(enable_target_feature="sse")
 _mm_ucomineq_ss :: #force_inline proc "c" (a, b: __m128) -> b32 {
 	return ucomineq_ss(a, b)
 }
 
+@(enable_target_feature="sse")
 _mm_cvtss_si32 :: #force_inline proc "c" (a: __m128) -> i32 {
 	return cvtss2si(a)
 }
 _mm_cvt_ss2si :: _mm_cvtss_si32
 _mm_cvttss_si32 :: _mm_cvtss_si32
 
+@(enable_target_feature="sse")
 _mm_cvtss_f32 :: #force_inline proc "c" (a: __m128) -> f32 {
 	return simd.extract(a, 0)
 }
 
+@(enable_target_feature="sse")
 _mm_cvtsi32_ss :: #force_inline proc "c" (a: __m128, b: i32) -> __m128 {
 	return cvtsi2ss(a, b)
 }
 _mm_cvt_si2ss :: _mm_cvtsi32_ss
 
 
+@(enable_target_feature="sse")
 _mm_set_ss :: #force_inline proc "c" (a: f32) -> __m128 {
 	return __m128{a, 0, 0, 0}
 }
+@(enable_target_feature="sse")
 _mm_set1_ps :: #force_inline proc "c" (a: f32) -> __m128 {
 	return __m128(a)
 }
 _mm_set_ps1 :: _mm_set1_ps
 
+@(enable_target_feature="sse")
 _mm_set_ps :: #force_inline proc "c" (a, b, c, d: f32) -> __m128 {
 	return __m128{d, c, b, a}
 }
+@(enable_target_feature="sse")
 _mm_setr_ps :: #force_inline proc "c" (a, b, c, d: f32) -> __m128 {
 	return __m128{a, b, c, d}
 }
 
+@(enable_target_feature="sse")
 _mm_setzero_ps :: #force_inline proc "c" () -> __m128 {
 	return __m128{0, 0, 0, 0}
 }
 
+@(enable_target_feature="sse")
 _mm_shuffle_ps :: #force_inline proc "c" (a, b: __m128, $MASK: u32) -> __m128 {
 	return simd.shuffle(
 		a, b,
@@ -279,56 +346,69 @@ _mm_shuffle_ps :: #force_inline proc "c" (a, b: __m128, $MASK: u32) -> __m128 {
 }
 
 
+@(enable_target_feature="sse")
 _mm_unpackhi_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, b, 2, 6, 3, 7)
 }
+@(enable_target_feature="sse")
 _mm_unpacklo_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, b, 0, 4, 1, 5)
 }
 
+@(enable_target_feature="sse")
 _mm_movehl_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, b, 6, 7, 2, 3)
 }
+@(enable_target_feature="sse")
 _mm_movelh_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, b, 0, 1, 4, 5)
 }
 
+@(enable_target_feature="sse")
 _mm_movemask_ps :: #force_inline proc "c" (a: __m128) -> u32 {
 	return movmskps(a)
 }
 
+@(enable_target_feature="sse")
 _mm_load_ss :: #force_inline proc "c" (p: ^f32) -> __m128 {
 	return __m128{p^, 0, 0, 0}
 }
+@(enable_target_feature="sse")
 _mm_load1_ps :: #force_inline proc "c" (p: ^f32) -> __m128 {
 	a := p^
 	return __m128(a)
 }
 _mm_load_ps1 :: _mm_load1_ps
 
+@(enable_target_feature="sse")
 _mm_load_ps :: #force_inline proc "c" (p: [^]f32) -> __m128 {
 	return (^__m128)(p)^
 }
 
+@(enable_target_feature="sse")
 _mm_loadu_ps :: #force_inline proc "c" (p: [^]f32) -> __m128 {
 	dst := _mm_undefined_ps()
 	intrinsics.mem_copy_non_overlapping(&dst, p, size_of(__m128))
 	return dst
 }
 
+@(enable_target_feature="sse")
 _mm_loadr_ps :: #force_inline proc "c" (p: [^]f32) -> __m128 {
 	return simd.lanes_reverse(_mm_load_ps(p))
 }
 
+@(enable_target_feature="sse")
 _mm_loadu_si64 :: #force_inline proc "c" (mem_addr: rawptr) -> __m128i {
 	a := intrinsics.unaligned_load((^i64)(mem_addr))
 	return __m128i{a, 0}
 }
 
+@(enable_target_feature="sse")
 _mm_store_ss :: #force_inline proc "c" (p: ^f32, a: __m128) {
 	p^ = simd.extract(a, 0)
 }
 
+@(enable_target_feature="sse")
 _mm_store1_ps :: #force_inline proc "c" (p: [^]f32, a: __m128) {
 	b := simd.swizzle(a, 0, 0, 0, 0)
 	(^__m128)(p)^ = b
@@ -336,71 +416,89 @@ _mm_store1_ps :: #force_inline proc "c" (p: [^]f32, a: __m128) {
 _mm_store_ps1 :: _mm_store1_ps
 
 
+@(enable_target_feature="sse")
 _mm_store_ps :: #force_inline proc "c" (p: [^]f32, a: __m128) {
 	(^__m128)(p)^ = a
 }
+@(enable_target_feature="sse")
 _mm_storeu_ps :: #force_inline proc "c" (p: [^]f32, a: __m128) {
 	b := a
 	intrinsics.mem_copy_non_overlapping(p, &b, size_of(__m128))
 }
+@(enable_target_feature="sse")
 _mm_storer_ps :: #force_inline proc "c" (p: [^]f32, a: __m128) {
 	(^__m128)(p)^ = simd.lanes_reverse(a)
 }
 
 
+@(enable_target_feature="sse")
 _mm_move_ss :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return simd.shuffle(a, b, 4, 1, 2, 3)
 }
 
+@(enable_target_feature="sse")
 _mm_sfence :: #force_inline proc "c" () {
 	sfence()
 }
 
+@(enable_target_feature="sse")
 _mm_getcsr :: #force_inline proc "c" () -> (result: u32) {
 	stmxcsr(&result)
 	return result
 }
 
+@(enable_target_feature="sse")
 _mm_setcsr :: #force_inline proc "c" (val: u32) {
 	val := val
 	ldmxcsr(&val)
 }
 
+@(enable_target_feature="sse")
 _MM_GET_EXCEPTION_MASK :: #force_inline proc "c" () -> u32 {
 	return _mm_getcsr() & _MM_MASK_MASK
 }
+@(enable_target_feature="sse")
 _MM_GET_EXCEPTION_STATE :: #force_inline proc "c" () -> u32 {
 	return _mm_getcsr() & _MM_EXCEPT_MASK
 }
+@(enable_target_feature="sse")
 _MM_GET_FLUSH_ZERO_MODE :: #force_inline proc "c" () -> u32 {
 	return _mm_getcsr() & _MM_FLUSH_ZERO_MASK
 }
+@(enable_target_feature="sse")
 _MM_GET_ROUNDING_MODE :: #force_inline proc "c" () -> u32 {
 	return _mm_getcsr() & _MM_ROUND_MASK
 }
 
+@(enable_target_feature="sse")
 _MM_SET_EXCEPTION_MASK :: #force_inline proc "c" (x: u32) {
 	_mm_setcsr((_mm_getcsr() &~ _MM_MASK_MASK) | x)
 }
+@(enable_target_feature="sse")
 _MM_SET_EXCEPTION_STATE :: #force_inline proc "c" (x: u32) {
 	_mm_setcsr((_mm_getcsr() &~ _MM_EXCEPT_MASK) | x)
 }
+@(enable_target_feature="sse")
 _MM_SET_FLUSH_ZERO_MODE :: #force_inline proc "c" (x: u32) {
 	_mm_setcsr((_mm_getcsr() &~ _MM_FLUSH_ZERO_MASK) | x)
 }
+@(enable_target_feature="sse")
 _MM_SET_ROUNDING_MODE :: #force_inline proc "c" (x: u32) {
 	_mm_setcsr((_mm_getcsr() &~ _MM_ROUND_MASK) | x)
 }
 
+@(enable_target_feature="sse")
 _mm_prefetch :: #force_inline proc "c" (p: rawptr, $STRATEGY: u32) {
 	prefetch(p, (STRATEGY>>2)&1, STRATEGY&3, 1)
 }
 
 
+@(enable_target_feature="sse")
 _mm_undefined_ps :: #force_inline proc "c" () -> __m128 {
 	return _mm_set1_ps(0)
 }
 
+@(enable_target_feature="sse")
 _MM_TRANSPOSE4_PS :: #force_inline proc "c" (row0, row1, row2, row3: ^__m128) {
 	tmp0 := _mm_unpacklo_ps(row0^, row1^)
 	tmp1 := _mm_unpacklo_ps(row2^, row3^)
@@ -413,17 +511,21 @@ _MM_TRANSPOSE4_PS :: #force_inline proc "c" (row0, row1, row2, row3: ^__m128) {
 	row3^ = _mm_movelh_ps(tmp3, tmp1)
 }
 
+@(enable_target_feature="sse")
 _mm_stream_ps :: #force_inline proc "c" (addr: [^]f32, a: __m128) {
 	intrinsics.non_temporal_store((^__m128)(addr), a)
 }
 
 when ODIN_ARCH == .amd64 {
+	@(enable_target_feature="sse")
 	_mm_cvtss_si64 :: #force_inline proc "c"(a: __m128) -> i64 {
 		return cvtss2si64(a)
 	}
+	@(enable_target_feature="sse")
 	_mm_cvttss_si64 :: #force_inline proc "c"(a: __m128) -> i64 {
 		return cvttss2si64(a)
 	}
+	@(enable_target_feature="sse")
 	_mm_cvtsi64_ss :: #force_inline proc "c"(a: __m128, b: i64) -> __m128 {
 		return cvtsi642ss(a, b)
 	}
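
A sketch of the compare-then-movemask pattern these SSE wrappers enable (hypothetical helper):

```odin
package example

import x86 "core:simd/x86"

// True if any lane of a is less than the matching lane of b.
@(enable_target_feature="sse")
any_less :: proc "c" (a, b: x86.__m128) -> bool {
	return x86._mm_movemask_ps(x86._mm_cmplt_ps(a, b)) != 0
}
```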

+ 223 - 0
core/simd/x86/sse2.odin

@@ -4,103 +4,135 @@ package simd_x86
 import "core:intrinsics"
 import "core:simd"
 
+@(enable_target_feature="sse2")
 _mm_pause :: #force_inline proc "c" () {
 	pause()
 }
+@(enable_target_feature="sse2")
 _mm_clflush :: #force_inline proc "c" (p: rawptr) {
 	clflush(p)
 }
+@(enable_target_feature="sse2")
 _mm_lfence :: #force_inline proc "c" () {
 	lfence()
 }
+@(enable_target_feature="sse2")
 _mm_mfence :: #force_inline proc "c" () {
 	mfence()
 }
 
+@(enable_target_feature="sse2")
 _mm_add_epi8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_add_epi16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_add_epi32 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_add_epi64 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add(transmute(i64x2)a, transmute(i64x2)b)
 }
+@(enable_target_feature="sse2")
 _mm_adds_epi8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add_sat(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_adds_epi16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add_sat(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_adds_epu8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add_sat(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_adds_epu16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)simd.add_sat(transmute(u16x8)a, transmute(u16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_avg_epu8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pavgb(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_avg_epu16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pavgw(transmute(u16x8)a, transmute(u16x8)b)
 }
 
+@(enable_target_feature="sse2")
 _mm_madd_epi16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pmaddwd(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_max_epi16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pmaxsw(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_max_epu8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pmaxub(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_min_epi16 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pminsw(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_min_epu8 :: #force_inline proc "c" (a, b: __m128i)  -> __m128i {
 	return transmute(__m128i)pminub(transmute(u8x16)a, transmute(u8x16)b)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_mulhi_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pmulhw(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_mulhi_epu16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pmulhuw(transmute(u16x8)a, transmute(u16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_mullo_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.mul(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_mul_epu32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pmuludq(transmute(u32x4)a, transmute(u32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_sad_epu8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)psadbw(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_sub_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_sub_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_sub_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_sub_epi64 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub(transmute(i64x2)a, transmute(i64x2)b)
 }
+@(enable_target_feature="sse2")
 _mm_subs_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub_sat(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_subs_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub_sat(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_subs_epu8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub_sat(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_subs_epu16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.sub_sat(transmute(u16x8)a, transmute(u16x8)b)
 }
@@ -108,6 +140,7 @@ _mm_subs_epu16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 
 
 @(private)
+@(enable_target_feature="sse2")
 _mm_slli_si128_impl :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	shift :: IMM8 & 0xff
 
@@ -134,6 +167,7 @@ _mm_slli_si128_impl :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128
 }
 
 @(private)
+@(enable_target_feature="sse2")
 _mm_srli_si128_impl :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	shift :: IMM8
 	return transmute(__m128i)simd.shuffle(
@@ -159,203 +193,264 @@ _mm_srli_si128_impl :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128
 }
 
 
+@(enable_target_feature="sse2")
 _mm_slli_si128 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return _mm_slli_si128_impl(a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_bslli_si128 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return _mm_slli_si128_impl(a, IMM8)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_bsrli_si128 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return _mm_srli_si128_impl(a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_slli_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)pslliw(transmute(i16x8)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_sll_epi16 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psllw(transmute(i16x8)a, transmute(i16x8)count)
 }
+@(enable_target_feature="sse2")
 _mm_slli_epi32 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psllid(transmute(i32x4)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_sll_epi32 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)pslld(transmute(i32x4)a, transmute(i32x4)count)
 }
+@(enable_target_feature="sse2")
 _mm_slli_epi64 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)pslliq(transmute(i64x2)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_sll_epi64 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psllq(transmute(i64x2)a, transmute(i64x2)count)
 }
+@(enable_target_feature="sse2")
 _mm_srai_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psraiw(transmute(i16x8)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_sra_epi16 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psraw(transmute(i16x8)a, transmute(i16x8)count)
 }
+@(enable_target_feature="sse2")
 _mm_srai_epi32 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psraid(transmute(i32x4)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_sra_epi32 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psrad(transmute(i32x4)a, transmute(i32x4)count)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_srli_si128 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return _mm_srli_si128_impl(a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_srli_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psrliw(transmute(i16x8)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_srl_epi16 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psrlw(transmute(i16x8)a, transmute(i16x8)count)
 }
+@(enable_target_feature="sse2")
 _mm_srli_epi32 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psrlid(transmute(i32x4)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_srl_epi32 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psrld(transmute(i32x4)a, transmute(i32x4)count)
 }
+@(enable_target_feature="sse2")
 _mm_srli_epi64 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)psrliq(transmute(i64x2)a, IMM8)
 }
+@(enable_target_feature="sse2")
 _mm_srl_epi64 :: #force_inline proc "c" (a, count: __m128i) -> __m128i {
 	return transmute(__m128i)psrlq(transmute(i64x2)a, transmute(i64x2)count)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_and_si128 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return simd.and(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_andnot_si128 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return simd.and_not(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_or_si128 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return simd.or(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_xor_si128 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return simd.xor(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpeq_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_eq(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpeq_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_eq(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpeq_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_eq(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpgt_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_gt(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpgt_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_gt(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmpgt_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_gt(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmplt_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_lt(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmplt_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_lt(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_cmplt_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.lanes_lt(transmute(i32x4)a, transmute(i32x4)b)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_cvtepi32_pd :: #force_inline proc "c" (a: __m128i) -> __m128d {
 	v := transmute(i32x4)a
 	return cast(__m128d)simd.shuffle(v, v, 0, 1)
 }
+@(enable_target_feature="sse2")
 _mm_cvtsi32_sd :: #force_inline proc "c" (a: __m128d, b: i32) -> __m128d {
 	return simd.replace(a, 0, f64(b))
 }
+@(enable_target_feature="sse2")
 _mm_cvtepi32_ps :: #force_inline proc "c" (a: __m128i) -> __m128 {
 	return cvtdq2ps(transmute(i32x4)a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtps_epi32 :: #force_inline proc "c" (a: __m128) -> __m128i {
 	return transmute(__m128i)cvtps2dq(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtsi32_si128 :: #force_inline proc "c" (a: i32) -> __m128i {
 	return transmute(__m128i)i32x4{a, 0, 0, 0}
 }
+@(enable_target_feature="sse2")
 _mm_cvtsi128_si32 :: #force_inline proc "c" (a: __m128i) -> i32 {
 	return simd.extract(transmute(i32x4)a, 0)
 }
 
 
 
+@(enable_target_feature="sse2")
 _mm_set_epi64x :: #force_inline proc "c" (e1, e0: i64) -> __m128i {
 	return transmute(__m128i)i64x2{e0, e1}
 }
+@(enable_target_feature="sse2")
 _mm_set_epi32 :: #force_inline proc "c" (e3, e2, e1, e0: i32) -> __m128i {
 	return transmute(__m128i)i32x4{e0, e1, e2, e3}
 }
+@(enable_target_feature="sse2")
 _mm_set_epi16 :: #force_inline proc "c" (e7, e6, e5, e4, e3, e2, e1, e0: i16) -> __m128i {
 	return transmute(__m128i)i16x8{e0, e1, e2, e3, e4, e5, e6, e7}
 }
+@(enable_target_feature="sse2")
 _mm_set_epi8 :: #force_inline proc "c" (e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0: i8) -> __m128i {
 	return transmute(__m128i)i8x16{e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15}
 }
+@(enable_target_feature="sse2")
 _mm_set1_epi64x :: #force_inline proc "c" (a: i64) -> __m128i {
 	return _mm_set_epi64x(a, a)
 }
+@(enable_target_feature="sse2")
 _mm_set1_epi32 :: #force_inline proc "c" (a: i32) -> __m128i {
 	return _mm_set_epi32(a, a, a, a)
 }
+@(enable_target_feature="sse2")
 _mm_set1_epi16 :: #force_inline proc "c" (a: i16) -> __m128i {
 	return _mm_set_epi16(a, a, a, a, a, a, a, a)
 }
+@(enable_target_feature="sse2")
 _mm_set1_epi8 :: #force_inline proc "c" (a: i8) -> __m128i {
 	return _mm_set_epi8(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)
 }
+@(enable_target_feature="sse2")
 _mm_setr_epi32 :: #force_inline proc "c" (e3, e2, e1, e0: i32) -> __m128i {
 	return _mm_set_epi32(e0, e1, e2, e3)
 }
+@(enable_target_feature="sse2")
 _mm_setr_epi16 :: #force_inline proc "c" (e7, e6, e5, e4, e3, e2, e1, e0: i16) -> __m128i {
 	return _mm_set_epi16(e0, e1, e2, e3, e4, e5, e6, e7)
 }
+@(enable_target_feature="sse2")
 _mm_setr_epi8 :: #force_inline proc "c" (e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0: i8) -> __m128i {
 	return _mm_set_epi8(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15)
 }
+@(enable_target_feature="sse2")
 _mm_setzero_si128 :: #force_inline proc "c" () -> __m128i {
 	return _mm_set1_epi64x(0)
 }
 
 
+@(enable_target_feature="sse2")
 _mm_loadl_epi64 :: #force_inline proc "c" (mem_addr: ^__m128i) -> __m128i {
 	return _mm_set_epi64x(0, intrinsics.unaligned_load((^i64)(mem_addr)))
 }
+@(enable_target_feature="sse2")
 _mm_load_si128 :: #force_inline proc "c" (mem_addr: ^__m128i) -> __m128i {
 	return mem_addr^
 }
+@(enable_target_feature="sse2")
 _mm_loadu_si128 :: #force_inline proc "c" (mem_addr: ^__m128i) -> __m128i {
 	dst := _mm_undefined_si128()
 	intrinsics.mem_copy_non_overlapping(&dst, mem_addr, size_of(__m128i))
 	return dst
 }
+@(enable_target_feature="sse2")
 _mm_maskmoveu_si128 :: #force_inline proc "c" (a, mask: __m128i, mem_addr: rawptr) {
 	maskmovdqu(transmute(i8x16)a, transmute(i8x16)mask, mem_addr)
 }
+@(enable_target_feature="sse2")
 _mm_store_si128 :: #force_inline proc "c" (mem_addr: ^__m128i, a: __m128i) {
 	mem_addr^ = a
 }
+@(enable_target_feature="sse2")
 _mm_storeu_si128 :: #force_inline proc "c" (mem_addr: ^__m128i, a: __m128i) {
 	storeudq(mem_addr, a)
 }
+@(enable_target_feature="sse2")
 _mm_storel_epi64 :: #force_inline proc "c" (mem_addr: ^__m128i, a: __m128i) {
 	a := a
 	intrinsics.mem_copy_non_overlapping(mem_addr, &a, 8)
 }
+@(enable_target_feature="sse2")
 _mm_stream_si128 :: #force_inline proc "c" (mem_addr: ^__m128i, a: __m128i) {
 	intrinsics.non_temporal_store(mem_addr, a)
 }
+@(enable_target_feature="sse2")
 _mm_stream_si32 :: #force_inline proc "c" (mem_addr: ^i32, a: i32) {
 	intrinsics.non_temporal_store(mem_addr, a)
 }
+@(enable_target_feature="sse2")
 _mm_move_epi64 :: #force_inline proc "c" (a: __m128i) -> __m128i {
 	zero := _mm_setzero_si128()
 	return transmute(__m128i)simd.shuffle(transmute(i64x2)a, transmute(i64x2)zero, 0, 2)
@@ -364,24 +459,31 @@ _mm_move_epi64 :: #force_inline proc "c" (a: __m128i) -> __m128i {
 
 
 
+@(enable_target_feature="sse2")
 _mm_packs_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)packsswb(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_packs_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)packssdw(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="sse2")
 _mm_packus_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)packuswb(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="sse2")
 _mm_extract_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> i32 {
 	return i32(simd.extract(transmute(u16x8)a, IMM8))
 }
+@(enable_target_feature="sse2")
 _mm_insert_epi16 :: #force_inline proc "c" (a: __m128i, i: i32, $IMM8: u32) -> __m128i {
 	return transmute(__m128i)simd.replace(transmute(i16x8)a, IMM8, i16(i))
 }
+@(enable_target_feature="sse2")
 _mm_movemask_epi8 :: #force_inline proc "c" (a: __m128i) -> i32 {
 	return pmovmskb(transmute(i8x16)a)
 }
+@(enable_target_feature="sse2")
 _mm_shuffle_epi32 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	v := transmute(i32x4)a
 	return transmute(__m128i)simd.shuffle(
@@ -393,6 +495,7 @@ _mm_shuffle_epi32 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i
 		(IMM8 >> 6) & 0b11,
 	)
 }
+@(enable_target_feature="sse2")
 _mm_shufflehi_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	v := transmute(i16x8)a
 	return transmute(__m128i)simd.shuffle(
@@ -408,6 +511,7 @@ _mm_shufflehi_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128
 		((IMM8 >> 6) & 0b11) + 4,
 	)
 }
+@(enable_target_feature="sse2")
 _mm_shufflelo_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128i {
 	v := transmute(i16x8)a
 	return transmute(__m128i)simd.shuffle(
@@ -423,6 +527,7 @@ _mm_shufflelo_epi16 :: #force_inline proc "c" (a: __m128i, $IMM8: u32) -> __m128
 		7,
 	)
 }
+@(enable_target_feature="sse2")
 _mm_unpackhi_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(
 	        transmute(i8x16)a,
@@ -430,15 +535,19 @@ _mm_unpackhi_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
         	8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31,
 	)
 }
+@(enable_target_feature="sse2")
 _mm_unpackhi_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i16x8)a, transmute(i16x8)b, 4, 12, 5, 13, 6, 14, 7, 15)
 }
+@(enable_target_feature="sse2")
 _mm_unpackhi_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i32x4)a, transmute(i32x4)b, 2, 6, 3, 7)
 }
+@(enable_target_feature="sse2")
 _mm_unpackhi_epi64 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i64x2)a, transmute(i64x2)b, 1, 3)
 }
+@(enable_target_feature="sse2")
 _mm_unpacklo_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(
 	        transmute(i8x16)a,
@@ -446,12 +555,15 @@ _mm_unpacklo_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
         	0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23,
 	)
 }
+@(enable_target_feature="sse2")
 _mm_unpacklo_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i16x8)a, transmute(i16x8)b, 0, 8, 1, 9, 2, 10, 3, 11)
 }
+@(enable_target_feature="sse2")
 _mm_unpacklo_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i32x4)a, transmute(i32x4)b, 0, 4, 1, 5)
 }
+@(enable_target_feature="sse2")
 _mm_unpacklo_epi64 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)simd.shuffle(transmute(i64x2)a, transmute(i64x2)b, 0, 2)
 }
@@ -459,57 +571,75 @@ _mm_unpacklo_epi64 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 
 
 
+@(enable_target_feature="sse2")
 _mm_add_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(a, 0, _mm_cvtsd_f64(a) + _mm_cvtsd_f64(b))
 }
+@(enable_target_feature="sse2")
 _mm_add_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.add(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_div_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(a, 0, _mm_cvtsd_f64(a) / _mm_cvtsd_f64(b))
 }
+@(enable_target_feature="sse2")
 _mm_div_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.div(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_max_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return maxsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_max_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return maxpd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_min_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return minsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_min_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return minpd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_mul_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(a, 0, _mm_cvtsd_f64(a) * _mm_cvtsd_f64(b))
 }
+@(enable_target_feature="sse2")
 _mm_mul_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.mul(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_sqrt_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(a, 0, _mm_cvtsd_f64(sqrtsd(b)))
 }
+@(enable_target_feature="sse2")
 _mm_sqrt_pd :: #force_inline proc "c" (a: __m128d) -> __m128d {
 	return simd.sqrt(a)
 }
+@(enable_target_feature="sse2")
 _mm_sub_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(a, 0, _mm_cvtsd_f64(a) - _mm_cvtsd_f64(b))
 }
+@(enable_target_feature="sse2")
 _mm_sub_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.sub(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_and_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return transmute(__m128d)_mm_and_si128(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse2")
 _mm_andnot_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return transmute(__m128d)_mm_andnot_si128(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse2")
 _mm_or_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return transmute(__m128d)_mm_or_si128(transmute(__m128i)a, transmute(__m128i)b)
 }
+@(enable_target_feature="sse2")
 _mm_xor_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return transmute(__m128d)_mm_xor_si128(transmute(__m128i)a, transmute(__m128i)b)
 }
@@ -517,111 +647,147 @@ _mm_xor_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 
 
 
+@(enable_target_feature="sse2")
 _mm_cmpeq_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 0)
 }
+@(enable_target_feature="sse2")
 _mm_cmplt_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 1)
 }
+@(enable_target_feature="sse2")
 _mm_cmple_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 2)
 }
+@(enable_target_feature="sse2")
 _mm_cmpgt_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(_mm_cmplt_sd(b, a), 1, simd.extract(a, 1))
 }
+@(enable_target_feature="sse2")
 _mm_cmpge_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(_mm_cmple_sd(b, a), 1, simd.extract(a, 1))
 }
+@(enable_target_feature="sse2")
 _mm_cmpord_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 7)
 }
+@(enable_target_feature="sse2")
 _mm_cmpunord_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 3)
 }
+@(enable_target_feature="sse2")
 _mm_cmpneq_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 4)
 }
+@(enable_target_feature="sse2")
 _mm_cmpnlt_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 5)
 }
+@(enable_target_feature="sse2")
 _mm_cmpnle_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmpsd(a, b, 6)
 }
+@(enable_target_feature="sse2")
 _mm_cmpngt_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(_mm_cmpnlt_sd(b, a), 1, simd.extract(a, 1))
 }
+@(enable_target_feature="sse2")
 _mm_cmpnge_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.replace(_mm_cmpnle_sd(b, a), 1, simd.extract(a, 1))
 }
+@(enable_target_feature="sse2")
 _mm_cmpeq_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 0)
 }
+@(enable_target_feature="sse2")
 _mm_cmplt_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 1)
 }
+@(enable_target_feature="sse2")
 _mm_cmple_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 2)
 }
+@(enable_target_feature="sse2")
 _mm_cmpgt_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return _mm_cmplt_pd(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_cmpge_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return _mm_cmple_pd(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_cmpord_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 7)
 }
+@(enable_target_feature="sse2")
 _mm_cmpunord_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 3)
 }
+@(enable_target_feature="sse2")
 _mm_cmpneq_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 4)
 }
+@(enable_target_feature="sse2")
 _mm_cmpnlt_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 5)
 }
+@(enable_target_feature="sse2")
 _mm_cmpnle_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return cmppd(a, b, 6)
 }
+@(enable_target_feature="sse2")
 _mm_cmpngt_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return _mm_cmpnlt_pd(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_cmpnge_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return _mm_cmpnle_pd(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_comieq_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comieqsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_comilt_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comiltsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_comile_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comilesd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_comigt_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comigtsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_comige_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comigesd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_comineq_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return comineqsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomieq_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomieqsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomilt_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomiltsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomile_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomilesd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomigt_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomigtsd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomige_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomigesd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_ucomineq_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 	return ucomineqsd(a, b)
 }
@@ -630,115 +796,151 @@ _mm_ucomineq_sd :: #force_inline proc "c" (a, b: __m128d) -> i32 {
 
 
 
+@(enable_target_feature="sse2")
 _mm_cvtpd_ps :: #force_inline proc "c" (a: __m128d) -> __m128 {
 	return cvtpd2ps(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtps_pd :: #force_inline proc "c" (a: __m128) -> __m128d {
 	return cvtps2pd(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtpd_epi32 :: #force_inline proc "c" (a: __m128d) -> __m128i {
 	return transmute(__m128i)cvtpd2dq(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtsd_si32 :: #force_inline proc "c" (a: __m128d) -> i32 {
 	return cvtsd2si(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvtsd_ss :: #force_inline proc "c" (a, b: __m128d) -> __m128 {
 	return cvtsd2ss(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_cvtsd_f64 :: #force_inline proc "c" (a: __m128d) -> f64 {
 	return simd.extract(a, 0)
 }
+@(enable_target_feature="sse2")
 _mm_cvtss_sd :: #force_inline proc "c" (a, b: __m128) -> __m128d {
 	return cvtss2sd(a, b)
 }
+@(enable_target_feature="sse2")
 _mm_cvttpd_epi32 :: #force_inline proc "c" (a: __m128d) -> __m128i {
 	return transmute(__m128i)cvttpd2dq(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvttsd_si32 :: #force_inline proc "c" (a: __m128d) -> i32 {
 	return cvttsd2si(a)
 }
+@(enable_target_feature="sse2")
 _mm_cvttps_epi32 :: #force_inline proc "c" (a: __m128) -> __m128i {
 	return transmute(__m128i)cvttps2dq(a)
 }
+@(enable_target_feature="sse2")
 _mm_set_sd :: #force_inline proc "c" (a: f64) -> __m128d {
 	return _mm_set_pd(0.0, a)
 }
+@(enable_target_feature="sse2")
 _mm_set1_pd :: #force_inline proc "c" (a: f64) -> __m128d {
 	return _mm_set_pd(a, a)
 }
+@(enable_target_feature="sse2")
 _mm_set_pd1 :: #force_inline proc "c" (a: f64) -> __m128d {
 	return _mm_set_pd(a, a)
 }
+@(enable_target_feature="sse2")
 _mm_set_pd :: #force_inline proc "c" (a: f64, b: f64) -> __m128d {
 	return __m128d{b, a}
 }
+@(enable_target_feature="sse2")
 _mm_setr_pd :: #force_inline proc "c" (a: f64, b: f64) -> __m128d {
 	return _mm_set_pd(b, a)
 }
+@(enable_target_feature="sse2")
 _mm_setzero_pd :: #force_inline proc "c" () -> __m128d {
 	return _mm_set_pd(0.0, 0.0)
 }
+@(enable_target_feature="sse2")
 _mm_movemask_pd :: #force_inline proc "c" (a: __m128d) -> i32 {
 	return movmskpd(a)
 }
+@(enable_target_feature="sse2")
 _mm_load_pd :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	return (^__m128d)(mem_addr)^
 }
+@(enable_target_feature="sse2")
 _mm_load_sd :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	return _mm_setr_pd(mem_addr^, 0.)
 }
+@(enable_target_feature="sse2")
 _mm_loadh_pd :: #force_inline proc "c" (a: __m128d, mem_addr: ^f64) -> __m128d {
 	return _mm_setr_pd(simd.extract(a, 0), mem_addr^)
 }
+@(enable_target_feature="sse2")
 _mm_loadl_pd :: #force_inline proc "c" (a: __m128d, mem_addr: ^f64) -> __m128d {
 	return _mm_setr_pd(mem_addr^, simd.extract(a, 1))
 }
+@(enable_target_feature="sse2")
 _mm_stream_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	intrinsics.non_temporal_store((^__m128d)(mem_addr), a)
 }
+@(enable_target_feature="sse2")
 _mm_store_sd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	mem_addr^ = simd.extract(a, 0)
 }
+@(enable_target_feature="sse2")
 _mm_store_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	(^__m128d)(mem_addr)^ = a
 }
+@(enable_target_feature="sse2")
 _mm_storeu_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	storeupd(mem_addr, a)
 }
+@(enable_target_feature="sse2")
 _mm_store1_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	(^__m128d)(mem_addr)^ = simd.shuffle(a, a, 0, 0)
 }
+@(enable_target_feature="sse2")
 _mm_store_pd1 :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	(^__m128d)(mem_addr)^ = simd.shuffle(a, a, 0, 0)
 }
+@(enable_target_feature="sse2")
 _mm_storer_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	(^__m128d)(mem_addr)^ = simd.shuffle(a, a, 1, 0)
 }
+@(enable_target_feature="sse2")
 _mm_storeh_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	mem_addr^ = simd.extract(a, 1)
 }
+@(enable_target_feature="sse2")
 _mm_storel_pd :: #force_inline proc "c" (mem_addr: ^f64, a: __m128d) {
 	mem_addr^ = simd.extract(a, 0)
 }
+@(enable_target_feature="sse2")
 _mm_load1_pd :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	d := mem_addr^
 	return _mm_setr_pd(d, d)
 }
+@(enable_target_feature="sse2")
 _mm_load_pd1 :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	return _mm_load1_pd(mem_addr)
 }
+@(enable_target_feature="sse2")
 _mm_loadr_pd :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	a := _mm_load_pd(mem_addr)
 	return simd.shuffle(a, a, 1, 0)
 }
+@(enable_target_feature="sse2")
 _mm_loadu_pd :: #force_inline proc "c" (mem_addr: ^f64) -> __m128d {
 	dst := _mm_undefined_pd()
 	intrinsics.mem_copy_non_overlapping(&dst, mem_addr, size_of(__m128d))
 	return dst
 }
+@(enable_target_feature="sse2")
 _mm_shuffle_pd :: #force_inline proc "c" (a, b: __m128d, $MASK: u32) -> __m128d {
 	return simd.shuffle(a, b, MASK&0b1, ((MASK>>1)&0b1) + 2)
 }
+@(enable_target_feature="sse2")
 _mm_move_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return _mm_setr_pd(simd.extract(b, 0), simd.extract(a, 1))
 }
@@ -746,71 +948,92 @@ _mm_move_sd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 
 
 
+@(enable_target_feature="sse2")
 _mm_castpd_ps :: #force_inline proc "c" (a: __m128d) -> __m128 {
 	return transmute(__m128)a
 }
+@(enable_target_feature="sse2")
 _mm_castpd_si128 :: #force_inline proc "c" (a: __m128d) -> __m128i {
 	return transmute(__m128i)a
 }
+@(enable_target_feature="sse2")
 _mm_castps_pd :: #force_inline proc "c" (a: __m128) -> __m128d {
 	return transmute(__m128d)a
 }
+@(enable_target_feature="sse2")
 _mm_castps_si128 :: #force_inline proc "c" (a: __m128) -> __m128i {
 	return transmute(__m128i)a
 }
+@(enable_target_feature="sse2")
 _mm_castsi128_pd :: #force_inline proc "c" (a: __m128i) -> __m128d {
 	return transmute(__m128d)a
 }
+@(enable_target_feature="sse2")
 _mm_castsi128_ps :: #force_inline proc "c" (a: __m128i) -> __m128 {
 	return transmute(__m128)a
 }
 
 
+@(enable_target_feature="sse2")
 _mm_undefined_pd :: #force_inline proc "c" () -> __m128d {
 	return __m128d{0, 0}
 }
+@(enable_target_feature="sse2")
 _mm_undefined_si128 :: #force_inline proc "c" () -> __m128i {
 	return __m128i{0, 0}
 }
+@(enable_target_feature="sse2")
 _mm_unpackhi_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.shuffle(a, b, 1, 3)
 }
+@(enable_target_feature="sse2")
 _mm_unpacklo_pd :: #force_inline proc "c" (a, b: __m128d) -> __m128d {
 	return simd.shuffle(a, b, 0, 2)
 }
 
 
 when ODIN_ARCH == .amd64 {
+	@(enable_target_feature="sse2")
 	_mm_cvtsd_si64 :: #force_inline proc "c" (a: __m128d) -> i64 {
 		return cvtsd2si64(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsd_si64x :: #force_inline proc "c" (a: __m128d) -> i64 {
 		return _mm_cvtsd_si64(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvttsd_si64 :: #force_inline proc "c" (a: __m128d) -> i64 {
 		return cvttsd2si64(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvttsd_si64x :: #force_inline proc "c" (a: __m128d) -> i64 {
 		return _mm_cvttsd_si64(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_stream_si64 :: #force_inline proc "c" (mem_addr: ^i64, a: i64) {
 		intrinsics.non_temporal_store(mem_addr, a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi64_si128 :: #force_inline proc "c" (a: i64) -> __m128i {
 		return _mm_set_epi64x(0, a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi64x_si128 :: #force_inline proc "c" (a: i64) -> __m128i {
 		return _mm_cvtsi64_si128(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi128_si64 :: #force_inline proc "c" (a: __m128i) -> i64 {
 		return simd.extract(transmute(i64x2)a, 0)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi128_si64x :: #force_inline proc "c" (a: __m128i) -> i64 {
 		return _mm_cvtsi128_si64(a)
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi64_sd :: #force_inline proc "c" (a: __m128d, b: i64) -> __m128d {
 		return simd.replace(a, 0, f64(b))
 	}
+	@(enable_target_feature="sse2")
 	_mm_cvtsi64x_sd :: #force_inline proc "c" (a: __m128d, b: i64) -> __m128d {
 		return _mm_cvtsi64_sd(a, b)
 	}
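
A sketch of the unaligned store path through the new SSE2 wrappers (hypothetical helper):

```odin
package example

import x86 "core:simd/x86"

// Broadcast value into all 16 byte lanes and store it without any
// alignment requirement on dst.
@(enable_target_feature="sse2")
fill16 :: proc "c" (dst: ^x86.__m128i, value: i8) {
	x86._mm_storeu_si128(dst, x86._mm_set1_epi8(value))
}
```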

+ 11 - 0
core/simd/x86/sse3.odin

@@ -4,36 +4,47 @@ package simd_x86
 import "core:intrinsics"
 import "core:simd"
 
+@(enable_target_feature="sse3")
 _mm_addsub_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return addsubps(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_addsub_pd :: #force_inline proc "c" (a: __m128d, b: __m128d) -> __m128d {
 	return addsubpd(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_hadd_pd :: #force_inline proc "c" (a: __m128d, b: __m128d) -> __m128d {
 	return haddpd(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_hadd_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return haddps(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_hsub_pd :: #force_inline proc "c" (a: __m128d, b: __m128d) -> __m128d {
 	return hsubpd(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_hsub_ps :: #force_inline proc "c" (a, b: __m128) -> __m128 {
 	return hsubps(a, b)
 }
+@(enable_target_feature="sse3")
 _mm_lddqu_si128 :: #force_inline proc "c" (mem_addr: ^__m128i) -> __m128i {
 	return transmute(__m128i)lddqu(mem_addr)
 }
+@(enable_target_feature="sse3")
 _mm_movedup_pd :: #force_inline proc "c" (a: __m128d) -> __m128d {
 	return simd.shuffle(a, a, 0, 0)
 }
+@(enable_target_feature="sse3")
 _mm_loaddup_pd :: #force_inline proc "c" (mem_addr: [^]f64) -> __m128d {
 	return _mm_load1_pd(mem_addr)
 }
+@(enable_target_feature="sse3")
 _mm_movehdup_ps :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return simd.shuffle(a, a, 1, 1, 3, 3)
 }
+@(enable_target_feature="sse3")
 _mm_moveldup_ps :: #force_inline proc "c" (a: __m128) -> __m128 {
 	return simd.shuffle(a, a, 0, 0, 2, 2)
 }
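
A sketch of the classic horizontal-sum idiom via `haddpd` (hypothetical helper; sse2 is listed alongside sse3 so the wrappers it calls into are covered too):

```odin
package example

import x86 "core:simd/x86"

// Sum of the two f64 lanes of a.
@(enable_target_feature="sse2,sse3")
hsum_pd :: proc "c" (a: x86.__m128d) -> f64 {
	return x86._mm_cvtsd_f64(x86._mm_hadd_pd(a, a))
}
```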

+ 16 - 0
core/simd/x86/ssse3.odin

@@ -5,18 +5,23 @@ import "core:intrinsics"
 import "core:simd"
 _ :: simd
 
+@(enable_target_feature="ssse3")
 _mm_abs_epi8 :: #force_inline proc "c" (a: __m128i) -> __m128i {
 	return transmute(__m128i)pabsb128(transmute(i8x16)a)
 }
+@(enable_target_feature="ssse3")
 _mm_abs_epi16 :: #force_inline proc "c" (a: __m128i) -> __m128i {
 	return transmute(__m128i)pabsw128(transmute(i16x8)a)
 }
+@(enable_target_feature="ssse3")
 _mm_abs_epi32 :: #force_inline proc "c" (a: __m128i) -> __m128i {
 	return transmute(__m128i)pabsd128(transmute(i32x4)a)
 }
+@(enable_target_feature="ssse3")
 _mm_shuffle_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pshufb128(transmute(u8x16)a, transmute(u8x16)b)
 }
+@(enable_target_feature="ssse3")
 _mm_alignr_epi8 :: #force_inline proc "c" (a, b: __m128i, $IMM8: u32) -> __m128i {
 	shift :: IMM8
 
@@ -53,36 +58,47 @@ _mm_alignr_epi8 :: #force_inline proc "c" (a, b: __m128i, $IMM8: u32) -> __m128i
 }
 
 
+@(enable_target_feature="ssse3")
 _mm_hadd_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phaddw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_hadds_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phaddsw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_hadd_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phaddd128(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="ssse3")
 _mm_hsub_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phsubw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_hsubs_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phsubsw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_hsub_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)phsubd128(transmute(i32x4)a, transmute(i32x4)b)
 }
+@(enable_target_feature="ssse3")
 _mm_maddubs_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pmaddubsw128(transmute(u8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="ssse3")
 _mm_mulhrs_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)pmulhrsw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_sign_epi8 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)psignb128(transmute(i8x16)a, transmute(i8x16)b)
 }
+@(enable_target_feature="ssse3")
 _mm_sign_epi16 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)psignw128(transmute(i16x8)a, transmute(i16x8)b)
 }
+@(enable_target_feature="ssse3")
 _mm_sign_epi32 :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
 	return transmute(__m128i)psignd128(transmute(i32x4)a, transmute(i32x4)b)
 }
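
The attribute value may also name several features at once; split_by_comma in src/build_settings.cpp below handles the list. A sketch, continuing the x86 import alias from the example above:

// Both features are enabled for this one procedure only.
@(enable_target_feature="ssse3,sse3")
abs_of_signed :: proc "c" (a, b: x86.__m128i) -> x86.__m128i {
	return x86._mm_abs_epi8(x86._mm_sign_epi8(a, b))
}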

+ 107 - 2
src/build_settings.cpp

@@ -256,7 +256,6 @@ struct BuildContext {
 	String extra_linker_flags;
 	String extra_assembler_flags;
 	String microarch;
-	String target_features;
 	BuildModeKind build_mode;
 	bool   generate_docs;
 	i32    optimization_level;
@@ -320,6 +319,10 @@ struct BuildContext {
 
 	PtrMap<char const *, ExactValue> defined_values;
 
+	BlockingMutex target_features_mutex;
+	StringSet target_features_set;
+	String target_features_string;
+
 };
 
 gb_global BuildContext build_context = {0};
@@ -1197,6 +1200,100 @@ void init_build_context(TargetMetrics *cross_target) {
 #include "microsoft_craziness.h"
 #endif
 
+
+Array<String> split_by_comma(String const &list) {
+	isize n = 1;
+	for (isize i = 0; i < list.len; i++) {
+		if (list.text[i] == ',') {
+			n++;
+		}
+	}
+	auto res = array_make<String>(heap_allocator(), n);
+
+	String s = list;
+	for (isize i = 0; i < n; i++) {
+		isize m = string_index_byte(s, ',');
+		if (m < 0) {
+			res[i] = s;
+			break;
+		}
+		res[i] = substring(s, 0, m);
+		s = substring(s, m+1, s.len);
+	}
+	return res;
+}
+
+bool check_target_feature_is_valid(TokenPos pos, String const &feature) {
+	// TODO(bill): check_target_feature_is_valid
+	return true;
+}
+
+bool check_target_feature_is_enabled(TokenPos pos, String const &target_feature_list) {
+	BuildContext *bc = &build_context;
+	mutex_lock(&bc->target_features_mutex);
+	defer (mutex_unlock(&bc->target_features_mutex));
+
+	auto items = split_by_comma(target_feature_list);
+	defer (array_free(&items));
+	for_array(i, items) {
+		String const &item = items.data[i];
+		if (!check_target_feature_is_valid(pos, item)) {
+			error(pos, "Target feature '%.*s' is not valid", LIT(item));
+			return false;
+		}
+		if (!string_set_exists(&bc->target_features_set, item)) {
+			error(pos, "Target feature '%.*s' is not enabled", LIT(item));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+void enable_target_feature(TokenPos pos, String const &target_feature_list) {
+	BuildContext *bc = &build_context;
+	mutex_lock(&bc->target_features_mutex);
+	defer (mutex_unlock(&bc->target_features_mutex));
+
+	auto items = split_by_comma(target_feature_list);
+	defer (array_free(&items));
+	for_array(i, items) {
+		String const &item = items.data[i];
+		if (!check_target_feature_is_valid(pos, item)) {
+			error(pos, "Target feature '%.*s' is not valid", LIT(item));
+		} else {
+			string_set_add(&bc->target_features_set, item);
+		}
+	}
+}
+
+
+char const *target_features_set_to_cstring(gbAllocator allocator, bool with_quotes) {
+	isize len = 0;
+	for_array(i, build_context.target_features_set.entries) {
+		if (i != 0) {
+			len += 1;
+		}
+		String feature = build_context.target_features_set.entries[i].value;
+		len += feature.len;
+		if (with_quotes) len += 2;
+	}
+	char *features = gb_alloc_array(allocator, char, len+1);
+	len = 0;
+	for_array(i, build_context.target_features_set.entries) {
+		if (i != 0) {
+			features[len++] = ',';
+		}
+
+		if (with_quotes) features[len++] = '"';
+		String feature = build_context.target_features_set.entries[i].value;
+		gb_memmove(features+len, feature.text, feature.len);
+		len += feature.len;
+		if (with_quotes) features[len++] = '"';
+	}
+	features[len++] = 0;
+
+	return features;
+}
+
 // NOTE(Jeroen): Set/create the output and other paths and report an error as appropriate.
 // We've previously called `parse_build_flags`, so `out_filepath` should be set.
 bool init_build_paths(String init_filename) {
@@ -1206,6 +1303,9 @@ bool init_build_paths(String init_filename) {
 	// NOTE(Jeroen): We're pre-allocating BuildPathCOUNT slots so that certain paths are always at the same enumerated index.
 	array_init(&bc->build_paths, permanent_allocator(), BuildPathCOUNT);
 
+	string_set_init(&bc->target_features_set, heap_allocator(), 1024);
+	mutex_init(&bc->target_features_mutex);
+
 	// [BuildPathMainPackage] Turn given init path into a `Path`, which includes normalizing it into a full path.
 	bc->build_paths[BuildPath_Main_Package] = path_from_string(ha, init_filename);
 
@@ -1382,5 +1482,10 @@ bool init_build_paths(String init_filename) {
 		return false;
 	}
 
+	if (bc->target_features_string.len != 0) {
+		enable_target_feature({}, bc->target_features_string);
+	}
+
 	return true;
-}
+}
+
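
End to end, the new set is what check_target_feature_is_enabled consults. Assuming the flag is spelled -target-features: (per BuildFlag_TargetFeatures in src/main.cpp below), building with odin build . -target-features:"sha" lower-cases and comma-splits the value into target_features_set, after which a declaration like this sketch passes the checker:

@(require_target_feature="sha")
sha1_step :: proc "c" (a, b: x86.__m128i) -> x86.__m128i {
	return x86._mm_sha1msg1_epu32(a, b) // wrapper from core/simd/x86/sha.odin
}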

+ 12 - 0
src/check_decl.cpp

@@ -899,6 +899,18 @@ void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
 		}
 	}
 
+	if (ac.require_target_feature.len != 0 && ac.enable_target_feature.len != 0) {
+		error(e->token, "Attributes @(require_target_feature=...) and @(enable_target_feature=...) cannot be used together");
+	} else if (ac.require_target_feature.len != 0) {
+		if (check_target_feature_is_enabled(e->token.pos, ac.require_target_feature)) {
+			e->Procedure.target_feature = ac.require_target_feature;
+		} else {
+			e->Procedure.target_feature_disabled = true;
+		}
+	} else if (ac.enable_target_feature.len != 0) {
+		enable_target_feature(e->token.pos, ac.enable_target_feature);
+		e->Procedure.target_feature = ac.enable_target_feature;
+	}
 
 	switch (e->Procedure.optimization_mode) {
 	case ProcedureOptimizationMode_None:
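
For reference, the new mutual-exclusion diagnostic fires on a declaration like this sketch:

@(require_target_feature="sha")
@(enable_target_feature="sha")
bad :: proc "c" () {
	// error: Attributes @(require_target_feature=...) and
	// @(enable_target_feature=...) cannot be used together
}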

+ 16 - 0
src/checker.cpp

@@ -3207,6 +3207,22 @@ DECL_ATTRIBUTE_PROC(proc_decl_attribute) {
 			}
 		}
 		return true;
+	} else if (name == "require_target_feature") {
+		ExactValue ev = check_decl_attribute_value(c, value);
+		if (ev.kind == ExactValue_String) {
+			ac->require_target_feature = ev.value_string;
+		} else {
+			error(elem, "Expected a string value for '%.*s'", LIT(name));
+		}
+		return true;
+	} else if (name == "enable_target_feature") {
+		ExactValue ev = check_decl_attribute_value(c, value);
+		if (ev.kind == ExactValue_String) {
+			ac->enable_target_feature = ev.value_string;
+		} else {
+			error(elem, "Expected a string value for '%.*s'", LIT(name));
+		}
+		return true;
 	}
 	return false;
 }
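
And the attribute value must be a string; anything else hits the new error branch, as in this sketch:

@(enable_target_feature=4) // error: Expected a string value for 'enable_target_feature'
bad_value :: proc "c" () {}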

+ 3 - 0
src/checker.hpp

@@ -124,6 +124,9 @@ struct AttributeContext {
 	String  objc_name;
 	bool    objc_is_class_method;
 	Type *  objc_type;
+
+	String require_target_feature; // required by the target micro-architecture
+	String enable_target_feature;  // will be enabled for the procedure only
 };
 
 AttributeContext make_attribute_context(String link_prefix) {

+ 5 - 3
src/entity.cpp

@@ -233,10 +233,12 @@ struct Entity {
 			String  link_name;
 			String  link_prefix;
 			DeferredProcedure deferred_procedure;
-			bool    is_foreign;
-			bool    is_export;
-			bool    generated_from_polymorphic;
 			ProcedureOptimizationMode optimization_mode;
+			bool    is_foreign                 : 1;
+			bool    is_export                  : 1;
+			bool    generated_from_polymorphic : 1;
+			bool    target_feature_disabled    : 1;
+			String  target_feature;
 		} Procedure;
 		struct {
 			Array<Entity *> entities;

+ 2 - 2
src/llvm_backend.cpp

@@ -1332,8 +1332,8 @@ void lb_generate_code(lbGenerator *gen) {
 		}
 	}
 
-	if (build_context.target_features.len != 0) {
-		llvm_features = alloc_cstring(permanent_allocator(), build_context.target_features);
+	if (build_context.target_features_set.entries.count != 0) {
+		llvm_features = target_features_set_to_cstring(permanent_allocator(), false);
 	}
 
 	// GB_ASSERT_MSG(LLVMTargetHasAsmBackend(target));

+ 13 - 0
src/llvm_backend_proc.cpp

@@ -169,6 +169,19 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
 		}
 	}
 
+	if (!entity->Procedure.target_feature_disabled &&
+	    entity->Procedure.target_feature.len != 0) {
+		auto features = split_by_comma(entity->Procedure.target_feature);
+		defer (array_free(&features));
+		for_array(i, features) {
+			String feature = features[i];
+			LLVMAttributeRef ref = LLVMCreateStringAttribute(
+				m->ctx,
+				cast(char const *)feature.text, cast(unsigned)feature.len,
+				"", 0);
+			LLVMAddAttributeAtIndex(p->value, LLVMAttributeIndex_FunctionIndex, ref);
+		}
+	}
+
 	if (entity->flags & EntityFlag_Cold) {
 		lb_add_attribute_to_proc(m, p->value, "cold");
 	}

+ 2 - 2
src/main.cpp

@@ -1376,8 +1376,8 @@ bool parse_build_flags(Array<String> args) {
 						}
 						case BuildFlag_TargetFeatures: {
 							GB_ASSERT(value.kind == ExactValue_String);
-							build_context.target_features = value.value_string;
-							string_to_lower(&build_context.target_features);
+							build_context.target_features_string = value.value_string;
+							string_to_lower(&build_context.target_features_string);
 							break;
 						}
 						case BuildFlag_RelocMode: {

+ 9 - 0
src/string.cpp

@@ -157,6 +157,15 @@ int string_compare(String const &x, String const &y) {
 	return 0;
 }
 
+isize string_index_byte(String const &s, u8 x) {
+	for (isize i = 0; i < s.len; i++) {
+		if (s.text[i] == x) {
+			return i;
+		}
+	}
+	return -1;
+}
+
 GB_COMPARE_PROC(string_cmp_proc) {
 	String x = *(String *)a;
 	String y = *(String *)b;