Fixing errors reported by the GitHub test server.

David Piuva, 10 months ago
commit a0d397d196

+ 3 - 0
Source/DFPSR/History.txt

@@ -73,3 +73,6 @@ Changes from version 0.2.0 to version 0.3.0 (Performance, safety and template im
 		Replace 'getAlpha' with 'packOrder_getAlpha'.
 		Replace 'packBytes' with 'packOrder_packBytes'.
 		Replace 'floatToSaturatedByte' with 'packOrder_floatToSaturatedByte'.
+	* Because the new string printing uses exact matching for basic types, bool is now printed as true or false instead of 1 or 0.
+		Cast to uint32_t if you want bool interpreted as an integer as before.
+		Does not affect PersistentBoolean.
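
For example, a minimal sketch of the new behavior (names are hypothetical):

	String message;
	bool enabled = true;
	string_append(message, U"enabled = ", enabled);           // appends "enabled = true"
	string_append(message, U"enabled = ", (uint32_t)enabled); // appends "enabled = 1", as in version 0.2.0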

+ 7 - 3
Source/DFPSR/api/fileAPI.cpp

@@ -114,11 +114,13 @@ Buffer file_loadBuffer(const ReadableString& filename, bool mustExist) {
 	if (file != nullptr) {
 		// Get the file's size by going to the end, measuring, and going back
 		fseek(file, 0L, SEEK_END);
-		int64_t fileSize = ftell(file);
+		uintptr_t fileSize = ftell(file);
 		rewind(file);
 		// Allocate a buffer of the file's size
 		Buffer buffer = buffer_create(fileSize);
-		fread((void*)buffer_dangerous_getUnsafeData(buffer), fileSize, 1, file);
+		size_t resultSize = fread((void*)buffer_dangerous_getUnsafeData(buffer), fileSize, 1, file);
+		// Suppress warnings about ignoring fread's result.
+		(void)resultSize;
 		fclose(file);
 		return buffer;
 	} else {
@@ -385,7 +387,9 @@ String file_getCurrentPath() {
 		return fromNativeString(resultBuffer);
 	#else
 		NativeChar resultBuffer[maxLength + 1] = {0};
-		getcwd(resultBuffer, maxLength);
+		char* result = getcwd(resultBuffer, maxLength);
+		// Suppress warnings about not using the result, because we already have it in the buffer.
+		(void)result;
 		return fromNativeString(resultBuffer);
 	#endif
 }
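
Both fixes use the standard C idiom for results that must not be ignored: store the result, then cast it to void. A minimal sketch, with hypothetical variable names:

	size_t result = fread(destination, size, 1, file); // fread is declared with warn_unused_result in glibc
	(void)result;                                      // explicitly discard the result to silence -Wunused-result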

+ 1 - 1
Source/DFPSR/api/fileAPI.h

@@ -290,7 +290,7 @@ namespace dsr {
 	};
 
 	// A reference counted handle to a process, so that multiple callers can read the status at any time.
-	class DsrProcessImpl;
+	struct DsrProcessImpl;
 	using DsrProcess = Handle<DsrProcessImpl>;
 
 	// Post-condition: Returns the status of process.
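
The class-to-struct change makes the forward declaration match the class-key used at the definition. A hypothetical minimal reproduction of the warning being silenced:

	struct DsrProcessImpl { int status; }; // defined with the struct keyword elsewhere
	class DsrProcessImpl;                  // mismatched class-key: warns under Clang's -Wmismatched-tags and MSVC's C4099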

+ 4 - 4
Source/DFPSR/api/imageAPI.h

@@ -407,19 +407,19 @@ namespace dsr {
 	// Returns the overlapping region if out of bound
 	// Returns a null image if there are no overlapping pixels to return
 	inline ImageU8 image_getSubImage(const ImageU8& image, const IRect& region) {
-		static_assert(sizeof(ImageU8) == sizeof(Image));
+		static_assert(sizeof(ImageU8) == sizeof(Image), "ImageU8 must have the same size as Image, to prevent slicing in assignments!");
 		return ImageU8(image, region);
 	}
 	inline ImageU16 image_getSubImage(const ImageU16& image, const IRect& region) {
-		static_assert(sizeof(ImageU16) == sizeof(Image));
+		static_assert(sizeof(ImageU16) == sizeof(Image), "ImageU16 must have the same size as Image, to prevent slicing in assignments!");
 		return ImageU16(image, region);
 	}
 	inline ImageF32 image_getSubImage(const ImageF32& image, const IRect& region) {
-		static_assert(sizeof(ImageF32) == sizeof(Image));
+		static_assert(sizeof(ImageF32) == sizeof(Image), "ImageF32 must have the same size as Image, to prevent slicing in assignments!");
 		return ImageF32(image, region);
 	}
 	inline ImageRgbaU8 image_getSubImage(const ImageRgbaU8& image, const IRect& region) {
-		static_assert(sizeof(ImageRgbaU8) == sizeof(Image));
+		static_assert(sizeof(ImageRgbaU8) == sizeof(Image), "ImageRgbaU8 must have the same size as Image, to prevent slicing in assignments!");
 		return ImageRgbaU8(image, region);
 	}
 	// Check dynamically if the image was created as a sub-image.
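
Adding the message argument is also a portability fix: static_assert without a message requires C++17, so the one-argument form fails to compile as C++14. For example:

	static_assert(sizeof(void*) >= 4);                            // valid in C++17 and later only
	static_assert(sizeof(void*) >= 4, "Pointers are too small!"); // valid since C++11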

+ 1 - 1
Source/DFPSR/api/modelAPI.h

@@ -40,7 +40,7 @@ namespace dsr {
 
 namespace dsr {
 	// A handle to a multi-threaded rendering context.
-	class RendererImpl;
+	struct RendererImpl;
 	using Renderer = Handle<RendererImpl>;
 
 	// Normalized texture coordinates:

+ 6 - 6
Source/DFPSR/api/stringAPI.cpp

@@ -826,32 +826,32 @@ static void atomic_append_readable(String &target, const ReadableString& source)
 static void atomic_append_utf32(String &target, const DsrChar* source) { APPEND(target, source, strlen_utf32(source), 0xFFFFFFFF); }
 void dsr::string_appendChar(String& target, DsrChar value) { APPEND(target, &value, 1, 0xFFFFFFFF); }
 
-String& dsr::string_toStreamIndented(String& target, const char *value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_ascii(String& target, const char *value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	atomic_append_ascii(target, value);
 	return target;
 }
-String& dsr::string_toStreamIndented(String& target, const DsrChar *value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_utf32(String& target, const char32_t *value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	atomic_append_utf32(target, value);
 	return target;
 }
-String& dsr::string_toStreamIndented(String& target, const ReadableString& value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_readable(String& target, const ReadableString& value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	atomic_append_readable(target, value);
 	return target;
 }
-String& dsr::string_toStreamIndented(String& target, const double &value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_double(String& target, const double &value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	string_fromDouble(target, (double)value);
 	return target;
 }
-String& dsr::string_toStreamIndented(String& target, const int64_t &value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_int64(String& target, const int64_t &value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	string_fromSigned(target, value);
 	return target;
 }
-String& dsr::string_toStreamIndented(String& target, const uint64_t &value, const ReadableString& indentation) {
+String& dsr::impl_toStreamIndented_uint64(String& target, const uint64_t &value, const ReadableString& indentation) {
 	atomic_append_readable(target, indentation);
 	string_fromUnsigned(target, value);
 	return target;
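
With these helpers dispatched through the template in stringAPI.h, a mixed-type call might look like this sketch (the values and exact number formatting are illustrative):

	String s;
	string_append(s, U"count = ", 42, U", ratio = ", 0.5, U", ok = ", true);
	// s now holds something like "count = 42, ratio = 0.5, ok = true"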

+ 98 - 35
Source/DFPSR/api/stringAPI.h

@@ -28,6 +28,7 @@
 #include <functional>
 #include "bufferAPI.h"
 #include "../base/SafePointer.h"
+#include "../base/DsrTraits.h"
 #include "../collection/List.h"
 
 // Define DSR_INTERNAL_ACCESS before any include to get internal access to exposed types
@@ -157,35 +158,101 @@ public:
 	virtual ~Printable();
 };
 
-String& string_toStreamIndented(String& target, const char *value, const ReadableString& indentation);
-String& string_toStreamIndented(String& target, const DsrChar *value, const ReadableString& indentation);
-String& string_toStreamIndented(String& target, const ReadableString &value, const ReadableString& indentation);
-String& string_toStreamIndented(String& target, const double &value, const ReadableString& indentation);
-String& string_toStreamIndented(String& target, const int64_t &value, const ReadableString& indentation);
-String& string_toStreamIndented(String& target, const uint64_t &value, const ReadableString& indentation);
-inline String& string_toStreamIndented(String& target, const float &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (double)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const int32_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (int64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const int16_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (int64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const int8_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (int64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const uint32_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (uint64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const uint16_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (uint64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const uint8_t &value, const ReadableString& indentation) {
-	return string_toStreamIndented(target, (uint64_t)value, indentation);
-}
-inline String& string_toStreamIndented(String& target, const Printable& value, const ReadableString& indentation) {
-	return value.toStreamIndented(target, indentation);
+// Helper functions to resolve overload ambiguity without constexpr if statements, which are not available in C++14.
+String& impl_toStreamIndented_ascii(String& target, const char *value, const ReadableString& indentation);
+String& impl_toStreamIndented_utf32(String& target, const char32_t *value, const ReadableString& indentation);
+String& impl_toStreamIndented_readable(String& target, const ReadableString &value, const ReadableString& indentation);
+String& impl_toStreamIndented_double(String& target, const double &value, const ReadableString& indentation);
+String& impl_toStreamIndented_int64(String& target, const int64_t &value, const ReadableString& indentation);
+String& impl_toStreamIndented_uint64(String& target, const uint64_t &value, const ReadableString& indentation);
+
+// Resolves the ambiguity without constexpr if statements by using unsafeCast to disable type safety in branches that can never execute.
+template <typename T, DSR_ENABLE_IF(
+    DSR_UTF32_LITERAL(T)
+ || DSR_ASCII_LITERAL(T)
+ || DSR_INHERITS_FROM(T, Printable)
+ || DSR_SAME_TYPE(T, String)
+ || DSR_SAME_TYPE(T, ReadableString)
+ || DSR_SAME_TYPE(T, float)
+ || DSR_SAME_TYPE(T, double)
+ || DSR_SAME_TYPE(T, char)
+ || DSR_SAME_TYPE(T, char32_t)
+ || DSR_SAME_TYPE(T, bool)
+ || DSR_SAME_TYPE(T, short)
+ || DSR_SAME_TYPE(T, int)
+ || DSR_SAME_TYPE(T, long)
+ || DSR_SAME_TYPE(T, long long)
+ || DSR_SAME_TYPE(T, unsigned short)
+ || DSR_SAME_TYPE(T, unsigned int)
+ || DSR_SAME_TYPE(T, unsigned long)
+ || DSR_SAME_TYPE(T, unsigned long long)
+ || DSR_SAME_TYPE(T, uint8_t)
+ || DSR_SAME_TYPE(T, uint16_t)
+ || DSR_SAME_TYPE(T, uint32_t)
+ || DSR_SAME_TYPE(T, uint64_t)
+ || DSR_SAME_TYPE(T, int8_t)
+ || DSR_SAME_TYPE(T, int16_t)
+ || DSR_SAME_TYPE(T, int32_t)
+ || DSR_SAME_TYPE(T, int64_t))>
+inline String& string_toStreamIndented(String& target, const T &value, const ReadableString& indentation) {
+	if (DSR_UTF32_LITERAL(T)) {
+		impl_toStreamIndented_utf32(target, unsafeCast<char32_t*>(value), indentation);
+	} else if (DSR_ASCII_LITERAL(T)) {
+		impl_toStreamIndented_ascii(target, unsafeCast<char*>(value), indentation);
+	} else if (DSR_INHERITS_FROM(T, Printable)) {
+		unsafeCast<Printable>(value).toStreamIndented(target, indentation);
+	} else if (DSR_SAME_TYPE(T, String)) {
+		impl_toStreamIndented_readable(target, unsafeCast<String>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, ReadableString)) {
+		impl_toStreamIndented_readable(target, unsafeCast<ReadableString>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, float)) {
+		impl_toStreamIndented_double(target, (double)unsafeCast<float>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, double)) {
+		impl_toStreamIndented_double(target, unsafeCast<double>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, char)) {
+		// Append the indentation itself as a value, then the character.
+		impl_toStreamIndented_readable(target, indentation, U"");
+		string_appendChar(target, unsafeCast<char>(value));
+	} else if (DSR_SAME_TYPE(T, char32_t)) {
+		// Append the indentation itself as a value, then the character.
+		impl_toStreamIndented_readable(target, indentation, U"");
+		string_appendChar(target, unsafeCast<char32_t>(value));
+	} else if (DSR_SAME_TYPE(T, bool)) {
+		impl_toStreamIndented_utf32(target, unsafeCast<bool>(value) ? U"true" : U"false", indentation);
+	} else if (DSR_SAME_TYPE(T, uint8_t)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<uint8_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, uint16_t)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<uint16_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, uint32_t)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<uint32_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, uint64_t)) {
+		impl_toStreamIndented_uint64(target, unsafeCast<uint64_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, int8_t)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<int8_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, int16_t)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<int16_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, int32_t)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<int32_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, int64_t)) {
+		impl_toStreamIndented_int64(target, unsafeCast<int64_t>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, short)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<short>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, int)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<int>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, long)) {
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<long>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, long long)) {
+		static_assert(sizeof(long long) == 8, "You need to implement integer printing for integers larger than 64 bits, or printing long long will be truncated!");
+		impl_toStreamIndented_int64(target, (int64_t)unsafeCast<long long>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, unsigned short)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<unsigned short>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, unsigned int)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<unsigned int>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, unsigned long)) {
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<unsigned long>(value), indentation);
+	} else if (DSR_SAME_TYPE(T, unsigned long long)) {
+		static_assert(sizeof(unsigned long long) == 8, "You need to implement integer printing for integers larger than 64 bits, or printing unsigned long long will be truncated!");
+		impl_toStreamIndented_uint64(target, (uint64_t)unsafeCast<unsigned long long>(value), indentation);
+	}
+	return target;
 }
 
 template<typename T>
@@ -392,12 +459,8 @@ void string_reserve(String& target, intptr_t minimumLength);
 // Append/push one character (to avoid integer to string conversion)
 void string_appendChar(String& target, DsrChar value);
 
-// Append one element
-template<typename TYPE>
-inline void string_append(String& target, const TYPE &value) {
-	string_toStreamIndented(target, value, U"");
-}
-// Append multiple elements
+// Append elements
+inline void string_append(String& target) {}
 template<typename HEAD, typename... TAIL>
 inline void string_append(String& target, HEAD head, TAIL&&... tail) {
 	string_toStreamIndented(target, head, U"");

+ 14 - 0
Source/DFPSR/base/DsrTraits.h

@@ -27,6 +27,7 @@
 #define DFPSR_TRAITS
 
 	#include <stdint.h>
+	#include <type_traits>
 
 	namespace dsr {
 		// Subset of std::integral_constant.
@@ -75,6 +76,19 @@
 		#define DSR_CHECK_RELATION(RELATION_NAME, TYPE_A, TYPE_B) \
 			(RELATION_NAME<TYPE_A, TYPE_B>::value)
 
+		// Checking types.
+		#define DSR_SAME_TYPE(TYPE_A, TYPE_B) DsrTrait_SameType<TYPE_A, TYPE_B>::value
+		#define DSR_UTF32_LITERAL(TYPE) std::is_convertible<TYPE, const char32_t*>::value
+		#define DSR_ASCII_LITERAL(TYPE) std::is_convertible<TYPE, const char*>::value
+		#define DSR_INHERITS_FROM(DERIVED, BASE) std::is_base_of<BASE, DERIVED>::value
+
+		// Suppress type safety checks for conversions in branches that can never execute.
+		template<typename TO, typename FROM>
+		inline const TO& unsafeCast(const FROM &value) {
+			const void *pointer = (const void*)&value;
+			return *(const TO*)pointer;
+		}
+
 		DSR_DECLARE_PROPERTY(DsrTrait_Any_U8)
 		DSR_APPLY_PROPERTY(DsrTrait_Any_U8, uint8_t)
 

+ 6 - 6
Source/DFPSR/base/noSimd.h

@@ -79,32 +79,32 @@ namespace dsr {
 
 	template <uint32_t bitOffset>
 	inline uint32_t bitShiftLeftImmediate(const uint32_t& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate left shift of 32-bit values may not shift more than 31 bits!");
 		return left << bitOffset;
 	}
 	template <uint32_t bitOffset>
 	inline uint32_t bitShiftRightImmediate(const uint32_t& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate right shift of 32-bit values may not shift more than 31 bits!");
 		return left >> bitOffset;
 	}
 	template <uint16_t bitOffset>
 	inline uint16_t bitShiftLeftImmediate(const uint16_t& left) {
-		static_assert(bitOffset < 16u);
+		static_assert(bitOffset < 16u, "Immediate left shift of 16-bit values may not shift more than 15 bits!");
 		return left << bitOffset;
 	}
 	template <uint16_t bitOffset>
 	inline uint16_t bitShiftRightImmediate(const uint16_t& left) {
-		static_assert(bitOffset < 16u);
+		static_assert(bitOffset < 16u, "Immediate right shift of 16-bit values may not shift more than 15 bits!");
 		return left >> bitOffset;
 	}
 	template <uint8_t bitOffset>
 	inline uint8_t bitShiftLeftImmediate(const uint8_t& left) {
-		static_assert(bitOffset < 8u);
+		static_assert(bitOffset < 8u, "Immediate left shift of 8-bit values may not shift more than 7 bits!");
 		return left << bitOffset;
 	}
 	template <uint8_t bitOffset>
 	inline uint8_t bitShiftRightImmediate(const uint8_t& left) {
-		static_assert(bitOffset < 8u);
+		static_assert(bitOffset < 8u, "Immediate right shift of 8-bit values may not shift more than 7 bits!");
 		return left >> bitOffset;
 	}
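
Passing the offset as a template argument keeps it a compile-time constant, so an out-of-range shift is rejected before it can become undefined behavior. For example:

	uint32_t x = bitShiftLeftImmediate<3u>(uint32_t(5u)); // 5 << 3 == 40
	// bitShiftLeftImmediate<32u>(uint32_t(5u)) would fail the static_assert instead of compiling.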
 

+ 284 - 23
Source/DFPSR/base/simd.h

@@ -588,7 +588,7 @@
 				root = _mm_mul_ps(_mm_add_ps(root, _mm_div_ps(value.v, root)), half);
 				return F32x4(root);
 			#elif defined USE_NEON
-				return F32x4(MUL_F32_SIMD(value.v, value.reciprocalSquareRoot().v));
+				return F32x4(MUL_F32_SIMD(value.v, reciprocalSquareRoot(value).v));
 			#else
 				assert(false);
 				return F32x4(0);
@@ -2169,7 +2169,8 @@
 	inline U32x4 operator>>(const U32x4& left, const U32x4 &bitOffsets) {
 		assert(allLanesLesser(bitOffsets, U32x4(32u)));
 		#if defined USE_NEON
-			return U32x4(vshrq_u32(left.v, vreinterpretq_s32_u32(bitOffsets.v)));
+			// vshrq_u32 does not exist in NEON; shift left by negated offsets instead.
+			return U32x4(vshlq_u32(left.v, vnegq_s32(vreinterpretq_s32_u32(bitOffsets.v))));
 		#else
 			return U32x4(
 			  left.scalars[0] >> bitOffsets.scalars[0],
@@ -2182,7 +2183,7 @@
 	// bitOffset must be an immediate constant, so a template argument is used.
 	template <uint32_t bitOffset>
 	inline U32x4 bitShiftLeftImmediate(const U32x4& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate left shift of 32-bit values may not shift more than 31 bits!");
 		#if defined USE_SSE2
 			return U32x4(_mm_slli_epi32(left.v, bitOffset));
 		#else
@@ -2196,12 +2197,14 @@
 	// bitOffset must be an immediate constant.
 	template <uint32_t bitOffset>
 	inline U32x4 bitShiftRightImmediate(const U32x4& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate right shift of 32-bit values may not shift more than 31 bits!");
 		#if defined USE_SSE2
 			return U32x4(_mm_srli_epi32(left.v, bitOffset));
 		#else
 			#if defined USE_NEON
-				return U32x4(vshrq_u32(left.v, LOAD_SCALAR_I32_SIMD(bitOffset)));
+				// NEON has no variable right shift (vshrq_u32 does not exist), so shift left by a negated offset.
+				return U32x4(vshlq_u32(left.v, LOAD_SCALAR_I32_SIMD(-(int32_t)bitOffset)));
 			#else
 				return U32x4(left.scalars[0] >> bitOffset, left.scalars[1] >> bitOffset, left.scalars[2] >> bitOffset, left.scalars[3] >> bitOffset);
 			#endif
@@ -2228,7 +2231,8 @@
 	inline U16x8 operator>>(const U16x8& left, const U16x8 &bitOffsets) {
 		assert(allLanesLesser(bitOffsets, U16x8(16u)));
 		#if defined USE_NEON
-			return U16x8(vshrq_u16(left.v, vreinterpretq_s16_u16(bitOffsets.v)));
+			// vshrq_u16 does not exist in NEON; shift left by negated offsets instead.
+			return U16x8(vshlq_u16(left.v, vnegq_s16(vreinterpretq_s16_u16(bitOffsets.v))));
 		#else
 			return U16x8(
 			  left.scalars[0] >> bitOffsets.scalars[0],
@@ -2245,12 +2249,12 @@
 	// bitOffset must be an immediate constant, so a template argument is used.
 	template <uint32_t bitOffset>
 	inline U16x8 bitShiftLeftImmediate(const U16x8& left) {
-		static_assert(bitOffset < 16u);
+		static_assert(bitOffset < 16u, "Immediate left shift of 16-bit values may not shift more than 15 bits!");
 		#if defined USE_SSE2
 			return U16x8(_mm_slli_epi16(left.v, bitOffset));
 		#else
 			#if defined USE_NEON
-				return U16x8(vshlq_u32(left.v, vdupq_n_s16(int16_t(bitOffset))));
+				return U16x8(vshlq_u16(left.v, vdupq_n_s16(bitOffset)));
 			#else
 				return U16x8(
 				  left.scalars[0] << bitOffset,
@@ -2268,12 +2272,13 @@
 	// bitOffset must be an immediate constant.
 	template <uint32_t bitOffset>
 	inline U16x8 bitShiftRightImmediate(const U16x8& left) {
-		static_assert(bitOffset < 16u);
+		static_assert(bitOffset < 16u, "Immediate right shift of 16-bit values may not shift more than 15 bits!");
 		#if defined USE_SSE2
 			return U16x8(_mm_srli_epi16(left.v, bitOffset));
 		#else
 			#if defined USE_NEON
-				return U16x8(vshrq_u32(left.v, vdupq_n_s16(int16_t(bitOffset))));
+				// NEON has no variable right shift, so shift left by a negated offset.
+				return U16x8(vshlq_u16(left.v, vdupq_n_s16(-(int32_t)bitOffset)));
 			#else
 				return U16x8(
 				  left.scalars[0] >> bitOffset,
@@ -2317,7 +2322,8 @@
 	inline U8x16 operator>>(const U8x16& left, const U8x16 &bitOffsets) {
 		assert(allLanesLesser(bitOffsets, U8x16(8u)));
 		#if defined USE_NEON
-			return U8x16(vshrq_u16(left.v, vreinterpretq_s8_u8(bitOffsets.v)));
+			// NEON has no variable right shift, so shift left by negated offsets.
+			return U8x16(vshlq_u8(left.v, vnegq_s8(vreinterpretq_s8_u8(bitOffsets.v))));
 		#else
 			return U8x16(
 			  left.scalars[ 0] >> bitOffsets.scalars[ 0],
@@ -2342,11 +2348,9 @@
 	// bitOffset must be an immediate constant, so a template argument is used.
 	template <uint32_t bitOffset>
 	inline U8x16 bitShiftLeftImmediate(const U8x16& left) {
-		static_assert(bitOffset < 8u);
-		#if defined USE_SSE2
-			return U8x16(_mm_slli_epi16(left.v, bitOffset));
-		#elif defined USE_NEON
-			return U8x16(vshlq_u32(left.v, vdupq_n_s8(int8_t(bitOffset))));
+		static_assert(bitOffset < 8u, "Immediate left shift of 8-bit values may not shift more than 7 bits!");
+		#if defined USE_NEON
+			return U8x16(vshlq_u8(left.v, vdupq_n_s8(bitOffset)));
 		#else
 			return U8x16(
 			  left.scalars[ 0] << bitOffset,
@@ -2371,11 +2375,10 @@
 	// bitOffset must be an immediate constant.
 	template <uint32_t bitOffset>
 	inline U8x16 bitShiftRightImmediate(const U8x16& left) {
-		static_assert(bitOffset < 8u);
-		#if defined USE_SSE2
-			return U8x16(_mm_srli_epi16(left.v, bitOffset));
-		#elif defined USE_NEON
-			return U8x16(vshrq_u32(left.v, vdupq_n_s8(int8_t(bitOffset))));
+		static_assert(bitOffset < 8u, "Immediate right shift of 8-bit values may not shift more than 7 bits!");
+		#if defined USE_NEON
+			// NEON has no variable right shift, so shift left by a negated offset.
+			return U8x16(vshlq_u8(left.v, vdupq_n_s8(-(int32_t)bitOffset)));
 		#else
 			return U8x16(
 			  left.scalars[ 0] >> bitOffset,
@@ -3127,7 +3130,7 @@
 	// bitOffset must be an immediate constant from 0 to 31, so a template argument is used.
 	template <uint32_t bitOffset>
 	inline U32x8 bitShiftLeftImmediate(const U32x8& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate left shift of 32-bit values may not shift more than 31 bits!");
 		#if defined USE_AVX2
 			return U32x8(_mm256_slli_epi32(left.v, bitOffset));
 		#else
@@ -3146,7 +3149,7 @@
 	// bitOffset must be an immediate constant from 0 to 31, so a template argument is used.
 	template <uint32_t bitOffset>
 	inline U32x8 bitShiftRightImmediate(const U32x8& left) {
-		static_assert(bitOffset < 32u);
+		static_assert(bitOffset < 32u, "Immediate right shift of 32-bit values may not shift more than 31 bits!");
 		#if defined USE_AVX2
 			return U32x8(_mm256_srli_epi32(left.v, bitOffset));
 		#else
@@ -3163,6 +3166,264 @@
 		#endif
 	}
 
+	inline U16x16 operator<<(const U16x16& left, const U16x16 &bitOffsets) {
+		assert(allLanesLesser(bitOffsets, U16x16(16u)));
+		return U16x16(
+		  left.scalars[ 0] << bitOffsets.scalars[ 0],
+		  left.scalars[ 1] << bitOffsets.scalars[ 1],
+		  left.scalars[ 2] << bitOffsets.scalars[ 2],
+		  left.scalars[ 3] << bitOffsets.scalars[ 3],
+		  left.scalars[ 4] << bitOffsets.scalars[ 4],
+		  left.scalars[ 5] << bitOffsets.scalars[ 5],
+		  left.scalars[ 6] << bitOffsets.scalars[ 6],
+		  left.scalars[ 7] << bitOffsets.scalars[ 7],
+		  left.scalars[ 8] << bitOffsets.scalars[ 8],
+		  left.scalars[ 9] << bitOffsets.scalars[ 9],
+		  left.scalars[10] << bitOffsets.scalars[10],
+		  left.scalars[11] << bitOffsets.scalars[11],
+		  left.scalars[12] << bitOffsets.scalars[12],
+		  left.scalars[13] << bitOffsets.scalars[13],
+		  left.scalars[14] << bitOffsets.scalars[14],
+		  left.scalars[15] << bitOffsets.scalars[15]
+		);
+	}
+	inline U16x16 operator>>(const U16x16& left, const U16x16 &bitOffsets) {
+		assert(allLanesLesser(bitOffsets, U16x16(16u)));
+		return U16x16(
+		  left.scalars[ 0] >> bitOffsets.scalars[ 0],
+		  left.scalars[ 1] >> bitOffsets.scalars[ 1],
+		  left.scalars[ 2] >> bitOffsets.scalars[ 2],
+		  left.scalars[ 3] >> bitOffsets.scalars[ 3],
+		  left.scalars[ 4] >> bitOffsets.scalars[ 4],
+		  left.scalars[ 5] >> bitOffsets.scalars[ 5],
+		  left.scalars[ 6] >> bitOffsets.scalars[ 6],
+		  left.scalars[ 7] >> bitOffsets.scalars[ 7],
+		  left.scalars[ 8] >> bitOffsets.scalars[ 8],
+		  left.scalars[ 9] >> bitOffsets.scalars[ 9],
+		  left.scalars[10] >> bitOffsets.scalars[10],
+		  left.scalars[11] >> bitOffsets.scalars[11],
+		  left.scalars[12] >> bitOffsets.scalars[12],
+		  left.scalars[13] >> bitOffsets.scalars[13],
+		  left.scalars[14] >> bitOffsets.scalars[14],
+		  left.scalars[15] >> bitOffsets.scalars[15]
+		);
+	}
+	// bitOffset must be an immediate constant from 0 to 15, so a template argument is used.
+	template <uint32_t bitOffset>
+	inline U16x16 bitShiftLeftImmediate(const U16x16& left) {
+		static_assert(bitOffset < 16u, "Immediate left shift of 16-bit values may not shift more than 15 bits!");
+		#if defined USE_AVX2
+			return U16x16(_mm256_slli_epi16(left.v, bitOffset));
+		#else
+			return U16x16(
+			  left.scalars[ 0] << bitOffset,
+			  left.scalars[ 1] << bitOffset,
+			  left.scalars[ 2] << bitOffset,
+			  left.scalars[ 3] << bitOffset,
+			  left.scalars[ 4] << bitOffset,
+			  left.scalars[ 5] << bitOffset,
+			  left.scalars[ 6] << bitOffset,
+			  left.scalars[ 7] << bitOffset,
+			  left.scalars[ 8] << bitOffset,
+			  left.scalars[ 9] << bitOffset,
+			  left.scalars[10] << bitOffset,
+			  left.scalars[11] << bitOffset,
+			  left.scalars[12] << bitOffset,
+			  left.scalars[13] << bitOffset,
+			  left.scalars[14] << bitOffset,
+			  left.scalars[15] << bitOffset
+			);
+		#endif
+	}
+	// bitOffset must be an immediate constant from 0 to 15, so a template argument is used.
+	template <uint32_t bitOffset>
+	inline U16x16 bitShiftRightImmediate(const U16x16& left) {
+		static_assert(bitOffset < 16u, "Immediate right shift of 16-bit values may not shift more than 15 bits!");
+		#if defined USE_AVX2
+			return U16x16(_mm256_srli_epi16(left.v, bitOffset));
+		#else
+			return U16x16(
+			  left.scalars[ 0] >> bitOffset,
+			  left.scalars[ 1] >> bitOffset,
+			  left.scalars[ 2] >> bitOffset,
+			  left.scalars[ 3] >> bitOffset,
+			  left.scalars[ 4] >> bitOffset,
+			  left.scalars[ 5] >> bitOffset,
+			  left.scalars[ 6] >> bitOffset,
+			  left.scalars[ 7] >> bitOffset,
+			  left.scalars[ 8] >> bitOffset,
+			  left.scalars[ 9] >> bitOffset,
+			  left.scalars[10] >> bitOffset,
+			  left.scalars[11] >> bitOffset,
+			  left.scalars[12] >> bitOffset,
+			  left.scalars[13] >> bitOffset,
+			  left.scalars[14] >> bitOffset,
+			  left.scalars[15] >> bitOffset
+			);
+		#endif
+	}
+
+	inline U8x32 operator<<(const U8x32& left, const U8x32 &bitOffsets) {
+		assert(allLanesLesser(bitOffsets, U8x32(8u)));
+		return U8x32(
+		  left.scalars[ 0] << bitOffsets.scalars[ 0],
+		  left.scalars[ 1] << bitOffsets.scalars[ 1],
+		  left.scalars[ 2] << bitOffsets.scalars[ 2],
+		  left.scalars[ 3] << bitOffsets.scalars[ 3],
+		  left.scalars[ 4] << bitOffsets.scalars[ 4],
+		  left.scalars[ 5] << bitOffsets.scalars[ 5],
+		  left.scalars[ 6] << bitOffsets.scalars[ 6],
+		  left.scalars[ 7] << bitOffsets.scalars[ 7],
+		  left.scalars[ 8] << bitOffsets.scalars[ 8],
+		  left.scalars[ 9] << bitOffsets.scalars[ 9],
+		  left.scalars[10] << bitOffsets.scalars[10],
+		  left.scalars[11] << bitOffsets.scalars[11],
+		  left.scalars[12] << bitOffsets.scalars[12],
+		  left.scalars[13] << bitOffsets.scalars[13],
+		  left.scalars[14] << bitOffsets.scalars[14],
+		  left.scalars[15] << bitOffsets.scalars[15],
+		  left.scalars[16] << bitOffsets.scalars[16],
+		  left.scalars[17] << bitOffsets.scalars[17],
+		  left.scalars[18] << bitOffsets.scalars[18],
+		  left.scalars[19] << bitOffsets.scalars[19],
+		  left.scalars[20] << bitOffsets.scalars[20],
+		  left.scalars[21] << bitOffsets.scalars[21],
+		  left.scalars[22] << bitOffsets.scalars[22],
+		  left.scalars[23] << bitOffsets.scalars[23],
+		  left.scalars[24] << bitOffsets.scalars[24],
+		  left.scalars[25] << bitOffsets.scalars[25],
+		  left.scalars[26] << bitOffsets.scalars[26],
+		  left.scalars[27] << bitOffsets.scalars[27],
+		  left.scalars[28] << bitOffsets.scalars[28],
+		  left.scalars[29] << bitOffsets.scalars[29],
+		  left.scalars[30] << bitOffsets.scalars[30],
+		  left.scalars[31] << bitOffsets.scalars[31]
+		);
+	}
+	inline U8x32 operator>>(const U8x32& left, const U8x32 &bitOffsets) {
+		assert(allLanesLesser(bitOffsets, U8x32(8u)));
+		return U8x32(
+		  left.scalars[ 0] >> bitOffsets.scalars[ 0],
+		  left.scalars[ 1] >> bitOffsets.scalars[ 1],
+		  left.scalars[ 2] >> bitOffsets.scalars[ 2],
+		  left.scalars[ 3] >> bitOffsets.scalars[ 3],
+		  left.scalars[ 4] >> bitOffsets.scalars[ 4],
+		  left.scalars[ 5] >> bitOffsets.scalars[ 5],
+		  left.scalars[ 6] >> bitOffsets.scalars[ 6],
+		  left.scalars[ 7] >> bitOffsets.scalars[ 7],
+		  left.scalars[ 8] >> bitOffsets.scalars[ 8],
+		  left.scalars[ 9] >> bitOffsets.scalars[ 9],
+		  left.scalars[10] >> bitOffsets.scalars[10],
+		  left.scalars[11] >> bitOffsets.scalars[11],
+		  left.scalars[12] >> bitOffsets.scalars[12],
+		  left.scalars[13] >> bitOffsets.scalars[13],
+		  left.scalars[14] >> bitOffsets.scalars[14],
+		  left.scalars[15] >> bitOffsets.scalars[15],
+		  left.scalars[16] >> bitOffsets.scalars[16],
+		  left.scalars[17] >> bitOffsets.scalars[17],
+		  left.scalars[18] >> bitOffsets.scalars[18],
+		  left.scalars[19] >> bitOffsets.scalars[19],
+		  left.scalars[20] >> bitOffsets.scalars[20],
+		  left.scalars[21] >> bitOffsets.scalars[21],
+		  left.scalars[22] >> bitOffsets.scalars[22],
+		  left.scalars[23] >> bitOffsets.scalars[23],
+		  left.scalars[24] >> bitOffsets.scalars[24],
+		  left.scalars[25] >> bitOffsets.scalars[25],
+		  left.scalars[26] >> bitOffsets.scalars[26],
+		  left.scalars[27] >> bitOffsets.scalars[27],
+		  left.scalars[28] >> bitOffsets.scalars[28],
+		  left.scalars[29] >> bitOffsets.scalars[29],
+		  left.scalars[30] >> bitOffsets.scalars[30],
+		  left.scalars[31] >> bitOffsets.scalars[31]
+		);
+	}
+	// bitOffset must be an immediate constant from 0 to 7, so a template argument is used.
+	template <uint32_t bitOffset>
+	inline U8x32 bitShiftLeftImmediate(const U8x32& left) {
+		static_assert(bitOffset < 8u, "Immediate left shift of 32-bit values may not shift more than 7 bits!");
+		#if defined USE_AVX2
+			return U8x32(_mm256_slli_epi8(left.v, bitOffset));
+		#else
+			return U8x32(
+			  left.scalars[ 0] << bitOffset,
+			  left.scalars[ 1] << bitOffset,
+			  left.scalars[ 2] << bitOffset,
+			  left.scalars[ 3] << bitOffset,
+			  left.scalars[ 4] << bitOffset,
+			  left.scalars[ 5] << bitOffset,
+			  left.scalars[ 6] << bitOffset,
+			  left.scalars[ 7] << bitOffset,
+			  left.scalars[ 8] << bitOffset,
+			  left.scalars[ 9] << bitOffset,
+			  left.scalars[10] << bitOffset,
+			  left.scalars[11] << bitOffset,
+			  left.scalars[12] << bitOffset,
+			  left.scalars[13] << bitOffset,
+			  left.scalars[14] << bitOffset,
+			  left.scalars[15] << bitOffset,
+			  left.scalars[16] << bitOffset,
+			  left.scalars[17] << bitOffset,
+			  left.scalars[18] << bitOffset,
+			  left.scalars[19] << bitOffset,
+			  left.scalars[20] << bitOffset,
+			  left.scalars[21] << bitOffset,
+			  left.scalars[22] << bitOffset,
+			  left.scalars[23] << bitOffset,
+			  left.scalars[24] << bitOffset,
+			  left.scalars[25] << bitOffset,
+			  left.scalars[26] << bitOffset,
+			  left.scalars[27] << bitOffset,
+			  left.scalars[28] << bitOffset,
+			  left.scalars[29] << bitOffset,
+			  left.scalars[30] << bitOffset,
+			  left.scalars[31] << bitOffset
+			);
+		#endif
+	}
+	// bitOffset must be an immediate constant from 0 to 7, so a template argument is used.
+	template <uint32_t bitOffset>
+	inline U8x32 bitShiftRightImmediate(const U8x32& left) {
+		static_assert(bitOffset < 8u, "Immediate right shift of 32-bit values may not shift more than 7 bits!");
+		#if defined USE_AVX2
+			return U8x32(_mm256_srli_epi8(left.v, bitOffset));
+		#else
+			return U8x32(
+			  left.scalars[ 0] >> bitOffset,
+			  left.scalars[ 1] >> bitOffset,
+			  left.scalars[ 2] >> bitOffset,
+			  left.scalars[ 3] >> bitOffset,
+			  left.scalars[ 4] >> bitOffset,
+			  left.scalars[ 5] >> bitOffset,
+			  left.scalars[ 6] >> bitOffset,
+			  left.scalars[ 7] >> bitOffset,
+			  left.scalars[ 8] >> bitOffset,
+			  left.scalars[ 9] >> bitOffset,
+			  left.scalars[10] >> bitOffset,
+			  left.scalars[11] >> bitOffset,
+			  left.scalars[12] >> bitOffset,
+			  left.scalars[13] >> bitOffset,
+			  left.scalars[14] >> bitOffset,
+			  left.scalars[15] >> bitOffset,
+			  left.scalars[16] >> bitOffset,
+			  left.scalars[17] >> bitOffset,
+			  left.scalars[18] >> bitOffset,
+			  left.scalars[19] >> bitOffset,
+			  left.scalars[20] >> bitOffset,
+			  left.scalars[21] >> bitOffset,
+			  left.scalars[22] >> bitOffset,
+			  left.scalars[23] >> bitOffset,
+			  left.scalars[24] >> bitOffset,
+			  left.scalars[25] >> bitOffset,
+			  left.scalars[26] >> bitOffset,
+			  left.scalars[27] >> bitOffset,
+			  left.scalars[28] >> bitOffset,
+			  left.scalars[29] >> bitOffset,
+			  left.scalars[30] >> bitOffset,
+			  left.scalars[31] >> bitOffset
+			);
+		#endif
+	}
+
 	inline U16x16 operator+(const U16x16& left, const U16x16& right) {
 		#if defined USE_256BIT_X_SIMD
 			return U16x16(ADD_U16_SIMD256(left.v, right.v));
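
For reference, the NEON workaround used in several hunks above relies on vshlq interpreting negative per-lane counts as right shifts, since NEON has no variable right-shift intrinsic. A standalone sketch (the function name is hypothetical):

	#include <arm_neon.h>
	// Shift each 32-bit lane right by its own count, using only the left-shift intrinsic.
	inline uint32x4_t shiftRightPerLane(uint32x4_t values, uint32x4_t counts) {
		int32x4_t negated = vnegq_s32(vreinterpretq_s32_u32(counts)); // negative count = right shift
		return vshlq_u32(values, negated);
	}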

+ 1 - 1
Source/DFPSR/gui/InputEvent.cpp

@@ -250,7 +250,7 @@ String& dsr::string_toStreamIndented(String& target, const KeyboardEvent& source
 	string_append(target, indentation, U"KeyboardEvent(");
 	string_append(target, U"keyboardEventType = ", source.keyboardEventType);
 	string_append(target, U", dsrKey = ", source.dsrKey);
-	string_append(target, U", character = ", source.character);
+	string_append(target, U", character = ", (uint32_t)source.character);
 	string_append(target, U")");
 	return target;
 }

+ 0 - 2
Source/DFPSR/machine/mediaFilters.cpp

@@ -360,7 +360,6 @@ void dsr::media_fade_region_linear(ImageU8& targetImage, const IRect& viewport,
 			// Each pixel needs to be evaluated in this fade.
 			for (int32_t y = viewport.top(); y < viewport.bottom(); y++) {
 				SafePointer<uint8_t> targetPixel = targetRow;
-				int64_t ratio = startRatio;
 				for (int32_t x = viewport.left(); x < viewport.right(); x++) {
 				int64_t saturatedRatio = startRatio;
 				if (saturatedRatio < 0) { saturatedRatio = 0; }
@@ -370,7 +369,6 @@ void dsr::media_fade_region_linear(ImageU8& targetImage, const IRect& viewport,
 					if (mixedColor > 255) { mixedColor = 255; }
 					*targetPixel = mixedColor;
 					targetPixel += 1;
-					ratio += ratioDx;
 				}
 				targetRow.increaseBytes(targetStride);
 				startRatio += ratioDy;

+ 1 - 1
Source/test/tests/SimdTest.cpp

@@ -2,7 +2,7 @@
 #include "../testTools.h"
 #include "../../DFPSR/base/simd.h"
 
-// TODO: Test: allLanesNotEqual, allLanesLesser, allLanesGreater, allLanesLesserOrEqual, allLanesGreaterOrEqual, reinterpret_U16FromU32, reinterpret_U32FromU16, operand ~
+// TODO: Test: allLanesNotEqual, allLanesLesser, allLanesGreater, allLanesLesserOrEqual, allLanesGreaterOrEqual, reinterpret_U16FromU32, reinterpret_U32FromU16, operator ~, smaller bit shifts.
 // TODO: Test that truncateToU32 saturates to minimum and maximum values.
 // TODO: Test that truncateToI32 saturates to minimum and maximum values.