// Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/Gr/Utils/Functions.h> // Assumed header; the original include target was lost
#include <cstring> // memcpy

namespace anki {

template<typename T>
static void writeShaderBlockMemorySanityChecks(const ShaderVariableBlockInfo& varBlkInfo, const void* elements,
											   U32 elementsCount, void* buffBegin, const void* buffEnd)
{
	// Check args
	ANKI_ASSERT(elements != nullptr);
	ANKI_ASSERT(elementsCount > 0);
	ANKI_ASSERT(buffBegin != nullptr);
	ANKI_ASSERT(buffEnd != nullptr);
	ANKI_ASSERT(buffBegin < buffEnd);

	// Check varBlkInfo
	ANKI_ASSERT(varBlkInfo.m_offset != -1);
	ANKI_ASSERT(varBlkInfo.m_arraySize > 0);

	if(varBlkInfo.m_arraySize > 1)
	{
		ANKI_ASSERT(varBlkInfo.m_arrayStride > 0);
	}

	// Check array size
	ANKI_ASSERT(I16(elementsCount) <= varBlkInfo.m_arraySize);
}

template<typename T>
static void writeShaderBlockMemorySimple(const ShaderVariableBlockInfo& varBlkInfo, const void* elements,
										 U32 elementsCount, void* buffBegin, const void* buffEnd)
{
	writeShaderBlockMemorySanityChecks<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);

	U8* outBuff = static_cast<U8*>(buffBegin) + varBlkInfo.m_offset;
	const U8* inBuff = static_cast<const U8*>(elements);
	for(U i = 0; i < elementsCount; i++)
	{
		ANKI_ASSERT(outBuff + sizeof(T) <= static_cast<const U8*>(buffEnd));

		// Memcpy because T might have SIMD alignment but not the output buffer
		memcpy(outBuff, inBuff + i * sizeof(T), sizeof(T));

		outBuff += varBlkInfo.m_arrayStride;
	}
}

template<typename T, typename Vec>
static void writeShaderBlockMemoryMatrix(const ShaderVariableBlockInfo& varBlkInfo, const void* elements,
										 U32 elementsCount, void* buffBegin, const void* buffEnd)
{
	writeShaderBlockMemorySanityChecks<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
	ANKI_ASSERT(varBlkInfo.m_matrixStride > 0);
	ANKI_ASSERT(varBlkInfo.m_matrixStride >= static_cast<I16>(sizeof(Vec)));

	U8* buff = static_cast<U8*>(buffBegin) + varBlkInfo.m_offset;
	for(U i = 0; i < elementsCount; i++)
	{
		U8* subbuff = buff;
		const T& matrix = static_cast<const T*>(elements)[i];
		for(U j = 0; j < sizeof(T) / sizeof(Vec); j++)
		{
			ANKI_ASSERT((subbuff + sizeof(Vec)) <= static_cast<const U8*>(buffEnd));

			const Vec in = matrix.getRow(j);

			// Memcpy because Vec might have SIMD alignment but not the output buffer
			memcpy(subbuff, &in, sizeof(Vec));

			subbuff += varBlkInfo.m_matrixStride;
		}

		buff += varBlkInfo.m_arrayStride;
	}
}

// This is some trickery to select between calling writeShaderBlockMemoryMatrix and writeShaderBlockMemorySimple
namespace {

template<typename T>
class IsShaderVarDataTypeAMatrix
{
public:
	static constexpr Bool VALUE = false;
};

// Treat every shader variable type with more than 4 components as a matrix
#define ANKI_SVDT_MACRO(capital, type, baseType, rowCount, columnCount, isIntagralType) \
	template<> \
	class IsShaderVarDataTypeAMatrix<type> \
	{ \
	public: \
		static constexpr Bool VALUE = rowCount * columnCount > 4; \
	};
#include <AnKi/Gr/ShaderVariableDataType.defs.h> // Assumed X-macro list; the original include target was lost
#undef ANKI_SVDT_MACRO

// Matrix types: copy row by row, honoring the matrix stride
template<typename T, Bool isMatrix = IsShaderVarDataTypeAMatrix<T>::VALUE>
class WriteShaderBlockMemory
{
public:
	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount,
					void* buffBegin, const void* buffEnd)
	{
		using RowVec = typename T::RowVec;
		writeShaderBlockMemoryMatrix<T, RowVec>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
	}
};

// Non-matrix types: copy whole elements, honoring the array stride
template<typename T>
class WriteShaderBlockMemory<T, false>
{
public:
	void operator()(const ShaderVariableBlockInfo& varBlkInfo, const void* elements, U32 elementsCount,
					void* buffBegin, const void* buffEnd)
	{
		writeShaderBlockMemorySimple<T>(varBlkInfo, elements, elementsCount, buffBegin, buffEnd);
	}
};

} // namespace
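// Public entry point: maps the runtime ShaderVariableDataType to the matching compile-time
// WriteShaderBlockMemory instantiation through the same X-macro list used above, so matrix types take the
// row-by-row path and everything else takes the simple per-element copy.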
void writeShaderBlockMemory(ShaderVariableDataType type, const ShaderVariableBlockInfo& varBlkInfo,
							const void* elements, U32 elementsCount, void* buffBegin, const void* buffEnd)
{
	switch(type)
	{
#define ANKI_SVDT_MACRO(capital, type, baseType, rowCount, columnCount, isIntagralType) \
	case ShaderVariableDataType::capital: \
		WriteShaderBlockMemory<type>()(varBlkInfo, elements, elementsCount, buffBegin, buffEnd); \
		break;
#include <AnKi/Gr/ShaderVariableDataType.defs.h> // Assumed X-macro list; the original include target was lost
#undef ANKI_SVDT_MACRO

	default:
		ANKI_ASSERT(0);
	}
}

} // end namespace anki
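// Usage sketch (illustrative only, not part of this file): the block info normally comes from shader
// reflection, and the enumerant and math-type names below are assumptions.
//
//   ShaderVariableBlockInfo blockInfo;
//   blockInfo.m_offset = 0;                 // Byte offset of the variable inside the block
//   blockInfo.m_arraySize = 1;              // Single element
//   blockInfo.m_arrayStride = sizeof(Vec4); // Stride only matters for arrays but keep it sane
//
//   alignas(16) U8 uniformBlock[256] = {};
//   const Vec4 color(1.0f, 0.0f, 0.0f, 1.0f);
//   writeShaderBlockMemory(ShaderVariableDataType::VEC4, // Enumerant name assumed
//                          blockInfo, &color, 1, &uniformBlock[0], &uniformBlock[0] + sizeof(uniformBlock));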