// This header created by issuing: `echo "// This header created by issuing: \`$BASH_COMMAND\` $(echo "" | cat - LICENSE README.md | sed -e "s#^..*#\/\/ ") $(echo "" | cat - SYS_Types.h SYS_Math.h VM_SSEFunc.h VM_SIMDFunc.h VM_SIMD.h UT_Array.h UT_ArrayImpl.h UT_SmallArray.h UT_FixedVector.h UT_ParallelUtil.h UT_BVH.h UT_BVHImpl.h UT_SolidAngle.h UT_Array.cpp UT_SolidAngle.cpp | sed -e "s/^#.*include *\".*$//g")" > ~/Repos/libigl/include/igl/FastWindingNumberForSoups.h`
// MIT License
// Copyright (c) 2018 Side Effects Software Inc.
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// # Fast Winding Numbers for Soups
// https://github.com/alecjacobson/WindingNumber
// Implementation of the _ACM SIGGRAPH_ 2018 paper,
// "Fast Winding Numbers for Soups and Clouds"
// Gavin Barill¹, Neil Dickson², Ryan Schmidt³, David I.W. Levin¹, Alec Jacobson¹
// ¹University of Toronto, ²SideFX, ³Gradient Space
// _Note: this implementation is for triangle soups only, not point clouds._
// This version does _not_ depend on Intel TBB. Instead it depends on
// [libigl](https://github.com/libigl/libigl)'s simpler `igl::parallel_for` (which
// uses `std::thread`).
// The upstream code, as written, depends on Intel's Threading Building Blocks (TBB) library for parallelism, but it is easy to swap in any other means of threading (as this version does with `igl::parallel_for`), since it only uses parallel for loops with simple partitioning.
// The main class of interest is UT_SolidAngle and its init and computeSolidAngle functions, which you can use by including UT_SolidAngle.h; its implementation is mostly in UT_SolidAngle.cpp, using a 4-way bounding volume hierarchy (BVH) implemented in the UT_BVH.h and UT_BVHImpl.h headers. The rest of the files are mostly supporting code. UT_SubtendedAngle, for computing angles subtended by 2D curves, can also be found in UT_SolidAngle.h and UT_SolidAngle.cpp.
// An example of very similar code and how to use it to create a geometry operator (SOP) in Houdini can be found in the HDK examples (toolkit/samples/SOP/SOP_WindingNumber) for Houdini 16.5.121 and later. Query points go in the first input and the mesh geometry goes in the second input.
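// A rough usage sketch (the exact signatures, template parameters, and defaults
// are the ones declared in the UT_SolidAngle section further below; the call
// shown here only paraphrases them):
//   UT_SolidAngle<float, float> solid_angle;
//   // triangle_points: 3*ntriangles vertex indices; positions: npoints 3D points
//   solid_angle.init(ntriangles, triangle_points, npoints, positions);
//   float omega = solid_angle.computeSolidAngle(query_point);
//   float winding_number = omega / (4.0f * float(M_PI)); // solid angle over 4*pi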
// Create a single header using:
// echo "// This header created by issuing: \`$BASH_COMMAND\` $(echo "" | cat - LICENSE README.md | sed -e "s#^..*#\/\/ ") $(echo "" | cat - SYS_Types.h SYS_Math.h VM_SSEFunc.h VM_SIMD.h UT_Array.h UT_ArrayImpl.h UT_SmallArray.h UT_FixedVector.h UT_ParallelUtil.h UT_BVH.h UT_BVHImpl.h UT_SolidAngle.h UT_Array.cpp UT_SolidAngle.cpp | sed -e "s/^#.*include *\".*$//g")"
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* Common type definitions.
*/
#pragma once
#ifndef __SYS_Types__
#define __SYS_Types__
/* Include system types */
#include <float.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
namespace igl {
/// @private
namespace FastWindingNumber {
/*
* Integer types
*/
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
#ifndef MBSD
typedef unsigned int uint;
#endif
/*
* Avoid using uint64.
* The extra bit of precision is NOT worth the cost in pain and suffering
* induced by use of unsigned.
*/
#if defined(_WIN32)
typedef __int64 int64;
typedef unsigned __int64 uint64;
#elif defined(MBSD)
// On MBSD, int64/uint64 are also defined in the system headers so we must
// declare these in the same way or else we get conflicts.
typedef int64_t int64;
typedef uint64_t uint64;
#elif defined(AMD64)
typedef long int64;
typedef unsigned long uint64;
#else
typedef long long int64;
typedef unsigned long long uint64;
#endif
/// The problem with int64 is that it implies that it is a fixed 64-bit quantity
/// that is saved to disk. Therefore, we need another integral type for
/// indexing our arrays.
typedef int64 exint;
/// Mark function to be inlined. If this is done, taking the address of such
/// a function is not allowed.
#if defined(__GNUC__) || defined(__clang__)
#define SYS_FORCE_INLINE __attribute__ ((always_inline)) inline
#elif defined(_MSC_VER)
#define SYS_FORCE_INLINE __forceinline
#else
#define SYS_FORCE_INLINE inline
#endif
/// Floating Point Types
typedef float fpreal32;
typedef double fpreal64;
/// SYS_FPRealUnionT for type-safe casting with integral types
template <typename T>
union SYS_FPRealUnionT;
template <>
union SYS_FPRealUnionT<fpreal32>
{
typedef int32 int_type;
typedef uint32 uint_type;
typedef fpreal32 fpreal_type;
enum {
EXPONENT_BITS = 8,
MANTISSA_BITS = 23,
EXPONENT_BIAS = 127 };
int_type ival;
uint_type uval;
fpreal_type fval;
struct
{
uint_type mantissa_val: 23;
uint_type exponent_val: 8;
uint_type sign_val: 1;
};
};
template <>
union SYS_FPRealUnionT<fpreal64>
{
typedef int64 int_type;
typedef uint64 uint_type;
typedef fpreal64 fpreal_type;
enum {
EXPONENT_BITS = 11,
MANTISSA_BITS = 52,
EXPONENT_BIAS = 1023 };
int_type ival;
uint_type uval;
fpreal_type fval;
struct
{
uint_type mantissa_val: 52;
uint_type exponent_val: 11;
uint_type sign_val: 1;
};
};
typedef union SYS_FPRealUnionT<fpreal32> SYS_FPRealUnionF;
typedef union SYS_FPRealUnionT<fpreal64> SYS_FPRealUnionD;
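// For example, a small sketch of type-safe bit inspection with the 32-bit union
// (assuming the usual IEEE-754 single-precision layout that the bit-fields above
// describe):
//   SYS_FPRealUnionF u;
//   u.fval = 1.0f; // bit pattern 0x3F800000
//   // u.sign_val == 0, u.exponent_val == 127 (== EXPONENT_BIAS), u.mantissa_val == 0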
/// Asserts are disabled
/// @{
#define UT_IGL_ASSERT_P(ZZ) ((void)0)
#define UT_IGL_ASSERT(ZZ) ((void)0)
#define UT_IGL_ASSERT_MSG_P(ZZ, MM) ((void)0)
#define UT_IGL_ASSERT_MSG(ZZ, MM) ((void)0)
/// @}
}}
#endif
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* Miscellaneous math functions.
*/
#pragma once
#ifndef __SYS_Math__
#define __SYS_Math__
#include <cmath>
#include <math.h>
namespace igl {
/// @private
namespace FastWindingNumber {
// NOTE:
// These have been carefully written so that in the case of equality
// we always return the first parameter. This is so that NANs
// in the second parameter are suppressed.
#define h_min(a, b) (((a) > (b)) ? (b) : (a))
#define h_max(a, b) (((a) < (b)) ? (b) : (a))
// DO NOT CHANGE THE ABOVE WITHOUT READING THE COMMENT
#define h_abs(a) (((a) > 0) ? (a) : -(a))
static constexpr inline int16 SYSmin(int16 a, int16 b) { return h_min(a,b); }
static constexpr inline int16 SYSmax(int16 a, int16 b) { return h_max(a,b); }
static constexpr inline int16 SYSabs(int16 a) { return h_abs(a); }
static constexpr inline int32 SYSmin(int32 a, int32 b) { return h_min(a,b); }
static constexpr inline int32 SYSmax(int32 a, int32 b) { return h_max(a,b); }
static constexpr inline int32 SYSabs(int32 a) { return h_abs(a); }
static constexpr inline int64 SYSmin(int64 a, int64 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int64 a, int64 b) { return h_max(a,b); }
static constexpr inline int64 SYSmin(int32 a, int64 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int32 a, int64 b) { return h_max(a,b); }
static constexpr inline int64 SYSmin(int64 a, int32 b) { return h_min(a,b); }
static constexpr inline int64 SYSmax(int64 a, int32 b) { return h_max(a,b); }
static constexpr inline int64 SYSabs(int64 a) { return h_abs(a); }
static constexpr inline uint16 SYSmin(uint16 a, uint16 b) { return h_min(a,b); }
static constexpr inline uint16 SYSmax(uint16 a, uint16 b) { return h_max(a,b); }
static constexpr inline uint32 SYSmin(uint32 a, uint32 b) { return h_min(a,b); }
static constexpr inline uint32 SYSmax(uint32 a, uint32 b) { return h_max(a,b); }
static constexpr inline uint64 SYSmin(uint64 a, uint64 b) { return h_min(a,b); }
static constexpr inline uint64 SYSmax(uint64 a, uint64 b) { return h_max(a,b); }
static constexpr inline fpreal32 SYSmin(fpreal32 a, fpreal32 b) { return h_min(a,b); }
static constexpr inline fpreal32 SYSmax(fpreal32 a, fpreal32 b) { return h_max(a,b); }
static constexpr inline fpreal64 SYSmin(fpreal64 a, fpreal64 b) { return h_min(a,b); }
static constexpr inline fpreal64 SYSmax(fpreal64 a, fpreal64 b) { return h_max(a,b); }
// Some systems have size_t as a separate type from uint. Some don't.
#if (defined(LINUX) && defined(IA64)) || defined(MBSD)
static constexpr inline size_t SYSmin(size_t a, size_t b) { return h_min(a,b); }
static constexpr inline size_t SYSmax(size_t a, size_t b) { return h_max(a,b); }
#endif
#undef h_min
#undef h_max
#undef h_abs
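// For example, per the NOTE above, a NaN in the second argument is suppressed
// because the comparison against it is false, so the first argument is returned:
//   SYSmin(1.0f, NAN); // == 1.0f
//   SYSmax(1.0f, NAN); // == 1.0f
// whereas SYSmin(NAN, 1.0f) would propagate the NaN.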
#define h_clamp(val, min, max, tol) \
((val <= min+tol) ? min : ((val >= max-tol) ? max : val))
static constexpr inline int
SYSclamp(int v, int min, int max)
{ return h_clamp(v, min, max, 0); }
static constexpr inline uint
SYSclamp(uint v, uint min, uint max)
{ return h_clamp(v, min, max, 0); }
static constexpr inline int64
SYSclamp(int64 v, int64 min, int64 max)
{ return h_clamp(v, min, max, int64(0)); }
static constexpr inline uint64
SYSclamp(uint64 v, uint64 min, uint64 max)
{ return h_clamp(v, min, max, uint64(0)); }
static constexpr inline fpreal32
SYSclamp(fpreal32 v, fpreal32 min, fpreal32 max, fpreal32 tol=(fpreal32)0)
{ return h_clamp(v, min, max, tol); }
static constexpr inline fpreal64
SYSclamp(fpreal64 v, fpreal64 min, fpreal64 max, fpreal64 tol=(fpreal64)0)
{ return h_clamp(v, min, max, tol); }
#undef h_clamp
static inline fpreal64 SYSsqrt(fpreal64 arg)
{ return ::sqrt(arg); }
static inline fpreal32 SYSsqrt(fpreal32 arg)
{ return ::sqrtf(arg); }
static inline fpreal64 SYSatan2(fpreal64 a, fpreal64 b)
{ return ::atan2(a, b); }
static inline fpreal32 SYSatan2(fpreal32 a, fpreal32 b)
{ return ::atan2(a, b); }
static inline fpreal32 SYSabs(fpreal32 a) { return ::fabsf(a); }
static inline fpreal64 SYSabs(fpreal64 a) { return ::fabs(a); }
}}
#endif
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* SIMD wrapper functions for SSE instructions
*/
#pragma once
#ifdef __SSE__
#ifndef __VM_SSEFunc__
#define __VM_SSEFunc__
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable:4799)
#endif
#define CPU_HAS_SIMD_INSTR 1
#define VM_SSE_STYLE 1
#include <emmintrin.h>
#if defined(__SSE4_1__)
#define VM_SSE41_STYLE 1
#include <smmintrin.h>
#endif
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
namespace igl {
/// @private
namespace FastWindingNumber {
typedef __m128 v4sf;
typedef __m128i v4si;
// Plain casting (no conversion)
// MSVC has problems casting between __m128 and __m128i, so we implement a
// custom casting routine specifically for windows.
#if defined(_MSC_VER)
static SYS_FORCE_INLINE v4sf
vm_v4sf(const v4si &a)
{
union {
v4si ival;
v4sf fval;
};
ival = a;
return fval;
}
static SYS_FORCE_INLINE v4si
vm_v4si(const v4sf &a)
{
union {
v4si ival;
v4sf fval;
};
fval = a;
return ival;
}
#define V4SF(A) vm_v4sf(A)
#define V4SI(A) vm_v4si(A)
#else
#define V4SF(A) (v4sf)A
#define V4SI(A) (v4si)A
#endif
#define VM_SHUFFLE_MASK(a0,a1, b0,b1) ((b1)<<6|(b0)<<4 | (a1)<<2|(a0))
template <int mask>
static SYS_FORCE_INLINE v4sf
vm_shuffle(const v4sf &a, const v4sf &b)
{
return _mm_shuffle_ps(a, b, mask);
}
template <int mask>
static SYS_FORCE_INLINE v4si
vm_shuffle(const v4si &a, const v4si &b)
{
return V4SI(_mm_shuffle_ps(V4SF(a), V4SF(b), mask));
}
template <int A, int B, int C, int D, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a, const T &b)
{
return vm_shuffle<VM_SHUFFLE_MASK(A,B,C,D)>(a, b);
}
template <int mask, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a)
{
return vm_shuffle<mask>(a, a);
}
template <int A, int B, int C, int D, typename T>
static SYS_FORCE_INLINE T
vm_shuffle(const T &a)
{
return vm_shuffle<A,B,C,D>(a, a);
}
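// For instance, with VM_SHUFFLE_MASK the first two lane indices select from the
// first operand and the last two from the second, so (a sketch, assuming SSE):
//   v4sf lo  = vm_shuffle<0,1,0,1>(a, b); // { a[0], a[1], b[0], b[1] }
//   v4sf rev = vm_shuffle<3,2,1,0>(a);    // { a[3], a[2], a[1], a[0] }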
#if defined(VM_SSE41_STYLE)
static SYS_FORCE_INLINE v4si
vm_insert(const v4si v, int32 a, int n)
{
switch (n)
{
case 0: return _mm_insert_epi32(v, a, 0);
case 1: return _mm_insert_epi32(v, a, 1);
case 2: return _mm_insert_epi32(v, a, 2);
case 3: return _mm_insert_epi32(v, a, 3);
}
return v;
}
static SYS_FORCE_INLINE v4sf
vm_insert(const v4sf v, float a, int n)
{
switch (n)
{
case 0: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,0,0));
case 1: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,1,0));
case 2: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,2,0));
case 3: return _mm_insert_ps(v, _mm_set_ss(a), _MM_MK_INSERTPS_NDX(0,3,0));
}
return v;
}
static SYS_FORCE_INLINE int
vm_extract(const v4si v, int n)
{
switch (n)
{
case 0: return _mm_extract_epi32(v, 0);
case 1: return _mm_extract_epi32(v, 1);
case 2: return _mm_extract_epi32(v, 2);
case 3: return _mm_extract_epi32(v, 3);
}
return 0;
}
static SYS_FORCE_INLINE float
vm_extract(const v4sf v, int n)
{
SYS_FPRealUnionF tmp;
switch (n)
{
case 0: tmp.ival = _mm_extract_ps(v, 0); break;
case 1: tmp.ival = _mm_extract_ps(v, 1); break;
case 2: tmp.ival = _mm_extract_ps(v, 2); break;
case 3: tmp.ival = _mm_extract_ps(v, 3); break;
}
return tmp.fval;
}
#else
static SYS_FORCE_INLINE v4si
vm_insert(const v4si v, int32 a, int n)
{
union { v4si vector; int32 comp[4]; };
vector = v;
comp[n] = a;
return vector;
}
static SYS_FORCE_INLINE v4sf
vm_insert(const v4sf v, float a, int n)
{
union { v4sf vector; float comp[4]; };
vector = v;
comp[n] = a;
return vector;
}
static SYS_FORCE_INLINE int
vm_extract(const v4si v, int n)
{
union { v4si vector; int32 comp[4]; };
vector = v;
return comp[n];
}
static SYS_FORCE_INLINE float
vm_extract(const v4sf v, int n)
{
union { v4sf vector; float comp[4]; };
vector = v;
return comp[n];
}
#endif
static SYS_FORCE_INLINE v4sf
vm_splats(float a)
{
return _mm_set1_ps(a);
}
static SYS_FORCE_INLINE v4si
vm_splats(uint32 a)
{
SYS_FPRealUnionF tmp;
tmp.uval = a;
return V4SI(vm_splats(tmp.fval));
}
static SYS_FORCE_INLINE v4si
vm_splats(int32 a)
{
SYS_FPRealUnionF tmp;
tmp.ival = a;
return V4SI(vm_splats(tmp.fval));
}
static SYS_FORCE_INLINE v4sf
vm_splats(float a, float b, float c, float d)
{
return vm_shuffle<0,2,0,2>(
vm_shuffle<0>(_mm_set_ss(a), _mm_set_ss(b)),
vm_shuffle<0>(_mm_set_ss(c), _mm_set_ss(d)));
}
static SYS_FORCE_INLINE v4si
vm_splats(uint32 a, uint32 b, uint32 c, uint32 d)
{
SYS_FPRealUnionF af, bf, cf, df;
af.uval = a;
bf.uval = b;
cf.uval = c;
df.uval = d;
return V4SI(vm_splats(af.fval, bf.fval, cf.fval, df.fval));
}
static SYS_FORCE_INLINE v4si
vm_splats(int32 a, int32 b, int32 c, int32 d)
{
SYS_FPRealUnionF af, bf, cf, df;
af.ival = a;
bf.ival = b;
cf.ival = c;
df.ival = d;
return V4SI(vm_splats(af.fval, bf.fval, cf.fval, df.fval));
}
static SYS_FORCE_INLINE v4si
vm_load(const int32 v[4])
{
return V4SI(_mm_loadu_ps((const float *)v));
}
static SYS_FORCE_INLINE v4sf
vm_load(const float v[4])
{
return _mm_loadu_ps(v);
}
static SYS_FORCE_INLINE void
vm_store(float dst[4], v4sf value)
{
_mm_storeu_ps(dst, value);
}
static SYS_FORCE_INLINE v4sf
vm_negate(v4sf a)
{
return _mm_sub_ps(_mm_setzero_ps(), a);
}
static SYS_FORCE_INLINE v4sf
vm_abs(v4sf a)
{
return _mm_max_ps(a, vm_negate(a));
}
static SYS_FORCE_INLINE v4sf
vm_fdiv(v4sf a, v4sf b)
{
return _mm_mul_ps(a, _mm_rcp_ps(b));
}
static SYS_FORCE_INLINE v4sf
vm_fsqrt(v4sf a)
{
return _mm_rcp_ps(_mm_rsqrt_ps(a));
}
static SYS_FORCE_INLINE v4sf
vm_madd(v4sf a, v4sf b, v4sf c)
{
return _mm_add_ps(_mm_mul_ps(a, b), c);
}
static const v4si theSSETrue = vm_splats(0xFFFFFFFF);
static SYS_FORCE_INLINE bool
vm_allbits(const v4si &a)
{
return _mm_movemask_ps(V4SF(_mm_cmpeq_epi32(a, theSSETrue))) == 0xF;
}
#define VM_EXTRACT vm_extract
#define VM_INSERT vm_insert
#define VM_SPLATS vm_splats
#define VM_LOAD vm_load
#define VM_STORE vm_store
#define VM_CMPLT(A,B) V4SI(_mm_cmplt_ps(A,B))
#define VM_CMPLE(A,B) V4SI(_mm_cmple_ps(A,B))
#define VM_CMPGT(A,B) V4SI(_mm_cmpgt_ps(A,B))
#define VM_CMPGE(A,B) V4SI(_mm_cmpge_ps(A,B))
#define VM_CMPEQ(A,B) V4SI(_mm_cmpeq_ps(A,B))
#define VM_CMPNE(A,B) V4SI(_mm_cmpneq_ps(A,B))
#define VM_ICMPLT _mm_cmplt_epi32
#define VM_ICMPGT _mm_cmpgt_epi32
#define VM_ICMPEQ _mm_cmpeq_epi32
#define VM_IADD _mm_add_epi32
#define VM_ISUB _mm_sub_epi32
#define VM_ADD _mm_add_ps
#define VM_SUB _mm_sub_ps
#define VM_MUL _mm_mul_ps
#define VM_DIV _mm_div_ps
#define VM_SQRT _mm_sqrt_ps
#define VM_ISQRT _mm_rsqrt_ps
#define VM_INVERT _mm_rcp_ps
#define VM_ABS vm_abs
#define VM_FDIV vm_fdiv
#define VM_NEG vm_negate
#define VM_FSQRT vm_fsqrt
#define VM_MADD vm_madd
#define VM_MIN _mm_min_ps
#define VM_MAX _mm_max_ps
#define VM_AND _mm_and_si128
#define VM_ANDNOT _mm_andnot_si128
#define VM_OR _mm_or_si128
#define VM_XOR _mm_xor_si128
#define VM_ALLBITS vm_allbits
#define VM_SHUFFLE vm_shuffle
// Integer to float conversions
#define VM_SSE_ROUND_MASK 0x6000
#define VM_SSE_ROUND_ZERO 0x6000
#define VM_SSE_ROUND_UP 0x4000
#define VM_SSE_ROUND_DOWN 0x2000
#define VM_SSE_ROUND_NEAR 0x0000
#define GETROUND() (_mm_getcsr()&VM_SSE_ROUND_MASK)
#define SETROUND(x) (_mm_setcsr(x|(_mm_getcsr()&~VM_SSE_ROUND_MASK)))
// The P functions must be invoked before FLOOR, the E functions invoked
// afterwards to reset the state.
#define VM_P_FLOOR() uint rounding = GETROUND(); \
SETROUND(VM_SSE_ROUND_DOWN);
#define VM_FLOOR _mm_cvtps_epi32
#define VM_INT _mm_cvttps_epi32
#define VM_E_FLOOR() SETROUND(rounding);
// Float to integer conversion
#define VM_IFLOAT _mm_cvtepi32_ps
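// Usage sketch: the P/E pair brackets VM_FLOOR so that the SSE rounding mode is
// switched to round-down and then restored afterwards (see v4uf::floor() below):
//   VM_P_FLOOR();
//   v4si f = VM_FLOOR(vec); // element-wise floor of a v4sf
//   VM_E_FLOOR();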
}}
#endif
#endif
#pragma once
#ifndef __SSE__
#ifndef __VM_SIMDFunc__
#define __VM_SIMDFunc__
#include <cmath>
namespace igl {
/// @private
namespace FastWindingNumber {
struct v4si {
int32 v[4];
};
struct v4sf {
float v[4];
};
static SYS_FORCE_INLINE v4sf V4SF(const v4si &v) {
static_assert(sizeof(v4si) == sizeof(v4sf) && alignof(v4si) == alignof(v4sf), "v4si and v4sf must be compatible");
return *(const v4sf*)&v;
}
static SYS_FORCE_INLINE v4si V4SI(const v4sf &v) {
static_assert(sizeof(v4si) == sizeof(v4sf) && alignof(v4si) == alignof(v4sf), "v4si and v4sf must be compatible");
return *(const v4si*)&v;
}
static SYS_FORCE_INLINE int32 conditionMask(bool c) {
return c ? int32(0xFFFFFFFF) : 0;
}
static SYS_FORCE_INLINE v4sf
VM_SPLATS(float f) {
return v4sf{{f, f, f, f}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(uint32 i) {
return v4si{{int32(i), int32(i), int32(i), int32(i)}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(int32 i) {
return v4si{{i, i, i, i}};
}
static SYS_FORCE_INLINE v4sf
VM_SPLATS(float a, float b, float c, float d) {
return v4sf{{a, b, c, d}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(uint32 a, uint32 b, uint32 c, uint32 d) {
return v4si{{int32(a), int32(b), int32(c), int32(d)}};
}
static SYS_FORCE_INLINE v4si
VM_SPLATS(int32 a, int32 b, int32 c, int32 d) {
return v4si{{a, b, c, d}};
}
static SYS_FORCE_INLINE v4si
VM_LOAD(const int32 v[4]) {
return v4si{{v[0], v[1], v[2], v[3]}};
}
static SYS_FORCE_INLINE v4sf
VM_LOAD(const float v[4]) {
return v4sf{{v[0], v[1], v[2], v[3]}};
}
static inline v4si VM_ICMPEQ(v4si a, v4si b) {
return v4si{{
conditionMask(a.v[0] == b.v[0]),
conditionMask(a.v[1] == b.v[1]),
conditionMask(a.v[2] == b.v[2]),
conditionMask(a.v[3] == b.v[3])
}};
}
static inline v4si VM_ICMPGT(v4si a, v4si b) {
return v4si{{
conditionMask(a.v[0] > b.v[0]),
conditionMask(a.v[1] > b.v[1]),
conditionMask(a.v[2] > b.v[2]),
conditionMask(a.v[3] > b.v[3])
}};
}
static inline v4si VM_ICMPLT(v4si a, v4si b) {
return v4si{{
conditionMask(a.v[0] < b.v[0]),
conditionMask(a.v[1] < b.v[1]),
conditionMask(a.v[2] < b.v[2]),
conditionMask(a.v[3] < b.v[3])
}};
}
static inline v4si VM_IADD(v4si a, v4si b) {
return v4si{{
(a.v[0] + b.v[0]),
(a.v[1] + b.v[1]),
(a.v[2] + b.v[2]),
(a.v[3] + b.v[3])
}};
}
static inline v4si VM_ISUB(v4si a, v4si b) {
return v4si{{
(a.v[0] - b.v[0]),
(a.v[1] - b.v[1]),
(a.v[2] - b.v[2]),
(a.v[3] - b.v[3])
}};
}
static inline v4si VM_OR(v4si a, v4si b) {
return v4si{{
(a.v[0] | b.v[0]),
(a.v[1] | b.v[1]),
(a.v[2] | b.v[2]),
(a.v[3] | b.v[3])
}};
}
static inline v4si VM_AND(v4si a, v4si b) {
return v4si{{
(a.v[0] & b.v[0]),
(a.v[1] & b.v[1]),
(a.v[2] & b.v[2]),
(a.v[3] & b.v[3])
}};
}
static inline v4si VM_ANDNOT(v4si a, v4si b) {
return v4si{{
((~a.v[0]) & b.v[0]),
((~a.v[1]) & b.v[1]),
((~a.v[2]) & b.v[2]),
((~a.v[3]) & b.v[3])
}};
}
static inline v4si VM_XOR(v4si a, v4si b) {
return v4si{{
(a.v[0] ^ b.v[0]),
(a.v[1] ^ b.v[1]),
(a.v[2] ^ b.v[2]),
(a.v[3] ^ b.v[3])
}};
}
static SYS_FORCE_INLINE int
VM_EXTRACT(const v4si v, int index) {
return v.v[index];
}
static SYS_FORCE_INLINE float
VM_EXTRACT(const v4sf v, int index) {
return v.v[index];
}
static SYS_FORCE_INLINE v4si
VM_INSERT(v4si v, int32 value, int index) {
v.v[index] = value;
return v;
}
static SYS_FORCE_INLINE v4sf
VM_INSERT(v4sf v, float value, int index) {
v.v[index] = value;
return v;
}
static inline v4si VM_CMPEQ(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] == b.v[0]),
conditionMask(a.v[1] == b.v[1]),
conditionMask(a.v[2] == b.v[2]),
conditionMask(a.v[3] == b.v[3])
}};
}
static inline v4si VM_CMPNE(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] != b.v[0]),
conditionMask(a.v[1] != b.v[1]),
conditionMask(a.v[2] != b.v[2]),
conditionMask(a.v[3] != b.v[3])
}};
}
static inline v4si VM_CMPGT(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] > b.v[0]),
conditionMask(a.v[1] > b.v[1]),
conditionMask(a.v[2] > b.v[2]),
conditionMask(a.v[3] > b.v[3])
}};
}
static inline v4si VM_CMPLT(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] < b.v[0]),
conditionMask(a.v[1] < b.v[1]),
conditionMask(a.v[2] < b.v[2]),
conditionMask(a.v[3] < b.v[3])
}};
}
static inline v4si VM_CMPGE(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] >= b.v[0]),
conditionMask(a.v[1] >= b.v[1]),
conditionMask(a.v[2] >= b.v[2]),
conditionMask(a.v[3] >= b.v[3])
}};
}
static inline v4si VM_CMPLE(v4sf a, v4sf b) {
return v4si{{
conditionMask(a.v[0] <= b.v[0]),
conditionMask(a.v[1] <= b.v[1]),
conditionMask(a.v[2] <= b.v[2]),
conditionMask(a.v[3] <= b.v[3])
}};
}
static inline v4sf VM_ADD(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] + b.v[0]),
(a.v[1] + b.v[1]),
(a.v[2] + b.v[2]),
(a.v[3] + b.v[3])
}};
}
static inline v4sf VM_SUB(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] - b.v[0]),
(a.v[1] - b.v[1]),
(a.v[2] - b.v[2]),
(a.v[3] - b.v[3])
}};
}
static inline v4sf VM_NEG(v4sf a) {
return v4sf{{
(-a.v[0]),
(-a.v[1]),
(-a.v[2]),
(-a.v[3])
}};
}
static inline v4sf VM_MUL(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] * b.v[0]),
(a.v[1] * b.v[1]),
(a.v[2] * b.v[2]),
(a.v[3] * b.v[3])
}};
}
static inline v4sf VM_DIV(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] / b.v[0]),
(a.v[1] / b.v[1]),
(a.v[2] / b.v[2]),
(a.v[3] / b.v[3])
}};
}
static inline v4sf VM_MADD(v4sf a, v4sf b, v4sf c) {
return v4sf{{
(a.v[0] * b.v[0]) + c.v[0],
(a.v[1] * b.v[1]) + c.v[1],
(a.v[2] * b.v[2]) + c.v[2],
(a.v[3] * b.v[3]) + c.v[3]
}};
}
static inline v4sf VM_ABS(v4sf a) {
return v4sf{{
(a.v[0] < 0) ? -a.v[0] : a.v[0],
(a.v[1] < 0) ? -a.v[1] : a.v[1],
(a.v[2] < 0) ? -a.v[2] : a.v[2],
(a.v[3] < 0) ? -a.v[3] : a.v[3]
}};
}
static inline v4sf VM_MAX(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] < b.v[0]) ? b.v[0] : a.v[0],
(a.v[1] < b.v[1]) ? b.v[1] : a.v[1],
(a.v[2] < b.v[2]) ? b.v[2] : a.v[2],
(a.v[3] < b.v[3]) ? b.v[3] : a.v[3]
}};
}
static inline v4sf VM_MIN(v4sf a, v4sf b) {
return v4sf{{
(a.v[0] > b.v[0]) ? b.v[0] : a.v[0],
(a.v[1] > b.v[1]) ? b.v[1] : a.v[1],
(a.v[2] > b.v[2]) ? b.v[2] : a.v[2],
(a.v[3] > b.v[3]) ? b.v[3] : a.v[3]
}};
}
static inline v4sf VM_INVERT(v4sf a) {
return v4sf{{
(1.0f/a.v[0]),
(1.0f/a.v[1]),
(1.0f/a.v[2]),
(1.0f/a.v[3])
}};
}
static inline v4sf VM_SQRT(v4sf a) {
return v4sf{{
std::sqrt(a.v[0]),
std::sqrt(a.v[1]),
std::sqrt(a.v[2]),
std::sqrt(a.v[3])
}};
}
static inline v4si VM_INT(v4sf a) {
return v4si{{
int32(a.v[0]),
int32(a.v[1]),
int32(a.v[2]),
int32(a.v[3])
}};
}
static inline v4sf VM_IFLOAT(v4si a) {
return v4sf{{
float(a.v[0]),
float(a.v[1]),
float(a.v[2]),
float(a.v[3])
}};
}
static SYS_FORCE_INLINE void VM_P_FLOOR() {}
static SYS_FORCE_INLINE int32 singleIntFloor(float f) {
// Casting to int32 usually truncates toward zero, instead of rounding down,
// so subtract one if the result is above f.
int32 i = int32(f);
i -= (float(i) > f);
return i;
}
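// For example, singleIntFloor(-1.25f): int32(-1.25f) truncates to -1, and since
// float(-1) > -1.25f one is subtracted, giving the correct floor of -2;
// singleIntFloor(1.75f) keeps the truncated value 1.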
static inline v4si VM_FLOOR(v4sf a) {
return v4si{{
singleIntFloor(a.v[0]),
singleIntFloor(a.v[1]),
singleIntFloor(a.v[2]),
singleIntFloor(a.v[3])
}};
}
static SYS_FORCE_INLINE void VM_E_FLOOR() {}
static SYS_FORCE_INLINE bool vm_allbits(v4si a) {
return (
(a.v[0] == -1) &&
(a.v[1] == -1) &&
(a.v[2] == -1) &&
(a.v[3] == -1)
);
}
int SYS_FORCE_INLINE _mm_movemask_ps(const v4si& v) {
return (
int(v.v[0] < 0) |
(int(v.v[1] < 0)<<1) |
(int(v.v[2] < 0)<<2) |
(int(v.v[3] < 0)<<3)
);
}
int SYS_FORCE_INLINE _mm_movemask_ps(const v4sf& v) {
// Use std::signbit just in case it needs to distinguish between +0 and -0
// or between positive and negative NaN values (e.g. these could really
// be integers instead of floats).
return (
int(std::signbit(v.v[0])) |
(int(std::signbit(v.v[1]))<<1) |
(int(std::signbit(v.v[2]))<<2) |
(int(std::signbit(v.v[3]))<<3)
);
}
}}
#endif
#endif
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* SIMD wrapper classes for 4 floats or 4 ints
*/
#pragma once
#ifndef __HDK_VM_SIMD__
#define __HDK_VM_SIMD__
#include <cstdint>
//#define FORCE_NON_SIMD
namespace igl {
/// @private
namespace FastWindingNumber {
class v4uf;
class v4uu {
public:
SYS_FORCE_INLINE v4uu() {}
SYS_FORCE_INLINE v4uu(const v4si &v) : vector(v) {}
SYS_FORCE_INLINE v4uu(const v4uu &v) : vector(v.vector) {}
explicit SYS_FORCE_INLINE v4uu(int32 v) { vector = VM_SPLATS(v); }
explicit SYS_FORCE_INLINE v4uu(const int32 v[4])
{ vector = VM_LOAD(v); }
SYS_FORCE_INLINE v4uu(int32 a, int32 b, int32 c, int32 d)
{ vector = VM_SPLATS(a, b, c, d); }
// Assignment
SYS_FORCE_INLINE v4uu operator=(int32 v)
{ vector = v4uu(v).vector; return *this; }
SYS_FORCE_INLINE v4uu operator=(v4si v)
{ vector = v; return *this; }
SYS_FORCE_INLINE v4uu operator=(const v4uu &v)
{ vector = v.vector; return *this; }
SYS_FORCE_INLINE void condAssign(const v4uu &val, const v4uu &c)
{ *this = (c & val) | ((!c) & *this); }
// Comparison
SYS_FORCE_INLINE v4uu operator == (const v4uu &v) const
{ return v4uu(VM_ICMPEQ(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator != (const v4uu &v) const
{ return ~(*this == v); }
SYS_FORCE_INLINE v4uu operator > (const v4uu &v) const
{ return v4uu(VM_ICMPGT(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator < (const v4uu &v) const
{ return v4uu(VM_ICMPLT(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator >= (const v4uu &v) const
{ return ~(*this < v); }
SYS_FORCE_INLINE v4uu operator <= (const v4uu &v) const
{ return ~(*this > v); }
SYS_FORCE_INLINE v4uu operator == (int32 v) const { return *this == v4uu(v); }
SYS_FORCE_INLINE v4uu operator != (int32 v) const { return *this != v4uu(v); }
SYS_FORCE_INLINE v4uu operator > (int32 v) const { return *this > v4uu(v); }
SYS_FORCE_INLINE v4uu operator < (int32 v) const { return *this < v4uu(v); }
SYS_FORCE_INLINE v4uu operator >= (int32 v) const { return *this >= v4uu(v); }
SYS_FORCE_INLINE v4uu operator <= (int32 v) const { return *this <= v4uu(v); }
// Basic math
SYS_FORCE_INLINE v4uu operator+(const v4uu &r) const
{ return v4uu(VM_IADD(vector, r.vector)); }
SYS_FORCE_INLINE v4uu operator-(const v4uu &r) const
{ return v4uu(VM_ISUB(vector, r.vector)); }
SYS_FORCE_INLINE v4uu operator+=(const v4uu &r) { return (*this = *this + r); }
SYS_FORCE_INLINE v4uu operator-=(const v4uu &r) { return (*this = *this - r); }
SYS_FORCE_INLINE v4uu operator+(int32 r) const { return *this + v4uu(r); }
SYS_FORCE_INLINE v4uu operator-(int32 r) const { return *this - v4uu(r); }
SYS_FORCE_INLINE v4uu operator+=(int32 r) { return (*this = *this + r); }
SYS_FORCE_INLINE v4uu operator-=(int32 r) { return (*this = *this - r); }
// logical/bitwise
SYS_FORCE_INLINE v4uu operator||(const v4uu &r) const
{ return v4uu(VM_OR(vector, r.vector)); }
SYS_FORCE_INLINE v4uu operator&&(const v4uu &r) const
{ return v4uu(VM_AND(vector, r.vector)); }
SYS_FORCE_INLINE v4uu operator^(const v4uu &r) const
{ return v4uu(VM_XOR(vector, r.vector)); }
SYS_FORCE_INLINE v4uu operator!() const
{ return *this == v4uu(0); }
SYS_FORCE_INLINE v4uu operator|(const v4uu &r) const { return *this || r; }
SYS_FORCE_INLINE v4uu operator&(const v4uu &r) const { return *this && r; }
SYS_FORCE_INLINE v4uu operator~() const
{ return *this ^ v4uu(0xFFFFFFFF); }
// component
SYS_FORCE_INLINE int32 operator[](int idx) const { return VM_EXTRACT(vector, idx); }
SYS_FORCE_INLINE void setComp(int idx, int32 v) { vector = VM_INSERT(vector, v, idx); }
v4uf toFloat() const;
public:
v4si vector;
};
class v4uf {
public:
SYS_FORCE_INLINE v4uf() {}
SYS_FORCE_INLINE v4uf(const v4sf &v) : vector(v) {}
SYS_FORCE_INLINE v4uf(const v4uf &v) : vector(v.vector) {}
explicit SYS_FORCE_INLINE v4uf(float v) { vector = VM_SPLATS(v); }
explicit SYS_FORCE_INLINE v4uf(const float v[4])
{ vector = VM_LOAD(v); }
SYS_FORCE_INLINE v4uf(float a, float b, float c, float d)
{ vector = VM_SPLATS(a, b, c, d); }
// Assignment
SYS_FORCE_INLINE v4uf operator=(float v)
{ vector = v4uf(v).vector; return *this; }
SYS_FORCE_INLINE v4uf operator=(v4sf v)
{ vector = v; return *this; }
SYS_FORCE_INLINE v4uf operator=(const v4uf &v)
{ vector = v.vector; return *this; }
SYS_FORCE_INLINE void condAssign(const v4uf &val, const v4uu &c)
{ *this = (val & c) | (*this & ~c); }
// Comparison
SYS_FORCE_INLINE v4uu operator == (const v4uf &v) const
{ return v4uu(VM_CMPEQ(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator != (const v4uf &v) const
{ return v4uu(VM_CMPNE(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator > (const v4uf &v) const
{ return v4uu(VM_CMPGT(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator < (const v4uf &v) const
{ return v4uu(VM_CMPLT(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator >= (const v4uf &v) const
{ return v4uu(VM_CMPGE(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator <= (const v4uf &v) const
{ return v4uu(VM_CMPLE(vector, v.vector)); }
SYS_FORCE_INLINE v4uu operator == (float v) const { return *this == v4uf(v); }
SYS_FORCE_INLINE v4uu operator != (float v) const { return *this != v4uf(v); }
SYS_FORCE_INLINE v4uu operator > (float v) const { return *this > v4uf(v); }
SYS_FORCE_INLINE v4uu operator < (float v) const { return *this < v4uf(v); }
SYS_FORCE_INLINE v4uu operator >= (float v) const { return *this >= v4uf(v); }
SYS_FORCE_INLINE v4uu operator <= (float v) const { return *this <= v4uf(v); }
// Basic math
SYS_FORCE_INLINE v4uf operator+(const v4uf &r) const
{ return v4uf(VM_ADD(vector, r.vector)); }
SYS_FORCE_INLINE v4uf operator-(const v4uf &r) const
{ return v4uf(VM_SUB(vector, r.vector)); }
SYS_FORCE_INLINE v4uf operator-() const
{ return v4uf(VM_NEG(vector)); }
SYS_FORCE_INLINE v4uf operator*(const v4uf &r) const
{ return v4uf(VM_MUL(vector, r.vector)); }
SYS_FORCE_INLINE v4uf operator/(const v4uf &r) const
{ return v4uf(VM_DIV(vector, r.vector)); }
SYS_FORCE_INLINE v4uf operator+=(const v4uf &r) { return (*this = *this + r); }
SYS_FORCE_INLINE v4uf operator-=(const v4uf &r) { return (*this = *this - r); }
SYS_FORCE_INLINE v4uf operator*=(const v4uf &r) { return (*this = *this * r); }
SYS_FORCE_INLINE v4uf operator/=(const v4uf &r) { return (*this = *this / r); }
SYS_FORCE_INLINE v4uf operator+(float r) const { return *this + v4uf(r); }
SYS_FORCE_INLINE v4uf operator-(float r) const { return *this - v4uf(r); }
SYS_FORCE_INLINE v4uf operator*(float r) const { return *this * v4uf(r); }
SYS_FORCE_INLINE v4uf operator/(float r) const { return *this / v4uf(r); }
SYS_FORCE_INLINE v4uf operator+=(float r) { return (*this = *this + r); }
SYS_FORCE_INLINE v4uf operator-=(float r) { return (*this = *this - r); }
SYS_FORCE_INLINE v4uf operator*=(float r) { return (*this = *this * r); }
SYS_FORCE_INLINE v4uf operator/=(float r) { return (*this = *this / r); }
// logical/bitwise
SYS_FORCE_INLINE v4uf operator||(const v4uu &r) const
{ return v4uf(V4SF(VM_OR(V4SI(vector), r.vector))); }
SYS_FORCE_INLINE v4uf operator&&(const v4uu &r) const
{ return v4uf(V4SF(VM_AND(V4SI(vector), r.vector))); }
SYS_FORCE_INLINE v4uf operator^(const v4uu &r) const
{ return v4uf(V4SF(VM_XOR(V4SI(vector), r.vector))); }
SYS_FORCE_INLINE v4uf operator!() const
{ return v4uf(V4SF((*this == v4uf(0.0F)).vector)); }
SYS_FORCE_INLINE v4uf operator||(const v4uf &r) const
{ return v4uf(V4SF(VM_OR(V4SI(vector), V4SI(r.vector)))); }
SYS_FORCE_INLINE v4uf operator&&(const v4uf &r) const
{ return v4uf(V4SF(VM_AND(V4SI(vector), V4SI(r.vector)))); }
SYS_FORCE_INLINE v4uf operator^(const v4uf &r) const
{ return v4uf(V4SF(VM_XOR(V4SI(vector), V4SI(r.vector)))); }
SYS_FORCE_INLINE v4uf operator|(const v4uu &r) const { return *this || r; }
SYS_FORCE_INLINE v4uf operator&(const v4uu &r) const { return *this && r; }
SYS_FORCE_INLINE v4uf operator~() const
{ return *this ^ v4uu(0xFFFFFFFF); }
SYS_FORCE_INLINE v4uf operator|(const v4uf &r) const { return *this || r; }
SYS_FORCE_INLINE v4uf operator&(const v4uf &r) const { return *this && r; }
// component
SYS_FORCE_INLINE float operator[](int idx) const { return VM_EXTRACT(vector, idx); }
SYS_FORCE_INLINE void setComp(int idx, float v) { vector = VM_INSERT(vector, v, idx); }
// more math
SYS_FORCE_INLINE v4uf abs() const { return v4uf(VM_ABS(vector)); }
SYS_FORCE_INLINE v4uf clamp(const v4uf &low, const v4uf &high) const
{ return v4uf(
VM_MIN(VM_MAX(vector, low.vector), high.vector)); }
SYS_FORCE_INLINE v4uf clamp(float low, float high) const
{ return v4uf(VM_MIN(VM_MAX(vector,
v4uf(low).vector), v4uf(high).vector)); }
SYS_FORCE_INLINE v4uf recip() const { return v4uf(VM_INVERT(vector)); }
/// This is a lie, it is a signed int.
SYS_FORCE_INLINE v4uu toUnsignedInt() const { return VM_INT(vector); }
SYS_FORCE_INLINE v4uu toSignedInt() const { return VM_INT(vector); }
v4uu floor() const
{
VM_P_FLOOR();
v4uu result = VM_FLOOR(vector);
VM_E_FLOOR();
return result;
}
/// Returns the integer part of this float, this becomes the
/// 0..1 fractional component.
v4uu splitFloat()
{
v4uu base = toSignedInt();
*this -= base.toFloat();
return base;
}
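// For example, starting from v4uf(2.75f), splitFloat() returns v4uu(2) and
// leaves 0.75f in every lane of this vector.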
#ifdef __SSE__
template <int A, int B, int C, int D>
SYS_FORCE_INLINE v4uf swizzle() const
{
return VM_SHUFFLE<A,B,C,D>(vector);
}
#endif
SYS_FORCE_INLINE v4uu isFinite() const
{
// If the exponent is the maximum value, it's either infinite or NaN.
const v4si mask = VM_SPLATS(0x7F800000);
return ~v4uu(VM_ICMPEQ(VM_AND(V4SI(vector), mask), mask));
}
public:
v4sf vector;
};
SYS_FORCE_INLINE v4uf
v4uu::toFloat() const
{
return v4uf(VM_IFLOAT(vector));
}
//
// Custom vector operations
//
static SYS_FORCE_INLINE v4uf
sqrt(const v4uf &a)
{
return v4uf(VM_SQRT(a.vector));
}
static SYS_FORCE_INLINE v4uf
fabs(const v4uf &a)
{
return a.abs();
}
// Use this operation to mask disabled values to 0
// rval = !a ? b : 0;
static SYS_FORCE_INLINE v4uf
andn(const v4uu &a, const v4uf &b)
{
return v4uf(V4SF(VM_ANDNOT(a.vector, V4SI(b.vector))));
}
static SYS_FORCE_INLINE v4uu
andn(const v4uu &a, const v4uu &b)
{
return v4uu(VM_ANDNOT(a.vector, b.vector));
}
// rval = a ? b : c;
static SYS_FORCE_INLINE v4uf
ternary(const v4uu &a, const v4uf &b, const v4uf &c)
{
return (b & a) | andn(a, c);
}
static SYS_FORCE_INLINE v4uu
ternary(const v4uu &a, const v4uu &b, const v4uu &c)
{
return (b & a) | andn(a, c);
}
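// For example, a branchless per-lane select (a sketch):
//   v4uu mask = a < b;                  // all-ones lanes where a[i] < b[i]
//   v4uf lanemin = ternary(mask, a, b); // picks a where the mask is set, else b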
// rval = !(a && b)
static SYS_FORCE_INLINE v4uu
nand(const v4uu &a, const v4uu &b)
{
return !v4uu(VM_AND(a.vector, b.vector));
}
static SYS_FORCE_INLINE v4uf
vmin(const v4uf &a, const v4uf &b)
{
return v4uf(VM_MIN(a.vector, b.vector));
}
static SYS_FORCE_INLINE v4uf
vmax(const v4uf &a, const v4uf &b)
{
return v4uf(VM_MAX(a.vector, b.vector));
}
static SYS_FORCE_INLINE v4uf
clamp(const v4uf &a, const v4uf &b, const v4uf &c)
{
return vmax(vmin(a, c), b);
}
static SYS_FORCE_INLINE v4uf
clamp(const v4uf &a, float b, float c)
{
return vmax(vmin(a, v4uf(c)), v4uf(b));
}
static SYS_FORCE_INLINE bool
allbits(const v4uu &a)
{
return vm_allbits(a.vector);
}
static SYS_FORCE_INLINE bool
anybits(const v4uu &a)
{
return !allbits(~a);
}
static SYS_FORCE_INLINE v4uf
madd(const v4uf &v, const v4uf &f, const v4uf &a)
{
return v4uf(VM_MADD(v.vector, f.vector, a.vector));
}
static SYS_FORCE_INLINE v4uf
madd(const v4uf &v, float f, float a)
{
return v4uf(VM_MADD(v.vector, v4uf(f).vector, v4uf(a).vector));
}
static SYS_FORCE_INLINE v4uf
madd(const v4uf &v, float f, const v4uf &a)
{
return v4uf(VM_MADD(v.vector, v4uf(f).vector, a.vector));
}
static SYS_FORCE_INLINE v4uf
msub(const v4uf &v, const v4uf &f, const v4uf &s)
{
return madd(v, f, -s);
}
static SYS_FORCE_INLINE v4uf
msub(const v4uf &v, float f, float s)
{
return madd(v, f, -s);
}
static SYS_FORCE_INLINE v4uf
lerp(const v4uf &a, const v4uf &b, const v4uf &w)
{
v4uf w1 = v4uf(1.0F) - w;
return madd(a, w1, b*w);
}
static SYS_FORCE_INLINE v4uf
luminance(const v4uf &r, const v4uf &g, const v4uf &b,
float rw, float gw, float bw)
{
return v4uf(madd(r, v4uf(rw), madd(g, v4uf(gw), b * bw)));
}
static SYS_FORCE_INLINE float
dot3(const v4uf &a, const v4uf &b)
{
v4uf res = a*b;
return res[0] + res[1] + res[2];
}
static SYS_FORCE_INLINE float
dot4(const v4uf &a, const v4uf &b)
{
v4uf res = a*b;
return res[0] + res[1] + res[2] + res[3];
}
static SYS_FORCE_INLINE float
length(const v4uf &a)
{
return SYSsqrt(dot3(a, a));
}
static SYS_FORCE_INLINE v4uf
normalize(const v4uf &a)
{
return a / length(a);
}
static SYS_FORCE_INLINE v4uf
cross(const v4uf &a, const v4uf &b)
{
return v4uf(a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0], 0);
}
// Currently there is no specific support for signed integers
typedef v4uu v4ui;
// Assuming that ptr is an array of elements of type STYPE, this operation
// will return the index of the first element that is aligned to (1<<ASIZE)
// bytes.
#define VM_ALIGN(ptr, ASIZE, STYPE) ((((1<<ASIZE)-(intptr_t)ptr)&((1<<ASIZE)-1))/sizeof(STYPE))
}}
#endif
#pragma once
#ifndef __UT_Array__
#define __UT_Array__
#include <initializer_list>
#include <iterator>
#include <new>
#include <string.h>
#include <type_traits>
#include <utility>
namespace igl {
/// @private
namespace FastWindingNumber {
/// This routine describes how to change the size of an array.
/// It must increase the current_size by at least one!
///
/// Current expected sequence of small sizes:
/// 4, 8, 16, 32, 48, 64, 80, 96, 112,
/// 128, 256, 384, 512, 640, 768, 896, 1024,
/// (increases by approx factor of 1.125 each time after this)
template <typename T>
static inline T
UTbumpAlloc(T current_size)
{
// NOTE: These must be powers of two. See below.
constexpr T SMALL_ALLOC(16);
constexpr T BIG_ALLOC(128);
// For small values, we increment by fixed amounts. For
// large values, we increment by one eighth of the current size.
// This prevents n^2 behaviour with allocation one element at a time.
// A factor of 1/8 will waste 1/16 the memory on average, and will
// double the size of the array in approximately 6 reallocations.
if (current_size < T(8))
{
return (current_size < T(4)) ? T(4) : T(8);
}
if (current_size < T(BIG_ALLOC))
{
// Snap up to next multiple of SMALL_ALLOC (must be power of 2)
return (current_size + T(SMALL_ALLOC)) & ~T(SMALL_ALLOC-1);
}
if (current_size < T(BIG_ALLOC * 8))
{
// Snap up to next multiple of BIG_ALLOC (must be power of 2)
return (current_size + T(BIG_ALLOC)) & ~T(BIG_ALLOC-1);
}
T bump = current_size >> 3; // Divided by 8.
current_size += bump;
return current_size;
}
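// For example, feeding the result back in repeatedly starting from 0 walks the
// sequence documented above: UTbumpAlloc<exint>(0) == 4, then 8, 16, 32, 48, ...,
// 112, 128, 256, 384, ..., 1024, then 1152 (1024 + 1024/8), and so on.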
template <typename T>
class UT_Array
{
public:
typedef T value_type;
typedef int (*Comparator)(const T *, const T *);
/// Copy constructor. It duplicates the data.
/// It's marked explicit so that it's not accidentally passed by value.
/// You can always pass by reference and then copy it, if needed.
/// If you have a line like:
/// UT_Array a = otherarray;
/// and it really does need to copy instead of referencing,
/// you can rewrite it as:
/// UT_Array a(otherarray);
inline explicit UT_Array(const UT_Array &a);
/// Move constructor. Steals the working data from the original.
inline UT_Array(UT_Array &&a) noexcept;
/// Construct based on given capacity and size
UT_Array(exint capacity, exint size)
{
myData = capacity ? allocateCapacity(capacity) : NULL;
if (capacity < size)
size = capacity;
mySize = size;
myCapacity = capacity;
trivialConstructRange(myData, mySize);
}
/// Construct based on given capacity with a size of 0
explicit UT_Array(exint capacity = 0) : myCapacity(capacity), mySize(0)
{
myData = capacity ? allocateCapacity(capacity) : NULL;
}
/// Construct with the contents of an initializer list
inline explicit UT_Array(std::initializer_list<T> init);
inline ~UT_Array();
inline void swap(UT_Array &other);
/// Append an element to the current elements and return its index in the
/// array, or insert the element at a specified position; if necessary,
/// insert() grows the array to accommodate the element. The insert
/// methods use the assignment operator '=' to place the element into the
/// right spot; be aware that '=' works differently on objects and pointers.
/// The test for duplicates uses the logical equal operator '=='; as with
/// '=', the behaviour of the equality operator on pointers versus objects
/// is not the same.
/// Use the subscript operators instead of insert() if you are appending
/// to the array, or if you don't mind overwriting the element already
/// inserted at the given index.
exint append(void) { return insert(mySize); }
exint append(const T &t) { return appendImpl(t); }
exint append(T &&t) { return appendImpl(std::move(t)); }
inline void append(const T *pt, exint count);
inline void appendMultiple(const T &t, exint count);
inline exint insert(exint index);
exint insert(const T &t, exint i)
{ return insertImpl(t, i); }
exint insert(T &&t, exint i)
{ return insertImpl(std::move(t), i); }
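/// For example (a sketch):
///   UT_Array<int> a;
///   a.append(3);    // a == {3}
///   a.append(5);    // a == {3, 5}
///   a.insert(7, 0); // a == {7, 3, 5}; insert(t, i) places t at index i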
/// Adds a new element to the array (resizing if necessary) and forwards
/// the given arguments to T's constructor.
/// NOTE: Unlike append(), the arguments cannot reference any existing
/// elements in the array. Checking for and handling such cases would
/// remove most of the performance gain versus append(T(...)). Debug builds
/// will assert that the arguments are valid.
template <typename... S>
inline exint emplace_back(S&&... s);
/// Takes another T array and concatenate it onto my end
inline exint concat(const UT_Array &a);
/// Insert an element "count" times at the given index. Return the index.
inline exint multipleInsert(exint index, exint count);
/// An alias for unique element insertion at a certain index. Also used by
/// the other insertion methods.
exint insertAt(const T &t, exint index)
{ return insertImpl(t, index); }
/// Return true if given index is valid.
bool isValidIndex(exint index) const
{ return (index >= 0 && index < mySize); }
/// Remove one element from the array given its
/// position in the list, and fill the gap by shifting the elements down
/// by one position. Return the index of the element removed or -1 if
/// the index was out of bounds.
exint removeIndex(exint index)
{
return isValidIndex(index) ? removeAt(index) : -1;
}
void removeLast()
{
if (mySize) removeAt(mySize-1);
}
/// Remove the range [begin_i,end_i) of elements from the array.
inline void removeRange(exint begin_i, exint end_i);
/// Remove the range [begin_i, end_i) of elements from this array and place
/// them in the dest array, shrinking/growing the dest array as necessary.
inline void extractRange(exint begin_i, exint end_i,
UT_Array& dest);
/// Removes all matching elements from the list, shuffling down and changing
/// the size appropriately.
/// Returns the number of elements left.
template <typename IsEqual>
inline exint removeIf(IsEqual is_equal);
/// Remove all matching elements. Also sets the capacity of the array.
template <typename IsEqual>
void collapseIf(IsEqual is_equal)
{
removeIf(is_equal);
setCapacity(size());
}
/// Move howMany objects starting at index srcIndex to destIndex;
/// This method will remove the elements at [srcIdx, srcIdx+howMany) and
/// then insert them at destIdx. This method can be used in place of
/// the old shift() operation.
inline void move(exint srcIdx, exint destIdx, exint howMany);
/// Cyclically shifts the entire array by howMany
inline void cycle(exint howMany);
/// Quickly set the array to a single value.
inline void constant(const T &v);
/// Zeros the array if a POD type, else trivial constructs if a class type.
inline void zero();
/// The fastest search possible, which does pointer arithmetic to find the
/// index of the element. WARNING: index() does no out-of-bounds checking.
exint index(const T &t) const { return &t - myData; }
exint safeIndex(const T &t) const
{
return (&t >= myData && &t < (myData + mySize))
? &t - myData : -1;
}
/// Set the capacity of the array, i.e. grow it or shrink it. The
/// function copies the data after reallocating space for the array.
inline void setCapacity(exint newcapacity);
void setCapacityIfNeeded(exint mincapacity)
{
if (capacity() < mincapacity)
setCapacity(mincapacity);
}
/// If the capacity is smaller than mincapacity, expand the array
/// to at least mincapacity and to at least a constant factor of the
/// array's previous capacity, to avoid having a linear number of
/// reallocations in a linear number of calls to bumpCapacity.
void bumpCapacity(exint mincapacity)
{
if (capacity() >= mincapacity)
return;
// The following 4 lines are just
// SYSmax(mincapacity, UTbumpAlloc(capacity())), avoiding SYSmax
exint bumped = UTbumpAlloc(capacity());
exint newcapacity = mincapacity;
if (bumped > mincapacity)
newcapacity = bumped;
setCapacity(newcapacity);
}
/// First bumpCapacity to ensure that there's space for newsize,
/// expanding either not at all or by at least a constant factor
/// of the array's previous capacity,
/// then set the size to newsize.
void bumpSize(exint newsize)
{
bumpCapacity(newsize);
setSize(newsize);
}
/// NOTE: bumpEntries() will be deprecated in favour of bumpSize() in a
/// future version.
void bumpEntries(exint newsize)
{
bumpSize(newsize);
}
/// Query the capacity, i.e. the allocated length of the array.
/// NOTE: capacity() >= size().
exint capacity() const { return myCapacity; }
/// Query the size, i.e. the number of occupied elements in the array.
/// NOTE: capacity() >= size().
exint size() const { return mySize; }
/// Alias of size(). size() is preferred.
exint entries() const { return mySize; }
/// Returns true iff there are no occupied elements in the array.
bool isEmpty() const { return mySize==0; }
/// Set the size, the number of occupied elements in the array.
/// NOTE: This will not do bumpCapacity, so if you call this
/// n times to increase the size, it may take
/// n^2 time.
void setSize(exint newsize)
{
if (newsize < 0)
newsize = 0;
if (newsize == mySize)
return;
setCapacityIfNeeded(newsize);
if (mySize > newsize)
trivialDestructRange(myData + newsize, mySize - newsize);
else // newsize > mySize
trivialConstructRange(myData + mySize, newsize - mySize);
mySize = newsize;
}
/// Alias of setSize(). setSize() is preferred.
void entries(exint newsize)
{
setSize(newsize);
}
/// Set the size, but unlike setSize(newsize), this function
/// will not initialize new POD elements to zero. Non-POD data types
/// will still have their constructors called.
/// This function is faster than setSize(ne) if you intend to fill in
/// data for all elements.
void setSizeNoInit(exint newsize)
{
if (newsize < 0)
newsize = 0;
if (newsize == mySize)
return;
setCapacityIfNeeded(newsize);
if (mySize > newsize)
trivialDestructRange(myData + newsize, mySize - newsize);
else if (!isPOD()) // newsize > mySize
trivialConstructRange(myData + mySize, newsize - mySize);
mySize = newsize;
}
/// Decreases, but never expands, to the given maxsize.
void truncate(exint maxsize)
{
if (maxsize >= 0 && size() > maxsize)
setSize(maxsize);
}
/// Resets list to an empty list.
void clear() {
// Don't call setSize(0) since that would require a valid default
// constructor.
trivialDestructRange(myData, mySize);
mySize = 0;
}
/// Assign array a to this array by copying each of a's elements with
/// memcpy for POD types, and with copy construction for class types.
inline UT_Array & operator=(const UT_Array &a);
/// Replace the contents with those from the initializer_list ilist
inline UT_Array & operator=(std::initializer_list<T> ilist);
/// Move the contents of array a to this array.
inline UT_Array & operator=(UT_Array &&a);
/// Compare two array and return true if they are equal and false otherwise.
/// Two elements are checked against each other using operator '==' or
/// compare() respectively.
/// NOTE: The capacities of the arrays are not checked when
/// determining whether they are equal.
inline bool operator==(const UT_Array &a) const;
inline bool operator!=(const UT_Array &a) const;
/// Subscript operator
/// NOTE: This does NOT do any bounds checking unless paranoid
/// asserts are enabled.
T & operator()(exint i)
{
UT_IGL_ASSERT_P(i >= 0 && i < mySize);
return myData[i];
}
/// Const subscript operator
/// NOTE: This does NOT do any bounds checking unless paranoid
/// asserts are enabled.
const T & operator()(exint i) const
{
UT_IGL_ASSERT_P(i >= 0 && i < mySize);
return myData[i];
}
/// Subscript operator
/// NOTE: This does NOT do any bounds checking unless paranoid
/// asserts are enabled.
T & operator[](exint i)
{
UT_IGL_ASSERT_P(i >= 0 && i < mySize);
return myData[i];
}
/// Const subscript operator
/// NOTE: This does NOT do any bounds checking unless paranoid
/// asserts are enabled.
const T & operator[](exint i) const
{
UT_IGL_ASSERT_P(i >= 0 && i < mySize);
return myData[i];
}
/// forcedRef(exint) will grow the array if necessary, initializing any
/// new elements to zero for POD types and default constructing for
/// class types.
T & forcedRef(exint i)
{
UT_IGL_ASSERT_P(i >= 0);
if (i >= mySize)
bumpSize(i+1);
return myData[i];
}
/// forcedGet(exint) does NOT grow the array, and will return default
/// objects for out of bound array indices.
T forcedGet(exint i) const
{
return (i >= 0 && i < mySize) ? myData[i] : T();
}
T & last()
{
UT_IGL_ASSERT_P(mySize);
return myData[mySize-1];
}
const T & last() const
{
UT_IGL_ASSERT_P(mySize);
return myData[mySize-1];
}
T * getArray() const { return myData; }
const T * getRawArray() const { return myData; }
T * array() { return myData; }
const T * array() const { return myData; }
T * data() { return myData; }
const T * data() const { return myData; }
/// This method allows you to swap in a new raw T array, which must be
/// the same size as myCapacity. Use caution with this method.
T * aliasArray(T *newdata)
{ T *data = myData; myData = newdata; return data; }
template <typename IT, bool FORWARD>
class base_iterator :
public std::iterator<std::random_access_iterator_tag, T, exint>
{
public:
typedef IT& reference;
typedef IT* pointer;
// Note: When we drop gcc 4.4 support and allow range-based for
// loops, we should also drop atEnd(), which means we can drop
// myEnd here.
base_iterator() : myCurrent(NULL), myEnd(NULL) {}
// Allow iterator to const_iterator conversion
template <typename EIT>
base_iterator(const base_iterator<EIT, FORWARD> &src)
: myCurrent(src.myCurrent), myEnd(src.myEnd) {}
pointer operator->() const
{ return FORWARD ? myCurrent : myCurrent - 1; }
reference operator*() const
{ return FORWARD ? *myCurrent : myCurrent[-1]; }
reference item() const
{ return FORWARD ? *myCurrent : myCurrent[-1]; }
reference operator[](exint n) const
{ return FORWARD ? myCurrent[n] : myCurrent[-n - 1]; }
/// Pre-increment operator
base_iterator &operator++()
{
if (FORWARD) ++myCurrent; else --myCurrent;
return *this;
}
/// Post-increment operator
base_iterator operator++(int)
{
base_iterator tmp = *this;
if (FORWARD) ++myCurrent; else --myCurrent;
return tmp;
}
/// Pre-decrement operator
base_iterator &operator--()
{
if (FORWARD) --myCurrent; else ++myCurrent;
return *this;
}
/// Post-decrement operator
base_iterator operator--(int)
{
base_iterator tmp = *this;
if (FORWARD) --myCurrent; else ++myCurrent;
return tmp;
}
base_iterator &operator+=(exint n)
{
if (FORWARD)
myCurrent += n;
else
myCurrent -= n;
return *this;
}
base_iterator operator+(exint n) const
{
if (FORWARD)
return base_iterator(myCurrent + n, myEnd);
else
return base_iterator(myCurrent - n, myEnd);
}
base_iterator &operator-=(exint n)
{ return (*this) += (-n); }
base_iterator operator-(exint n) const
{ return (*this) + (-n); }
bool atEnd() const { return myCurrent == myEnd; }
void advance() { this->operator++(); }
// Comparators
template <typename ITR, bool FR>
bool operator==(const base_iterator<ITR, FR> &r) const
{ return myCurrent == r.myCurrent; }
template <typename ITR, bool FR>
bool operator!=(const base_iterator<ITR, FR> &r) const
{ return myCurrent != r.myCurrent; }
template <typename ITR, bool FR>
bool operator<(const base_iterator<ITR, FR> &r) const
{
if (FORWARD)
return myCurrent < r.myCurrent;
else
return r.myCurrent < myCurrent;
}
template <typename ITR, bool FR>
bool operator>(const base_iterator<ITR, FR> &r) const
{
if (FORWARD)
return myCurrent > r.myCurrent;
else
return r.myCurrent > myCurrent;
}
template <typename ITR, bool FR>
bool operator<=(const base_iterator<ITR, FR> &r) const
{
if (FORWARD)
return myCurrent <= r.myCurrent;
else
return r.myCurrent <= myCurrent;
}
template <typename ITR, bool FR>
bool operator>=(const base_iterator<ITR, FR> &r) const
{
if (FORWARD)
return myCurrent >= r.myCurrent;
else
return r.myCurrent >= myCurrent;
}
// Difference operator for std::distance
template<typename ITR, bool FR>
exint operator-(const base_iterator<ITR, FR> &r) const
{
if (FORWARD)
return exint(myCurrent - r.myCurrent);
else
return exint(r.myCurrent - myCurrent);
}
protected:
friend class UT_Array;
base_iterator(IT *c, IT *e) : myCurrent(c), myEnd(e) {}
private:
IT *myCurrent;
IT *myEnd;
};
typedef base_iterator<T, true> iterator;
typedef base_iterator<const T, true> const_iterator;
typedef base_iterator<T, false> reverse_iterator;
typedef base_iterator<const T, false> const_reverse_iterator;
typedef const_iterator traverser; // For backward compatibility
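// Both iteration styles below are supported; a minimal sketch (illustrative only):
//   for (UT_Array<int>::iterator it = arr.begin(); !it.atEnd(); ++it) { /* use *it */ }
//   for (const int &v : arr) { /* use v */ }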
/// Begin iterating over the array. The contents of the array may be
/// modified during the traversal.
iterator begin()
{
return iterator(myData, myData + mySize);
}
/// End iterator.
iterator end()
{
return iterator(myData + mySize,
myData + mySize);
}
/// Begin iterating over the array. The array may not be modified during
/// the traversal.
const_iterator begin() const
{
return const_iterator(myData, myData + mySize);
}
/// End const iterator. Consider using it.atEnd() instead.
const_iterator end() const
{
return const_iterator(myData + mySize,
myData + mySize);
}
/// Begin iterating over the array in reverse.
reverse_iterator rbegin()
{
return reverse_iterator(myData + mySize,
myData);
}
/// End reverse iterator.
reverse_iterator rend()
{
return reverse_iterator(myData, myData);
}
/// Begin iterating over the array in reverse.
const_reverse_iterator rbegin() const
{
return const_reverse_iterator(myData + mySize,
myData);
}
/// End reverse iterator. Consider using it.atEnd() instead.
const_reverse_iterator rend() const
{
return const_reverse_iterator(myData, myData);
}
/// Remove item specified by the reverse_iterator.
void removeItem(const reverse_iterator &it)
{
removeAt(&it.item() - myData);
}
/// Very dangerous methods to share arrays.
/// The array is not aware of the sharing, so ensure you clear
/// out the array prior to a destructor or setCapacity operation.
void unsafeShareData(UT_Array &src)
{
myData = src.myData;
myCapacity = src.myCapacity;
mySize = src.mySize;
}
void unsafeShareData(T *src, exint srcsize)
{
myData = src;
myCapacity = srcsize;
mySize = srcsize;
}
void unsafeShareData(T *src, exint size, exint capacity)
{
myData = src;
mySize = size;
myCapacity = capacity;
}
void unsafeClearData()
{
myData = NULL;
myCapacity = 0;
mySize = 0;
}
/// Returns true if the data used by the array was allocated on the heap.
inline bool isHeapBuffer() const
{
return (myData != (T *)(((char*)this) + sizeof(*this)));
}
inline bool isHeapBuffer(T* data) const
{
return (data != (T *)(((char*)this) + sizeof(*this)));
}
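// Note: this check assumes any non-heap storage is placed immediately after
// this object in memory; UT_SmallArray (further below) provides exactly that
// layout for its inline buffer and asserts it where the compiler allows.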
protected:
// Check whether T may have a constructor, destructor, or copy
// constructor. This test is conservative in that some POD types will
// not be recognized as POD by this function. To mark your type as POD,
// use the SYS_DECLARE_IS_POD() macro in SYS_TypeDecorate.h.
static constexpr SYS_FORCE_INLINE bool isPOD()
{
return std::is_pod<T>::value;
}
/// Implements both append(const T &) and append(T &&) via perfect
/// forwarding. Unlike the variadic emplace_back(), its argument may be a
/// reference to another element in the array.
template <typename S>
inline exint appendImpl(S &&s);
/// Similar to appendImpl() but for insertion.
template <typename S>
inline exint insertImpl(S &&s, exint index);
// Construct the given type
template <typename... S>
static void construct(T &dst, S&&... s)
{
new (&dst) T(std::forward<S>(s)...);
}
// Copy construct the given type
static void copyConstruct(T &dst, const T &src)
{
if (isPOD())
dst = src;
else
new (&dst) T(src);
}
static void copyConstructRange(T *dst, const T *src, exint n)
{
if (isPOD())
{
if (n > 0)
{
::memcpy((void *)dst, (const void *)src,
n * sizeof(T));
}
}
else
{
for (exint i = 0; i < n; i++)
new (&dst[i]) T(src[i]);
}
}
/// Element Constructor
static void trivialConstruct(T &dst)
{
if (!isPOD())
new (&dst) T();
else
memset((void *)&dst, 0, sizeof(T));
}
static void trivialConstructRange(T *dst, exint n)
{
if (!isPOD())
{
for (exint i = 0; i < n; i++)
new (&dst[i]) T();
}
else if (n == 1)
{
// Special case for n == 1. If the size parameter
// passed to memset is known at compile time, this
// function call will be inlined. This results in
// much faster performance than a real memset
// function call which is required in the case
// below, where n is not known until runtime.
// This makes calls to append() much faster.
memset((void *)dst, 0, sizeof(T));
}
else
memset((void *)dst, 0, sizeof(T) * n);
}
/// Element Destructor
static void trivialDestruct(T &dst)
{
if (!isPOD())
dst.~T();
}
static void trivialDestructRange(T *dst, exint n)
{
if (!isPOD())
{
for (exint i = 0; i < n; i++)
dst[i].~T();
}
}
private:
/// Pointer to the array of elements of type T
T *myData;
/// The number of elements for which we have allocated memory
exint myCapacity;
/// The actual number of valid elements in the array
exint mySize;
// The guts of the remove() methods.
inline exint removeAt(exint index);
inline T * allocateCapacity(exint num_items);
};
}}
#endif // __UT_ARRAY_H_INCLUDED__
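// Usage sketch for UT_Array (illustrative only, not part of the original headers):
//   igl::FastWindingNumber::UT_Array<float> a;
//   a.append(1.0f);             // append() returns the index of the new element
//   a.appendMultiple(0.0f, 3);  // a now holds {1, 0, 0, 0}
//   float back = a.last();      // 0.0f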
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* This is meant to be included by UT_Array.h and includes
* the template implementations needed by external code.
*/
#pragma once
#ifndef __UT_ARRAYIMPL_H_INCLUDED__
#define __UT_ARRAYIMPL_H_INCLUDED__
#include <initializer_list>
#include <utility>
#include <stdlib.h>
#include <string.h>
namespace igl {
/// @private
namespace FastWindingNumber {
// Implemented in UT_Array.C
extern void ut_ArrayImplFree(void *p);
template <typename T>
inline UT_Array<T>::UT_Array(const UT_Array<T> &a)
: myCapacity(a.size()), mySize(a.size())
{
if (myCapacity)
{
myData = allocateCapacity(myCapacity);
copyConstructRange(myData, a.array(), mySize);
}
else
{
myData = nullptr;
}
}
template <typename T>
inline UT_Array<T>::UT_Array(std::initializer_list<T> init)
: myCapacity(init.size()), mySize(init.size())
{
if (myCapacity)
{
myData = allocateCapacity(myCapacity);
copyConstructRange(myData, init.begin(), mySize);
}
else
{
myData = nullptr;
}
}
template <typename T>
inline UT_Array<T>::UT_Array(UT_Array<T> &&a) noexcept
{
if (!a.isHeapBuffer())
{
myData = nullptr;
myCapacity = 0;
mySize = 0;
operator=(std::move(a));
return;
}
myCapacity = a.myCapacity;
mySize = a.mySize;
myData = a.myData;
a.myCapacity = a.mySize = 0;
a.myData = nullptr;
}
template <typename T>
inline UT_Array<T>::~UT_Array()
{
// NOTE: We call setCapacity to ensure that we call trivialDestructRange,
// then call free on myData.
setCapacity(0);
}
template <typename T>
inline T *
UT_Array<T>::allocateCapacity(exint capacity)
{
T *data = (T *)malloc(capacity * sizeof(T));
// Avoid degenerate case if we happen to be aliased the wrong way
if (!isHeapBuffer(data))
{
T *prev = data;
data = (T *)malloc(capacity * sizeof(T));
ut_ArrayImplFree(prev);
}
return data;
}
template <typename T>
inline void
UT_Array<T>::swap( UT_Array<T> &other )
{
std::swap( myData, other.myData );
std::swap( myCapacity, other.myCapacity );
std::swap( mySize, other.mySize );
}
template <typename T>
inline exint
UT_Array<T>::insert(exint index)
{
if (index >= mySize)
{
bumpCapacity(index + 1);
trivialConstructRange(myData + mySize, index - mySize + 1);
mySize = index+1;
return index;
}
bumpCapacity(mySize + 1);
UT_IGL_ASSERT_P(index >= 0);
::memmove((void *)&myData[index+1], (void *)&myData[index],
((mySize-index)*sizeof(T)));
trivialConstruct(myData[index]);
mySize++;
return index;
}
template <typename T>
template <typename S>
inline exint
UT_Array<T>::appendImpl(S &&s)
{
if (mySize == myCapacity)
{
exint idx = safeIndex(s);
// NOTE: UTbumpAlloc always returns a strictly larger value.
setCapacity(UTbumpAlloc(myCapacity));
if (idx >= 0)
construct(myData[mySize], std::forward<S>(myData[idx]));
else
construct(myData[mySize], std::forward<S>(s));
}
else
{
construct(myData[mySize], std::forward<S>(s));
}
return mySize++;
}
template <typename T>
template <typename... S>
inline exint
UT_Array<T>::emplace_back(S&&... s)
{
if (mySize == myCapacity)
setCapacity(UTbumpAlloc(myCapacity));
construct(myData[mySize], std::forward<S>(s)...);
return mySize++;
}
template <typename T>
inline void
UT_Array<T>::append(const T *pt, exint count)
{
bumpCapacity(mySize + count);
copyConstructRange(myData + mySize, pt, count);
mySize += count;
}
template <typename T>
inline void
UT_Array<T>::appendMultiple(const T &t, exint count)
{
UT_IGL_ASSERT_P(count >= 0);
if (count <= 0)
return;
if (mySize + count >= myCapacity)
{
exint tidx = safeIndex(t);
bumpCapacity(mySize + count);
for (exint i = 0; i < count; i++)
copyConstruct(myData[mySize+i], tidx >= 0 ? myData[tidx] : t);
}
else
{
for (exint i = 0; i < count; i++)
copyConstruct(myData[mySize+i], t);
}
mySize += count;
}
template <typename T>
inline exint
UT_Array<T>::concat(const UT_Array<T> &a)
{
bumpCapacity(mySize + a.mySize);
copyConstructRange(myData + mySize, a.myData, a.mySize);
mySize += a.mySize;
return mySize;
}
template <typename T>
inline exint
UT_Array<T>::multipleInsert(exint beg_index, exint count)
{
exint end_index = beg_index + count;
if (beg_index >= mySize)
{
bumpCapacity(end_index);
trivialConstructRange(myData + mySize, end_index - mySize);
mySize = end_index;
return beg_index;
}
bumpCapacity(mySize+count);
::memmove((void *)&myData[end_index], (void *)&myData[beg_index],
((mySize-beg_index)*sizeof(T)));
mySize += count;
trivialConstructRange(myData + beg_index, count);
return beg_index;
}
template <typename T>
template <typename S>
inline exint
UT_Array<T>::insertImpl(S &&s, exint index)
{
if (index == mySize)
{
// This case avoids an extraneous call to trivialConstructRange()
// which the compiler may not optimize out.
(void) appendImpl(std::forward<S>(s));
}
else if (index > mySize)
{
exint src_i = safeIndex(s);
bumpCapacity(index + 1);
trivialConstructRange(myData + mySize, index - mySize);
if (src_i >= 0)
construct(myData[index], std::forward<S>(myData[src_i]));
else
construct(myData[index], std::forward<S>(s));
mySize = index + 1;
}
else // (index < mySize)
{
exint src_i = safeIndex(s);
bumpCapacity(mySize + 1);
::memmove((void *)&myData[index+1], (void *)&myData[index],
((mySize-index)*sizeof(T)));
if (src_i >= index)
++src_i;
if (src_i >= 0)
construct(myData[index], std::forward<S>(myData[src_i]));
else
construct(myData[index], std::forward<S>(s));
++mySize;
}
return index;
}
template <typename T>
inline exint
UT_Array<T>::removeAt(exint idx)
{
trivialDestruct(myData[idx]);
if (idx != --mySize)
{
::memmove((void *)&myData[idx], (void *)&myData[idx+1],
((mySize-idx)*sizeof(T)));
}
return idx;
}
template <typename T>
inline void
UT_Array<T>::removeRange(exint begin_i, exint end_i)
{
UT_IGL_ASSERT(begin_i <= end_i);
UT_IGL_ASSERT(end_i <= size());
if (end_i < size())
{
trivialDestructRange(myData + begin_i, end_i - begin_i);
::memmove((void *)&myData[begin_i], (void *)&myData[end_i],
(mySize - end_i)*sizeof(T));
}
setSize(mySize - (end_i - begin_i));
}
template <typename T>
inline void
UT_Array<T>::extractRange(exint begin_i, exint end_i, UT_Array<T>& dest)
{
UT_IGL_ASSERT_P(begin_i >= 0);
UT_IGL_ASSERT_P(begin_i <= end_i);
UT_IGL_ASSERT_P(end_i <= size());
UT_IGL_ASSERT(this != &dest);
exint nelements = end_i - begin_i;
// grow the raw array if necessary.
dest.setCapacityIfNeeded(nelements);
::memmove((void*)dest.myData, (void*)&myData[begin_i],
nelements * sizeof(T));
dest.mySize = nelements;
// we just asserted this was true, but just in case
if (this != &dest)
{
if (end_i < size())
{
::memmove((void*)&myData[begin_i], (void*)&myData[end_i],
(mySize - end_i) * sizeof(T));
}
setSize(mySize - nelements);
}
}
template <typename T>
inline void
UT_Array<T>::move(exint srcIdx, exint destIdx, exint howMany)
{
// Make sure all the parameters are valid.
if( srcIdx < 0 )
srcIdx = 0;
if( destIdx < 0 )
destIdx = 0;
// If we are told to move a set of elements that would extend beyond the
// end of the current array, trim the group.
if( srcIdx + howMany > size() )
howMany = size() - srcIdx;
// If the destIdx would have us move the source beyond the end of the
// current array, move the destIdx back.
if( destIdx + howMany > size() )
destIdx = size() - howMany;
if( srcIdx != destIdx && howMany > 0 )
{
void **tmp = 0;
exint savelen;
savelen = SYSabs(srcIdx - destIdx);
tmp = (void **)::malloc(savelen*sizeof(T));
if( srcIdx > destIdx && howMany > 0 )
{
// We're moving the group backwards. Save all the stuff that
// we would overwrite, plus everything beyond that to the
// start of the source group. Then move the source group, then
// tack the saved data onto the end of the moved group.
::memcpy(tmp, (void *)&myData[destIdx], (savelen*sizeof(T)));
::memmove((void *)&myData[destIdx], (void *)&myData[srcIdx],
(howMany*sizeof(T)));
::memcpy((void *)&myData[destIdx+howMany], tmp, (savelen*sizeof(T)));
}
if( srcIdx < destIdx && howMany > 0 )
{
// We're moving the group forwards. Save from the end of the
// group being moved to the end of the where the destination
// group will end up. Then copy the source to the destination.
// Then move back up to the original source location and drop
// in our saved data.
::memcpy(tmp, (void *)&myData[srcIdx+howMany], (savelen*sizeof(T)));
::memmove((void *)&myData[destIdx], (void *)&myData[srcIdx],
(howMany*sizeof(T)));
::memcpy((void *)&myData[srcIdx], tmp, (savelen*sizeof(T)));
}
::free(tmp);
}
}
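// For example (illustrative only): on {a,b,c,d,e}, move(0, 2, 2) shifts the
// first two elements forward by two slots, giving {c,d,a,b,e}.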
template <typename T>
template <typename IsEqual>
inline exint
UT_Array<T>::removeIf(IsEqual is_equal)
{
// Move dst to the first element to remove.
exint dst;
for (dst = 0; dst < mySize; dst++)
{
if (is_equal(myData[dst]))
break;
}
// Now start looking at all the elements past the first one to remove.
for (exint idx = dst+1; idx < mySize; idx++)
{
if (!is_equal(myData[idx]))
{
UT_IGL_ASSERT(idx != dst);
myData[dst] = myData[idx];
dst++;
}
// On match, ignore.
}
// New size
mySize = dst;
return mySize;
}
template <typename T>
inline void
UT_Array<T>::cycle(exint howMany)
{
char *tempPtr;
exint numShift; // The number of items we shift
exint remaining; // mySize - numShift
if (howMany == 0 || mySize < 1) return;
numShift = howMany % (exint)mySize;
if (numShift < 0) numShift += mySize;
remaining = mySize - numShift;
tempPtr = new char[numShift*sizeof(T)];
::memmove(tempPtr, (void *)&myData[remaining], (numShift * sizeof(T)));
::memmove((void *)&myData[numShift], (void *)&myData[0], (remaining * sizeof(T)));
::memmove((void *)&myData[0], tempPtr, (numShift * sizeof(T)));
delete [] tempPtr;
}
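// For example (illustrative only): on {a,b,c,d,e}, cycle(2) brings the last
// two elements to the front, giving {d,e,a,b,c}.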
template <typename T>
inline void
UT_Array<T>::constant(const T &value)
{
for (exint i = 0; i < mySize; i++)
{
myData[i] = value;
}
}
template <typename T>
inline void
UT_Array<T>::zero()
{
if (isPOD())
::memset((void *)myData, 0, mySize*sizeof(T));
else
trivialConstructRange(myData, mySize);
}
template <typename T>
inline void
UT_Array<T>::setCapacity(exint capacity)
{
// Do nothing when new capacity is the same as the current
if (capacity == myCapacity)
return;
// Special case for non-heap buffers
if (!isHeapBuffer())
{
if (capacity < mySize)
{
// Destroy the extra elements without changing myCapacity
trivialDestructRange(myData + capacity, mySize - capacity);
mySize = capacity;
}
else if (capacity > myCapacity)
{
T *prev = myData;
myData = (T *)malloc(sizeof(T) * capacity);
// myData is safe because we're already a stack buffer
UT_IGL_ASSERT_P(isHeapBuffer());
if (mySize > 0)
memcpy((void *)myData, (void *)prev, sizeof(T) * mySize);
myCapacity = capacity;
}
else
{
// Keep myCapacity unchanged in this case
UT_IGL_ASSERT_P(capacity >= mySize && capacity <= myCapacity);
}
return;
}
if (capacity == 0)
{
if (myData)
{
trivialDestructRange(myData, mySize);
free(myData);
}
myData = 0;
myCapacity = 0;
mySize = 0;
return;
}
if (capacity < mySize)
{
trivialDestructRange(myData + capacity, mySize - capacity);
mySize = capacity;
}
if (myData)
myData = (T *)realloc(myData, capacity*sizeof(T));
else
myData = (T *)malloc(sizeof(T) * capacity);
// Avoid degenerate case if we happen to be aliased the wrong way
if (!isHeapBuffer())
{
T *prev = myData;
myData = (T *)malloc(sizeof(T) * capacity);
if (mySize > 0)
memcpy((void *)myData, (void *)prev, sizeof(T) * mySize);
ut_ArrayImplFree(prev);
}
myCapacity = capacity;
UT_IGL_ASSERT(myData);
}
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(const UT_Array<T> &a)
{
if (this == &a)
return *this;
// Grow the raw array if necessary.
setCapacityIfNeeded(a.size());
// Make sure destructors and constructors are called on all elements
// being removed/added.
trivialDestructRange(myData, mySize);
copyConstructRange(myData, a.myData, a.size());
mySize = a.size();
return *this;
}
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(std::initializer_list<T> a)
{
const exint new_size = a.size();
// Grow the raw array if necessary.
setCapacityIfNeeded(new_size);
// Make sure destructors and constructors are called on all elements
// being removed/added.
trivialDestructRange(myData, mySize);
copyConstructRange(myData, a.begin(), new_size);
mySize = new_size;
return *this;
}
template <typename T>
inline UT_Array<T> &
UT_Array<T>::operator=(UT_Array<T> &&a)
{
if (!a.isHeapBuffer())
{
// Cannot steal from non-heap buffers
clear();
const exint n = a.size();
setCapacityIfNeeded(n);
if (isPOD())
{
if (n > 0)
memcpy(myData, a.myData, n * sizeof(T));
}
else
{
for (exint i = 0; i < n; ++i)
new (&myData[i]) T(std::move(a.myData[i]));
}
mySize = a.mySize;
a.mySize = 0;
return *this;
}
// else, just steal even if we're a small buffer
// Destroy all the elements we're currently holding.
if (myData)
{
trivialDestructRange(myData, mySize);
if (isHeapBuffer())
::free(myData);
}
// Move the contents of the other array to us and empty the other container
// so that it destructs cleanly.
myCapacity = a.myCapacity;
mySize = a.mySize;
myData = a.myData;
a.myCapacity = a.mySize = 0;
a.myData = nullptr;
return *this;
}
template <typename T>
inline bool
UT_Array<T>::operator==(const UT_Array<T> &a) const
{
if (this == &a) return true;
if (mySize != a.size()) return false;
for (exint i = 0; i < mySize; i++)
if (!(myData[i] == a(i))) return false;
return true;
}
template <typename T>
inline bool
UT_Array<T>::operator!=(const UT_Array<T> &a) const
{
return (!operator==(a));
}
}}
#endif // __UT_ARRAYIMPL_H_INCLUDED__
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* Special case for arrays that are usually small,
* to avoid a heap allocation when the array really is small.
*/
#pragma once
#ifndef __UT_SMALLARRAY_H_INCLUDED__
#define __UT_SMALLARRAY_H_INCLUDED__
#include <utility>
#include <stddef.h>
namespace igl {
/// @private
namespace FastWindingNumber {
/// An array class with the small buffer optimization, making it ideal for
/// cases when you know it will only contain a few elements at the expense of
/// increasing the object size by MAX_BYTES (subject to alignment).
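/// A minimal usage sketch (illustrative only):
///   UT_SmallArray<int, 64> scratch; // with 4-byte int, up to 16 elements stay inline
///   scratch.append(1);
///   scratch.append(2);              // the heap is only touched once the buffer overflows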
template <typename T, size_t MAX_BYTES = 64>
class UT_SmallArray : public UT_Array<T>
{
// As many elements as fit into MAX_BYTES, with a minimum of 1
enum { MAX_ELEMS = MAX_BYTES/sizeof(T) < 1 ? 1 : MAX_BYTES/sizeof(T) };
public:
// gcc falsely warns about our use of offsetof() on non-POD types. We can't
// easily suppress this because it has to be done in the caller at
// instantiation time. Instead, punt to a runtime check.
#if defined(__clang__) || defined(_MSC_VER)
#define UT_SMALL_ARRAY_SIZE_IGL_ASSERT() \
using ThisT = UT_SmallArray<T,MAX_BYTES>; \
static_assert(offsetof(ThisT, myBuffer) == sizeof(UT_Array<T>), \
"In order for UT_Array's checks for whether it needs to free the buffer to work, " \
"the buffer must be exactly following the base class memory.")
#else
#define UT_SMALL_ARRAY_SIZE_IGL_ASSERT() \
UT_IGL_ASSERT_P(!UT_Array<T>::isHeapBuffer());
#endif
/// Default construction
UT_SmallArray()
: UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
}
/// Copy constructor
/// @{
explicit UT_SmallArray(const UT_Array<T> &copy)
: UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
UT_Array<T>::operator=(copy);
}
explicit UT_SmallArray(const UT_SmallArray<T,MAX_BYTES> &copy)
: UT_Array<T>(/*capacity*/0)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
UT_Array<T>::operator=(copy);
}
/// @}
/// Move constructor
/// @{
UT_SmallArray(UT_Array<T> &&movable) noexcept
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
UT_Array<T>::operator=(std::move(movable));
}
UT_SmallArray(UT_SmallArray<T,MAX_BYTES> &&movable) noexcept
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
UT_Array<T>::operator=(std::move(movable));
}
/// @}
/// Initializer list constructor
explicit UT_SmallArray(std::initializer_list<T> init)
{
UT_Array<T>::unsafeShareData((T*)myBuffer, 0, MAX_ELEMS);
UT_SMALL_ARRAY_SIZE_IGL_ASSERT();
UT_Array<T>::operator=(init);
}
#undef UT_SMALL_ARRAY_SIZE_IGL_ASSERT
/// Assignment operator
/// @{
UT_SmallArray &
operator=(const UT_SmallArray<T,MAX_BYTES> &copy)
{
UT_Array<T>::operator=(copy);
return *this;
}
UT_SmallArray &
operator=(const UT_Array<T> &copy)
{
UT_Array<T>::operator=(copy);
return *this;
}
/// @}
/// Move operator
/// @{
UT_SmallArray &
operator=(UT_SmallArray &&movable)
{
UT_Array<T>::operator=(std::move(movable));
return *this;
}
UT_SmallArray &
operator=(UT_Array<T> &&movable)
{
UT_Array<T>::operator=(std::move(movable));
return *this;
}
/// @}
UT_SmallArray &
operator=(std::initializer_list<T> src)
{
UT_Array<T>::operator=(src);
return *this;
}
private:
alignas(T) char myBuffer[MAX_ELEMS*sizeof(T)];
};
}}
#endif // __UT_SMALLARRAY_H_INCLUDED__
/*
* Copyright (c) 2018 Side Effects Software Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* COMMENTS:
* A vector class templated on its size and data type.
*/
#pragma once
#ifndef __UT_FixedVector__
#define __UT_FixedVector__
namespace igl {
/// @private
namespace FastWindingNumber {
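// Usage sketch (illustrative only): a small fixed-size vector with componentwise math.
//   UT_FixedVector<float, 3> p(0.0f), q(1.0f);
//   float d2 = (q - p).length2();  // squared distance, here 3.0f
//   float dp = p.dot(q);           // dot product, here 0.0f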
template<typename T,exint SIZE>
class UT_FixedVector
{
public:
typedef UT_FixedVector<T,SIZE> ThisType;
typedef T value_type;
typedef T theType;
static const exint theSize = SIZE;
T vec[SIZE];
SYS_FORCE_INLINE UT_FixedVector() = default;
/// Initializes every component to the same value
SYS_FORCE_INLINE explicit UT_FixedVector(T that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that;
}
SYS_FORCE_INLINE UT_FixedVector(const ThisType &that) = default;
SYS_FORCE_INLINE UT_FixedVector(ThisType &&that) = default;
/// Converts vector of S into vector of T,
/// or just copies if same type.
template<typename S>
SYS_FORCE_INLINE UT_FixedVector(const UT_FixedVector<S,SIZE> &that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
}
template<typename S>
SYS_FORCE_INLINE UT_FixedVector(const S that[SIZE]) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
}
SYS_FORCE_INLINE const T &operator[](exint i) const noexcept
{
UT_IGL_ASSERT_P(i >= 0 && i < SIZE);
return vec[i];
}
SYS_FORCE_INLINE T &operator[](exint i) noexcept
{
UT_IGL_ASSERT_P(i >= 0 && i < SIZE);
return vec[i];
}
SYS_FORCE_INLINE constexpr const T *data() const noexcept
{
return vec;
}
SYS_FORCE_INLINE T *data() noexcept
{
return vec;
}
SYS_FORCE_INLINE ThisType &operator=(const ThisType &that) = default;
SYS_FORCE_INLINE ThisType &operator=(ThisType &&that) = default;
template<typename S>
SYS_FORCE_INLINE ThisType &operator=(const UT_FixedVector<S,SIZE> &that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that[i];
return *this;
}
SYS_FORCE_INLINE const ThisType &operator=(T that) noexcept
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = that;
return *this;
}
template<typename S>
SYS_FORCE_INLINE void operator+=(const UT_FixedVector<S,SIZE> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] += that[i];
}
SYS_FORCE_INLINE void operator+=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] += that;
}
template<typename S>
SYS_FORCE_INLINE auto operator+(const UT_FixedVector<S,SIZE> &that) const -> UT_FixedVector<decltype(vec[0]+that[0]),SIZE>
{
using Type = decltype(vec[0]+that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] + that[i];
return result;
}
template<typename S>
SYS_FORCE_INLINE void operator-=(const UT_FixedVector<S,SIZE> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] -= that[i];
}
SYS_FORCE_INLINE void operator-=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] -= that;
}
template<typename S>
SYS_FORCE_INLINE auto operator-(const UT_FixedVector<S,SIZE> &that) const -> UT_FixedVector<decltype(vec[0]-that[0]),SIZE>
{
using Type = decltype(vec[0]-that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] - that[i];
return result;
}
template<typename S>
SYS_FORCE_INLINE void operator*=(const UT_FixedVector<S,SIZE> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that[i];
}
template<typename S>
SYS_FORCE_INLINE auto operator*(const UT_FixedVector<S,SIZE> &that) const -> UT_FixedVector<decltype(vec[0]*that[0]),SIZE>
{
using Type = decltype(vec[0]*that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that[i];
return result;
}
SYS_FORCE_INLINE void operator*=(T that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that;
}
SYS_FORCE_INLINE UT_FixedVector operator*(T that) const
{
UT_FixedVector result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that;
return result;
}
template<typename S>
SYS_FORCE_INLINE void operator/=(const UT_FixedVector<S,SIZE> &that)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] /= that[i];
}
template<typename S>
SYS_FORCE_INLINE auto operator/(const UT_FixedVector<S,SIZE> &that) const -> UT_FixedVector<decltype(vec[0]/that[0]),SIZE>
{
using Type = decltype(vec[0]/that[0]);
UT_FixedVector<Type,SIZE> result;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] / that[i];
return result;
}
SYS_FORCE_INLINE void operator/=(T that)
{
if (std::is_integral<T>::value)
{
for (exint i = 0; i < SIZE; ++i)
vec[i] /= that;
}
else
{
that = 1/that;
for (exint i = 0; i < SIZE; ++i)
vec[i] *= that;
}
}
SYS_FORCE_INLINE UT_FixedVector operator/(T that) const
{
UT_FixedVector result;
if (std::is_integral<T>::value)
{
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] / that;
}
else
{
that = 1/that;
for (exint i = 0; i < SIZE; ++i)
result[i] = vec[i] * that;
}
return result;
}
SYS_FORCE_INLINE void negate()
{
for (exint i = 0; i < SIZE; ++i)
vec[i] = -vec[i];
}
SYS_FORCE_INLINE UT_FixedVector operator-() const
{
UT_FixedVector result;
for (exint i = 0; i < SIZE; ++i)
result[i] = -vec[i];
return result;
}
template<typename S>
SYS_FORCE_INLINE bool operator==(const UT_FixedVector<S,SIZE> &that) const noexcept
{
for (exint i = 0; i < SIZE; ++i)
{
if (vec[i] != T(that[i]))
return false;
}
return true;
}
template<typename S>
SYS_FORCE_INLINE bool operator!=(const UT_FixedVector<S,SIZE> &that) const noexcept
{
return !(*this==that);
}
SYS_FORCE_INLINE bool isZero() const noexcept
{
for (exint i = 0; i < SIZE; ++i)
{
if (vec[i] != T(0))
return false;
}
return true;
}
SYS_FORCE_INLINE T maxComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v = (vec[i] > v) ? vec[i] : v;
return v;
}
SYS_FORCE_INLINE T minComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v = (vec[i] < v) ? vec[i] : v;
return v;
}
SYS_FORCE_INLINE T avgComponent() const
{
T v = vec[0];
for (exint i = 1; i < SIZE; ++i)
v += vec[i];
return v / SIZE;
}
SYS_FORCE_INLINE T length2() const noexcept
{
T a0(vec[0]);
T result(a0*a0);
for (exint i = 1; i < SIZE; ++i)
{
T ai(vec[i]);
result += ai*ai;
}
return result;
}
SYS_FORCE_INLINE T length() const
{
T len2 = length2();
return SYSsqrt(len2);
}
template<typename S>
SYS_FORCE_INLINE auto dot(const UT_FixedVector<S,SIZE> &that) const -> decltype(vec[0]*that[0])
{
using TheType = decltype(vec[0]*that.vec[0]);
TheType result(vec[0]*that[0]);
for (exint i = 1; i < SIZE; ++i)
result += vec[i]*that[i];
return result;
}
template<typename S>