Mesh Render.cpp 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162
  1. /******************************************************************************/
  2. #include "stdafx.h"
  3. #define VAO_EXCLUSIVE HAS_THREADS // if VAO's can be processed only on the main thread - https://www.khronos.org/opengl/wiki/Vertex_Specification#Vertex_Array_Object "Note: VAOs cannot be shared between OpenGL contexts"
  4. /******************************************************************************/
  5. namespace EE{
  6. #if GL && VAO_EXCLUSIVE
  7. static Memc<UInt> VAOs; // list of released VAO's !! must be handled only under D._lock !! we could optionally 'glDeleteVertexArrays' them at app shut down but it's not needed
  8. #endif
  9. /******************************************************************************/
  10. Int MeshRender::BoneSplit::realToSplit0(Int bone)C {return Max(0, realToSplit(bone));}
// Map a real (skeleton) bone index to this split's local bone index, or return -1 if the bone is not used by this split.
  11. Int MeshRender::BoneSplit::realToSplit (Int bone)C
  12. {
  13. REP(bones)if(split_to_real[i]==bone)return i; // linear search through the split->real bone mapping
  14. return -1; // bone not referenced by this split
  15. }
  16. /******************************************************************************/
// Interleaved vertex-write helpers: each stores one element at 'v' and advances 'v' by the element size,
// so consecutive calls build up one interleaved vertex. The write order therefore defines the vertex layout.
  17. T1(TYPE) static INLINE void Set(Byte *&v, Int i, C TYPE *t) {if(t){*(TYPE*)v=t[i]; v+=SIZE(TYPE);}} // indexed source array, skipped entirely (no write, no advance) when 't' is null
  18. T1(TYPE) static INLINE void Set(Byte *&v, C TYPE &t) { *(TYPE*)v=t ; v+=SIZE(TYPE); } // single value, always written
  19. /******************************************************************************/
// Reset all members to an empty state. This does NOT free any resources - see 'del' for that.
  20. void MeshRender::zero()
  21. {
  22. #if GL && VAO_EXCLUSIVE
  23. _vao_reset=false;
  24. #endif
  25. _storage=0;
  26. _tris=_bone_splits=0;
  27. _flag=0;
  28. _bone_split=null;
  29. _vf=null;
  30. }
  31. MeshRender::MeshRender( ) {zero();} // default construct as empty
  32. MeshRender::MeshRender(C MeshRender &src) : MeshRender() {T=src;} // delegate to default ctor, then copy via the assignment operator so both share one code path
  33. /******************************************************************************/
// Return the byte offset of vertex component 'elm' inside one interleaved vertex, or -1 when the component is not stored.
// The component order and sizes below MUST exactly match the write order used in 'createRaw'.
  34. Int MeshRender::vtxOfs(UInt elm)C
  35. {
  36. Int ofs=0;
  37. if(storageCompress()) // compressed layout: normals/tangents/binormals stored as packed bytes
  38. {
  39. if(_flag&VTX_POS ){if(elm&VTX_POS )return ofs; ofs+=SIZE(Vec );}
  40. if(_flag&VTX_NRM ){if(elm&VTX_NRM )return ofs; ofs+=SIZE(VecB4);}
  41. if(_flag&VTX_TAN_BIN ){if(elm&VTX_TAN_BIN )return ofs; ofs+=SIZE(VecB4);} // in compressed mode Tan and Bin are merged together
  42. if(_flag&VTX_HLP ){if(elm&VTX_HLP )return ofs; ofs+=SIZE(Vec );}
  43. if(_flag&VTX_TEX0 ){if(elm&VTX_TEX0 )return ofs; ofs+=SIZE(Vec2 );}
  44. if(_flag&VTX_TEX1 ){if(elm&VTX_TEX1 )return ofs; ofs+=SIZE(Vec2 );}
  45. if(_flag&VTX_TEX2 ){if(elm&VTX_TEX2 )return ofs; ofs+=SIZE(Vec2 );}
  46. if(_flag&VTX_MATRIX ){if(elm&VTX_MATRIX )return ofs; ofs+=SIZE(VecB4);}
  47. if(_flag&VTX_BLEND ){if(elm&VTX_BLEND )return ofs; ofs+=SIZE(VecB4);}
  48. if(_flag&VTX_SIZE ){if(elm&VTX_SIZE )return ofs; ofs+=SIZE(Flt );}
  49. if(_flag&VTX_MATERIAL){if(elm&VTX_MATERIAL)return ofs; ofs+=SIZE(VecB4);}
  50. if(_flag&VTX_COLOR ){if(elm&VTX_COLOR )return ofs; ofs+=SIZE(VecB4);}
  51. }else // uncompressed layout: normals/tangents/binormals stored as full float vectors
  52. {
  53. if(_flag&VTX_POS ){if(elm&VTX_POS )return ofs; ofs+=SIZE(Vec );}
  54. if(_flag&VTX_NRM ){if(elm&VTX_NRM )return ofs; ofs+=SIZE(Vec );}
  55. if(_flag&VTX_TAN ){if(elm&VTX_TAN )return ofs; ofs+=SIZE(Vec );}
  56. if(_flag&VTX_BIN ){if(elm&VTX_BIN )return ofs; ofs+=SIZE(Vec );}
  57. if(_flag&VTX_HLP ){if(elm&VTX_HLP )return ofs; ofs+=SIZE(Vec );}
  58. if(_flag&VTX_TEX0 ){if(elm&VTX_TEX0 )return ofs; ofs+=SIZE(Vec2 );}
  59. if(_flag&VTX_TEX1 ){if(elm&VTX_TEX1 )return ofs; ofs+=SIZE(Vec2 );}
  60. if(_flag&VTX_TEX2 ){if(elm&VTX_TEX2 )return ofs; ofs+=SIZE(Vec2 );}
  61. if(_flag&VTX_MATRIX ){if(elm&VTX_MATRIX )return ofs; ofs+=SIZE(VecB4);}
  62. if(_flag&VTX_BLEND ){if(elm&VTX_BLEND )return ofs; ofs+=SIZE(VecB4);}
  63. if(_flag&VTX_SIZE ){if(elm&VTX_SIZE )return ofs; ofs+=SIZE(Flt );}
  64. if(_flag&VTX_MATERIAL){if(elm&VTX_MATERIAL)return ofs; ofs+=SIZE(VecB4);}
  65. if(_flag&VTX_COLOR ){if(elm&VTX_COLOR )return ofs; ofs+=SIZE(VecB4);}
  66. }
  67. return -1; // requested component not present in this vertex format
  68. }
  69. /******************************************************************************/
// Release all GPU resources (VAO, vertex buffer, index buffer) and the bone split array, then reset members to empty.
  70. MeshRender& MeshRender::del()
  71. {
  72. #if GL
  73. if(D.notShaderModelGLES2() && _vao) // delete VAO
  74. {
  75. SafeSyncLocker lock(D._lock); // VAO handling must happen under the display lock (see 'VAOs' container note above)
  76. if(_vao) // re-check under the lock in case another thread already cleared it
  77. {
  78. if(D.created())
  79. {
  80. #if VAO_EXCLUSIVE
  81. if(!App.mainThread())VAOs.add(_vao);else // if this is not the main thread, then store it in container for future re-use
  82. #endif
  83. glDeleteVertexArrays(1, &_vao); // we can delete it
  84. }
  85. _vao=0; // clear while in lock
  86. }
  87. }
  88. #endif
  89. _vb.del();
  90. _ib.del();
  91. Free(_bone_split);
  92. zero(); return T;
  93. }
// Setup the vertex format for rendering. On modern GL this creates/configures a VAO (in VAO_EXCLUSIVE mode only
// on the main thread, otherwise the VAO setup is deferred via '_vao_reset'); on other renderers it just caches
// the vertex format object in '_vf'. Returns false on failure.
  94. Bool MeshRender::setVF()
  95. {
  96. #if GL
  97. if(D.notShaderModelGLES2()) // create VAO
  98. {
  99. if(D.created())
  100. {
  101. #if VAO_EXCLUSIVE
  102. if(!App.mainThread())_vao_reset=true;else // if this is not the main thread, then we have to reset it later
  103. #endif
  104. {
  105. VtxFormatGL *temp=VtxFormats(VtxFormatKey(_flag, storageCompress() ? VTX_COMPRESS_NRM_TAN_BIN : 0))->vf; if(!temp)return false;
  106. if(!_vao)
  107. {
  108. SyncLocker lock(D._lock); // VAO creation/re-use must happen under the display lock
  109. #if VAO_EXCLUSIVE
  110. if(VAOs.elms())_vao=VAOs.pop();else // re-use if we have any
  111. #endif
  112. {
  113. glGenVertexArrays(1, &_vao); // create new one
  114. if(!_vao)return false;
  115. }
  116. }
  117. glBindVertexArray(_vao);
  118. _vb.set(); // these must be set after 'glBindVertexArray' and before 'enableSet'
  119. _ib.set(); // these must be set after 'glBindVertexArray' and before 'enableSet'
  120. REP(GL_VTX_NUM)glDisableVertexAttribArray(i); // first disable all, in case this VAO was set with other data before
  121. temp->enableSet(); // enable and set new data with VB and IB already set
  122. #if VAO_EXCLUSIVE
  123. _vao_reset=false; // we've just set it now, so clear reset
  124. #else
  125. glFlush(); // to make sure that the data was initialized, in case it'll be accessed on a secondary thread, no need to flush on exclusive mode, because there all VAO's are used only on the main thread
  126. #endif
  127. #if 0 // !! Don't do this, instead every time we want to bind some IB we use 'BindIndexBuffer' which disables VAO, also this method 'setVF' requires to have VAO already bound at the end so we don't have to bind it again in 'MeshRender.set' !!
  128. glBindVertexArray(0); // disable VAO so binding IB will not modify this VAO
  129. #endif
  130. }
  131. return true; // return success in both cases (!mainThread=reset later, and mainThread=created VAO)
  132. }
  133. }else
  134. #endif
  135. {
  136. _vf=VtxFormats(VtxFormatKey(_flag, storageCompress() ? VTX_COMPRESS_NRM_TAN_BIN : 0))->vf;
  137. }
  138. return _vf!=null;
  139. }
// Create buffers for 'vtxs' vertexes and 'tris' triangles with vertex components 'flag'.
// Existing buffers that already match the requested parameters are re-used instead of re-created.
// On failure everything is deleted and false is returned.
  140. Bool MeshRender::create(Int vtxs, Int tris, UInt flag, Bool compress)
  141. { // avoid deleting at the start, instead, check if some members already match
  142. UInt compress_flag=(compress ? VTX_COMPRESS_NRM_TAN_BIN : 0);
  143. Bool same_format=(flag==T.flag() && compress==storageCompress()); // !! this must check for all parameters which are passed into 'VtxFormatKey' !!
  144. if((same_format && _vb.vtxs()==vtxs && !_vb._lock_mode) || _vb.create(vtxs , flag , compress_flag)) // do a separate check for '_vb' because its faster than 'create' method which may do some more checks for vtx size
  145. if( _ib.create(tris*3, vtxs<=0x10000 )) // 16-bit indexes are sufficient when all vertexes fit in 0x10000
  146. {
  147. T._storage=(compress ? MSHR_COMPRESS : 0)|(D.meshStorageSigned() ? MSHR_SIGNED : 0)|(D.meshBoneSplit() ? MSHR_BONE_SPLIT : 0);
  148. T._tris =tris;
  149. T._flag =flag;
  150. Free(_bone_split); _bone_splits=0;
  151. if(GL && D.notShaderModelGLES2())return setVF(); // set VAO
  152. return same_format || setVF(); // skip setting VF if we already have same format
  153. }
  154. del(); return false;
  155. }
// Create this renderable mesh directly from 'src' geometry, keeping only vertex components present in both
// 'src' and 'flag_and'. Vertex data is interleaved through the pointer-advancing 'Set' helpers, so the Set
// call order below must exactly match the component order assumed by 'vtxOfs'. Bone splits are NOT computed
// here - see the 'create(C MeshBase&)' overload for that.
  156. Bool MeshRender::createRaw(C MeshBase &src, UInt flag_and, Bool optimize, Bool compress)
  157. {
  158. if(create(src.vtxs(), src.trisTotal(), src.flag()&flag_and&VTX_MSHR, compress))
  159. {
  160. if(Byte *v=vtxLock(LOCK_WRITE))
  161. {
  162. if(storageCompress())
  163. {
  164. if((flag()&(VTX_MSHR&~VTX_MATERIAL))==(VTX_POS|VTX_NRM|VTX_COLOR)) // optimized version for heightmaps (most heightmaps will have VTX_COLOR due to ambient occlusion) it's included because heightmaps can be created at runtime
  165. {
  166. C Vec *vtx_pos = src.vtx.pos ();
  167. C Vec *vtx_nrm = src.vtx.nrm ();
  168. C VecB4 *vtx_material=((flag()&VTX_MATERIAL) ? src.vtx.material() : null);
  169. C Color *vtx_color = src.vtx.color ();
  170. VecB4 (*NrmToByte4)(C Vec &v)=(storageSigned() ? NrmToSByte4 : NrmToUByte4); // pick normal packing function once, outside the loop
  171. if(vtx_material)REPA(src.vtx)
  172. {
  173. Set(v, *vtx_pos ++ );
  174. Set(v, NrmToByte4(*vtx_nrm ++));
  175. Set(v, *vtx_material++ );
  176. Set(v, *vtx_color ++ );
  177. }else REPA(src.vtx)
  178. {
  179. Set(v, *vtx_pos ++ );
  180. Set(v, NrmToByte4(*vtx_nrm ++));
  181. Set(v, *vtx_color++ );
  182. }
  183. }else // generic compressed path
  184. {
  185. C Vec *vtx_pos =((flag()&VTX_POS ) ? src.vtx.pos () : null);
  186. C Vec *vtx_nrm =((flag()&VTX_NRM ) ? src.vtx.nrm () : null);
  187. C Vec *vtx_tan =((flag()&VTX_TAN_BIN ) ? src.vtx.tan () : null);
  188. C Vec *vtx_bin =((flag()&VTX_TAN_BIN ) ? src.vtx.bin () : null);
  189. C Vec *vtx_hlp =((flag()&VTX_HLP ) ? src.vtx.hlp () : null);
  190. C Vec2 *vtx_tex0 =((flag()&VTX_TEX0 ) ? src.vtx.tex0 () : null);
  191. C Vec2 *vtx_tex1 =((flag()&VTX_TEX1 ) ? src.vtx.tex1 () : null);
  192. C Vec2 *vtx_tex2 =((flag()&VTX_TEX2 ) ? src.vtx.tex2 () : null);
  193. C VecB4 *vtx_matrix =((flag()&VTX_MATRIX ) ? src.vtx.matrix () : null);
  194. C VecB4 *vtx_blend =((flag()&VTX_BLEND ) ? src.vtx.blend () : null);
  195. C Flt *vtx_size =((flag()&VTX_SIZE ) ? src.vtx.size () : null);
  196. C VecB4 *vtx_material=((flag()&VTX_MATERIAL) ? src.vtx.material() : null);
  197. C Color *vtx_color =((flag()&VTX_COLOR ) ? src.vtx.color () : null);
  198. FREPA(src.vtx)
  199. {
  200. Set(v, i, vtx_pos);
  201. if(storageSigned())
  202. {
  203. if(vtx_nrm )Set(v, NrmToSByte4(vtx_nrm[i]));
  204. if(vtx_tan || vtx_bin)Set(v, TBNToSByte4(vtx_tan ? &vtx_tan[i] : null, vtx_bin ? &vtx_bin[i] : null, vtx_nrm ? &vtx_nrm[i] : null));
  205. }else
  206. {
  207. if(vtx_nrm )Set(v, NrmToUByte4(vtx_nrm[i]));
  208. if(vtx_tan || vtx_bin)Set(v, TBNToUByte4(vtx_tan ? &vtx_tan[i] : null, vtx_bin ? &vtx_bin[i] : null, vtx_nrm ? &vtx_nrm[i] : null));
  209. }
  210. Set(v, i, vtx_hlp );
  211. Set(v, i, vtx_tex0 );
  212. Set(v, i, vtx_tex1 );
  213. Set(v, i, vtx_tex2 );
  214. Set(v, i, vtx_matrix );
  215. Set(v, i, vtx_blend );
  216. Set(v, i, vtx_size );
  217. Set(v, i, vtx_material);
  218. Set(v, i, vtx_color );
  219. }
  220. }
  221. }else // uncompressed path - components copied as-is
  222. {
  223. C Vec *vtx_pos =((flag()&VTX_POS ) ? src.vtx.pos () : null);
  224. C Vec *vtx_nrm =((flag()&VTX_NRM ) ? src.vtx.nrm () : null);
  225. C Vec *vtx_tan =((flag()&VTX_TAN ) ? src.vtx.tan () : null);
  226. C Vec *vtx_bin =((flag()&VTX_BIN ) ? src.vtx.bin () : null);
  227. C Vec *vtx_hlp =((flag()&VTX_HLP ) ? src.vtx.hlp () : null);
  228. C Vec2 *vtx_tex0 =((flag()&VTX_TEX0 ) ? src.vtx.tex0 () : null);
  229. C Vec2 *vtx_tex1 =((flag()&VTX_TEX1 ) ? src.vtx.tex1 () : null);
  230. C Vec2 *vtx_tex2 =((flag()&VTX_TEX2 ) ? src.vtx.tex2 () : null);
  231. C VecB4 *vtx_matrix =((flag()&VTX_MATRIX ) ? src.vtx.matrix () : null);
  232. C VecB4 *vtx_blend =((flag()&VTX_BLEND ) ? src.vtx.blend () : null);
  233. C Flt *vtx_size =((flag()&VTX_SIZE ) ? src.vtx.size () : null);
  234. C VecB4 *vtx_material=((flag()&VTX_MATERIAL) ? src.vtx.material() : null);
  235. C Color *vtx_color =((flag()&VTX_COLOR ) ? src.vtx.color () : null);
  236. FREPA(src.vtx)
  237. {
  238. Set(v, i, vtx_pos );
  239. Set(v, i, vtx_nrm );
  240. Set(v, i, vtx_tan );
  241. Set(v, i, vtx_bin );
  242. Set(v, i, vtx_hlp );
  243. Set(v, i, vtx_tex0 );
  244. Set(v, i, vtx_tex1 );
  245. Set(v, i, vtx_tex2 );
  246. Set(v, i, vtx_matrix );
  247. Set(v, i, vtx_blend );
  248. Set(v, i, vtx_size );
  249. Set(v, i, vtx_material);
  250. Set(v, i, vtx_color );
  251. }
  252. }
  253. vtxUnlock();
  254. }
  255. if(Ptr ind=indLock(LOCK_WRITE))
  256. {
  257. SetFaceIndex(ind, src.tri.ind(), src.tris(), src.quad.ind(), src.quads(), _ib.bit16()); // write triangle indexes (quads converted to tris)
  258. indUnlock();
  259. }
  260. if(optimize)T.optimize();
  261. return true;
  262. }
  263. return false;
  264. }
  265. struct SplitPart
  266. {
  267. Int matrixes, temps;
  268. Bool matrix_used[256];
  269. Byte temp_matrix[4*4]; // max 4 quad_verts * 4 matrixes_per_vert
  270. void addTemp(Byte matrix)
  271. {
  272. REP(temps)if(temp_matrix[i]==matrix)return; // if already added then don't add anymore
  273. if(!InRange(temps, temp_matrix))Exit("SplitPart.addTemp"); // shouldn't happen
  274. temp_matrix[temps++]=matrix; // add to helper array
  275. }
  276. Bool canFit(VecB4 *matrix, VecB4 *weight, Int elms)
  277. {
  278. temps=0; // set helper number to zero
  279. REP(elms)
  280. {
  281. VecB4 m=matrix[i], w=weight[i];
  282. if(!matrix_used[m.x] && w.x)addTemp(m.x);
  283. if(!matrix_used[m.y] && w.y)addTemp(m.y);
  284. if(!matrix_used[m.z] && w.z)addTemp(m.z);
  285. if(!matrix_used[m.w] && w.w)addTemp(m.w);
  286. }
  287. return (matrixes+temps)<=MAX_MATRIX_HWMIN; // if amount of used matrixes along with new to be added is in range of supported matrixes by the gpu
  288. }
  289. void add(VecB4 *matrix, VecB4 *weight, Int elms)
  290. {
  291. REP(elms)
  292. {
  293. VecB4 m=matrix[i], w=weight[i];
  294. if(!matrix_used[m.x] && w.x){matrix_used[m.x]=true; matrixes++;}
  295. if(!matrix_used[m.y] && w.y){matrix_used[m.y]=true; matrixes++;}
  296. if(!matrix_used[m.z] && w.z){matrix_used[m.z]=true; matrixes++;}
  297. if(!matrix_used[m.w] && w.w){matrix_used[m.w]=true; matrixes++;}
  298. }
  299. }
  300. SplitPart() {matrixes=0; Zero(matrix_used);}
  301. };
// Create from 'src', splitting the mesh into bone-split parts when any vertex references a matrix index
// at or above the GPU bone matrix limit (MAX_MATRIX_HWMIN); otherwise forwards straight to 'createRaw'.
  302. Bool MeshRender::create(C MeshBase &src, UInt flag_and, Bool optimize, Bool compress)
  303. {
  304. if((flag_and&VTX_MATRIX) && src.vtx.matrix())REPA(src.vtx)
  305. {
  306. C VecB4 &m=src.vtx.matrix(i);
  307. if(m.x>=MAX_MATRIX_HWMIN // we exceed the limit of available matrixes, so we need to create in parts
  308. || m.y>=MAX_MATRIX_HWMIN // MAX_MATRIX_HWMIN must be checked instead of MAX_MATRIX_HW because we're preparing splits for all platforms (in affected platforms we would then just adjust vertex matrixes instead of recalculating the splits)
  309. || m.z>=MAX_MATRIX_HWMIN
  310. || m.w>=MAX_MATRIX_HWMIN)
  311. {
  312. Memb<SplitPart> splits;
  313. Memc<MeshBase > mshbs ;
  314. Mems<Int> tri_split; tri_split.setNum(src.tris ()); Memt<Bool> tri_is; tri_is.setNum(src.tris ()); // per-face split index and membership flags
  315. Mems<Int> quad_split; quad_split.setNum(src.quads()); Memt<Bool> quad_is; quad_is.setNum(src.quads());
  316. // set split index for each triangle and quad
  317. VecB4 weight[4]; // 3 vtxs in a tri and 4 vtxs in a quad
  318. REPAO(weight).set(255, 255, 255, 255); // assume that all are used
  319. FREPA(src.tri) // add in original order
  320. {
  321. C VecI &ind = src.tri.ind(i);
  322. VecB4 matrix[]={src.vtx.matrix(ind.x), src.vtx.matrix(ind.y), src.vtx.matrix(ind.z)};
  323. if(src.vtx.blend())REPA(ind)weight[i]=src.vtx.blend(ind.c[i]);
  324. Int s=0; for(; s<splits.elms(); s++)if(splits[s].canFit(matrix, weight, Elms(matrix)))break; if(!InRange(s, splits))s=splits.addNum(1); // first split that fits, or a new one
  325. splits[s].add(matrix, weight, Elms(matrix));
  326. tri_split[i]=s;
  327. }
  328. FREPA(src.quad) // add in original order
  329. {
  330. C VecI4 &ind = src.quad.ind(i);
  331. VecB4 matrix[]={src.vtx.matrix(ind.x), src.vtx.matrix(ind.y), src.vtx.matrix(ind.z), src.vtx.matrix(ind.w)};
  332. if(src.vtx.blend())REPA(ind)weight[i]=src.vtx.blend(ind.c[i]);
  333. Int s=0; for(; s<splits.elms(); s++)if(splits[s].canFit(matrix, weight, Elms(matrix)))break; if(!InRange(s, splits))s=splits.addNum(1);
  334. splits[s].add(matrix, weight, Elms(matrix));
  335. quad_split[i]=s;
  336. }
  337. // create mesh and splits
  338. BoneSplit *bone_splits=AllocZero<BoneSplit>(splits.elms()); // AllocZero to zero all maps
  339. const Bool bone_split =D.meshBoneSplit();
  340. FREPAD(s, splits)
  341. {
  342. // copy mesh
  343. MeshBase &mshb=mshbs.New();
  344. REPA(src.tri ) tri_is[i]=( tri_split[i]==s);
  345. REPA(src.quad)quad_is[i]=(quad_split[i]==s);
  346. src.copyFace(mshb, null, tri_is, quad_is, flag_and);
  347. mshb.quadToTri(); // we need to call this at this stage, so triangle indexes will be correct for creating 1 mesh from split meshes
  348. // set split
  349. SplitPart &split= splits[s];
  350. BoneSplit &bs =bone_splits[s];
  351. Byte real_to_split[256]; Zero(real_to_split);
  352. bs.vtxs =mshb.vtxs();
  353. bs.tris =mshb.tris();
  354. bs.bones=0;
  355. FREPA(split.matrix_used)if(split.matrix_used[i]) // build both directions of the real<->split bone mapping
  356. {
  357. real_to_split[i]=bs.bones;
  358. bs.split_to_real [bs.bones]=i;
  359. bs.bones++;
  360. }
  361. // remap vertex matrixes
  362. if(bone_split)REPA(mshb.vtx)
  363. {
  364. VecB4 &matrix=mshb.vtx.matrix(i);
  365. matrix.x=real_to_split[matrix.x];
  366. matrix.y=real_to_split[matrix.y];
  367. matrix.z=real_to_split[matrix.z];
  368. matrix.w=real_to_split[matrix.w];
  369. }
  370. }
  371. MeshBase temp; temp.create(mshbs.data(), mshbs.elms()); // merge all split parts into one mesh (faces stay grouped per split)
  372. if(createRaw(temp, ~0, false, compress)) // don't optimize yet, wait until splits are set
  373. {
  374. Free(T._bone_split)=bone_splits; T._bone_splits=splits.elms(); // remove old splits and replace them with new ones
  375. if(optimize)T.optimize();
  376. return true;
  377. }
  378. // free
  379. Free(bone_splits);
  380. return false;
  381. }
  382. }
  383. return createRaw(src, flag_and, optimize, compress);
  384. }
// Merge 'elms' meshes from 'src' into one. If any source mesh has bone splits, everything goes through a
// MeshBase conversion so splits can be recomputed. Otherwise a fast merge is done: vertexes are copied
// (memcpy fast path when formats match exactly, per-component conversion otherwise) and triangle indexes
// are re-based by the accumulated vertex count. Work is done on a local 'temp' so 'this' may be one of 'src'.
  385. Bool MeshRender::create(C MeshRender *src[], Int elms, UInt flag_and, Bool optimize, Bool compress)
  386. {
  387. // check for bone splits
  388. REP(elms)if(C MeshRender *mesh=src[i])if(mesh->_bone_splits) // if any of the meshes have bone splits
  389. { // we need to convert to MeshBase first
  390. Memt<MeshBase> base; base.setNum(elms);
  391. REPA(base)if(C MeshRender *mesh=src[i])base[i].create(*mesh, flag_and);
  392. MeshBase all; all.create(base.data(), base.elms());
  393. return create(all, flag_and, optimize, compress);
  394. }
  395. // do fast merge
  396. Int vtxs=0, tris=0;
  397. UInt flag_all=0;
  398. REP(elms)if(C MeshRender *mesh=src[i]) // sum sizes and union all vertex component flags
  399. {
  400. vtxs+=mesh->vtxs();
  401. tris+=mesh->tris();
  402. flag_all|=mesh->flag();
  403. }
  404. Bool ok=true;
  405. MeshRender temp; if(temp.create(vtxs, tris, flag_all&flag_and, compress)) // operate on 'temp' in case 'this' belongs to one of 'src' meshes
  406. {
  407. // vertexes
  408. if(Byte *v=temp.vtxLock(LOCK_WRITE))
  409. {
  410. FREP(elms)if(C MeshRender *mesh=src[i])
  411. {
  412. if(C Byte *src=mesh->vtxLockRead())
  413. {
  414. if(temp.flag()==mesh->flag() && temp.vtxSize()==mesh->vtxSize() && temp.storageCompress()==mesh->storageCompress()) // identical layout -> raw copy
  415. {
  416. Int size=mesh->vtxs()*mesh->vtxSize();
  417. CopyFast(v, src, size);
  418. v+=size;
  419. }else // different layout -> convert every component individually, filling defaults for missing ones
  420. {
  421. Int vtx_pos =mesh->vtxOfs(VTX_POS ),
  422. vtx_nrm =mesh->vtxOfs(VTX_NRM ),
  423. vtx_tan =mesh->vtxOfs(VTX_TAN ),
  424. vtx_bin =mesh->vtxOfs(VTX_BIN ),
  425. vtx_hlp =mesh->vtxOfs(VTX_HLP ),
  426. vtx_tex0 =mesh->vtxOfs(VTX_TEX0 ),
  427. vtx_tex1 =mesh->vtxOfs(VTX_TEX1 ),
  428. vtx_tex2 =mesh->vtxOfs(VTX_TEX2 ),
  429. vtx_matrix =mesh->vtxOfs(VTX_MATRIX ),
  430. vtx_blend =mesh->vtxOfs(VTX_BLEND ),
  431. vtx_size =mesh->vtxOfs(VTX_SIZE ),
  432. vtx_material=mesh->vtxOfs(VTX_MATERIAL),
  433. vtx_color =mesh->vtxOfs(VTX_COLOR );
  434. REP(mesh->vtxs())
  435. {
  436. if(temp.flag()&VTX_POS)if(vtx_pos>=0)Set(v, *(Vec*)(src+vtx_pos));else Set(v, VecZero);
  437. if(temp.flag()&VTX_NRM)
  438. {
  439. if(temp.storageCompress())
  440. {
  441. if(vtx_nrm>=0)if(mesh->storageCompress())Set(v, *(VecB4*)(src+vtx_nrm));else Set(v, (temp.storageSigned() ? NrmToSByte4 : NrmToUByte4)(*(Vec*)(src+vtx_nrm)));else Set(v, VecB4(temp.storageSigned() ? 0 : 128));
  442. }else
  443. {
  444. if(vtx_nrm>=0)if(mesh->storageCompress())Set(v, (mesh->storageSigned() ? SByte4ToNrm : UByte4ToNrm)(*(VecB4*)(src+vtx_nrm)));else Set(v, *(Vec*)(src+vtx_nrm));else Set(v, VecZero);
  445. }
  446. }
  447. if(temp.flag()&VTX_TAN_BIN)
  448. {
  449. if(temp.storageCompress()) // set as 1 packed VecB4 TanBin
  450. {
  451. if(vtx_tan>=0)
  452. {
  453. if(mesh->storageCompress())Set(v, *(VecB4*)(src+vtx_tan));else
  454. {
  455. Set(v, (temp.storageSigned() ? TBNToSByte4 : TBNToUByte4)((vtx_tan>=0) ? (Vec*)(src+vtx_tan) : null, (vtx_bin>=0) ? (Vec*)(src+vtx_bin) : null, (vtx_nrm>=0) ? (Vec*)(src+vtx_nrm) : null));
  456. }
  457. }else Set(v, VecB4(temp.storageSigned() ? 0 : 128));
  458. }else
  459. {
  460. if(temp.flag()&VTX_TAN)
  461. {
  462. if(vtx_tan>=0)
  463. {
  464. if(!mesh->storageCompress())Set(v, *(Vec*)(src+vtx_tan));else
  465. {
  466. Set(v, (mesh->storageSigned() ? SByte4ToNrm : UByte4ToNrm)(*(VecB4*)(src+vtx_tan)));
  467. }
  468. }else Set(v, VecZero);
  469. }
  470. if(temp.flag()&VTX_BIN)
  471. {
  472. if(vtx_bin>=0)
  473. {
  474. if(!mesh->storageCompress())Set(v, *(Vec*)(src+vtx_bin));else
  475. {
  476. Vec bin;
  477. (mesh->storageSigned() ? SByte4ToTan : UByte4ToTan) (*(VecB4*)(src+vtx_bin), null, &bin, (vtx_nrm>=0) ? &(mesh->storageSigned() ? SByte4ToNrm : UByte4ToNrm)(*(VecB4*)(src+vtx_nrm)) : null);
  478. Set(v, bin);
  479. }
  480. }else Set(v, VecZero);
  481. }
  482. }
  483. }
  484. if(temp.flag()&VTX_HLP )if(vtx_hlp >=0)Set(v, *(Vec *)(src+vtx_hlp ));else Set(v, VecZero);
  485. if(temp.flag()&VTX_TEX0 )if(vtx_tex0 >=0)Set(v, *(Vec2 *)(src+vtx_tex0 ));else Set(v, Vec2(0));
  486. if(temp.flag()&VTX_TEX1 )if(vtx_tex1 >=0)Set(v, *(Vec2 *)(src+vtx_tex1 ));else Set(v, Vec2(0));
  487. if(temp.flag()&VTX_TEX2 )if(vtx_tex2 >=0)Set(v, *(Vec2 *)(src+vtx_tex2 ));else Set(v, Vec2(0));
  488. if(temp.flag()&VTX_MATRIX )if(vtx_matrix >=0)Set(v, *(VecB4*)(src+vtx_matrix ));else Set(v, VecB4( 0, 0, 0, 0));
  489. if(temp.flag()&VTX_BLEND )if(vtx_blend >=0)Set(v, *(VecB4*)(src+vtx_blend ));else Set(v, VecB4(255, 0, 0, 0));
  490. if(temp.flag()&VTX_SIZE )if(vtx_size >=0)Set(v, *(Flt *)(src+vtx_size ));else Set(v, Flt(0));
  491. if(temp.flag()&VTX_MATERIAL)if(vtx_material>=0)Set(v, *(VecB4*)(src+vtx_material));else Set(v, VecB4(255, 0, 0, 0));
  492. if(temp.flag()&VTX_COLOR )if(vtx_color >=0)Set(v, *(Color*)(src+vtx_color ));else Set(v, WHITE);
  493. src+=mesh->vtxSize();
  494. }
  495. }
  496. mesh->vtxUnlock();
  497. }else ok=false;
  498. }
  499. temp.vtxUnlock();
  500. }else ok=false;
  501. // indexes
  502. if(Ptr ind=temp.indLock(LOCK_WRITE))
  503. {
  504. vtxs=0; // re-used as the running vertex offset for index re-basing
  505. VecUS *ind16=(temp.indBit16() ? (VecUS*)ind : null);
  506. VecI *ind32=(temp.indBit16() ? null : (VecI *)ind);
  507. FREP(elms)if(C MeshRender *mesh=src[i])
  508. {
  509. if(CPtr src=mesh->indLockRead())
  510. {
  511. if(mesh->indBit16())
  512. {
  513. C VecUS *s=(C VecUS*)src;
  514. if(ind16)REP(mesh->tris())*ind16++=(*s++)+vtxs;else
  515. if(ind32)REP(mesh->tris())*ind32++=(*s++)+vtxs;
  516. }else
  517. {
  518. C VecI *s=(C VecI*)src;
  519. if(ind16)REP(mesh->tris())*ind16++=(*s++)+vtxs;else
  520. if(ind32)REP(mesh->tris())*ind32++=(*s++)+vtxs;
  521. }
  522. mesh->indUnlock();
  523. }else ok=false;
  524. vtxs+=mesh->vtxs();
  525. }
  526. temp.indUnlock();
  527. }else ok=false;
  528. if(ok)
  529. {
  530. if(optimize)temp.optimize();
  531. Swap(temp, T); // commit the merged result into 'this'
  532. }else del();
  533. return ok;
  534. }
  535. return false;
  536. }
// Make a full copy of 'src' (buffers, storage/flag members and bone splits). Returns false on buffer creation failure.
  537. Bool MeshRender::create(C MeshRender &src)
  538. {
  539. if(this==&src)return true; // copying self is a no-op
  540. del();
  541. if(_vb.create(src._vb))
  542. if(_ib.create(src._ib))
  543. {
  544. _storage=src._storage;
  545. _tris =src._tris ;
  546. _flag =src._flag ;
  547. if(GL && D.notShaderModelGLES2())setVF();else _vf=src._vf; // VAO
  548. // copy splits
  549. Alloc(_bone_split, _bone_splits= src._bone_splits);
  550. CopyN(_bone_split, src._bone_split , src._bone_splits);
  551. return true;
  552. }
  553. return false;
  554. }
  555. #if 0 // DX9
// (disabled, DX9-only) Build an ID3DXMesh copy of this mesh; the attribute buffer receives the bone split id per triangle.
// Returns null on failure; on failure the partially created mesh is released.
  556. ID3DXMesh* MeshRender::createDx9Mesh()
  557. {
  558. Bool ok =false;
  559. ID3DXMesh *mesh=null;
  560. D._lock.on();
  561. if(Ptr src_vtx=vtxLock(LOCK_READ))
  562. {
  563. if(Ptr src_ind=indLock(LOCK_READ))
  564. {
  565. D3DVERTEXELEMENT9 ve[MAXD3DDECLLENGTH+1];
  566. if(SetVtxFormatFromVtxDecl(_vf, ve))
  567. {
  568. if(OK(D3DXCreateMesh(tris(), vtxs(), D3DXMESH_32BIT|D3DXMESH_SYSTEMMEM, ve, D3D, &mesh)))
  569. {
  570. DWORD *dest_id;
  571. Ptr dest_vtx, dest_ind;
  572. if(OK(mesh->LockVertexBuffer(0, &dest_vtx)))
  573. {
  574. if(OK(mesh->LockIndexBuffer(0, &dest_ind)))
  575. {
  576. if(OK(mesh->LockAttributeBuffer(0, &dest_id)))
  577. {
  578. // copy data
  579. D._lock.off(); // release the display lock for the duration of the bulk copy
  580. CopyFast (dest_vtx, src_vtx, mesh->GetNumVertices()*mesh->GetNumBytesPerVertex()); // vtx
  581. if(_ib.bit16())Copy16To32(dest_ind, src_ind, mesh->GetNumFaces ()*3 ); // tri
  582. else Copy32To32(dest_ind, src_ind, mesh->GetNumFaces ()*3 ); // tri
  583. FREP(_bone_splits)REPD(j, _bone_split[i].tris)*dest_id++=i; // id
  584. D._lock.on();
  585. ok=true;
  586. mesh->UnlockAttributeBuffer();
  587. }
  588. mesh->UnlockIndexBuffer();
  589. }
  590. mesh->UnlockVertexBuffer();
  591. }
  592. }
  593. }
  594. indUnlock();
  595. }
  596. vtxUnlock();
  597. }
  598. if(!ok)RELEASE(mesh);
  599. D._lock.off();
  600. return mesh;
  601. }
  602. #endif
  603. /******************************************************************************/
  604. C Byte* MeshRender::vtxLockedElm(UInt elm)C
  605. {
  606. if(C Byte *data=vtxLockedData())
  607. {
  608. Int ofs=vtxOfs(elm);
  609. if( ofs>=0)return data+ofs;
  610. }
  611. return null;
  612. }
  613. /******************************************************************************/
  614. // GET
  615. /******************************************************************************/
  616. Bool MeshRender::getBox(Box &box)C
  617. {
  618. Int pos =vtxOfs(VTX_POS);
  619. if( pos>=0)if(C Byte *vtx=vtxLockRead())
  620. {
  621. vtx+=pos;
  622. box=*(Vec*)vtx; REP(vtxs()-1){vtx+=vtxSize(); box|=*(Vec*)vtx;}
  623. vtxUnlock();
  624. return true;
  625. }
  626. box.zero(); return false;
  627. }
// Return the total surface area of all triangles; if 'center' is passed, it receives the area-weighted center.
// Returns 0 (and a zeroed 'center') when position data is absent or locking fails.
  628. Flt MeshRender::area(Vec *center)C
  629. {
  630. if(center)center->zero();
  631. Flt area=0;
  632. Int pos =vtxOfs(VTX_POS);
  633. if( pos>=0)if(C Byte *vtx=vtxLockRead())
  634. {
  635. vtx+=pos;
  636. if(CPtr ind=indLockRead())
  637. {
  638. if(indBit16()) // 16-bit index buffer
  639. {
  640. C VecUS *tri=(C VecUS*)ind;
  641. REP(tris())
  642. {
  643. Tri t(*(Vec*)(vtx+tri->x*vtxSize()), *(Vec*)(vtx+tri->y*vtxSize()), *(Vec*)(vtx+tri->z*vtxSize())); tri++;
  644. Flt a=t.area();
  645. area +=a;
  646. if(center)*center+=a*t.center(); // accumulate area-weighted triangle centers
  647. }
  648. }else // 32-bit index buffer
  649. {
  650. C VecI *tri=(C VecI*)ind;
  651. REP(tris())
  652. {
  653. Tri t(*(Vec*)(vtx+tri->x*vtxSize()), *(Vec*)(vtx+tri->y*vtxSize()), *(Vec*)(vtx+tri->z*vtxSize())); tri++;
  654. Flt a=t.area();
  655. area +=a;
  656. if(center)*center+=a*t.center();
  657. }
  658. }
  659. indUnlock();
  660. }
  661. vtxUnlock();
  662. }
  663. if(center && area)*center/=area; // normalize the weighted sum into the final center
  664. return area;
  665. }
  666. /******************************************************************************/
  667. // SET
  668. /******************************************************************************
  669. void MeshRender::setNormal()
  670. {
  671. Int ofs_pos=vtxOfs(VTX_POS),
  672. ofs_nrm=vtxOfs(VTX_NRM);
  673. if( ofs_pos>=0 && ofs_nrm>=0)
  674. if(Byte *vtx=vtxLock())
  675. {
  676. if(Ptr ind=indLock(LOCK_READ))
  677. {
  678. Byte *vtx_pos=vtx+ofs_pos,
  679. *vtx_nrm=vtx+ofs_nrm;
  680. REP(vtxs())((Vec*)(vtx_nrm+i*vtxSize()))->zero();
  681. if(_ib.bit16())
  682. {
  683. U16 *d=(U16*)ind;
  684. REP(tris)
  685. {
  686. Vec &p0=*(Vec*)(vtx_pos+d[0]*vtxSize()),
  687. &p1=*(Vec*)(vtx_pos+d[1]*vtxSize()),
  688. &p2=*(Vec*)(vtx_pos+d[2]*vtxSize()),
  689. &n0=*(Vec*)(vtx_nrm+d[0]*vtxSize()),
  690. &n1=*(Vec*)(vtx_nrm+d[1]*vtxSize()),
  691. &n2=*(Vec*)(vtx_nrm+d[2]*vtxSize()),
  692. nrm=GetNormal (p0, p1, p2);
  693. Flt a0=AbsAngleBetween(p2, p0, p1),
  694. a1=AbsAngleBetween(p0, p1, p2), a2=PI-a0-a1;
  695. n0+=a0*nrm;
  696. n1+=a1*nrm;
  697. n2+=a2*nrm;
  698. d+=3;
  699. }
  700. }else
  701. {
  702. U32 *d=(U32*)ind;
  703. REP(tris)
  704. {
  705. Vec &p0=*(Vec*)(vtx_pos+d[0]*vtxSize()),
  706. &p1=*(Vec*)(vtx_pos+d[1]*vtxSize()),
  707. &p2=*(Vec*)(vtx_pos+d[2]*vtxSize()),
  708. &n0=*(Vec*)(vtx_nrm+d[0]*vtxSize()),
  709. &n1=*(Vec*)(vtx_nrm+d[1]*vtxSize()),
  710. &n2=*(Vec*)(vtx_nrm+d[2]*vtxSize()),
  711. nrm=GetNormal (p0, p1, p2);
  712. Flt a0=AbsAngleBetween(p2, p0, p1),
  713. a1=AbsAngleBetween(p0, p1, p2), a2=PI-a0-a1;
  714. n0+=a0*nrm;
  715. n1+=a1*nrm;
  716. n2+=a2*nrm;
  717. d+=3;
  718. }
  719. }
  720. REP(vtxs())((Vec*)(vtx_nrm+i*vtxSize()))->normalize();
  721. indUnlock();
  722. }
  723. vtxUnlock();
  724. }
  725. }
  726. // this code assumes that vertexes have been created in order, and as squares
  727. void MeshRender::setNormalHeightmap(Int x, Int y)
  728. {
  729. Int ofs_pos=vtxOfs(VTX_POS),
  730. ofs_nrm=vtxOfs(VTX_NRM);
  731. if( ofs_pos>=0 && ofs_nrm>=0)
  732. if(Byte *vtx=vtxLock())
  733. {
  734. Byte *vtx_pos=vtx+ofs_pos,
  735. *vtx_nrm=vtx+ofs_nrm;
  736. // center
  737. for(Int sy=1; sy<y-1; sy++)
  738. for(Int sx=1; sx<x-1; sx++)
  739. {
  740. Vec &l=*(Vec*)(vtx_pos+(sx + sy*x -1)*vtxSize()),
  741. &r=*(Vec*)(vtx_pos+(sx + sy*x +1)*vtxSize()),
  742. &d=*(Vec*)(vtx_pos+(sx + sy*x -x)*vtxSize()),
  743. &u=*(Vec*)(vtx_pos+(sx + sy*x +x)*vtxSize()),
  744. &n=*(Vec*)(vtx_nrm+(sx + sy*x )*vtxSize());
  745. n.x=(l.y-r.y)*x;
  746. n.z=(d.y-u.y)*y;
  747. n.y=2;
  748. n.normalize();
  749. }
  750. // left
  751. for(Int sy=1; sy<y-1; sy++)
  752. {
  753. Vec &c=*(Vec*)(vtx_pos+(sy*x )*vtxSize()),
  754. &r=*(Vec*)(vtx_pos+(sy*x +1)*vtxSize()),
  755. &u=*(Vec*)(vtx_pos+(sy*x +x)*vtxSize()),
  756. &d=*(Vec*)(vtx_pos+(sy*x -x)*vtxSize()),
  757. &n=*(Vec*)(vtx_nrm+(sy*x )*vtxSize());
  758. n.x=(c.y-r.y)*(x*2);
  759. n.z=(d.y-u.y)*y;
  760. n.y=2;
  761. n.normalize();
  762. }
  763. // right
  764. for(Int sy=1; sy<y-1; sy++)
  765. {
  766. Vec &l=*(Vec*)(vtx_pos+(x-1 + sy*x -1)*vtxSize()),
  767. &c=*(Vec*)(vtx_pos+(x-1 + sy*x )*vtxSize()),
  768. &u=*(Vec*)(vtx_pos+(x-1 + sy*x +x)*vtxSize()),
  769. &d=*(Vec*)(vtx_pos+(x-1 + sy*x -x)*vtxSize()),
  770. &n=*(Vec*)(vtx_nrm+(x-1 + sy*x )*vtxSize());
  771. n.x=(l.y-c.y)*(x*2);
  772. n.z=(d.y-u.y)*y;
  773. n.y=2;
  774. n.normalize();
  775. }
  776. // down
  777. for(Int sx=1; sx<x-1; sx++)
  778. {
  779. Vec &l=*(Vec*)(vtx_pos+(sx -1)*vtxSize()),
  780. &r=*(Vec*)(vtx_pos+(sx +1)*vtxSize()),
  781. &c=*(Vec*)(vtx_pos+(sx )*vtxSize()),
  782. &u=*(Vec*)(vtx_pos+(sx +x)*vtxSize()),
  783. &n=*(Vec*)(vtx_nrm+(sx )*vtxSize());
  784. n.x=(l.y-r.y)*x;
  785. n.z=(c.y-u.y)*(y*2);
  786. n.y=2;
  787. n.normalize();
  788. }
  789. // up
  790. for(Int sx=1; sx<x-1; sx++)
  791. {
  792. Vec &l=*(Vec*)(vtx_pos+(sx + (y-1)*x -1)*vtxSize()),
  793. &r=*(Vec*)(vtx_pos+(sx + (y-1)*x +1)*vtxSize()),
  794. &d=*(Vec*)(vtx_pos+(sx + (y-1)*x -x)*vtxSize()),
  795. &c=*(Vec*)(vtx_pos+(sx + (y-1)*x )*vtxSize()),
  796. &n=*(Vec*)(vtx_nrm+(sx + (y-1)*x )*vtxSize());
  797. n.x=(l.y-r.y)*x;
  798. n.z=(d.y-c.y)*(y*2);
  799. n.y=2;
  800. n.normalize();
  801. }
  802. // left-down
  803. {
  804. Vec &c=*(Vec*)(vtx_pos ),
  805. &r=*(Vec*)(vtx_pos+1*vtxSize()),
  806. &u=*(Vec*)(vtx_pos+x*vtxSize()),
  807. &n=*(Vec*)(vtx_nrm );
  808. n.x=(c.y-r.y)*x;
  809. n.z=(c.y-u.y)*y;
  810. n.y=1;
  811. n.normalize();
  812. }
  813. // right-down
  814. {
  815. Vec &c=*(Vec*)(vtx_pos+(x-1 )*vtxSize()),
  816. &l=*(Vec*)(vtx_pos+(x-1 -1)*vtxSize()),
  817. &u=*(Vec*)(vtx_pos+(x-1 +x)*vtxSize()),
  818. &n=*(Vec*)(vtx_nrm+(x-1 )*vtxSize());
  819. n.x=(l.y-c.y)*x;
  820. n.z=(c.y-u.y)*y;
  821. n.y=1;
  822. n.normalize();
  823. }
  824. // left-up
  825. {
  826. Vec &c=*(Vec*)(vtx_pos+((y-1)*x )*vtxSize()),
  827. &r=*(Vec*)(vtx_pos+((y-1)*x +1)*vtxSize()),
  828. &d=*(Vec*)(vtx_pos+((y-1)*x -x)*vtxSize()),
  829. &n=*(Vec*)(vtx_nrm+((y-1)*x )*vtxSize());
  830. n.x=(c.y-r.y)*x;
  831. n.z=(d.y-c.y)*y;
  832. n.y=1;
  833. n.normalize();
  834. }
  835. // right-up
  836. {
  837. Vec &c=*(Vec*)(vtx_pos+(x-1 + (y-1)*x )*vtxSize()),
  838. &l=*(Vec*)(vtx_pos+(x-1 + (y-1)*x -1)*vtxSize()),
  839. &d=*(Vec*)(vtx_pos+(x-1 + (y-1)*x -x)*vtxSize()),
  840. &n=*(Vec*)(vtx_nrm+(x-1 + (y-1)*x )*vtxSize());
  841. n.x=(l.y-c.y)*x;
  842. n.z=(d.y-c.y)*y;
  843. n.y=1;
  844. n.normalize();
  845. }
  846. vtxUnlock();
  847. }
  848. }
  849. void MeshRender::setNormalHeightmap(Image &height,Image *l,Image *r,Image *b,Image *f)
  850. {
  851. Int ofs_pos=vtxOfs(VTX_POS),
  852. ofs_nrm=vtxOfs(VTX_NRM);
  853. if( ofs_pos>=0 && ofs_nrm>=0)if(Byte *vtx=vtxLock())
  854. {
  855. Int dx=height.x(),
  856. dy=height.y();
  857. Vec *vtx_pos=(Vec*)(vtx+ofs_pos),
  858. *vtx_nrm=(Vec*)(vtx+ofs_nrm);
  859. REP(vtxs())
  860. {
  861. Int x=Round(vtx_pos->x*(dx-1)),
  862. y=Round(vtx_pos->z*(dy-1));
  863. if(x==0 )vtx_nrm->x=(l ? l ->pixelF(dx-2, y)-height.pixelF(1,y) : (height.pixelF( 0, y)-height.pixelF( 1, y))*2);else
  864. if(x==dx-1)vtx_nrm->x=(r ? height.pixelF(dx-2, y)-r ->pixelF(1,y) : (height.pixelF(dx-2, y)-height.pixelF(dx-1, y))*2);else
  865. vtx_nrm->x= (height.pixelF( x-1, y)-height.pixelF( x+1, y)) ;
  866. if(y==0 )vtx_nrm->z=(b ? b ->pixelF(x, dy-2)-height.pixelF(x,1) : (height.pixelF(x, 0)-height.pixelF(x, 1))*2);else
  867. if(y==dy-1)vtx_nrm->z=(f ? height.pixelF(x, dy-2)-f ->pixelF(x,1) : (height.pixelF(x, dy-2)-height.pixelF(x, dy-1))*2);else
  868. vtx_nrm->z= (height.pixelF(x, y-1)-height.pixelF(x, y+1)) ;
  869. vtx_nrm->x*=dx;
  870. vtx_nrm->z*=dy;
  871. vtx_nrm->y = 2;
  872. vtx_nrm->normalize();
  873. vtx_pos=(Vec*)(((Byte*)vtx_pos)+vtxSize());
  874. vtx_nrm=(Vec*)(((Byte*)vtx_nrm)+vtxSize());
  875. }
  876. vtxUnlock();
  877. }
  878. }
  879. /******************************************************************************/
  880. // TEXTURIZE
  881. /******************************************************************************/
  882. void MeshRender::texMove(C Vec2 &move, Byte tex_index)
  883. {
  884. if(InRange(tex_index, 3) && move.any())
  885. {
  886. Int pos =vtxOfs((tex_index==0) ? VTX_TEX0 : (tex_index==1) ? VTX_TEX1 : VTX_TEX2);
  887. if( pos>=0)if(Byte *vtx=vtxLock())
  888. {
  889. vtx+=pos; REP(vtxs()){*(Vec2*)vtx+=move; vtx+=vtxSize();}
  890. vtxUnlock();
  891. }
  892. }
  893. }
  894. void MeshRender::texScale(C Vec2 &scale, Byte tex_index)
  895. {
  896. if(InRange(tex_index, 3) && scale!=1)
  897. {
  898. Int pos =vtxOfs((tex_index==0) ? VTX_TEX0 : (tex_index==1) ? VTX_TEX1 : VTX_TEX2);
  899. if( pos>=0)if(Byte *vtx=vtxLock())
  900. {
  901. vtx+=pos; REP(vtxs()){*(Vec2*)vtx*=scale; vtx+=vtxSize();}
  902. vtxUnlock();
  903. }
  904. }
  905. }
  906. void MeshRender::texRotate(Flt angle, Byte tex_index)
  907. {
  908. if(InRange(tex_index, 3) && angle)
  909. {
  910. Int pos =vtxOfs((tex_index==0) ? VTX_TEX0 : (tex_index==1) ? VTX_TEX1 : VTX_TEX2);
  911. if( pos>=0)if(Byte *vtx=vtxLock())
  912. {
  913. Flt cos, sin; CosSin(cos, sin, angle);
  914. vtx+=pos; REP(vtxs()){((Vec2*)vtx)->rotateCosSin(cos, sin); vtx+=vtxSize();}
  915. vtxUnlock();
  916. }
  917. }
  918. }
  919. /******************************************************************************/
  920. // TRANSFORM
  921. /******************************************************************************/
  922. void MeshRender::scaleMove(C Vec &scale, C Vec &move)
  923. {
  924. Int pos=vtxOfs(VTX_POS),
  925. hlp=vtxOfs(VTX_HLP);
  926. if(Byte *vtx=vtxLock())
  927. {
  928. REP(vtxs())
  929. {
  930. if(pos>=0){Vec &v=*(Vec*)(&vtx[pos]); v=v*scale+move;}
  931. if(hlp>=0){Vec &v=*(Vec*)(&vtx[hlp]); v=v*scale+move;}
  932. vtx+=vtxSize();
  933. }
  934. vtxUnlock();
  935. }
  936. }
  937. /*void MeshRender::transform(Matrix3 &matrix)
  938. {
  939. if(mesh)
  940. {
  941. Int pos=vtxOfs(VTX_POS),
  942. nrm=vtxOfs(VTX_NRM),
  943. tan=vtxOfs(VTX_TAN),
  944. bin=vtxOfs(VTX_BIN),
  945. hlp=vtxOfs(VTX_HLP);
  946. if(Byte *vtx=vtxLock())
  947. {
  948. Matrix3 matrix_n=matrix; matrix_n.inverseScale();
  949. REP(vtxs())
  950. {
  951. if(pos>=0)*(Vec*)(&vtx[pos])*=matrix ;
  952. if(hlp>=0)*(Vec*)(&vtx[hlp])*=matrix ;
  953. if(nrm>=0)*(Vec*)(&vtx[nrm])*=matrix_n; normalize..?
  954. if(tan>=0)*(Vec*)(&vtx[tan])*=matrix_n;
  955. if(bin>=0)*(Vec*)(&vtx[bin])*=matrix_n;
  956. vtx+=vtxSize();
  957. }
  958. vtxUnlock();
  959. }
  960. }
  961. }
  962. void MeshRender::transform(Matrix &matrix)
  963. {
  964. if(mesh)
  965. {
  966. Int pos=vtxOfs(VTX_POS),
  967. nrm=vtxOfs(VTX_NRM),
  968. tan=vtxOfs(VTX_TAN),
  969. bin=vtxOfs(VTX_BIN),
  970. hlp=vtxOfs(VTX_HLP);
  971. if(Byte *vtx=vtxLock())
  972. {
  973. Matrix3 matrix_n=matrix; matrix_n.inverseScale();
  974. REP(vtxs())
  975. {
  976. if(pos>=0)*(Vec*)(&vtx[pos])*=matrix ;
  977. if(hlp>=0)*(Vec*)(&vtx[hlp])*=matrix ;
  978. if(nrm>=0)*(Vec*)(&vtx[nrm])*=matrix_n; normalize?
  979. if(tan>=0)*(Vec*)(&vtx[tan])*=matrix_n;
  980. if(bin>=0)*(Vec*)(&vtx[bin])*=matrix_n;
  981. vtx+=vtxSize();
  982. }
  983. vtxUnlock();
  984. }
  985. }
  986. }
  987. /******************************************************************************/
  988. // OPERATIONS
  989. /******************************************************************************/
// Convert stored vertex data in place to match the current platform/display settings.
// Two independent conversions may be required:
// - signedness of packed normal/tangent bytes (VecB4 components offset by 128), and
// - bone matrix indexes remapped between split-local and real index space.
// Updates the _storage flags afterwards so the conversion is not repeated.
void MeshRender::adjustToPlatform()
{
   const Bool bone_split =D.meshBoneSplit();
   Bool change_signed =(storageCompress() && T.storageSigned ()!=D.meshStorageSigned() && (_flag&(VTX_NRM|VTX_TAN))), // signedness only matters for compressed nrm/tan data
        change_bone_split=(_bone_split && T.storageBoneSplit()!=bone_split && (_flag&(VTX_MATRIX ))); // bone remap only if mesh has splits and matrix indexes
   if(change_signed || change_bone_split)
   {
      if(Byte *vtx=vtxLock())
      {
         Int nrm_ofs=vtxOfs(VTX_NRM ),
             tan_ofs=vtxOfs(VTX_TAN ),
            bone_ofs=vtxOfs(VTX_MATRIX);
         if(change_signed && (nrm_ofs>=0 || tan_ofs>=0))
         {
            // toggle signed<->unsigned packing: adding 128 (mod 256) to each byte flips between the two representations either way
            Byte *v=vtx;
            REP(_vb.vtxs())
            {
            #if DEBUG // avoid debug runtime checks
               if(nrm_ofs>=0){VecB4 &v4=((VecB4&)v[nrm_ofs]); REPAO(v4.c)=(v4.c[i]+128)&0xFF;} // manual wrap-around, no overflowing byte ops
               if(tan_ofs>=0){VecB4 &v4=((VecB4&)v[tan_ofs]); REPAO(v4.c)=(v4.c[i]+128)&0xFF;}
            #else
               if(nrm_ofs>=0)((VecB4&)v[nrm_ofs])+=128;
               if(tan_ofs>=0)((VecB4&)v[tan_ofs])+=128;
            #endif
               v+=_vb._vtx_size;
            }
         }
         if(change_bone_split && bone_ofs>=0)
         {
            // walk the vertexes in bone-split order (splits are laid out consecutively — presumably guaranteed by mesh creation, TODO confirm)
            Byte *v=vtx+bone_ofs;
            FREP(_bone_splits)
            {
               BoneSplit &split=_bone_split[i];
               FREP(split.vtxs)
               {
                  VecB4 &matrix=*(VecB4*)v;
                  if(bone_split)
                  {
                     // real -> split-local index for each of the 4 bone slots
                     matrix.x=split.realToSplit0(matrix.x);
                     matrix.y=split.realToSplit0(matrix.y);
                     matrix.z=split.realToSplit0(matrix.z);
                     matrix.w=split.realToSplit0(matrix.w);
                  }else
                  {
                     // split-local -> real index via the split's lookup table
                     matrix.x=split.split_to_real[matrix.x];
                     matrix.y=split.split_to_real[matrix.y];
                     matrix.z=split.split_to_real[matrix.z];
                     matrix.w=split.split_to_real[matrix.w];
                  }
                  v+=_vb._vtx_size;
               }
            }
         }
         vtxUnlock();
         // record the representation now stored, so future calls detect no mismatch
         FlagSet(_storage, MSHR_SIGNED , D.meshStorageSigned());
         FlagSet(_storage, MSHR_BONE_SPLIT, bone_split );
      }
   }
}
  1049. void MeshRender:: setUsedBones(Bool (&bones)[256])C {Zero(bones); includeUsedBones(bones);}
// Mark in 'bones' every bone referenced by this mesh's vertex matrix indexes
// (existing true entries are kept — this only adds, it never clears).
// A bone byte of 0 means "no bone"; nonzero values are 1-based, hence the bone-- below.
void MeshRender::includeUsedBones(Bool (&bones)[256])C
{
   Int matrix_ofs =vtxOfs(VTX_MATRIX);
   if( matrix_ofs>=0)if(C Byte *vtx=vtxLockRead())
   {
      Int blend_ofs=vtxOfs(VTX_BLEND); // blend weights are optional
      C Byte *vtx_matrix=vtx+matrix_ofs, *vtx_blend=((blend_ofs>=0) ? vtx+blend_ofs : null);
      if(_bone_split && storageBoneSplit())FREP(_bone_splits) // indexes are split-local and must be mapped back to real bones
      {
         C MeshRender::BoneSplit &split=_bone_split[i];
         FREP(split.vtxs) // split vertexes are stored consecutively, so the running pointers just keep advancing
         {
            REP(4) // 4 bytes in VecB4 — note: this inner 'i' shadows the FREP loop index
            {
               Byte bone=split.split_to_real[vtx_matrix[i]]; // split-local -> real bone index
               if( bone)
               {
                  bone--; // convert 1-based to 0-based for the 'bones' array
                  if(vtx_blend ? vtx_blend[i] : true)bones[bone]=true; // count only slots with a nonzero blend weight (or all, if no weights stored)
               }
            }
            vtx_matrix+=vtxSize();
            if(vtx_blend)vtx_blend +=vtxSize();
         }
      }else
      REP(vtxs()) // no splits: matrix bytes are already real bone indexes
      {
         REP(4) // 4 bytes in VecB4
         {
            Byte bone=vtx_matrix[i];
            if( bone)
            {
               bone--; // convert 1-based to 0-based for the 'bones' array
               if(vtx_blend ? vtx_blend[i] : true)bones[bone]=true; // count only slots with a nonzero blend weight (or all, if no weights stored)
            }
         }
         vtx_matrix+=vtxSize();
         if(vtx_blend)vtx_blend +=vtxSize();
      }
      vtxUnlock();
   }
}
  1092. /******************************************************************************/
// Activate this mesh's buffers for rendering: bind the VAO on capable GL contexts,
// otherwise set vertex buffer, index buffer and vertex format individually.
C MeshRender& MeshRender::set()C
{
#if GL
   if(D.notShaderModelGLES2()) // VAO path — GLES2-level contexts have no vertex array objects
   {
   #if VAO_EXCLUSIVE
      if(_vao_reset) // VAO was invalidated and must be rebuilt before use
      {
         if(!ConstCast(T).setVF())Exit("Can't create VAO");
      }else // 'setVF' will already 'glBindVertexArray' the VAO
   #endif
      glBindVertexArray(_vao);
   }else
#endif
   {_vb.set(); _ib.set(); D.vf(_vf);} // OpenGL requires setting VF after VBO
   return T;
}
  1110. /******************************************************************************/
  1111. MeshRender& MeshRender::freeOpenGLESData()
  1112. {
  1113. _vb.freeOpenGLESData();
  1114. _ib.freeOpenGLESData();
  1115. return T;
  1116. }
  1117. MeshRender& MeshRender::operator =(C MeshRender &src) {create(src); return T;}
  1118. MeshRender& MeshRender::operator+=(C MeshRender &src)
  1119. {
  1120. if(src.is())
  1121. {
  1122. C MeshRender *meshes[]={this, &src};
  1123. create(meshes, Elms(meshes));
  1124. }
  1125. return T;
  1126. }
  1127. /******************************************************************************/
  1128. }
  1129. /******************************************************************************/