mesh_storage.cpp 89 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441
  1. /**************************************************************************/
  2. /* mesh_storage.cpp */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. #ifdef GLES3_ENABLED
  31. #include "mesh_storage.h"
  32. #include "config.h"
  33. #include "texture_storage.h"
  34. #include "utilities.h"
  35. using namespace GLES3;
  36. MeshStorage *MeshStorage::singleton = nullptr;
  37. MeshStorage *MeshStorage::get_singleton() {
  38. return singleton;
  39. }
  40. MeshStorage::MeshStorage() {
  41. singleton = this;
  42. {
  43. skeleton_shader.shader.initialize();
  44. skeleton_shader.shader_version = skeleton_shader.shader.version_create();
  45. }
  46. }
  47. MeshStorage::~MeshStorage() {
  48. singleton = nullptr;
  49. skeleton_shader.shader.version_free(skeleton_shader.shader_version);
  50. }
  51. /* MESH API */
  52. RID MeshStorage::mesh_allocate() {
  53. return mesh_owner.allocate_rid();
  54. }
  55. void MeshStorage::mesh_initialize(RID p_rid) {
  56. mesh_owner.initialize_rid(p_rid, Mesh());
  57. }
  58. void MeshStorage::mesh_free(RID p_rid) {
  59. mesh_clear(p_rid);
  60. mesh_set_shadow_mesh(p_rid, RID());
  61. Mesh *mesh = mesh_owner.get_or_null(p_rid);
  62. ERR_FAIL_NULL(mesh);
  63. mesh->dependency.deleted_notify(p_rid);
  64. if (mesh->instances.size()) {
  65. ERR_PRINT("deleting mesh with active instances");
  66. }
  67. if (mesh->shadow_owners.size()) {
  68. for (Mesh *E : mesh->shadow_owners) {
  69. Mesh *shadow_owner = E;
  70. shadow_owner->shadow_mesh = RID();
  71. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  72. }
  73. }
  74. mesh_owner.free(p_rid);
  75. }
  76. void MeshStorage::mesh_set_blend_shape_count(RID p_mesh, int p_blend_shape_count) {
  77. ERR_FAIL_COND(p_blend_shape_count < 0);
  78. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  79. ERR_FAIL_NULL(mesh);
  80. ERR_FAIL_COND(mesh->surface_count > 0); //surfaces already exist
  81. mesh->blend_shape_count = p_blend_shape_count;
  82. }
  83. bool MeshStorage::mesh_needs_instance(RID p_mesh, bool p_has_skeleton) {
  84. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  85. ERR_FAIL_NULL_V(mesh, false);
  86. return mesh->blend_shape_count > 0 || (mesh->has_bone_weights && p_has_skeleton);
  87. }
  88. void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) {
  89. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  90. ERR_FAIL_NULL(mesh);
  91. ERR_FAIL_COND(mesh->surface_count == RS::MAX_MESH_SURFACES);
  92. #ifdef DEBUG_ENABLED
  93. //do a validation, to catch errors first
  94. {
  95. uint32_t stride = 0;
  96. uint32_t attrib_stride = 0;
  97. uint32_t skin_stride = 0;
  98. for (int i = 0; i < RS::ARRAY_WEIGHTS; i++) {
  99. if ((p_surface.format & (1ULL << i))) {
  100. switch (i) {
  101. case RS::ARRAY_VERTEX: {
  102. if ((p_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) || (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  103. stride += sizeof(float) * 2;
  104. } else {
  105. stride += sizeof(float) * 3;
  106. }
  107. } break;
  108. case RS::ARRAY_NORMAL: {
  109. stride += sizeof(uint16_t) * 2;
  110. } break;
  111. case RS::ARRAY_TANGENT: {
  112. if (!(p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  113. stride += sizeof(uint16_t) * 2;
  114. }
  115. } break;
  116. case RS::ARRAY_COLOR: {
  117. attrib_stride += sizeof(uint32_t);
  118. } break;
  119. case RS::ARRAY_TEX_UV: {
  120. if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  121. attrib_stride += sizeof(uint16_t) * 2;
  122. } else {
  123. attrib_stride += sizeof(float) * 2;
  124. }
  125. } break;
  126. case RS::ARRAY_TEX_UV2: {
  127. if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  128. attrib_stride += sizeof(uint16_t) * 2;
  129. } else {
  130. attrib_stride += sizeof(float) * 2;
  131. }
  132. } break;
  133. case RS::ARRAY_CUSTOM0:
  134. case RS::ARRAY_CUSTOM1:
  135. case RS::ARRAY_CUSTOM2:
  136. case RS::ARRAY_CUSTOM3: {
  137. int idx = i - RS::ARRAY_CUSTOM0;
  138. uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
  139. uint32_t fmt = (p_surface.format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
  140. uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
  141. attrib_stride += fmtsize[fmt];
  142. } break;
  143. case RS::ARRAY_WEIGHTS:
  144. case RS::ARRAY_BONES: {
  145. //uses a separate array
  146. bool use_8 = p_surface.format & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
  147. skin_stride += sizeof(int16_t) * (use_8 ? 16 : 8);
  148. } break;
  149. }
  150. }
  151. }
  152. int expected_size = stride * p_surface.vertex_count;
  153. ERR_FAIL_COND_MSG(expected_size != p_surface.vertex_data.size(), "Size of vertex data provided (" + itos(p_surface.vertex_data.size()) + ") does not match expected (" + itos(expected_size) + ")");
  154. int bs_expected_size = expected_size * mesh->blend_shape_count;
  155. ERR_FAIL_COND_MSG(bs_expected_size != p_surface.blend_shape_data.size(), "Size of blend shape data provided (" + itos(p_surface.blend_shape_data.size()) + ") does not match expected (" + itos(bs_expected_size) + ")");
  156. int expected_attrib_size = attrib_stride * p_surface.vertex_count;
  157. ERR_FAIL_COND_MSG(expected_attrib_size != p_surface.attribute_data.size(), "Size of attribute data provided (" + itos(p_surface.attribute_data.size()) + ") does not match expected (" + itos(expected_attrib_size) + ")");
  158. if ((p_surface.format & RS::ARRAY_FORMAT_WEIGHTS) && (p_surface.format & RS::ARRAY_FORMAT_BONES)) {
  159. expected_size = skin_stride * p_surface.vertex_count;
  160. ERR_FAIL_COND_MSG(expected_size != p_surface.skin_data.size(), "Size of skin data provided (" + itos(p_surface.skin_data.size()) + ") does not match expected (" + itos(expected_size) + ")");
  161. }
  162. }
  163. #endif
  164. uint64_t surface_version = p_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
  165. RS::SurfaceData new_surface = p_surface;
  166. #ifdef DISABLE_DEPRECATED
  167. ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION, "Surface version provided (" + itos(int(surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT)) + ") does not match current version (" + itos(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) + ")");
  168. #else
  169. if (surface_version != uint64_t(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION)) {
  170. RS::get_singleton()->fix_surface_compatibility(new_surface);
  171. surface_version = new_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
  172. ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION,
  173. vformat("Surface version provided (%d) does not match current version (%d).",
  174. (surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK,
  175. (RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK));
  176. }
  177. #endif
  178. Mesh::Surface *s = memnew(Mesh::Surface);
  179. s->format = new_surface.format;
  180. s->primitive = new_surface.primitive;
  181. if (new_surface.vertex_data.size()) {
  182. glGenBuffers(1, &s->vertex_buffer);
  183. glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);
  184. // If we have an uncompressed surface that contains normals, but not tangents, we need to differentiate the array
  185. // from a compressed array in the shader. To do so, we allow the normal to read 4 components out of the buffer
  186. // But only give it 2 components per normal. So essentially, each vertex reads the next normal in normal.zw.
  187. // This allows us to avoid adding a shader permutation, and avoid passing dummy tangents. Since the stride is kept small
  188. // this should still be a net win for bandwidth.
  189. // If we do this, then the last normal will read past the end of the array. So we need to pad the array with dummy data.
  190. if (!(new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (new_surface.format & RS::ARRAY_FORMAT_NORMAL) && !(new_surface.format & RS::ARRAY_FORMAT_TANGENT)) {
  191. // Unfortunately, we need to copy the buffer, which is fine as doing a resize triggers a CoW anyway.
  192. Vector<uint8_t> new_vertex_data;
  193. new_vertex_data.resize_initialized(new_surface.vertex_data.size() + sizeof(uint16_t) * 2);
  194. memcpy(new_vertex_data.ptrw(), new_surface.vertex_data.ptr(), new_surface.vertex_data.size());
  195. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_vertex_data.size(), new_vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
  196. s->vertex_buffer_size = new_vertex_data.size();
  197. } else {
  198. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_surface.vertex_data.size(), new_surface.vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
  199. s->vertex_buffer_size = new_surface.vertex_data.size();
  200. }
  201. }
  202. if (new_surface.attribute_data.size()) {
  203. glGenBuffers(1, &s->attribute_buffer);
  204. glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
  205. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->attribute_buffer, new_surface.attribute_data.size(), new_surface.attribute_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh attribute buffer");
  206. s->attribute_buffer_size = new_surface.attribute_data.size();
  207. }
  208. if (new_surface.skin_data.size()) {
  209. glGenBuffers(1, &s->skin_buffer);
  210. glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
  211. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->skin_buffer, new_surface.skin_data.size(), new_surface.skin_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh skin buffer");
  212. s->skin_buffer_size = new_surface.skin_data.size();
  213. }
  214. glBindBuffer(GL_ARRAY_BUFFER, 0);
  215. s->vertex_count = new_surface.vertex_count;
  216. if (new_surface.format & RS::ARRAY_FORMAT_BONES) {
  217. mesh->has_bone_weights = true;
  218. }
  219. if (new_surface.index_count) {
  220. bool is_index_16 = new_surface.vertex_count <= 65536 && new_surface.vertex_count > 0;
  221. glGenBuffers(1, &s->index_buffer);
  222. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer);
  223. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer, new_surface.index_data.size(), new_surface.index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer");
  224. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
  225. s->index_count = new_surface.index_count;
  226. s->index_buffer_size = new_surface.index_data.size();
  227. if (new_surface.lods.size()) {
  228. s->lods = memnew_arr(Mesh::Surface::LOD, new_surface.lods.size());
  229. s->lod_count = new_surface.lods.size();
  230. for (int i = 0; i < new_surface.lods.size(); i++) {
  231. glGenBuffers(1, &s->lods[i].index_buffer);
  232. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer);
  233. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer, new_surface.lods[i].index_data.size(), new_surface.lods[i].index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer LOD[" + itos(i) + "]");
  234. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
  235. s->lods[i].edge_length = new_surface.lods[i].edge_length;
  236. s->lods[i].index_count = new_surface.lods[i].index_data.size() / (is_index_16 ? 2 : 4);
  237. s->lods[i].index_buffer_size = new_surface.lods[i].index_data.size();
  238. }
  239. }
  240. }
  241. ERR_FAIL_COND_MSG(!new_surface.index_count && !new_surface.vertex_count, "Meshes must contain a vertex array, an index array, or both");
  242. if (GLES3::Config::get_singleton()->generate_wireframes && s->primitive == RS::PRIMITIVE_TRIANGLES) {
  243. // Generate wireframes. This is mostly used by the editor.
  244. s->wireframe = memnew(Mesh::Surface::Wireframe);
  245. Vector<uint32_t> wf_indices;
  246. uint32_t &wf_index_count = s->wireframe->index_count;
  247. uint32_t *wr = nullptr;
  248. if (new_surface.format & RS::ARRAY_FORMAT_INDEX) {
  249. wf_index_count = s->index_count * 2;
  250. wf_indices.resize(wf_index_count);
  251. Vector<uint8_t> ir = new_surface.index_data;
  252. wr = wf_indices.ptrw();
  253. if (new_surface.vertex_count <= 65536) {
  254. // Read 16 bit indices.
  255. const uint16_t *src_idx = (const uint16_t *)ir.ptr();
  256. for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
  257. // We use GL_LINES instead of GL_TRIANGLES for drawing these primitives later,
  258. // so we need double the indices for each triangle.
  259. wr[i + 0] = src_idx[i / 2];
  260. wr[i + 1] = src_idx[i / 2 + 1];
  261. wr[i + 2] = src_idx[i / 2 + 1];
  262. wr[i + 3] = src_idx[i / 2 + 2];
  263. wr[i + 4] = src_idx[i / 2 + 2];
  264. wr[i + 5] = src_idx[i / 2];
  265. }
  266. } else {
  267. // Read 32 bit indices.
  268. const uint32_t *src_idx = (const uint32_t *)ir.ptr();
  269. for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
  270. wr[i + 0] = src_idx[i / 2];
  271. wr[i + 1] = src_idx[i / 2 + 1];
  272. wr[i + 2] = src_idx[i / 2 + 1];
  273. wr[i + 3] = src_idx[i / 2 + 2];
  274. wr[i + 4] = src_idx[i / 2 + 2];
  275. wr[i + 5] = src_idx[i / 2];
  276. }
  277. }
  278. } else {
  279. // Not using indices.
  280. wf_index_count = s->vertex_count * 2;
  281. wf_indices.resize(wf_index_count);
  282. wr = wf_indices.ptrw();
  283. for (uint32_t i = 0; i + 5 < wf_index_count; i += 6) {
  284. wr[i + 0] = i / 2;
  285. wr[i + 1] = i / 2 + 1;
  286. wr[i + 2] = i / 2 + 1;
  287. wr[i + 3] = i / 2 + 2;
  288. wr[i + 4] = i / 2 + 2;
  289. wr[i + 5] = i / 2;
  290. }
  291. }
  292. s->wireframe->index_buffer_size = wf_index_count * sizeof(uint32_t);
  293. glGenBuffers(1, &s->wireframe->index_buffer);
  294. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->wireframe->index_buffer);
  295. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->wireframe->index_buffer, s->wireframe->index_buffer_size, wr, GL_STATIC_DRAW, "Mesh wireframe index buffer");
  296. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); // unbind
  297. }
  298. s->aabb = new_surface.aabb;
  299. s->bone_aabbs = new_surface.bone_aabbs; //only really useful for returning them.
  300. s->mesh_to_skeleton_xform = p_surface.mesh_to_skeleton_xform;
  301. s->uv_scale = new_surface.uv_scale;
  302. if (new_surface.skin_data.size() || mesh->blend_shape_count > 0) {
  303. // Size must match the size of the vertex array.
  304. int size = new_surface.vertex_data.size();
  305. int vertex_size = 0;
  306. int position_stride = 0;
  307. int normal_tangent_stride = 0;
  308. int normal_offset = 0;
  309. int tangent_offset = 0;
  310. if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
  311. if (new_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  312. vertex_size = 2;
  313. position_stride = sizeof(float) * vertex_size;
  314. } else {
  315. if (new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  316. vertex_size = 4;
  317. position_stride = sizeof(uint16_t) * vertex_size;
  318. } else {
  319. vertex_size = 3;
  320. position_stride = sizeof(float) * vertex_size;
  321. }
  322. }
  323. }
  324. if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
  325. normal_offset = position_stride * s->vertex_count;
  326. normal_tangent_stride += sizeof(uint16_t) * 2;
  327. }
  328. if ((new_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
  329. tangent_offset = normal_offset + normal_tangent_stride;
  330. normal_tangent_stride += sizeof(uint16_t) * 2;
  331. }
  332. if (mesh->blend_shape_count > 0) {
  333. // Blend shapes are passed as one large array, for OpenGL, we need to split each of them into their own buffer
  334. s->blend_shapes = memnew_arr(Mesh::Surface::BlendShape, mesh->blend_shape_count);
  335. for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
  336. glGenVertexArrays(1, &s->blend_shapes[i].vertex_array);
  337. glBindVertexArray(s->blend_shapes[i].vertex_array);
  338. glGenBuffers(1, &s->blend_shapes[i].vertex_buffer);
  339. glBindBuffer(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer);
  340. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer, size, new_surface.blend_shape_data.ptr() + i * size, (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh blend shape buffer");
  341. if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
  342. glEnableVertexAttribArray(RS::ARRAY_VERTEX + 3);
  343. glVertexAttribPointer(RS::ARRAY_VERTEX + 3, vertex_size, GL_FLOAT, GL_FALSE, position_stride, CAST_INT_TO_UCHAR_PTR(0));
  344. }
  345. if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
  346. // Normal and tangent are packed into the same attribute.
  347. glEnableVertexAttribArray(RS::ARRAY_NORMAL + 3);
  348. glVertexAttribPointer(RS::ARRAY_NORMAL + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(normal_offset));
  349. }
  350. if ((p_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
  351. glEnableVertexAttribArray(RS::ARRAY_TANGENT + 3);
  352. glVertexAttribPointer(RS::ARRAY_TANGENT + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(tangent_offset));
  353. }
  354. }
  355. glBindVertexArray(0);
  356. glBindBuffer(GL_ARRAY_BUFFER, 0);
  357. }
  358. glBindVertexArray(0);
  359. glBindBuffer(GL_ARRAY_BUFFER, 0);
  360. }
  361. if (mesh->surface_count == 0) {
  362. mesh->aabb = new_surface.aabb;
  363. } else {
  364. mesh->aabb.merge_with(new_surface.aabb);
  365. }
  366. mesh->skeleton_aabb_version = 0;
  367. s->material = new_surface.material;
  368. mesh->surfaces = (Mesh::Surface **)memrealloc(mesh->surfaces, sizeof(Mesh::Surface *) * (mesh->surface_count + 1));
  369. mesh->surfaces[mesh->surface_count] = s;
  370. mesh->surface_count++;
  371. for (MeshInstance *mi : mesh->instances) {
  372. _mesh_instance_add_surface(mi, mesh, mesh->surface_count - 1);
  373. }
  374. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  375. for (Mesh *E : mesh->shadow_owners) {
  376. Mesh *shadow_owner = E;
  377. shadow_owner->shadow_mesh = RID();
  378. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  379. }
  380. mesh->material_cache.clear();
  381. }
  382. void MeshStorage::_mesh_surface_clear(Mesh *mesh, int p_surface) {
  383. Mesh::Surface &s = *mesh->surfaces[p_surface];
  384. if (s.vertex_buffer != 0) {
  385. GLES3::Utilities::get_singleton()->buffer_free_data(s.vertex_buffer);
  386. s.vertex_buffer = 0;
  387. }
  388. if (s.version_count != 0) {
  389. for (uint32_t j = 0; j < s.version_count; j++) {
  390. glDeleteVertexArrays(1, &s.versions[j].vertex_array);
  391. s.versions[j].vertex_array = 0;
  392. }
  393. }
  394. if (s.attribute_buffer != 0) {
  395. GLES3::Utilities::get_singleton()->buffer_free_data(s.attribute_buffer);
  396. s.attribute_buffer = 0;
  397. }
  398. if (s.skin_buffer != 0) {
  399. GLES3::Utilities::get_singleton()->buffer_free_data(s.skin_buffer);
  400. s.skin_buffer = 0;
  401. }
  402. if (s.index_buffer != 0) {
  403. GLES3::Utilities::get_singleton()->buffer_free_data(s.index_buffer);
  404. s.index_buffer = 0;
  405. }
  406. if (s.versions) {
  407. memfree(s.versions); // reallocs, so free with memfree.
  408. }
  409. if (s.wireframe) {
  410. GLES3::Utilities::get_singleton()->buffer_free_data(s.wireframe->index_buffer);
  411. memdelete(s.wireframe);
  412. }
  413. if (s.lod_count) {
  414. for (uint32_t j = 0; j < s.lod_count; j++) {
  415. if (s.lods[j].index_buffer != 0) {
  416. GLES3::Utilities::get_singleton()->buffer_free_data(s.lods[j].index_buffer);
  417. s.lods[j].index_buffer = 0;
  418. }
  419. }
  420. memdelete_arr(s.lods);
  421. }
  422. if (mesh->blend_shape_count) {
  423. for (uint32_t j = 0; j < mesh->blend_shape_count; j++) {
  424. if (s.blend_shapes[j].vertex_buffer != 0) {
  425. GLES3::Utilities::get_singleton()->buffer_free_data(s.blend_shapes[j].vertex_buffer);
  426. s.blend_shapes[j].vertex_buffer = 0;
  427. }
  428. if (s.blend_shapes[j].vertex_array != 0) {
  429. glDeleteVertexArrays(1, &s.blend_shapes[j].vertex_array);
  430. s.blend_shapes[j].vertex_array = 0;
  431. }
  432. }
  433. memdelete_arr(s.blend_shapes);
  434. }
  435. memdelete(mesh->surfaces[p_surface]);
  436. }
  437. int MeshStorage::mesh_get_blend_shape_count(RID p_mesh) const {
  438. const Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  439. ERR_FAIL_NULL_V(mesh, -1);
  440. return mesh->blend_shape_count;
  441. }
  442. void MeshStorage::mesh_set_blend_shape_mode(RID p_mesh, RS::BlendShapeMode p_mode) {
  443. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  444. ERR_FAIL_NULL(mesh);
  445. ERR_FAIL_INDEX((int)p_mode, 2);
  446. mesh->blend_shape_mode = p_mode;
  447. }
  448. RS::BlendShapeMode MeshStorage::mesh_get_blend_shape_mode(RID p_mesh) const {
  449. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  450. ERR_FAIL_NULL_V(mesh, RS::BLEND_SHAPE_MODE_NORMALIZED);
  451. return mesh->blend_shape_mode;
  452. }
  453. void MeshStorage::mesh_surface_update_vertex_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  454. ERR_FAIL_COND(p_data.is_empty());
  455. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  456. ERR_FAIL_NULL(mesh);
  457. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  458. uint64_t data_size = p_data.size();
  459. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->vertex_buffer_size);
  460. const uint8_t *r = p_data.ptr();
  461. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->vertex_buffer);
  462. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  463. glBindBuffer(GL_ARRAY_BUFFER, 0);
  464. }
  465. void MeshStorage::mesh_surface_update_attribute_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  466. ERR_FAIL_COND(p_data.is_empty());
  467. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  468. ERR_FAIL_NULL(mesh);
  469. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  470. uint64_t data_size = p_data.size();
  471. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->attribute_buffer_size);
  472. const uint8_t *r = p_data.ptr();
  473. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->attribute_buffer);
  474. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  475. glBindBuffer(GL_ARRAY_BUFFER, 0);
  476. }
  477. void MeshStorage::mesh_surface_update_skin_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  478. ERR_FAIL_COND(p_data.is_empty());
  479. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  480. ERR_FAIL_NULL(mesh);
  481. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  482. uint64_t data_size = p_data.size();
  483. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->skin_buffer_size);
  484. const uint8_t *r = p_data.ptr();
  485. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->skin_buffer);
  486. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  487. glBindBuffer(GL_ARRAY_BUFFER, 0);
  488. }
  489. void MeshStorage::mesh_surface_update_index_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  490. ERR_FAIL_COND(p_data.is_empty());
  491. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  492. ERR_FAIL_NULL(mesh);
  493. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  494. uint64_t data_size = p_data.size();
  495. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->index_buffer_size);
  496. const uint8_t *r = p_data.ptr();
  497. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->index_buffer);
  498. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  499. glBindBuffer(GL_ARRAY_BUFFER, 0);
  500. }
  501. void MeshStorage::mesh_surface_set_material(RID p_mesh, int p_surface, RID p_material) {
  502. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  503. ERR_FAIL_NULL(mesh);
  504. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  505. mesh->surfaces[p_surface]->material = p_material;
  506. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MATERIAL);
  507. mesh->material_cache.clear();
  508. }
  509. RID MeshStorage::mesh_surface_get_material(RID p_mesh, int p_surface) const {
  510. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  511. ERR_FAIL_NULL_V(mesh, RID());
  512. ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RID());
  513. return mesh->surfaces[p_surface]->material;
  514. }
  515. RS::SurfaceData MeshStorage::mesh_get_surface(RID p_mesh, int p_surface) const {
  516. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  517. ERR_FAIL_NULL_V(mesh, RS::SurfaceData());
  518. ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RS::SurfaceData());
  519. Mesh::Surface &s = *mesh->surfaces[p_surface];
  520. RS::SurfaceData sd;
  521. sd.format = s.format;
  522. if (s.vertex_buffer != 0) {
  523. sd.vertex_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.vertex_buffer, s.vertex_buffer_size);
  524. // When using an uncompressed buffer with normals, but without tangents, we have to trim the padding.
  525. if (!(s.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (s.format & RS::ARRAY_FORMAT_NORMAL) && !(s.format & RS::ARRAY_FORMAT_TANGENT)) {
  526. sd.vertex_data.resize(sd.vertex_data.size() - sizeof(uint16_t) * 2);
  527. }
  528. }
  529. if (s.attribute_buffer != 0) {
  530. sd.attribute_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.attribute_buffer, s.attribute_buffer_size);
  531. }
  532. if (s.skin_buffer != 0) {
  533. sd.skin_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.skin_buffer, s.skin_buffer_size);
  534. }
  535. sd.vertex_count = s.vertex_count;
  536. sd.index_count = s.index_count;
  537. sd.primitive = s.primitive;
  538. if (sd.index_count) {
  539. sd.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.index_buffer, s.index_buffer_size);
  540. }
  541. sd.aabb = s.aabb;
  542. for (uint32_t i = 0; i < s.lod_count; i++) {
  543. RS::SurfaceData::LOD lod;
  544. lod.edge_length = s.lods[i].edge_length;
  545. lod.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.lods[i].index_buffer, s.lods[i].index_buffer_size);
  546. sd.lods.push_back(lod);
  547. }
  548. sd.bone_aabbs = s.bone_aabbs;
  549. sd.mesh_to_skeleton_xform = s.mesh_to_skeleton_xform;
  550. if (mesh->blend_shape_count) {
  551. sd.blend_shape_data = Vector<uint8_t>();
  552. for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
  553. sd.blend_shape_data.append_array(Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.blend_shapes[i].vertex_buffer, s.vertex_buffer_size));
  554. }
  555. }
  556. sd.uv_scale = s.uv_scale;
  557. return sd;
  558. }
  559. int MeshStorage::mesh_get_surface_count(RID p_mesh) const {
  560. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  561. ERR_FAIL_NULL_V(mesh, 0);
  562. return mesh->surface_count;
  563. }
  564. void MeshStorage::mesh_set_custom_aabb(RID p_mesh, const AABB &p_aabb) {
  565. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  566. ERR_FAIL_NULL(mesh);
  567. mesh->custom_aabb = p_aabb;
  568. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
  569. }
  570. AABB MeshStorage::mesh_get_custom_aabb(RID p_mesh) const {
  571. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  572. ERR_FAIL_NULL_V(mesh, AABB());
  573. return mesh->custom_aabb;
  574. }
  575. AABB MeshStorage::mesh_get_aabb(RID p_mesh, RID p_skeleton) {
  576. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  577. ERR_FAIL_NULL_V(mesh, AABB());
  578. if (mesh->custom_aabb != AABB()) {
  579. return mesh->custom_aabb;
  580. }
  581. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  582. if (!skeleton || skeleton->size == 0 || mesh->skeleton_aabb_version == skeleton->version) {
  583. return mesh->aabb;
  584. }
  585. // Calculate AABB based on Skeleton
  586. AABB aabb;
  587. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  588. AABB laabb;
  589. const Mesh::Surface &surface = *mesh->surfaces[i];
  590. if ((surface.format & RS::ARRAY_FORMAT_BONES) && surface.bone_aabbs.size()) {
  591. int bs = surface.bone_aabbs.size();
  592. const AABB *skbones = surface.bone_aabbs.ptr();
  593. int sbs = skeleton->size;
  594. ERR_CONTINUE(bs > sbs);
  595. const float *baseptr = skeleton->data.ptr();
  596. bool found_bone_aabb = false;
  597. if (skeleton->use_2d) {
  598. for (int j = 0; j < bs; j++) {
  599. if (skbones[j].size == Vector3(-1, -1, -1)) {
  600. continue; //bone is unused
  601. }
  602. const float *dataptr = baseptr + j * 8;
  603. Transform3D mtx;
  604. mtx.basis.rows[0][0] = dataptr[0];
  605. mtx.basis.rows[0][1] = dataptr[1];
  606. mtx.origin.x = dataptr[3];
  607. mtx.basis.rows[1][0] = dataptr[4];
  608. mtx.basis.rows[1][1] = dataptr[5];
  609. mtx.origin.y = dataptr[7];
  610. // Transform bounds to skeleton's space before applying animation data.
  611. AABB baabb = surface.mesh_to_skeleton_xform.xform(skbones[j]);
  612. baabb = mtx.xform(baabb);
  613. if (!found_bone_aabb) {
  614. laabb = baabb;
  615. found_bone_aabb = true;
  616. } else {
  617. laabb.merge_with(baabb);
  618. }
  619. }
  620. } else {
  621. for (int j = 0; j < bs; j++) {
  622. if (skbones[j].size == Vector3(-1, -1, -1)) {
  623. continue; //bone is unused
  624. }
  625. const float *dataptr = baseptr + j * 12;
  626. Transform3D mtx;
  627. mtx.basis.rows[0][0] = dataptr[0];
  628. mtx.basis.rows[0][1] = dataptr[1];
  629. mtx.basis.rows[0][2] = dataptr[2];
  630. mtx.origin.x = dataptr[3];
  631. mtx.basis.rows[1][0] = dataptr[4];
  632. mtx.basis.rows[1][1] = dataptr[5];
  633. mtx.basis.rows[1][2] = dataptr[6];
  634. mtx.origin.y = dataptr[7];
  635. mtx.basis.rows[2][0] = dataptr[8];
  636. mtx.basis.rows[2][1] = dataptr[9];
  637. mtx.basis.rows[2][2] = dataptr[10];
  638. mtx.origin.z = dataptr[11];
  639. // Transform bounds to skeleton's space before applying animation data.
  640. AABB baabb = surface.mesh_to_skeleton_xform.xform(skbones[j]);
  641. baabb = mtx.xform(baabb);
  642. if (!found_bone_aabb) {
  643. laabb = baabb;
  644. found_bone_aabb = true;
  645. } else {
  646. laabb.merge_with(baabb);
  647. }
  648. }
  649. }
  650. if (found_bone_aabb) {
  651. // Transform skeleton bounds back to mesh's space if any animated AABB applied.
  652. laabb = surface.mesh_to_skeleton_xform.affine_inverse().xform(laabb);
  653. }
  654. if (laabb.size == Vector3()) {
  655. laabb = surface.aabb;
  656. }
  657. } else {
  658. laabb = surface.aabb;
  659. }
  660. if (i == 0) {
  661. aabb = laabb;
  662. } else {
  663. aabb.merge_with(laabb);
  664. }
  665. }
  666. mesh->aabb = aabb;
  667. mesh->skeleton_aabb_version = skeleton->version;
  668. return aabb;
  669. }
  670. void MeshStorage::mesh_set_path(RID p_mesh, const String &p_path) {
  671. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  672. ERR_FAIL_NULL(mesh);
  673. mesh->path = p_path;
  674. }
  675. String MeshStorage::mesh_get_path(RID p_mesh) const {
  676. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  677. ERR_FAIL_NULL_V(mesh, String());
  678. return mesh->path;
  679. }
  680. void MeshStorage::mesh_set_shadow_mesh(RID p_mesh, RID p_shadow_mesh) {
  681. ERR_FAIL_COND_MSG(p_mesh == p_shadow_mesh, "Cannot set a mesh as its own shadow mesh.");
  682. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  683. ERR_FAIL_NULL(mesh);
  684. Mesh *shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
  685. if (shadow_mesh) {
  686. shadow_mesh->shadow_owners.erase(mesh);
  687. }
  688. mesh->shadow_mesh = p_shadow_mesh;
  689. shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
  690. if (shadow_mesh) {
  691. shadow_mesh->shadow_owners.insert(mesh);
  692. }
  693. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  694. }
  695. void MeshStorage::mesh_clear(RID p_mesh) {
  696. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  697. ERR_FAIL_NULL(mesh);
  698. // Clear instance data before mesh data.
  699. for (MeshInstance *mi : mesh->instances) {
  700. _mesh_instance_clear(mi);
  701. }
  702. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  703. _mesh_surface_clear(mesh, i);
  704. }
  705. if (mesh->surfaces) {
  706. memfree(mesh->surfaces);
  707. }
  708. mesh->surfaces = nullptr;
  709. mesh->surface_count = 0;
  710. mesh->material_cache.clear();
  711. mesh->has_bone_weights = false;
  712. mesh->aabb = AABB();
  713. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  714. for (Mesh *E : mesh->shadow_owners) {
  715. Mesh *shadow_owner = E;
  716. shadow_owner->shadow_mesh = RID();
  717. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  718. }
  719. }
  720. void MeshStorage::_mesh_surface_generate_version_for_input_mask(Mesh::Surface::Version &v, Mesh::Surface *s, uint64_t p_input_mask, MeshInstance::Surface *mis) {
  721. Mesh::Surface::Attrib attribs[RS::ARRAY_MAX];
  722. int position_stride = 0; // Vertex position only.
  723. int normal_tangent_stride = 0;
  724. int attributes_stride = 0;
  725. int skin_stride = 0;
  726. for (int i = 0; i < RS::ARRAY_INDEX; i++) {
  727. attribs[i].enabled = false;
  728. attribs[i].integer = false;
  729. if (!(s->format & (1ULL << i))) {
  730. continue;
  731. }
  732. if ((p_input_mask & (1ULL << i))) {
  733. // Only enable if it matches input mask.
  734. // Iterate over all anyway, so we can calculate stride.
  735. attribs[i].enabled = true;
  736. }
  737. switch (i) {
  738. case RS::ARRAY_VERTEX: {
  739. attribs[i].offset = 0;
  740. attribs[i].type = GL_FLOAT;
  741. attribs[i].normalized = GL_FALSE;
  742. if (s->format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  743. attribs[i].size = 2;
  744. position_stride = attribs[i].size * sizeof(float);
  745. } else {
  746. if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  747. attribs[i].size = 4;
  748. position_stride = attribs[i].size * sizeof(uint16_t);
  749. attribs[i].type = GL_UNSIGNED_SHORT;
  750. attribs[i].normalized = GL_TRUE;
  751. } else {
  752. attribs[i].size = 3;
  753. position_stride = attribs[i].size * sizeof(float);
  754. }
  755. }
  756. } break;
  757. case RS::ARRAY_NORMAL: {
  758. if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  759. attribs[i].size = 2;
  760. normal_tangent_stride += 2 * attribs[i].size;
  761. } else {
  762. attribs[i].size = 4;
  763. // A small trick here: if we are uncompressed and we have normals, but no tangents. We need
  764. // the shader to think there are 4 components to "axis_tangent_attrib". So we give a size of 4,
  765. // but a stride based on only having 2 elements.
  766. if (!(s->format & RS::ARRAY_FORMAT_TANGENT)) {
  767. normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 2;
  768. } else {
  769. normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 4;
  770. }
  771. }
  772. if (mis) {
  773. // Transform feedback has interleave all or no attributes. It can't mix interleaving.
  774. attribs[i].offset = position_stride;
  775. normal_tangent_stride += position_stride;
  776. position_stride = normal_tangent_stride;
  777. } else {
  778. attribs[i].offset = position_stride * s->vertex_count;
  779. }
  780. attribs[i].type = (mis ? GL_FLOAT : GL_UNSIGNED_SHORT);
  781. attribs[i].normalized = GL_TRUE;
  782. } break;
  783. case RS::ARRAY_TANGENT: {
  784. // We never use the tangent attribute. It is always packed in ARRAY_NORMAL, or ARRAY_VERTEX.
  785. attribs[i].enabled = false;
  786. attribs[i].integer = false;
  787. } break;
  788. case RS::ARRAY_COLOR: {
  789. attribs[i].offset = attributes_stride;
  790. attribs[i].size = 4;
  791. attribs[i].type = GL_UNSIGNED_BYTE;
  792. attributes_stride += 4;
  793. attribs[i].normalized = GL_TRUE;
  794. } break;
  795. case RS::ARRAY_TEX_UV: {
  796. attribs[i].offset = attributes_stride;
  797. attribs[i].size = 2;
  798. if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  799. attribs[i].type = GL_UNSIGNED_SHORT;
  800. attributes_stride += 2 * sizeof(uint16_t);
  801. attribs[i].normalized = GL_TRUE;
  802. } else {
  803. attribs[i].type = GL_FLOAT;
  804. attributes_stride += 2 * sizeof(float);
  805. attribs[i].normalized = GL_FALSE;
  806. }
  807. } break;
  808. case RS::ARRAY_TEX_UV2: {
  809. attribs[i].offset = attributes_stride;
  810. attribs[i].size = 2;
  811. if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  812. attribs[i].type = GL_UNSIGNED_SHORT;
  813. attributes_stride += 2 * sizeof(uint16_t);
  814. attribs[i].normalized = GL_TRUE;
  815. } else {
  816. attribs[i].type = GL_FLOAT;
  817. attributes_stride += 2 * sizeof(float);
  818. attribs[i].normalized = GL_FALSE;
  819. }
  820. } break;
  821. case RS::ARRAY_CUSTOM0:
  822. case RS::ARRAY_CUSTOM1:
  823. case RS::ARRAY_CUSTOM2:
  824. case RS::ARRAY_CUSTOM3: {
  825. attribs[i].offset = attributes_stride;
  826. int idx = i - RS::ARRAY_CUSTOM0;
  827. uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
  828. uint32_t fmt = (s->format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
  829. uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
  830. GLenum gl_type[RS::ARRAY_CUSTOM_MAX] = { GL_UNSIGNED_BYTE, GL_BYTE, GL_HALF_FLOAT, GL_HALF_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT };
  831. GLboolean norm[RS::ARRAY_CUSTOM_MAX] = { GL_TRUE, GL_TRUE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE };
  832. attribs[i].type = gl_type[fmt];
  833. attributes_stride += fmtsize[fmt];
  834. attribs[i].size = fmtsize[fmt] / sizeof(float);
  835. attribs[i].normalized = norm[fmt];
  836. } break;
  837. case RS::ARRAY_BONES: {
  838. attribs[i].offset = skin_stride;
  839. attribs[i].size = 4;
  840. attribs[i].type = GL_UNSIGNED_SHORT;
  841. skin_stride += 4 * sizeof(uint16_t);
  842. attribs[i].normalized = GL_FALSE;
  843. attribs[i].integer = true;
  844. } break;
  845. case RS::ARRAY_WEIGHTS: {
  846. attribs[i].offset = skin_stride;
  847. attribs[i].size = 4;
  848. attribs[i].type = GL_UNSIGNED_SHORT;
  849. skin_stride += 4 * sizeof(uint16_t);
  850. attribs[i].normalized = GL_TRUE;
  851. } break;
  852. }
  853. }
  854. glGenVertexArrays(1, &v.vertex_array);
  855. glBindVertexArray(v.vertex_array);
  856. for (int i = 0; i < RS::ARRAY_INDEX; i++) {
  857. if (!attribs[i].enabled) {
  858. glDisableVertexAttribArray(i);
  859. continue;
  860. }
  861. if (i <= RS::ARRAY_TANGENT) {
  862. attribs[i].stride = (i == RS::ARRAY_VERTEX) ? position_stride : normal_tangent_stride;
  863. if (mis) {
  864. glBindBuffer(GL_ARRAY_BUFFER, mis->vertex_buffer);
  865. } else {
  866. glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);
  867. }
  868. } else if (i <= RS::ARRAY_CUSTOM3) {
  869. attribs[i].stride = attributes_stride;
  870. glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
  871. } else {
  872. attribs[i].stride = skin_stride;
  873. glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
  874. }
  875. if (attribs[i].integer) {
  876. glVertexAttribIPointer(i, attribs[i].size, attribs[i].type, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
  877. } else {
  878. glVertexAttribPointer(i, attribs[i].size, attribs[i].type, attribs[i].normalized, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
  879. }
  880. glEnableVertexAttribArray(i);
  881. }
  882. // Do not bind index here as we want to switch between index buffers for LOD
  883. glBindVertexArray(0);
  884. glBindBuffer(GL_ARRAY_BUFFER, 0);
  885. v.input_mask = p_input_mask;
  886. }
  887. void MeshStorage::mesh_surface_remove(RID p_mesh, int p_surface) {
  888. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  889. ERR_FAIL_NULL(mesh);
  890. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  891. // Clear instance data before mesh data.
  892. for (MeshInstance *mi : mesh->instances) {
  893. _mesh_instance_remove_surface(mi, p_surface);
  894. }
  895. _mesh_surface_clear(mesh, p_surface);
  896. if ((uint32_t)p_surface < mesh->surface_count - 1) {
  897. memmove(mesh->surfaces + p_surface, mesh->surfaces + p_surface + 1, sizeof(Mesh::Surface *) * (mesh->surface_count - (p_surface + 1)));
  898. }
  899. mesh->surfaces = (Mesh::Surface **)memrealloc(mesh->surfaces, sizeof(Mesh::Surface *) * (mesh->surface_count - 1));
  900. --mesh->surface_count;
  901. mesh->material_cache.clear();
  902. mesh->skeleton_aabb_version = 0;
  903. if (mesh->has_bone_weights) {
  904. mesh->has_bone_weights = false;
  905. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  906. if (mesh->surfaces[i]->format & RS::ARRAY_FORMAT_BONES) {
  907. mesh->has_bone_weights = true;
  908. break;
  909. }
  910. }
  911. }
  912. if (mesh->surface_count == 0) {
  913. mesh->aabb = AABB();
  914. } else {
  915. mesh->aabb = mesh->surfaces[0]->aabb;
  916. for (uint32_t i = 1; i < mesh->surface_count; i++) {
  917. mesh->aabb.merge_with(mesh->surfaces[i]->aabb);
  918. }
  919. }
  920. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  921. for (Mesh *E : mesh->shadow_owners) {
  922. Mesh *shadow_owner = E;
  923. shadow_owner->shadow_mesh = RID();
  924. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  925. }
  926. }
  927. /* MESH INSTANCE API */
  928. RID MeshStorage::mesh_instance_create(RID p_base) {
  929. Mesh *mesh = mesh_owner.get_or_null(p_base);
  930. ERR_FAIL_NULL_V(mesh, RID());
  931. RID rid = mesh_instance_owner.make_rid();
  932. MeshInstance *mi = mesh_instance_owner.get_or_null(rid);
  933. mi->mesh = mesh;
  934. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  935. _mesh_instance_add_surface(mi, mesh, i);
  936. }
  937. mi->I = mesh->instances.push_back(mi);
  938. mi->dirty = true;
  939. return rid;
  940. }
  941. void MeshStorage::mesh_instance_free(RID p_rid) {
  942. MeshInstance *mi = mesh_instance_owner.get_or_null(p_rid);
  943. _mesh_instance_clear(mi);
  944. mi->mesh->instances.erase(mi->I);
  945. mi->I = nullptr;
  946. mesh_instance_owner.free(p_rid);
  947. }
  948. void MeshStorage::mesh_instance_set_skeleton(RID p_mesh_instance, RID p_skeleton) {
  949. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  950. if (mi->skeleton == p_skeleton) {
  951. return;
  952. }
  953. mi->skeleton = p_skeleton;
  954. mi->skeleton_version = 0;
  955. mi->dirty = true;
  956. }
  957. void MeshStorage::mesh_instance_set_blend_shape_weight(RID p_mesh_instance, int p_shape, float p_weight) {
  958. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  959. ERR_FAIL_NULL(mi);
  960. ERR_FAIL_INDEX(p_shape, (int)mi->blend_weights.size());
  961. mi->blend_weights[p_shape] = p_weight;
  962. mi->dirty = true;
  963. }
  964. void MeshStorage::_mesh_instance_clear(MeshInstance *mi) {
  965. while (mi->surfaces.size()) {
  966. _mesh_instance_remove_surface(mi, mi->surfaces.size() - 1);
  967. }
  968. mi->dirty = false;
  969. }
  970. void MeshStorage::_mesh_instance_add_surface(MeshInstance *mi, Mesh *mesh, uint32_t p_surface) {
  971. if (mesh->blend_shape_count > 0) {
  972. mi->blend_weights.resize(mesh->blend_shape_count);
  973. for (uint32_t i = 0; i < mi->blend_weights.size(); i++) {
  974. mi->blend_weights[i] = 0.0;
  975. }
  976. }
  977. MeshInstance::Surface s;
  978. if ((mesh->blend_shape_count > 0 || (mesh->surfaces[p_surface]->format & RS::ARRAY_FORMAT_BONES)) && mesh->surfaces[p_surface]->vertex_buffer_size > 0) {
  979. // Cache surface properties
  980. s.format_cache = mesh->surfaces[p_surface]->format;
  981. if ((s.format_cache & (1ULL << RS::ARRAY_VERTEX))) {
  982. if (s.format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  983. s.vertex_size_cache = 2;
  984. } else {
  985. s.vertex_size_cache = 3;
  986. }
  987. s.vertex_stride_cache = sizeof(float) * s.vertex_size_cache;
  988. }
  989. if ((s.format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  990. s.vertex_normal_offset_cache = s.vertex_stride_cache;
  991. s.vertex_stride_cache += sizeof(uint32_t) * 2;
  992. }
  993. if ((s.format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  994. s.vertex_tangent_offset_cache = s.vertex_stride_cache;
  995. s.vertex_stride_cache += sizeof(uint32_t) * 2;
  996. }
  997. int buffer_size = s.vertex_stride_cache * mesh->surfaces[p_surface]->vertex_count;
  998. // Buffer to be used for rendering. Final output of skeleton and blend shapes.
  999. glGenBuffers(1, &s.vertex_buffer);
  1000. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffer);
  1001. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffer, buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance vertex buffer");
  1002. if (mesh->blend_shape_count > 0) {
  1003. // Ping-Pong buffers for processing blendshapes.
  1004. glGenBuffers(2, s.vertex_buffers);
  1005. for (uint32_t i = 0; i < 2; i++) {
  1006. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffers[i]);
  1007. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffers[i], buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance process buffer[" + itos(i) + "]");
  1008. }
  1009. }
  1010. glBindBuffer(GL_ARRAY_BUFFER, 0); //unbind
  1011. }
  1012. mi->surfaces.push_back(s);
  1013. mi->dirty = true;
  1014. }

void MeshStorage::_mesh_instance_remove_surface(MeshInstance *mi, int p_surface) {
	MeshInstance::Surface &surface = mi->surfaces[p_surface];

	if (surface.version_count != 0) {
		for (uint32_t j = 0; j < surface.version_count; j++) {
			glDeleteVertexArrays(1, &surface.versions[j].vertex_array);
			surface.versions[j].vertex_array = 0;
		}
		memfree(surface.versions);
	}
	if (surface.vertex_buffers[0] != 0) {
		GLES3::Utilities::get_singleton()->buffer_free_data(surface.vertex_buffers[0]);
		GLES3::Utilities::get_singleton()->buffer_free_data(surface.vertex_buffers[1]);
		surface.vertex_buffers[0] = 0;
		surface.vertex_buffers[1] = 0;
	}
	if (surface.vertex_buffer != 0) {
		GLES3::Utilities::get_singleton()->buffer_free_data(surface.vertex_buffer);
		surface.vertex_buffer = 0;
	}
	mi->surfaces.remove_at(p_surface);

	if (mi->surfaces.is_empty()) {
		mi->blend_weights.clear();
		mi->weights_dirty = false;
		mi->skeleton_version = 0;
	}
	mi->dirty = true;
}

void MeshStorage::mesh_instance_check_for_update(RID p_mesh_instance) {
	MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);

	bool needs_update = mi->dirty;

	if (mi->array_update_list.in_list()) {
		return;
	}

	if (!needs_update && mi->skeleton.is_valid()) {
		Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);
		if (sk && sk->version != mi->skeleton_version) {
			needs_update = true;
		}
	}

	if (needs_update) {
		dirty_mesh_instance_arrays.add(&mi->array_update_list);
	}
}

void MeshStorage::mesh_instance_set_canvas_item_transform(RID p_mesh_instance, const Transform2D &p_transform) {
	MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
	mi->canvas_item_transform_2d = p_transform;
}
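
// Binds the current ping-pong source buffer as the vertex input for the next blend shape pass,
// enabling only the attributes (vertex, normal, tangent) that were cached for this surface.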
void MeshStorage::_blend_shape_bind_mesh_instance_buffer(MeshInstance *p_mi, uint32_t p_surface) {
	glBindBuffer(GL_ARRAY_BUFFER, p_mi->surfaces[p_surface].vertex_buffers[0]);

	if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_VERTEX))) {
		glEnableVertexAttribArray(RS::ARRAY_VERTEX);
		glVertexAttribPointer(RS::ARRAY_VERTEX, p_mi->surfaces[p_surface].vertex_size_cache, GL_FLOAT, GL_FALSE, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(0));
	} else {
		glDisableVertexAttribArray(RS::ARRAY_VERTEX);
	}
	if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
		glEnableVertexAttribArray(RS::ARRAY_NORMAL);
		glVertexAttribIPointer(RS::ARRAY_NORMAL, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_normal_offset_cache));
	} else {
		glDisableVertexAttribArray(RS::ARRAY_NORMAL);
	}
	if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
		glEnableVertexAttribArray(RS::ARRAY_TANGENT);
		glVertexAttribIPointer(RS::ARRAY_TANGENT, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_tangent_offset_cache));
	} else {
		glDisableVertexAttribArray(RS::ARRAY_TANGENT);
	}
}
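
// Runs the skeleton pass for one surface: bone indices and weights are read from the surface's
// skin buffer, bone transforms are sampled from the skeleton's transforms texture, and the
// skinned vertices are captured into the surface's output vertex buffer via transform feedback.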
void MeshStorage::_compute_skeleton(MeshInstance *p_mi, Skeleton *p_sk, uint32_t p_surface) {
	// Add in the bones and weights.
	glBindBuffer(GL_ARRAY_BUFFER, p_mi->mesh->surfaces[p_surface]->skin_buffer);

	bool use_8_weights = p_mi->surfaces[p_surface].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
	int skin_stride = sizeof(int16_t) * (use_8_weights ? 16 : 8);
	glEnableVertexAttribArray(RS::ARRAY_BONES);
	glVertexAttribIPointer(RS::ARRAY_BONES, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(0));
	if (use_8_weights) {
		glEnableVertexAttribArray(11);
		glVertexAttribIPointer(11, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
		glEnableVertexAttribArray(12);
		glVertexAttribPointer(12, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(8 * sizeof(uint16_t)));
		glEnableVertexAttribArray(13);
		glVertexAttribPointer(13, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(12 * sizeof(uint16_t)));
	} else {
		glEnableVertexAttribArray(RS::ARRAY_WEIGHTS);
		glVertexAttribPointer(RS::ARRAY_WEIGHTS, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
	}

	glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, p_mi->surfaces[p_surface].vertex_buffer);

	glActiveTexture(GL_TEXTURE0);
	glBindTexture(GL_TEXTURE_2D, p_sk->transforms_texture);

	glBeginTransformFeedback(GL_POINTS);
	glDrawArrays(GL_POINTS, 0, p_mi->mesh->surfaces[p_surface]->vertex_count);
	glEndTransformFeedback();

	glDisableVertexAttribArray(RS::ARRAY_BONES);
	glDisableVertexAttribArray(RS::ARRAY_WEIGHTS);
	glDisableVertexAttribArray(RS::ARRAY_BONES + 2);
	glDisableVertexAttribArray(RS::ARRAY_WEIGHTS + 2);
	glBindVertexArray(0);
	glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
}
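
// Drives every dirty mesh instance through the transform feedback pipeline while rasterization is
// discarded: a base pass seeds the ping-pong buffers from the source surface, intermediate blend
// passes accumulate each non-zero blend shape weight, and the last blend shape is applied in a
// final pass that is merged with the skeleton pass when a compatible skeleton is bound.
// Skeleton-only surfaces instead take a single skinning pass straight into the output buffer.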
void MeshStorage::update_mesh_instances() {
	if (dirty_mesh_instance_arrays.first() == nullptr) {
		return; //nothing to do
	}

	glEnable(GL_RASTERIZER_DISCARD);
	glBindFramebuffer(GL_FRAMEBUFFER, GLES3::TextureStorage::system_fbo);

	// Process skeletons and blend shapes using transform feedback
	while (dirty_mesh_instance_arrays.first()) {
		MeshInstance *mi = dirty_mesh_instance_arrays.first()->self();

		Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);

		// Precompute base weight if using blend shapes.
		float base_weight = 1.0;
		if (mi->surfaces.size() && mi->mesh->blend_shape_count && mi->mesh->blend_shape_mode == RS::BLEND_SHAPE_MODE_NORMALIZED) {
			for (uint32_t i = 0; i < mi->mesh->blend_shape_count; i++) {
				base_weight -= mi->blend_weights[i];
			}
		}

		for (uint32_t i = 0; i < mi->surfaces.size(); i++) {
			if (mi->surfaces[i].vertex_buffer == 0) {
				continue;
			}

			bool array_is_2d = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES;
			bool can_use_skeleton = sk != nullptr && sk->use_2d == array_is_2d && (mi->surfaces[i].format_cache & RS::ARRAY_FORMAT_BONES);
			bool use_8_weights = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;

			// Always process blend shapes first.
			if (mi->mesh->blend_shape_count) {
				SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
				uint64_t specialization = 0;
				specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
				specialization |= SkeletonShaderGLES3::USE_BLEND_SHAPES;
				if (!array_is_2d) {
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
						specialization |= SkeletonShaderGLES3::USE_NORMAL;
					}
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
						specialization |= SkeletonShaderGLES3::USE_TANGENT;
					}
				}

				bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, base_weight, skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

				glBindBuffer(GL_ARRAY_BUFFER, 0);

				GLuint vertex_array_gl = 0;
				uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
				uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
				mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);

				glBindVertexArray(vertex_array_gl);
				glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[0]);
				glBeginTransformFeedback(GL_POINTS);
				glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
				glEndTransformFeedback();

				variant = SkeletonShaderGLES3::MODE_BLEND_PASS;
				success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				//Do the last blend shape separately, as it can be combined with the skeleton pass.
				for (uint32_t bs = 0; bs < mi->mesh->blend_shape_count - 1; bs++) {
					float weight = mi->blend_weights[bs];

					if (Math::is_zero_approx(weight)) {
						// Don't bother with this one.
						continue;
					}

					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

					glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
					_blend_shape_bind_mesh_instance_buffer(mi, i);
					glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[1]);
					glBeginTransformFeedback(GL_POINTS);
					glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
					glEndTransformFeedback();

					SWAP(mi->surfaces[i].vertex_buffers[0], mi->surfaces[i].vertex_buffers[1]);
				}

				uint32_t bs = mi->mesh->blend_shape_count - 1;
				float weight = mi->blend_weights[bs];

				glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
				_blend_shape_bind_mesh_instance_buffer(mi, i);

				specialization |= can_use_skeleton ? SkeletonShaderGLES3::USE_SKELETON : 0;
				specialization |= (can_use_skeleton && use_8_weights) ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
				specialization |= SkeletonShaderGLES3::FINAL_PASS;
				success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);

				if (can_use_skeleton) {
					Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);

					Transform2D inverse_transform = transform.affine_inverse();
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
					skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);

					// Do last blendshape in the same pass as the Skeleton.
					_compute_skeleton(mi, sk, i);
					can_use_skeleton = false;
				} else {
					// Do last blendshape by itself and prepare vertex data for use by the renderer.
					glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffer);
					glBeginTransformFeedback(GL_POINTS);
					glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
					glEndTransformFeedback();
				}

				glBindVertexArray(0);
				glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
			}

			// This branch should only execute when Skeleton is run by itself.
			if (can_use_skeleton) {
				SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
				uint64_t specialization = 0;
				specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
				specialization |= SkeletonShaderGLES3::USE_SKELETON;
				specialization |= SkeletonShaderGLES3::FINAL_PASS;
				specialization |= use_8_weights ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
				if (!array_is_2d) {
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
						specialization |= SkeletonShaderGLES3::USE_NORMAL;
					}
					if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
						specialization |= SkeletonShaderGLES3::USE_TANGENT;
					}
				}

				bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
				if (!success) {
					continue;
				}

				Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);

				Transform2D inverse_transform = transform.affine_inverse();
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
				skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);

				GLuint vertex_array_gl = 0;
				uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
				uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
				mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);
				glBindVertexArray(vertex_array_gl);
				_compute_skeleton(mi, sk, i);
			}
		}

		mi->dirty = false;
		if (sk) {
			mi->skeleton_version = sk->version;
		}
		dirty_mesh_instance_arrays.remove(&mi->array_update_list);
	}

	glDisable(GL_RASTERIZER_DISCARD);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0);
}

/* MULTIMESH API */

RID MeshStorage::_multimesh_allocate() {
	return multimesh_owner.allocate_rid();
}

void MeshStorage::_multimesh_initialize(RID p_rid) {
	multimesh_owner.initialize_rid(p_rid, MultiMesh());
}

void MeshStorage::_multimesh_free(RID p_rid) {
	// Remove from interpolator.
	_interpolation_data.notify_free_multimesh(p_rid);

	_update_dirty_multimeshes();
	multimesh_allocate_data(p_rid, 0, RS::MULTIMESH_TRANSFORM_2D);
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_rid);
	multimesh->dependency.deleted_notify(p_rid);
	multimesh_owner.free(p_rid);
}
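
// Per-instance layout of the MultiMesh buffer, in floats:
//   2D: 8 floats of transform (2x4 row-major), 3D: 12 floats (3x4 row-major);
//   then, if color or custom data is used, 2 floats holding the color as 4 half floats and
//   2 floats holding the custom data as 4 half floats (space for both is always reserved together).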
void MeshStorage::_multimesh_allocate_data(RID p_multimesh, int p_instances, RS::MultimeshTransformFormat p_transform_format, bool p_use_colors, bool p_use_custom_data, bool p_use_indirect) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);

	if (multimesh->instances == p_instances && multimesh->xform_format == p_transform_format && multimesh->uses_colors == p_use_colors && multimesh->uses_custom_data == p_use_custom_data) {
		return;
	}

	if (multimesh->buffer) {
		GLES3::Utilities::get_singleton()->buffer_free_data(multimesh->buffer);
		multimesh->buffer = 0;
	}

	if (multimesh->data_cache_dirty_regions) {
		memdelete_arr(multimesh->data_cache_dirty_regions);
		multimesh->data_cache_dirty_regions = nullptr;
		multimesh->data_cache_used_dirty_regions = 0;
	}

	// If we have either color or custom data, reserve space for both to make data handling logic simpler.
	// This way we can always treat them both as a single, compressed uvec4.
	int color_and_custom_strides = (p_use_colors || p_use_custom_data) ? 2 : 0;

	multimesh->instances = p_instances;
	multimesh->xform_format = p_transform_format;
	multimesh->uses_colors = p_use_colors;
	multimesh->color_offset_cache = p_transform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
	multimesh->uses_custom_data = p_use_custom_data;
	multimesh->custom_data_offset_cache = multimesh->color_offset_cache + color_and_custom_strides;
	multimesh->stride_cache = multimesh->custom_data_offset_cache + color_and_custom_strides;
	multimesh->buffer_set = false;

	multimesh->data_cache = Vector<float>();
	multimesh->aabb = AABB();
	multimesh->aabb_dirty = false;
	multimesh->visible_instances = MIN(multimesh->visible_instances, multimesh->instances);

	if (multimesh->instances) {
		glGenBuffers(1, &multimesh->buffer);
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float), nullptr, GL_STATIC_DRAW, "MultiMesh buffer");
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	}

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH);
}

int MeshStorage::_multimesh_get_instance_count(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, 0);
	return multimesh->instances;
}

void MeshStorage::_multimesh_set_mesh(RID p_multimesh, RID p_mesh) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	if (multimesh->mesh == p_mesh || p_mesh.is_null()) {
		return;
	}
	multimesh->mesh = p_mesh;

	if (multimesh->instances == 0) {
		return;
	}

	if (multimesh->data_cache.size()) {
		//we have a data cache, just mark it dirty
		_multimesh_mark_all_dirty(multimesh, false, true);
	} else if (multimesh->instances) {
		// Need to re-create AABB. Unfortunately, calling this has a penalty.
		if (multimesh->buffer_set) {
			Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
			const uint8_t *r = buffer.ptr();
			const float *data = (const float *)r;
			_multimesh_re_create_aabb(multimesh, data, multimesh->instances);
		}
	}

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
}

#define MULTIMESH_DIRTY_REGION_SIZE 512
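
// Lazily mirrors the GPU buffer into data_cache on the CPU. This is needed as soon as the user
// reads or writes individual instances; the initial copy reads the whole buffer back (or zeroes
// the cache if nothing was uploaded yet) and allocates the per-region dirty flags.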
void MeshStorage::_multimesh_make_local(MultiMesh *multimesh) const {
	if (multimesh->data_cache.size() > 0 || multimesh->instances == 0) {
		return; //already local
	}
	ERR_FAIL_COND(multimesh->data_cache.size() > 0);
	// this means that the user wants to load/save individual elements,
	// for this, the data must reside on CPU, so just copy it there.
	multimesh->data_cache.resize(multimesh->instances * multimesh->stride_cache);
	{
		float *w = multimesh->data_cache.ptrw();

		if (multimesh->buffer_set) {
			Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
			{
				const uint8_t *r = buffer.ptr();
				memcpy(w, r, buffer.size());
			}
		} else {
			memset(w, 0, (size_t)multimesh->instances * multimesh->stride_cache * sizeof(float));
		}
	}

	uint32_t data_cache_dirty_region_count = Math::division_round_up(multimesh->instances, MULTIMESH_DIRTY_REGION_SIZE);
	multimesh->data_cache_dirty_regions = memnew_arr(bool, data_cache_dirty_region_count);
	for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
		multimesh->data_cache_dirty_regions[i] = false;
	}
	multimesh->data_cache_used_dirty_regions = 0;
}
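
// Instances are grouped into regions of MULTIMESH_DIRTY_REGION_SIZE; marking one instance dirty
// flags its whole region and links the multimesh into the dirty list processed by
// _update_dirty_multimeshes(), so only touched regions need to be re-uploaded.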
void MeshStorage::_multimesh_mark_dirty(MultiMesh *multimesh, int p_index, bool p_aabb) {
	uint32_t region_index = p_index / MULTIMESH_DIRTY_REGION_SIZE;
#ifdef DEBUG_ENABLED
	uint32_t data_cache_dirty_region_count = Math::division_round_up(multimesh->instances, MULTIMESH_DIRTY_REGION_SIZE);
	ERR_FAIL_UNSIGNED_INDEX(region_index, data_cache_dirty_region_count); //bug
#endif
	if (!multimesh->data_cache_dirty_regions[region_index]) {
		multimesh->data_cache_dirty_regions[region_index] = true;
		multimesh->data_cache_used_dirty_regions++;
	}

	if (p_aabb) {
		multimesh->aabb_dirty = true;
	}

	if (!multimesh->dirty) {
		multimesh->dirty_list = multimesh_dirty_list;
		multimesh_dirty_list = multimesh;
		multimesh->dirty = true;
	}
}

void MeshStorage::_multimesh_mark_all_dirty(MultiMesh *multimesh, bool p_data, bool p_aabb) {
	if (p_data) {
		uint32_t data_cache_dirty_region_count = Math::division_round_up(multimesh->instances, MULTIMESH_DIRTY_REGION_SIZE);

		for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
			if (!multimesh->data_cache_dirty_regions[i]) {
				multimesh->data_cache_dirty_regions[i] = true;
				multimesh->data_cache_used_dirty_regions++;
			}
		}
	}

	if (p_aabb) {
		multimesh->aabb_dirty = true;
	}

	if (!multimesh->dirty) {
		multimesh->dirty_list = multimesh_dirty_list;
		multimesh_dirty_list = multimesh;
		multimesh->dirty = true;
	}
}
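
// Rebuilds the multimesh AABB from raw instance data by transforming the mesh AABB with every
// instance transform (rows 0-1 plus origin for 2D, rows 0-2 plus origin for 3D) and merging the
// results. Skipped entirely when a custom AABB has been set.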
void MeshStorage::_multimesh_re_create_aabb(MultiMesh *multimesh, const float *p_data, int p_instances) {
	ERR_FAIL_COND(multimesh->mesh.is_null());

	if (multimesh->custom_aabb != AABB()) {
		return;
	}

	AABB aabb;
	AABB mesh_aabb = mesh_get_aabb(multimesh->mesh);
	for (int i = 0; i < p_instances; i++) {
		const float *data = p_data + multimesh->stride_cache * i;
		Transform3D t;

		if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
			t.basis.rows[0][0] = data[0];
			t.basis.rows[0][1] = data[1];
			t.basis.rows[0][2] = data[2];
			t.origin.x = data[3];
			t.basis.rows[1][0] = data[4];
			t.basis.rows[1][1] = data[5];
			t.basis.rows[1][2] = data[6];
			t.origin.y = data[7];
			t.basis.rows[2][0] = data[8];
			t.basis.rows[2][1] = data[9];
			t.basis.rows[2][2] = data[10];
			t.origin.z = data[11];
		} else {
			t.basis.rows[0][0] = data[0];
			t.basis.rows[0][1] = data[1];
			t.origin.x = data[3];
			t.basis.rows[1][0] = data[4];
			t.basis.rows[1][1] = data[5];
			t.origin.y = data[7];
		}

		if (i == 0) {
			aabb = t.xform(mesh_aabb);
		} else {
			aabb.merge_with(t.xform(mesh_aabb));
		}
	}

	multimesh->aabb = aabb;
}

void MeshStorage::_multimesh_instance_set_transform(RID p_multimesh, int p_index, const Transform3D &p_transform) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache;

		dataptr[0] = p_transform.basis.rows[0][0];
		dataptr[1] = p_transform.basis.rows[0][1];
		dataptr[2] = p_transform.basis.rows[0][2];
		dataptr[3] = p_transform.origin.x;
		dataptr[4] = p_transform.basis.rows[1][0];
		dataptr[5] = p_transform.basis.rows[1][1];
		dataptr[6] = p_transform.basis.rows[1][2];
		dataptr[7] = p_transform.origin.y;
		dataptr[8] = p_transform.basis.rows[2][0];
		dataptr[9] = p_transform.basis.rows[2][1];
		dataptr[10] = p_transform.basis.rows[2][2];
		dataptr[11] = p_transform.origin.z;
	}

	_multimesh_mark_dirty(multimesh, p_index, true);
}

void MeshStorage::_multimesh_instance_set_transform_2d(RID p_multimesh, int p_index, const Transform2D &p_transform) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache;

		dataptr[0] = p_transform.columns[0][0];
		dataptr[1] = p_transform.columns[1][0];
		dataptr[2] = 0;
		dataptr[3] = p_transform.columns[2][0];
		dataptr[4] = p_transform.columns[0][1];
		dataptr[5] = p_transform.columns[1][1];
		dataptr[6] = 0;
		dataptr[7] = p_transform.columns[2][1];
	}

	_multimesh_mark_dirty(multimesh, p_index, true);
}

void MeshStorage::_multimesh_instance_set_color(RID p_multimesh, int p_index, const Color &p_color) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(!multimesh->uses_colors);

	_multimesh_make_local(multimesh);

	{
		// Colors are packed into 2 floats.
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache + multimesh->color_offset_cache;

		uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
		memcpy(dataptr, val, 2 * 4);
	}

	_multimesh_mark_dirty(multimesh, p_index, false);
}

void MeshStorage::_multimesh_instance_set_custom_data(RID p_multimesh, int p_index, const Color &p_color) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_INDEX(p_index, multimesh->instances);
	ERR_FAIL_COND(!multimesh->uses_custom_data);

	_multimesh_make_local(multimesh);

	{
		float *w = multimesh->data_cache.ptrw();
		float *dataptr = w + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;

		uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
		memcpy(dataptr, val, 2 * 4);
	}

	_multimesh_mark_dirty(multimesh, p_index, false);
}

RID MeshStorage::_multimesh_get_mesh(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, RID());

	return multimesh->mesh;
}

void MeshStorage::_multimesh_set_custom_aabb(RID p_multimesh, const AABB &p_aabb) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	multimesh->custom_aabb = p_aabb;
	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
}

AABB MeshStorage::_multimesh_get_custom_aabb(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, AABB());
	return multimesh->custom_aabb;
}

AABB MeshStorage::_multimesh_get_aabb(RID p_multimesh) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, AABB());
	if (multimesh->custom_aabb != AABB()) {
		return multimesh->custom_aabb;
	}

	if (multimesh->aabb_dirty) {
		_update_dirty_multimeshes();
	}
	return multimesh->aabb;
}

Transform3D MeshStorage::_multimesh_instance_get_transform(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Transform3D());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform3D());
	ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D, Transform3D());

	_multimesh_make_local(multimesh);

	Transform3D t;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache;

		t.basis.rows[0][0] = dataptr[0];
		t.basis.rows[0][1] = dataptr[1];
		t.basis.rows[0][2] = dataptr[2];
		t.origin.x = dataptr[3];
		t.basis.rows[1][0] = dataptr[4];
		t.basis.rows[1][1] = dataptr[5];
		t.basis.rows[1][2] = dataptr[6];
		t.origin.y = dataptr[7];
		t.basis.rows[2][0] = dataptr[8];
		t.basis.rows[2][1] = dataptr[9];
		t.basis.rows[2][2] = dataptr[10];
		t.origin.z = dataptr[11];
	}

	return t;
}

Transform2D MeshStorage::_multimesh_instance_get_transform_2d(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Transform2D());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform2D());
	ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D, Transform2D());

	_multimesh_make_local(multimesh);

	Transform2D t;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache;

		t.columns[0][0] = dataptr[0];
		t.columns[1][0] = dataptr[1];
		t.columns[2][0] = dataptr[3];
		t.columns[0][1] = dataptr[4];
		t.columns[1][1] = dataptr[5];
		t.columns[2][1] = dataptr[7];
	}

	return t;
}

Color MeshStorage::_multimesh_instance_get_color(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Color());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
	ERR_FAIL_COND_V(!multimesh->uses_colors, Color());

	_multimesh_make_local(multimesh);

	Color c;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->color_offset_cache;

		uint16_t raw_data[4];
		memcpy(raw_data, dataptr, 2 * 4);
		c.r = Math::half_to_float(raw_data[0]);
		c.g = Math::half_to_float(raw_data[1]);
		c.b = Math::half_to_float(raw_data[2]);
		c.a = Math::half_to_float(raw_data[3]);
	}

	return c;
}

Color MeshStorage::_multimesh_instance_get_custom_data(RID p_multimesh, int p_index) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Color());
	ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
	ERR_FAIL_COND_V(!multimesh->uses_custom_data, Color());

	_multimesh_make_local(multimesh);

	Color c;
	{
		const float *r = multimesh->data_cache.ptr();
		const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;

		uint16_t raw_data[4];
		memcpy(raw_data, dataptr, 2 * 4);
		c.r = Math::half_to_float(raw_data[0]);
		c.g = Math::half_to_float(raw_data[1]);
		c.b = Math::half_to_float(raw_data[2]);
		c.a = Math::half_to_float(raw_data[3]);
	}

	return c;
}
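
// Accepts the RenderingServer-side float buffer (colors and custom data as full floats) and, when
// either of those channels is used, repacks it in place into the compressed GPU layout (half
// floats) before uploading; otherwise the transform-only buffer is uploaded as-is.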
void MeshStorage::_multimesh_set_buffer(RID p_multimesh, const Vector<float> &p_buffer) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);

	if (multimesh->uses_colors || multimesh->uses_custom_data) {
		// Color and custom need to be packed so copy buffer to data_cache and pack.

		_multimesh_make_local(multimesh);

		uint32_t old_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
		old_stride += multimesh->uses_colors ? 4 : 0;
		old_stride += multimesh->uses_custom_data ? 4 : 0;
		ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)old_stride));

		multimesh->data_cache = p_buffer;

		float *w = multimesh->data_cache.ptrw();

		for (int i = 0; i < multimesh->instances; i++) {
			{
				float *dataptr = w + i * old_stride;
				float *newptr = w + i * multimesh->stride_cache;
				float vals[8] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3], dataptr[4], dataptr[5], dataptr[6], dataptr[7] };
				memcpy(newptr, vals, 8 * 4);
			}

			if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
				float *dataptr = w + i * old_stride + 8;
				float *newptr = w + i * multimesh->stride_cache + 8;
				float vals[8] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3] };
				memcpy(newptr, vals, 4 * 4);
			}

			if (multimesh->uses_colors) {
				float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
				float *newptr = w + i * multimesh->stride_cache + multimesh->color_offset_cache;
				uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
				memcpy(newptr, val, 2 * 4);
			}
			if (multimesh->uses_custom_data) {
				float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
				float *newptr = w + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
				uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
				memcpy(newptr, val, 2 * 4);
			}
		}

		multimesh->data_cache.resize(multimesh->instances * (int)multimesh->stride_cache);

		const float *r = multimesh->data_cache.ptr();
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		glBufferData(GL_ARRAY_BUFFER, multimesh->data_cache.size() * sizeof(float), r, GL_STATIC_DRAW);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	} else {
		// If we have a data cache, just update it.
		if (multimesh->data_cache.size()) {
			multimesh->data_cache = p_buffer;
		}
		// Only Transform is being used, so we can upload directly.
		ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)multimesh->stride_cache));
		const float *r = p_buffer.ptr();
		glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
		glBufferData(GL_ARRAY_BUFFER, p_buffer.size() * sizeof(float), r, GL_STATIC_DRAW);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
	}

	multimesh->buffer_set = true;

	if (multimesh->data_cache.size() || multimesh->uses_colors || multimesh->uses_custom_data) {
		// Clear dirty since nothing will be dirty anymore.
		uint32_t data_cache_dirty_region_count = Math::division_round_up(multimesh->instances, MULTIMESH_DIRTY_REGION_SIZE);
		for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
			multimesh->data_cache_dirty_regions[i] = false;
		}
		multimesh->data_cache_used_dirty_regions = 0;

		_multimesh_mark_all_dirty(multimesh, false, true); //update AABB
	} else if (multimesh->mesh.is_valid()) {
		//if we have a mesh set, we need to re-generate the AABB from the new data
		const float *data = p_buffer.ptr();
		if (multimesh->custom_aabb == AABB()) {
			_multimesh_re_create_aabb(multimesh, data, multimesh->instances);
			multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
		}
	}
}

RID MeshStorage::_multimesh_get_command_buffer_rd_rid(RID p_multimesh) const {
	ERR_FAIL_V_MSG(RID(), "GLES3 does not implement indirect multimeshes.");
}

RID MeshStorage::_multimesh_get_buffer_rd_rid(RID p_multimesh) const {
	ERR_FAIL_V_MSG(RID(), "GLES3 does not contain a Rid for the multimesh buffer.");
}
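
// Returns the instance data in the RenderingServer-facing layout. When color or custom data is
// present the compressed GPU layout is expanded back to full floats; if no CPU cache exists the
// buffer is read back from GPU memory first, which can stall the pipeline.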
Vector<float> MeshStorage::_multimesh_get_buffer(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, Vector<float>());
	Vector<float> ret;
	if (multimesh->buffer == 0 || multimesh->instances == 0) {
		return Vector<float>();
	} else if (multimesh->data_cache.size()) {
		ret = multimesh->data_cache;
	} else {
		// Buffer not cached, so fetch from GPU memory. This can be a stalling operation, avoid whenever possible.
		Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
		ret.resize(multimesh->instances * multimesh->stride_cache);
		{
			float *w = ret.ptrw();
			const uint8_t *r = buffer.ptr();
			memcpy(w, r, buffer.size());
		}
	}

	if (multimesh->uses_colors || multimesh->uses_custom_data) {
		// Need to decompress buffer.
		uint32_t new_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
		new_stride += multimesh->uses_colors ? 4 : 0;
		new_stride += multimesh->uses_custom_data ? 4 : 0;

		Vector<float> decompressed;
		decompressed.resize(multimesh->instances * (int)new_stride);
		float *w = decompressed.ptrw();
		const float *r = ret.ptr();

		for (int i = 0; i < multimesh->instances; i++) {
			{
				float *newptr = w + i * new_stride;
				const float *oldptr = r + i * multimesh->stride_cache;
				float vals[8] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3], oldptr[4], oldptr[5], oldptr[6], oldptr[7] };
				memcpy(newptr, vals, 8 * 4);
			}

			if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
				float *newptr = w + i * new_stride + 8;
				const float *oldptr = r + i * multimesh->stride_cache + 8;
				float vals[8] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3] };
				memcpy(newptr, vals, 4 * 4);
			}

			if (multimesh->uses_colors) {
				float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
				const float *oldptr = r + i * multimesh->stride_cache + multimesh->color_offset_cache;
				uint16_t raw_data[4];
				memcpy(raw_data, oldptr, 2 * 4);
				newptr[0] = Math::half_to_float(raw_data[0]);
				newptr[1] = Math::half_to_float(raw_data[1]);
				newptr[2] = Math::half_to_float(raw_data[2]);
				newptr[3] = Math::half_to_float(raw_data[3]);
			}
			if (multimesh->uses_custom_data) {
				float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
				const float *oldptr = r + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
				uint16_t raw_data[4];
				memcpy(raw_data, oldptr, 2 * 4);
				newptr[0] = Math::half_to_float(raw_data[0]);
				newptr[1] = Math::half_to_float(raw_data[1]);
				newptr[2] = Math::half_to_float(raw_data[2]);
				newptr[3] = Math::half_to_float(raw_data[3]);
			}
		}

		return decompressed;
	} else {
		return ret;
	}
}

void MeshStorage::_multimesh_set_visible_instances(RID p_multimesh, int p_visible) {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL(multimesh);
	ERR_FAIL_COND(p_visible < -1 || p_visible > multimesh->instances);
	if (multimesh->visible_instances == p_visible) {
		return;
	}

	if (multimesh->data_cache.size()) {
		// There is a data cache, but we may need to update some sections.
		_multimesh_mark_all_dirty(multimesh, false, true);
		int start = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;
		for (int i = start; i < p_visible; i++) {
			_multimesh_mark_dirty(multimesh, i, true);
		}
	}

	multimesh->visible_instances = p_visible;

	multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH_VISIBLE_INSTANCES);
}

int MeshStorage::_multimesh_get_visible_instances(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V(multimesh, 0);
	return multimesh->visible_instances;
}

MeshStorage::MultiMeshInterpolator *MeshStorage::_multimesh_get_interpolator(RID p_multimesh) const {
	MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
	ERR_FAIL_NULL_V_MSG(multimesh, nullptr, "Multimesh not found: " + itos(p_multimesh.get_id()));
	return &multimesh->interpolator;
}
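
// Flushes queued multimesh changes to the GPU. When only a few regions are dirty they are
// uploaded individually with glBufferSubData; when many are dirty the whole visible range is
// uploaded in one call. The AABB is also recomputed here if it was marked dirty.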
void MeshStorage::_update_dirty_multimeshes() {
	while (multimesh_dirty_list) {
		MultiMesh *multimesh = multimesh_dirty_list;

		if (multimesh->data_cache.size()) { //may have been cleared, so only process if it exists
			const float *data = multimesh->data_cache.ptr();

			uint32_t visible_instances = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;

			if (multimesh->data_cache_used_dirty_regions) {
				uint32_t data_cache_dirty_region_count = Math::division_round_up(multimesh->instances, (int)MULTIMESH_DIRTY_REGION_SIZE);
				uint32_t visible_region_count = visible_instances == 0 ? 0 : Math::division_round_up(visible_instances, (uint32_t)MULTIMESH_DIRTY_REGION_SIZE);

				GLint region_size = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * sizeof(float);

				if (multimesh->data_cache_used_dirty_regions > 32 || multimesh->data_cache_used_dirty_regions > visible_region_count / 2) {
					// If there are too many dirty regions, or they make up the majority of regions, just copy everything; otherwise the per-region transfer cost piles up.
					glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
					glBufferSubData(GL_ARRAY_BUFFER, 0, MIN(visible_region_count * region_size, multimesh->instances * multimesh->stride_cache * sizeof(float)), data);
					glBindBuffer(GL_ARRAY_BUFFER, 0);
				} else {
					// Not that many regions? Update the dirty ones individually.
					// TODO: profile the performance cost on low end
					glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
					for (uint32_t i = 0; i < visible_region_count; i++) {
						if (multimesh->data_cache_dirty_regions[i]) {
							GLint offset = i * region_size;
							GLint size = multimesh->stride_cache * (uint32_t)multimesh->instances * (uint32_t)sizeof(float);
							uint32_t region_start_index = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * i;
							glBufferSubData(GL_ARRAY_BUFFER, offset, MIN(region_size, size - offset), &data[region_start_index]);
						}
					}
					glBindBuffer(GL_ARRAY_BUFFER, 0);
				}

				for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
					multimesh->data_cache_dirty_regions[i] = false;
				}

				multimesh->data_cache_used_dirty_regions = 0;
			}

			if (multimesh->aabb_dirty && multimesh->mesh.is_valid()) {
				multimesh->aabb_dirty = false;
				if (multimesh->custom_aabb == AABB()) {
					_multimesh_re_create_aabb(multimesh, data, visible_instances);
					multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
				}
			}
		}

		multimesh_dirty_list = multimesh->dirty_list;
		multimesh->dirty_list = nullptr;
		multimesh->dirty = false;
	}

	multimesh_dirty_list = nullptr;
}

/* SKELETON API */

RID MeshStorage::skeleton_allocate() {
	return skeleton_owner.allocate_rid();
}

void MeshStorage::skeleton_initialize(RID p_rid) {
	skeleton_owner.initialize_rid(p_rid, Skeleton());
}

void MeshStorage::skeleton_free(RID p_rid) {
	_update_dirty_skeletons();
	skeleton_allocate_data(p_rid, 0);
	Skeleton *skeleton = skeleton_owner.get_or_null(p_rid);
	skeleton->dependency.deleted_notify(p_rid);
	skeleton_owner.free(p_rid);
}

void MeshStorage::_skeleton_make_dirty(Skeleton *skeleton) {
	if (!skeleton->dirty) {
		skeleton->dirty = true;
		skeleton->dirty_list = skeleton_dirty_list;
		skeleton_dirty_list = skeleton;
	}
}
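
// Bone transforms are stored in a 256-texel-wide RGBA32F texture: each 3D bone takes 3 texels
// (a 3x4 matrix) and each 2D bone takes 2 texels (a 2x4 matrix), so the texture height is the
// bone count times 3 or 2, divided by 256 and rounded up.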
void MeshStorage::skeleton_allocate_data(RID p_skeleton, int p_bones, bool p_2d_skeleton) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_COND(p_bones < 0);

	if (skeleton->size == p_bones && skeleton->use_2d == p_2d_skeleton) {
		return;
	}

	skeleton->size = p_bones;
	skeleton->use_2d = p_2d_skeleton;
	skeleton->height = (p_bones * (p_2d_skeleton ? 2 : 3)) / 256;
	if ((p_bones * (p_2d_skeleton ? 2 : 3)) % 256) {
		skeleton->height++;
	}

	if (skeleton->transforms_texture != 0) {
		GLES3::Utilities::get_singleton()->texture_free_data(skeleton->transforms_texture);
		skeleton->transforms_texture = 0;
		skeleton->data.clear();
	}

	if (skeleton->size) {
		skeleton->data.resize(256 * skeleton->height * 4);
		glGenTextures(1, &skeleton->transforms_texture);
		glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, nullptr);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		glBindTexture(GL_TEXTURE_2D, 0);
		GLES3::Utilities::get_singleton()->texture_allocated_data(skeleton->transforms_texture, skeleton->data.size() * sizeof(float), "Skeleton transforms texture");

		memset(skeleton->data.ptr(), 0, skeleton->data.size() * sizeof(float));

		_skeleton_make_dirty(skeleton);
	}

	skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_DATA);
}

void MeshStorage::skeleton_set_base_transform_2d(RID p_skeleton, const Transform2D &p_base_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_COND(!skeleton->use_2d);

	skeleton->base_transform_2d = p_base_transform;
}

int MeshStorage::skeleton_get_bone_count(RID p_skeleton) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, 0);

	return skeleton->size;
}

void MeshStorage::skeleton_bone_set_transform(RID p_skeleton, int p_bone, const Transform3D &p_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_INDEX(p_bone, skeleton->size);
	ERR_FAIL_COND(skeleton->use_2d);

	float *dataptr = skeleton->data.ptr() + p_bone * 12;

	dataptr[0] = p_transform.basis.rows[0][0];
	dataptr[1] = p_transform.basis.rows[0][1];
	dataptr[2] = p_transform.basis.rows[0][2];
	dataptr[3] = p_transform.origin.x;
	dataptr[4] = p_transform.basis.rows[1][0];
	dataptr[5] = p_transform.basis.rows[1][1];
	dataptr[6] = p_transform.basis.rows[1][2];
	dataptr[7] = p_transform.origin.y;
	dataptr[8] = p_transform.basis.rows[2][0];
	dataptr[9] = p_transform.basis.rows[2][1];
	dataptr[10] = p_transform.basis.rows[2][2];
	dataptr[11] = p_transform.origin.z;

	_skeleton_make_dirty(skeleton);
}

Transform3D MeshStorage::skeleton_bone_get_transform(RID p_skeleton, int p_bone) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, Transform3D());
	ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform3D());
	ERR_FAIL_COND_V(skeleton->use_2d, Transform3D());

	const float *dataptr = skeleton->data.ptr() + p_bone * 12;

	Transform3D t;

	t.basis.rows[0][0] = dataptr[0];
	t.basis.rows[0][1] = dataptr[1];
	t.basis.rows[0][2] = dataptr[2];
	t.origin.x = dataptr[3];
	t.basis.rows[1][0] = dataptr[4];
	t.basis.rows[1][1] = dataptr[5];
	t.basis.rows[1][2] = dataptr[6];
	t.origin.y = dataptr[7];
	t.basis.rows[2][0] = dataptr[8];
	t.basis.rows[2][1] = dataptr[9];
	t.basis.rows[2][2] = dataptr[10];
	t.origin.z = dataptr[11];

	return t;
}

void MeshStorage::skeleton_bone_set_transform_2d(RID p_skeleton, int p_bone, const Transform2D &p_transform) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);
	ERR_FAIL_INDEX(p_bone, skeleton->size);
	ERR_FAIL_COND(!skeleton->use_2d);

	float *dataptr = skeleton->data.ptr() + p_bone * 8;

	dataptr[0] = p_transform.columns[0][0];
	dataptr[1] = p_transform.columns[1][0];
	dataptr[2] = 0;
	dataptr[3] = p_transform.columns[2][0];
	dataptr[4] = p_transform.columns[0][1];
	dataptr[5] = p_transform.columns[1][1];
	dataptr[6] = 0;
	dataptr[7] = p_transform.columns[2][1];

	_skeleton_make_dirty(skeleton);
}

Transform2D MeshStorage::skeleton_bone_get_transform_2d(RID p_skeleton, int p_bone) const {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL_V(skeleton, Transform2D());
	ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform2D());
	ERR_FAIL_COND_V(!skeleton->use_2d, Transform2D());

	const float *dataptr = skeleton->data.ptr() + p_bone * 8;

	Transform2D t;

	t.columns[0][0] = dataptr[0];
	t.columns[1][0] = dataptr[1];
	t.columns[2][0] = dataptr[3];
	t.columns[0][1] = dataptr[4];
	t.columns[1][1] = dataptr[5];
	t.columns[2][1] = dataptr[7];

	return t;
}
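
// Uploads the CPU-side bone data of every dirty skeleton into its transforms texture and bumps
// the skeleton version so dependent mesh instances re-run their skinning pass.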
void MeshStorage::_update_dirty_skeletons() {
	while (skeleton_dirty_list) {
		Skeleton *skeleton = skeleton_dirty_list;

		if (skeleton->size) {
			glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, skeleton->data.ptr());
			glBindTexture(GL_TEXTURE_2D, 0);
		}

		skeleton_dirty_list = skeleton->dirty_list;

		skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_BONES);

		skeleton->version++;

		skeleton->dirty = false;
		skeleton->dirty_list = nullptr;
	}

	skeleton_dirty_list = nullptr;
}

void MeshStorage::skeleton_update_dependency(RID p_skeleton, DependencyTracker *p_instance) {
	Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
	ERR_FAIL_NULL(skeleton);

	p_instance->update_dependency(&skeleton->dependency);
}

#endif // GLES3_ENABLED