
  1. /**************************************************************************/
  2. /* mesh_storage.cpp */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. #ifdef GLES3_ENABLED
  31. #include "mesh_storage.h"
  32. #include "material_storage.h"
  33. #include "utilities.h"
  34. using namespace GLES3;
  35. MeshStorage *MeshStorage::singleton = nullptr;
  36. MeshStorage *MeshStorage::get_singleton() {
  37. return singleton;
  38. }
  39. MeshStorage::MeshStorage() {
  40. singleton = this;
  41. {
  42. skeleton_shader.shader.initialize();
  43. skeleton_shader.shader_version = skeleton_shader.shader.version_create();
  44. }
  45. }
  46. MeshStorage::~MeshStorage() {
  47. singleton = nullptr;
  48. skeleton_shader.shader.version_free(skeleton_shader.shader_version);
  49. }
  50. /* MESH API */
  51. RID MeshStorage::mesh_allocate() {
  52. return mesh_owner.allocate_rid();
  53. }
  54. void MeshStorage::mesh_initialize(RID p_rid) {
  55. mesh_owner.initialize_rid(p_rid, Mesh());
  56. }
  57. void MeshStorage::mesh_free(RID p_rid) {
  58. mesh_clear(p_rid);
  59. mesh_set_shadow_mesh(p_rid, RID());
  60. Mesh *mesh = mesh_owner.get_or_null(p_rid);
  61. ERR_FAIL_NULL(mesh);
  62. mesh->dependency.deleted_notify(p_rid);
  63. if (mesh->instances.size()) {
  64. ERR_PRINT("deleting mesh with active instances");
  65. }
  66. if (mesh->shadow_owners.size()) {
  67. for (Mesh *E : mesh->shadow_owners) {
  68. Mesh *shadow_owner = E;
  69. shadow_owner->shadow_mesh = RID();
  70. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  71. }
  72. }
  73. mesh_owner.free(p_rid);
  74. }
  75. void MeshStorage::mesh_set_blend_shape_count(RID p_mesh, int p_blend_shape_count) {
  76. ERR_FAIL_COND(p_blend_shape_count < 0);
  77. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  78. ERR_FAIL_NULL(mesh);
  79. ERR_FAIL_COND(mesh->surface_count > 0); //surfaces already exist
  80. mesh->blend_shape_count = p_blend_shape_count;
  81. }
  82. bool MeshStorage::mesh_needs_instance(RID p_mesh, bool p_has_skeleton) {
  83. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  84. ERR_FAIL_NULL_V(mesh, false);
  85. return mesh->blend_shape_count > 0 || (mesh->has_bone_weights && p_has_skeleton);
  86. }
  87. void MeshStorage::mesh_add_surface(RID p_mesh, const RS::SurfaceData &p_surface) {
  88. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  89. ERR_FAIL_NULL(mesh);
  90. ERR_FAIL_COND(mesh->surface_count == RS::MAX_MESH_SURFACES);
  91. #ifdef DEBUG_ENABLED
  92. // Do a validation pass first, to catch errors early.
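// Surface data arrives as three separate byte streams: vertex_data (positions, normals, tangents),
// attribute_data (colors, UVs and custom channels), and skin_data (bones and weights). The loop below
// accumulates the expected per-vertex stride of each stream so the provided buffer sizes can be checked.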
  93. {
  94. uint32_t stride = 0;
  95. uint32_t attrib_stride = 0;
  96. uint32_t skin_stride = 0;
  97. for (int i = 0; i < RS::ARRAY_WEIGHTS; i++) {
  98. if ((p_surface.format & (1ULL << i))) {
  99. switch (i) {
  100. case RS::ARRAY_VERTEX: {
  101. if ((p_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) || (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  102. stride += sizeof(float) * 2;
  103. } else {
  104. stride += sizeof(float) * 3;
  105. }
  106. } break;
  107. case RS::ARRAY_NORMAL: {
  108. stride += sizeof(uint16_t) * 2;
  109. } break;
  110. case RS::ARRAY_TANGENT: {
  111. if (!(p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  112. stride += sizeof(uint16_t) * 2;
  113. }
  114. } break;
  115. case RS::ARRAY_COLOR: {
  116. attrib_stride += sizeof(uint32_t);
  117. } break;
  118. case RS::ARRAY_TEX_UV: {
  119. if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  120. attrib_stride += sizeof(uint16_t) * 2;
  121. } else {
  122. attrib_stride += sizeof(float) * 2;
  123. }
  124. } break;
  125. case RS::ARRAY_TEX_UV2: {
  126. if (p_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  127. attrib_stride += sizeof(uint16_t) * 2;
  128. } else {
  129. attrib_stride += sizeof(float) * 2;
  130. }
  131. } break;
  132. case RS::ARRAY_CUSTOM0:
  133. case RS::ARRAY_CUSTOM1:
  134. case RS::ARRAY_CUSTOM2:
  135. case RS::ARRAY_CUSTOM3: {
  136. int idx = i - RS::ARRAY_CUSTOM0;
  137. uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
  138. uint32_t fmt = (p_surface.format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
  139. uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
  140. attrib_stride += fmtsize[fmt];
  141. } break;
  142. case RS::ARRAY_WEIGHTS:
  143. case RS::ARRAY_BONES: {
  144. // Bones and weights use a separate array.
  145. bool use_8 = p_surface.format & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
  146. skin_stride += sizeof(int16_t) * (use_8 ? 16 : 8);
  147. } break;
  148. }
  149. }
  150. }
  151. int expected_size = stride * p_surface.vertex_count;
  152. ERR_FAIL_COND_MSG(expected_size != p_surface.vertex_data.size(), "Size of vertex data provided (" + itos(p_surface.vertex_data.size()) + ") does not match expected (" + itos(expected_size) + ")");
  153. int bs_expected_size = expected_size * mesh->blend_shape_count;
  154. ERR_FAIL_COND_MSG(bs_expected_size != p_surface.blend_shape_data.size(), "Size of blend shape data provided (" + itos(p_surface.blend_shape_data.size()) + ") does not match expected (" + itos(bs_expected_size) + ")");
  155. int expected_attrib_size = attrib_stride * p_surface.vertex_count;
  156. ERR_FAIL_COND_MSG(expected_attrib_size != p_surface.attribute_data.size(), "Size of attribute data provided (" + itos(p_surface.attribute_data.size()) + ") does not match expected (" + itos(expected_attrib_size) + ")");
  157. if ((p_surface.format & RS::ARRAY_FORMAT_WEIGHTS) && (p_surface.format & RS::ARRAY_FORMAT_BONES)) {
  158. expected_size = skin_stride * p_surface.vertex_count;
  159. ERR_FAIL_COND_MSG(expected_size != p_surface.skin_data.size(), "Size of skin data provided (" + itos(p_surface.skin_data.size()) + ") does not match expected (" + itos(expected_size) + ")");
  160. }
  161. }
  162. #endif
  163. uint64_t surface_version = p_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
  164. RS::SurfaceData new_surface = p_surface;
  165. #ifdef DISABLE_DEPRECATED
  166. ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION, "Surface version provided (" + itos(int(surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT)) + ") does not match current version (" + itos(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) + ")");
  167. #else
  168. if (surface_version != uint64_t(RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION)) {
  169. RS::get_singleton()->fix_surface_compatibility(new_surface);
  170. surface_version = new_surface.format & (uint64_t(RS::ARRAY_FLAG_FORMAT_VERSION_MASK) << RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT);
  171. ERR_FAIL_COND_MSG(surface_version != RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION,
  172. vformat("Surface version provided (%d) does not match current version (%d).",
  173. (surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK,
  174. (RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK));
  175. }
  176. #endif
  177. Mesh::Surface *s = memnew(Mesh::Surface);
  178. s->format = new_surface.format;
  179. s->primitive = new_surface.primitive;
  180. if (new_surface.vertex_data.size()) {
  181. glGenBuffers(1, &s->vertex_buffer);
  182. glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);
  183. // If we have an uncompressed surface that contains normals but not tangents, we need to differentiate the array
  184. // from a compressed array in the shader. To do so, we allow the normal to read 4 components out of the buffer,
  185. // but only give it 2 components per normal. So essentially, each vertex reads the next normal in normal.zw.
  186. // This allows us to avoid adding a shader permutation and avoid passing dummy tangents. Since the stride is kept small,
  187. // this should still be a net win for bandwidth.
  188. // If we do this, the last normal will read past the end of the array, so we need to pad the array with dummy data.
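// Roughly, with N vertices the padded buffer then looks like:
//   [pos_0 .. pos_(N-1)] [norm_0 .. norm_(N-1)] [2 * uint16_t of zero padding]
// so when vertex N-1 reads its normal.zw it lands on the padding rather than past the end of the buffer.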
  189. if (!(new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (new_surface.format & RS::ARRAY_FORMAT_NORMAL) && !(new_surface.format & RS::ARRAY_FORMAT_TANGENT)) {
  190. // Unfortunately, we need to copy the buffer, which is fine as doing a resize triggers a CoW anyway.
  191. Vector<uint8_t> new_vertex_data;
  192. new_vertex_data.resize_zeroed(new_surface.vertex_data.size() + sizeof(uint16_t) * 2);
  193. memcpy(new_vertex_data.ptrw(), new_surface.vertex_data.ptr(), new_surface.vertex_data.size());
  194. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_vertex_data.size(), new_vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
  195. s->vertex_buffer_size = new_vertex_data.size();
  196. } else {
  197. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->vertex_buffer, new_surface.vertex_data.size(), new_surface.vertex_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh vertex buffer");
  198. s->vertex_buffer_size = new_surface.vertex_data.size();
  199. }
  200. }
  201. if (new_surface.attribute_data.size()) {
  202. glGenBuffers(1, &s->attribute_buffer);
  203. glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
  204. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->attribute_buffer, new_surface.attribute_data.size(), new_surface.attribute_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh attribute buffer");
  205. s->attribute_buffer_size = new_surface.attribute_data.size();
  206. }
  207. if (new_surface.skin_data.size()) {
  208. glGenBuffers(1, &s->skin_buffer);
  209. glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
  210. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->skin_buffer, new_surface.skin_data.size(), new_surface.skin_data.ptr(), (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh skin buffer");
  211. s->skin_buffer_size = new_surface.skin_data.size();
  212. }
  213. glBindBuffer(GL_ARRAY_BUFFER, 0);
  214. s->vertex_count = new_surface.vertex_count;
  215. if (new_surface.format & RS::ARRAY_FORMAT_BONES) {
  216. mesh->has_bone_weights = true;
  217. }
  218. if (new_surface.index_count) {
  219. bool is_index_16 = new_surface.vertex_count <= 65536 && new_surface.vertex_count > 0;
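// 16-bit indices are used when the vertex count fits in 65536; below this only affects how each LOD's
// index count is derived from its index data size (2 bytes per index instead of 4).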
  220. glGenBuffers(1, &s->index_buffer);
  221. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer);
  222. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->index_buffer, new_surface.index_data.size(), new_surface.index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer");
  223. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
  224. s->index_count = new_surface.index_count;
  225. s->index_buffer_size = new_surface.index_data.size();
  226. if (new_surface.lods.size()) {
  227. s->lods = memnew_arr(Mesh::Surface::LOD, new_surface.lods.size());
  228. s->lod_count = new_surface.lods.size();
  229. for (int i = 0; i < new_surface.lods.size(); i++) {
  230. glGenBuffers(1, &s->lods[i].index_buffer);
  231. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer);
  232. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ELEMENT_ARRAY_BUFFER, s->lods[i].index_buffer, new_surface.lods[i].index_data.size(), new_surface.lods[i].index_data.ptr(), GL_STATIC_DRAW, "Mesh index buffer LOD[" + itos(i) + "]");
  233. glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); //unbind
  234. s->lods[i].edge_length = new_surface.lods[i].edge_length;
  235. s->lods[i].index_count = new_surface.lods[i].index_data.size() / (is_index_16 ? 2 : 4);
  236. s->lods[i].index_buffer_size = new_surface.lods[i].index_data.size();
  237. }
  238. }
  239. }
  240. ERR_FAIL_COND_MSG(!new_surface.index_count && !new_surface.vertex_count, "Meshes must contain a vertex array, an index array, or both");
  241. s->aabb = new_surface.aabb;
  242. s->bone_aabbs = new_surface.bone_aabbs; //only really useful for returning them.
  243. s->uv_scale = new_surface.uv_scale;
  244. if (new_surface.skin_data.size() || mesh->blend_shape_count > 0) {
  245. // Size must match the size of the vertex array.
  246. int size = new_surface.vertex_data.size();
  247. int vertex_size = 0;
  248. int position_stride = 0;
  249. int normal_tangent_stride = 0;
  250. int normal_offset = 0;
  251. int tangent_offset = 0;
  252. if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
  253. if (new_surface.format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  254. vertex_size = 2;
  255. position_stride = sizeof(float) * vertex_size;
  256. } else {
  257. if (new_surface.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  258. vertex_size = 4;
  259. position_stride = sizeof(uint16_t) * vertex_size;
  260. } else {
  261. vertex_size = 3;
  262. position_stride = sizeof(float) * vertex_size;
  263. }
  264. }
  265. }
  266. if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
  267. normal_offset = position_stride * s->vertex_count;
  268. normal_tangent_stride += sizeof(uint16_t) * 2;
  269. }
  270. if ((new_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
  271. tangent_offset = normal_offset + normal_tangent_stride;
  272. normal_tangent_stride += sizeof(uint16_t) * 2;
  273. }
  274. if (mesh->blend_shape_count > 0) {
  275. // Blend shapes are passed as one large array; for OpenGL, we need to split each of them into its own buffer.
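// The incoming blend_shape_data is laid out as one contiguous block per blend shape, each block the
// same size as the vertex array, so shape i is uploaded from offset i * size below.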
  276. s->blend_shapes = memnew_arr(Mesh::Surface::BlendShape, mesh->blend_shape_count);
  277. for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
  278. glGenVertexArrays(1, &s->blend_shapes[i].vertex_array);
  279. glBindVertexArray(s->blend_shapes[i].vertex_array);
  280. glGenBuffers(1, &s->blend_shapes[i].vertex_buffer);
  281. glBindBuffer(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer);
  282. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s->blend_shapes[i].vertex_buffer, size, new_surface.blend_shape_data.ptr() + i * size, (s->format & RS::ARRAY_FLAG_USE_DYNAMIC_UPDATE) ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW, "Mesh blend shape buffer");
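// Note: the blend shape source attributes are bound at locations offset by 3 (ARRAY_VERTEX + 3, etc.),
// presumably to keep them distinct from the base surface attributes that the skeleton/blend shape
// shader reads at the standard locations.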
  283. if ((new_surface.format & (1ULL << RS::ARRAY_VERTEX))) {
  284. glEnableVertexAttribArray(RS::ARRAY_VERTEX + 3);
  285. glVertexAttribPointer(RS::ARRAY_VERTEX + 3, vertex_size, GL_FLOAT, GL_FALSE, position_stride, CAST_INT_TO_UCHAR_PTR(0));
  286. }
  287. if ((new_surface.format & (1ULL << RS::ARRAY_NORMAL))) {
  288. // Normal and tangent are packed into the same attribute.
  289. glEnableVertexAttribArray(RS::ARRAY_NORMAL + 3);
  290. glVertexAttribPointer(RS::ARRAY_NORMAL + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(normal_offset));
  291. }
  292. if ((new_surface.format & (1ULL << RS::ARRAY_TANGENT))) {
  293. glEnableVertexAttribArray(RS::ARRAY_TANGENT + 3);
  294. glVertexAttribPointer(RS::ARRAY_TANGENT + 3, 2, GL_UNSIGNED_SHORT, GL_TRUE, normal_tangent_stride, CAST_INT_TO_UCHAR_PTR(tangent_offset));
  295. }
  296. }
  297. glBindVertexArray(0);
  298. glBindBuffer(GL_ARRAY_BUFFER, 0);
  299. }
  300. glBindVertexArray(0);
  301. glBindBuffer(GL_ARRAY_BUFFER, 0);
  302. }
  303. if (mesh->surface_count == 0) {
  304. mesh->aabb = new_surface.aabb;
  305. } else {
  306. mesh->aabb.merge_with(new_surface.aabb);
  307. }
  308. mesh->skeleton_aabb_version = 0;
  309. s->material = new_surface.material;
  310. mesh->surfaces = (Mesh::Surface **)memrealloc(mesh->surfaces, sizeof(Mesh::Surface *) * (mesh->surface_count + 1));
  311. mesh->surfaces[mesh->surface_count] = s;
  312. mesh->surface_count++;
  313. for (MeshInstance *mi : mesh->instances) {
  314. _mesh_instance_add_surface(mi, mesh, mesh->surface_count - 1);
  315. }
  316. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  317. for (Mesh *E : mesh->shadow_owners) {
  318. Mesh *shadow_owner = E;
  319. shadow_owner->shadow_mesh = RID();
  320. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  321. }
  322. mesh->material_cache.clear();
  323. }
  324. int MeshStorage::mesh_get_blend_shape_count(RID p_mesh) const {
  325. const Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  326. ERR_FAIL_NULL_V(mesh, -1);
  327. return mesh->blend_shape_count;
  328. }
  329. void MeshStorage::mesh_set_blend_shape_mode(RID p_mesh, RS::BlendShapeMode p_mode) {
  330. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  331. ERR_FAIL_NULL(mesh);
  332. ERR_FAIL_INDEX((int)p_mode, 2);
  333. mesh->blend_shape_mode = p_mode;
  334. }
  335. RS::BlendShapeMode MeshStorage::mesh_get_blend_shape_mode(RID p_mesh) const {
  336. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  337. ERR_FAIL_NULL_V(mesh, RS::BLEND_SHAPE_MODE_NORMALIZED);
  338. return mesh->blend_shape_mode;
  339. }
  340. void MeshStorage::mesh_surface_update_vertex_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  341. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  342. ERR_FAIL_NULL(mesh);
  343. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  344. ERR_FAIL_COND(p_data.size() == 0);
  345. uint64_t data_size = p_data.size();
  346. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->vertex_buffer_size);
  347. const uint8_t *r = p_data.ptr();
  348. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->vertex_buffer);
  349. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  350. glBindBuffer(GL_ARRAY_BUFFER, 0);
  351. }
  352. void MeshStorage::mesh_surface_update_attribute_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  353. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  354. ERR_FAIL_NULL(mesh);
  355. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  356. ERR_FAIL_COND(p_data.size() == 0);
  357. uint64_t data_size = p_data.size();
  358. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->attribute_buffer_size);
  359. const uint8_t *r = p_data.ptr();
  360. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->attribute_buffer);
  361. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  362. glBindBuffer(GL_ARRAY_BUFFER, 0);
  363. }
  364. void MeshStorage::mesh_surface_update_skin_region(RID p_mesh, int p_surface, int p_offset, const Vector<uint8_t> &p_data) {
  365. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  366. ERR_FAIL_NULL(mesh);
  367. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  368. ERR_FAIL_COND(p_data.size() == 0);
  369. uint64_t data_size = p_data.size();
  370. ERR_FAIL_COND(p_offset + data_size > mesh->surfaces[p_surface]->skin_buffer_size);
  371. const uint8_t *r = p_data.ptr();
  372. glBindBuffer(GL_ARRAY_BUFFER, mesh->surfaces[p_surface]->skin_buffer);
  373. glBufferSubData(GL_ARRAY_BUFFER, p_offset, data_size, r);
  374. glBindBuffer(GL_ARRAY_BUFFER, 0);
  375. }
  376. void MeshStorage::mesh_surface_set_material(RID p_mesh, int p_surface, RID p_material) {
  377. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  378. ERR_FAIL_NULL(mesh);
  379. ERR_FAIL_UNSIGNED_INDEX((uint32_t)p_surface, mesh->surface_count);
  380. mesh->surfaces[p_surface]->material = p_material;
  381. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MATERIAL);
  382. mesh->material_cache.clear();
  383. }
  384. RID MeshStorage::mesh_surface_get_material(RID p_mesh, int p_surface) const {
  385. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  386. ERR_FAIL_NULL_V(mesh, RID());
  387. ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RID());
  388. return mesh->surfaces[p_surface]->material;
  389. }
  390. RS::SurfaceData MeshStorage::mesh_get_surface(RID p_mesh, int p_surface) const {
  391. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  392. ERR_FAIL_NULL_V(mesh, RS::SurfaceData());
  393. ERR_FAIL_UNSIGNED_INDEX_V((uint32_t)p_surface, mesh->surface_count, RS::SurfaceData());
  394. Mesh::Surface &s = *mesh->surfaces[p_surface];
  395. RS::SurfaceData sd;
  396. sd.format = s.format;
  397. if (s.vertex_buffer != 0) {
  398. sd.vertex_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.vertex_buffer, s.vertex_buffer_size);
  399. // When using an uncompressed buffer with normals, but without tangents, we have to trim the padding.
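// (This drops the sizeof(uint16_t) * 2 of dummy padding that mesh_add_surface() appended so the last
// normal's zw read would stay in bounds.)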
  400. if (!(s.format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) && (s.format & RS::ARRAY_FORMAT_NORMAL) && !(s.format & RS::ARRAY_FORMAT_TANGENT)) {
  401. sd.vertex_data.resize(sd.vertex_data.size() - sizeof(uint16_t) * 2);
  402. }
  403. }
  404. if (s.attribute_buffer != 0) {
  405. sd.attribute_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.attribute_buffer, s.attribute_buffer_size);
  406. }
  407. if (s.skin_buffer != 0) {
  408. sd.skin_data = Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.skin_buffer, s.skin_buffer_size);
  409. }
  410. sd.vertex_count = s.vertex_count;
  411. sd.index_count = s.index_count;
  412. sd.primitive = s.primitive;
  413. if (sd.index_count) {
  414. sd.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.index_buffer, s.index_buffer_size);
  415. }
  416. sd.aabb = s.aabb;
  417. for (uint32_t i = 0; i < s.lod_count; i++) {
  418. RS::SurfaceData::LOD lod;
  419. lod.edge_length = s.lods[i].edge_length;
  420. lod.index_data = Utilities::buffer_get_data(GL_ELEMENT_ARRAY_BUFFER, s.lods[i].index_buffer, s.lods[i].index_buffer_size);
  421. sd.lods.push_back(lod);
  422. }
  423. sd.bone_aabbs = s.bone_aabbs;
  424. if (mesh->blend_shape_count) {
  425. sd.blend_shape_data = Vector<uint8_t>();
  426. for (uint32_t i = 0; i < mesh->blend_shape_count; i++) {
  427. sd.blend_shape_data.append_array(Utilities::buffer_get_data(GL_ARRAY_BUFFER, s.blend_shapes[i].vertex_buffer, s.vertex_buffer_size));
  428. }
  429. }
  430. sd.uv_scale = s.uv_scale;
  431. return sd;
  432. }
  433. int MeshStorage::mesh_get_surface_count(RID p_mesh) const {
  434. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  435. ERR_FAIL_NULL_V(mesh, 0);
  436. return mesh->surface_count;
  437. }
  438. void MeshStorage::mesh_set_custom_aabb(RID p_mesh, const AABB &p_aabb) {
  439. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  440. ERR_FAIL_NULL(mesh);
  441. mesh->custom_aabb = p_aabb;
  442. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
  443. }
  444. AABB MeshStorage::mesh_get_custom_aabb(RID p_mesh) const {
  445. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  446. ERR_FAIL_NULL_V(mesh, AABB());
  447. return mesh->custom_aabb;
  448. }
  449. AABB MeshStorage::mesh_get_aabb(RID p_mesh, RID p_skeleton) {
  450. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  451. ERR_FAIL_NULL_V(mesh, AABB());
  452. if (mesh->custom_aabb != AABB()) {
  453. return mesh->custom_aabb;
  454. }
  455. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  456. if (!skeleton || skeleton->size == 0 || mesh->skeleton_aabb_version == skeleton->version) {
  457. return mesh->aabb;
  458. }
  459. // Calculate AABB based on Skeleton
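// Each used bone AABB is transformed by its bone matrix and merged into the surface AABB. The skeleton
// stores one transform per bone as matrix rows: 8 floats (a 2x4 matrix) in the 2D case and 12 floats
// (a 3x4 matrix) in the 3D case, which is why the two branches below read different strides.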
  460. AABB aabb;
  461. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  462. AABB laabb;
  463. if ((mesh->surfaces[i]->format & RS::ARRAY_FORMAT_BONES) && mesh->surfaces[i]->bone_aabbs.size()) {
  464. int bs = mesh->surfaces[i]->bone_aabbs.size();
  465. const AABB *skbones = mesh->surfaces[i]->bone_aabbs.ptr();
  466. int sbs = skeleton->size;
  467. ERR_CONTINUE(bs > sbs);
  468. const float *baseptr = skeleton->data.ptr();
  469. bool first = true;
  470. if (skeleton->use_2d) {
  471. for (int j = 0; j < bs; j++) {
  472. if (skbones[j].size == Vector3(-1, -1, -1)) {
  473. continue; //bone is unused
  474. }
  475. const float *dataptr = baseptr + j * 8;
  476. Transform3D mtx;
  477. mtx.basis.rows[0][0] = dataptr[0];
  478. mtx.basis.rows[0][1] = dataptr[1];
  479. mtx.origin.x = dataptr[3];
  480. mtx.basis.rows[1][0] = dataptr[4];
  481. mtx.basis.rows[1][1] = dataptr[5];
  482. mtx.origin.y = dataptr[7];
  483. AABB baabb = mtx.xform(skbones[j]);
  484. if (first) {
  485. laabb = baabb;
  486. first = false;
  487. } else {
  488. laabb.merge_with(baabb);
  489. }
  490. }
  491. } else {
  492. for (int j = 0; j < bs; j++) {
  493. if (skbones[j].size == Vector3(-1, -1, -1)) {
  494. continue; //bone is unused
  495. }
  496. const float *dataptr = baseptr + j * 12;
  497. Transform3D mtx;
  498. mtx.basis.rows[0][0] = dataptr[0];
  499. mtx.basis.rows[0][1] = dataptr[1];
  500. mtx.basis.rows[0][2] = dataptr[2];
  501. mtx.origin.x = dataptr[3];
  502. mtx.basis.rows[1][0] = dataptr[4];
  503. mtx.basis.rows[1][1] = dataptr[5];
  504. mtx.basis.rows[1][2] = dataptr[6];
  505. mtx.origin.y = dataptr[7];
  506. mtx.basis.rows[2][0] = dataptr[8];
  507. mtx.basis.rows[2][1] = dataptr[9];
  508. mtx.basis.rows[2][2] = dataptr[10];
  509. mtx.origin.z = dataptr[11];
  510. AABB baabb = mtx.xform(skbones[j]);
  511. if (first) {
  512. laabb = baabb;
  513. first = false;
  514. } else {
  515. laabb.merge_with(baabb);
  516. }
  517. }
  518. }
  519. if (laabb.size == Vector3()) {
  520. laabb = mesh->surfaces[i]->aabb;
  521. }
  522. } else {
  523. laabb = mesh->surfaces[i]->aabb;
  524. }
  525. if (i == 0) {
  526. aabb = laabb;
  527. } else {
  528. aabb.merge_with(laabb);
  529. }
  530. }
  531. mesh->aabb = aabb;
  532. mesh->skeleton_aabb_version = skeleton->version;
  533. return aabb;
  534. }
  535. void MeshStorage::mesh_set_shadow_mesh(RID p_mesh, RID p_shadow_mesh) {
  536. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  537. ERR_FAIL_NULL(mesh);
  538. Mesh *shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
  539. if (shadow_mesh) {
  540. shadow_mesh->shadow_owners.erase(mesh);
  541. }
  542. mesh->shadow_mesh = p_shadow_mesh;
  543. shadow_mesh = mesh_owner.get_or_null(mesh->shadow_mesh);
  544. if (shadow_mesh) {
  545. shadow_mesh->shadow_owners.insert(mesh);
  546. }
  547. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  548. }
  549. void MeshStorage::mesh_clear(RID p_mesh) {
  550. Mesh *mesh = mesh_owner.get_or_null(p_mesh);
  551. ERR_FAIL_NULL(mesh);
  552. // Clear instance data before mesh data.
  553. for (MeshInstance *mi : mesh->instances) {
  554. _mesh_instance_clear(mi);
  555. }
  556. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  557. Mesh::Surface &s = *mesh->surfaces[i];
  558. if (s.vertex_buffer != 0) {
  559. GLES3::Utilities::get_singleton()->buffer_free_data(s.vertex_buffer);
  560. s.vertex_buffer = 0;
  561. }
  562. if (s.version_count != 0) {
  563. for (uint32_t j = 0; j < s.version_count; j++) {
  564. glDeleteVertexArrays(1, &s.versions[j].vertex_array);
  565. s.versions[j].vertex_array = 0;
  566. }
  567. }
  568. if (s.attribute_buffer != 0) {
  569. GLES3::Utilities::get_singleton()->buffer_free_data(s.attribute_buffer);
  570. s.attribute_buffer = 0;
  571. }
  572. if (s.skin_buffer != 0) {
  573. GLES3::Utilities::get_singleton()->buffer_free_data(s.skin_buffer);
  574. s.skin_buffer = 0;
  575. }
  576. if (s.index_buffer != 0) {
  577. GLES3::Utilities::get_singleton()->buffer_free_data(s.index_buffer);
  578. s.index_buffer = 0;
  579. }
  580. if (s.versions) {
  581. memfree(s.versions); //reallocs, so free with memfree.
  582. }
  583. if (s.lod_count) {
  584. for (uint32_t j = 0; j < s.lod_count; j++) {
  585. if (s.lods[j].index_buffer != 0) {
  586. GLES3::Utilities::get_singleton()->buffer_free_data(s.lods[j].index_buffer);
  587. s.lods[j].index_buffer = 0;
  588. }
  589. }
  590. memdelete_arr(s.lods);
  591. }
  592. if (mesh->blend_shape_count) {
  593. for (uint32_t j = 0; j < mesh->blend_shape_count; j++) {
  594. if (s.blend_shapes[j].vertex_buffer != 0) {
  595. GLES3::Utilities::get_singleton()->buffer_free_data(s.blend_shapes[j].vertex_buffer);
  596. s.blend_shapes[j].vertex_buffer = 0;
  597. }
  598. if (s.blend_shapes[j].vertex_array != 0) {
  599. glDeleteVertexArrays(1, &s.blend_shapes[j].vertex_array);
  600. s.blend_shapes[j].vertex_array = 0;
  601. }
  602. }
  603. memdelete_arr(s.blend_shapes);
  604. }
  605. memdelete(mesh->surfaces[i]);
  606. }
  607. if (mesh->surfaces) {
  608. memfree(mesh->surfaces);
  609. }
  610. mesh->surfaces = nullptr;
  611. mesh->surface_count = 0;
  612. mesh->material_cache.clear();
  613. mesh->has_bone_weights = false;
  614. mesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  615. for (Mesh *E : mesh->shadow_owners) {
  616. Mesh *shadow_owner = E;
  617. shadow_owner->shadow_mesh = RID();
  618. shadow_owner->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  619. }
  620. }
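// Builds a VAO ("version") for a surface whose enabled vertex attributes match p_input_mask. All
// attributes present in the surface format are still walked so the strides come out right, but only
// those requested by the mask are enabled. When a MeshInstance surface (mis) is supplied, the
// position/normal/tangent attributes are sourced from its skinned/blend-shaped vertex buffer, which
// stores them interleaved as floats, instead of from the mesh's own (possibly compressed) buffers.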
  621. void MeshStorage::_mesh_surface_generate_version_for_input_mask(Mesh::Surface::Version &v, Mesh::Surface *s, uint64_t p_input_mask, MeshInstance::Surface *mis) {
  622. Mesh::Surface::Attrib attribs[RS::ARRAY_MAX];
  623. int position_stride = 0; // Vertex position only.
  624. int normal_tangent_stride = 0;
  625. int attributes_stride = 0;
  626. int skin_stride = 0;
  627. for (int i = 0; i < RS::ARRAY_INDEX; i++) {
  628. attribs[i].enabled = false;
  629. attribs[i].integer = false;
  630. if (!(s->format & (1ULL << i))) {
  631. continue;
  632. }
  633. if ((p_input_mask & (1ULL << i))) {
  634. // Only enable if it matches input mask.
  635. // Iterate over all anyway, so we can calculate stride.
  636. attribs[i].enabled = true;
  637. }
  638. switch (i) {
  639. case RS::ARRAY_VERTEX: {
  640. attribs[i].offset = 0;
  641. attribs[i].type = GL_FLOAT;
  642. attribs[i].normalized = GL_FALSE;
  643. if (s->format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  644. attribs[i].size = 2;
  645. position_stride = attribs[i].size * sizeof(float);
  646. } else {
  647. if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  648. attribs[i].size = 4;
  649. position_stride = attribs[i].size * sizeof(uint16_t);
  650. attribs[i].type = GL_UNSIGNED_SHORT;
  651. attribs[i].normalized = GL_TRUE;
  652. } else {
  653. attribs[i].size = 3;
  654. position_stride = attribs[i].size * sizeof(float);
  655. }
  656. }
  657. } break;
  658. case RS::ARRAY_NORMAL: {
  659. if (!mis && (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
  660. attribs[i].size = 2;
  661. normal_tangent_stride += 2 * attribs[i].size;
  662. } else {
  663. attribs[i].size = 4;
  664. // A small trick here: if we are uncompressed and have normals but no tangents, we need
  665. // the shader to think there are 4 components to "axis_tangent_attrib". So we give it a size of 4,
  666. // but a stride based on only having 2 elements.
  667. if (!(s->format & RS::ARRAY_FORMAT_TANGENT)) {
  668. normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 2;
  669. } else {
  670. normal_tangent_stride += (mis ? sizeof(float) : sizeof(uint16_t)) * 4;
  671. }
  672. }
  673. if (mis) {
  674. // Transform feedback must interleave either all attributes or none; it can't mix interleaved and non-interleaved attributes.
  675. attribs[i].offset = position_stride;
  676. normal_tangent_stride += position_stride;
  677. position_stride = normal_tangent_stride;
  678. } else {
  679. attribs[i].offset = position_stride * s->vertex_count;
  680. }
  681. attribs[i].type = (mis ? GL_FLOAT : GL_UNSIGNED_SHORT);
  682. attribs[i].normalized = GL_TRUE;
  683. } break;
  684. case RS::ARRAY_TANGENT: {
  685. // We never use the tangent attribute; its data is always packed into ARRAY_NORMAL or ARRAY_VERTEX.
  686. attribs[i].enabled = false;
  687. attribs[i].integer = false;
  688. } break;
  689. case RS::ARRAY_COLOR: {
  690. attribs[i].offset = attributes_stride;
  691. attribs[i].size = 4;
  692. attribs[i].type = GL_UNSIGNED_BYTE;
  693. attributes_stride += 4;
  694. attribs[i].normalized = GL_TRUE;
  695. } break;
  696. case RS::ARRAY_TEX_UV: {
  697. attribs[i].offset = attributes_stride;
  698. attribs[i].size = 2;
  699. if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  700. attribs[i].type = GL_UNSIGNED_SHORT;
  701. attributes_stride += 2 * sizeof(uint16_t);
  702. attribs[i].normalized = GL_TRUE;
  703. } else {
  704. attribs[i].type = GL_FLOAT;
  705. attributes_stride += 2 * sizeof(float);
  706. attribs[i].normalized = GL_FALSE;
  707. }
  708. } break;
  709. case RS::ARRAY_TEX_UV2: {
  710. attribs[i].offset = attributes_stride;
  711. attribs[i].size = 2;
  712. if (s->format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
  713. attribs[i].type = GL_UNSIGNED_SHORT;
  714. attributes_stride += 2 * sizeof(uint16_t);
  715. attribs[i].normalized = GL_TRUE;
  716. } else {
  717. attribs[i].type = GL_FLOAT;
  718. attributes_stride += 2 * sizeof(float);
  719. attribs[i].normalized = GL_FALSE;
  720. }
  721. } break;
  722. case RS::ARRAY_CUSTOM0:
  723. case RS::ARRAY_CUSTOM1:
  724. case RS::ARRAY_CUSTOM2:
  725. case RS::ARRAY_CUSTOM3: {
  726. attribs[i].offset = attributes_stride;
  727. int idx = i - RS::ARRAY_CUSTOM0;
  728. uint32_t fmt_shift[RS::ARRAY_CUSTOM_COUNT] = { RS::ARRAY_FORMAT_CUSTOM0_SHIFT, RS::ARRAY_FORMAT_CUSTOM1_SHIFT, RS::ARRAY_FORMAT_CUSTOM2_SHIFT, RS::ARRAY_FORMAT_CUSTOM3_SHIFT };
  729. uint32_t fmt = (s->format >> fmt_shift[idx]) & RS::ARRAY_FORMAT_CUSTOM_MASK;
  730. uint32_t fmtsize[RS::ARRAY_CUSTOM_MAX] = { 4, 4, 4, 8, 4, 8, 12, 16 };
  731. GLenum gl_type[RS::ARRAY_CUSTOM_MAX] = { GL_UNSIGNED_BYTE, GL_BYTE, GL_HALF_FLOAT, GL_HALF_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT, GL_FLOAT };
  732. GLboolean norm[RS::ARRAY_CUSTOM_MAX] = { GL_TRUE, GL_TRUE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE };
  733. attribs[i].type = gl_type[fmt];
  734. attributes_stride += fmtsize[fmt];
  735. attribs[i].size = fmtsize[fmt] / sizeof(float);
  736. attribs[i].normalized = norm[fmt];
  737. } break;
  738. case RS::ARRAY_BONES: {
  739. attribs[i].offset = skin_stride;
  740. attribs[i].size = 4;
  741. attribs[i].type = GL_UNSIGNED_SHORT;
  742. skin_stride += 4 * sizeof(uint16_t);
  743. attribs[i].normalized = GL_FALSE;
  744. attribs[i].integer = true;
  745. } break;
  746. case RS::ARRAY_WEIGHTS: {
  747. attribs[i].offset = skin_stride;
  748. attribs[i].size = 4;
  749. attribs[i].type = GL_UNSIGNED_SHORT;
  750. skin_stride += 4 * sizeof(uint16_t);
  751. attribs[i].normalized = GL_TRUE;
  752. } break;
  753. }
  754. }
  755. glGenVertexArrays(1, &v.vertex_array);
  756. glBindVertexArray(v.vertex_array);
  757. for (int i = 0; i < RS::ARRAY_INDEX; i++) {
  758. if (!attribs[i].enabled) {
  759. glDisableVertexAttribArray(i);
  760. continue;
  761. }
  762. if (i <= RS::ARRAY_TANGENT) {
  763. attribs[i].stride = (i == RS::ARRAY_VERTEX) ? position_stride : normal_tangent_stride;
  764. if (mis) {
  765. glBindBuffer(GL_ARRAY_BUFFER, mis->vertex_buffer);
  766. } else {
  767. glBindBuffer(GL_ARRAY_BUFFER, s->vertex_buffer);
  768. }
  769. } else if (i <= RS::ARRAY_CUSTOM3) {
  770. attribs[i].stride = attributes_stride;
  771. glBindBuffer(GL_ARRAY_BUFFER, s->attribute_buffer);
  772. } else {
  773. attribs[i].stride = skin_stride;
  774. glBindBuffer(GL_ARRAY_BUFFER, s->skin_buffer);
  775. }
  776. if (attribs[i].integer) {
  777. glVertexAttribIPointer(i, attribs[i].size, attribs[i].type, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
  778. } else {
  779. glVertexAttribPointer(i, attribs[i].size, attribs[i].type, attribs[i].normalized, attribs[i].stride, CAST_INT_TO_UCHAR_PTR(attribs[i].offset));
  780. }
  781. glEnableVertexAttribArray(i);
  782. }
  783. // Do not bind index here as we want to switch between index buffers for LOD
  784. glBindVertexArray(0);
  785. glBindBuffer(GL_ARRAY_BUFFER, 0);
  786. v.input_mask = p_input_mask;
  787. }
  788. /* MESH INSTANCE API */
  789. RID MeshStorage::mesh_instance_create(RID p_base) {
  790. Mesh *mesh = mesh_owner.get_or_null(p_base);
  791. ERR_FAIL_NULL_V(mesh, RID());
  792. RID rid = mesh_instance_owner.make_rid();
  793. MeshInstance *mi = mesh_instance_owner.get_or_null(rid);
  794. mi->mesh = mesh;
  795. for (uint32_t i = 0; i < mesh->surface_count; i++) {
  796. _mesh_instance_add_surface(mi, mesh, i);
  797. }
  798. mi->I = mesh->instances.push_back(mi);
  799. mi->dirty = true;
  800. return rid;
  801. }
  802. void MeshStorage::mesh_instance_free(RID p_rid) {
  803. MeshInstance *mi = mesh_instance_owner.get_or_null(p_rid);
  804. _mesh_instance_clear(mi);
  805. mi->mesh->instances.erase(mi->I);
  806. mi->I = nullptr;
  807. mesh_instance_owner.free(p_rid);
  808. }
  809. void MeshStorage::mesh_instance_set_skeleton(RID p_mesh_instance, RID p_skeleton) {
  810. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  811. if (mi->skeleton == p_skeleton) {
  812. return;
  813. }
  814. mi->skeleton = p_skeleton;
  815. mi->skeleton_version = 0;
  816. mi->dirty = true;
  817. }
  818. void MeshStorage::mesh_instance_set_blend_shape_weight(RID p_mesh_instance, int p_shape, float p_weight) {
  819. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  820. ERR_FAIL_NULL(mi);
  821. ERR_FAIL_INDEX(p_shape, (int)mi->blend_weights.size());
  822. mi->blend_weights[p_shape] = p_weight;
  823. mi->dirty = true;
  824. }
  825. void MeshStorage::_mesh_instance_clear(MeshInstance *mi) {
  826. for (uint32_t i = 0; i < mi->surfaces.size(); i++) {
  827. if (mi->surfaces[i].version_count != 0) {
  828. for (uint32_t j = 0; j < mi->surfaces[i].version_count; j++) {
  829. glDeleteVertexArrays(1, &mi->surfaces[i].versions[j].vertex_array);
  830. mi->surfaces[i].versions[j].vertex_array = 0;
  831. }
  832. memfree(mi->surfaces[i].versions);
  833. }
  834. if (mi->surfaces[i].vertex_buffers[0] != 0) {
  835. GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffers[0]);
  836. GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffers[1]);
  837. mi->surfaces[i].vertex_buffers[0] = 0;
  838. mi->surfaces[i].vertex_buffers[1] = 0;
  839. }
  840. if (mi->surfaces[i].vertex_buffer != 0) {
  841. GLES3::Utilities::get_singleton()->buffer_free_data(mi->surfaces[i].vertex_buffer);
  842. mi->surfaces[i].vertex_buffer = 0;
  843. }
  844. }
  845. mi->surfaces.clear();
  846. mi->blend_weights.clear();
  847. mi->skeleton_version = 0;
  848. }
  849. void MeshStorage::_mesh_instance_add_surface(MeshInstance *mi, Mesh *mesh, uint32_t p_surface) {
  850. if (mesh->blend_shape_count > 0) {
  851. mi->blend_weights.resize(mesh->blend_shape_count);
  852. for (uint32_t i = 0; i < mi->blend_weights.size(); i++) {
  853. mi->blend_weights[i] = 0.0;
  854. }
  855. }
  856. MeshInstance::Surface s;
  857. if ((mesh->blend_shape_count > 0 || (mesh->surfaces[p_surface]->format & RS::ARRAY_FORMAT_BONES)) && mesh->surfaces[p_surface]->vertex_buffer_size > 0) {
  858. // Cache surface properties
  859. s.format_cache = mesh->surfaces[p_surface]->format;
  860. if ((s.format_cache & (1ULL << RS::ARRAY_VERTEX))) {
  861. if (s.format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES) {
  862. s.vertex_size_cache = 2;
  863. } else {
  864. s.vertex_size_cache = 3;
  865. }
  866. s.vertex_stride_cache = sizeof(float) * s.vertex_size_cache;
  867. }
  868. if ((s.format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  869. s.vertex_normal_offset_cache = s.vertex_stride_cache;
  870. s.vertex_stride_cache += sizeof(uint32_t) * 2;
  871. }
  872. if ((s.format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  873. s.vertex_tangent_offset_cache = s.vertex_stride_cache;
  874. s.vertex_stride_cache += sizeof(uint32_t) * 2;
  875. }
  876. int buffer_size = s.vertex_stride_cache * mesh->surfaces[p_surface]->vertex_count;
  877. // Buffer to be used for rendering. Final output of skeleton and blend shapes.
  878. glGenBuffers(1, &s.vertex_buffer);
  879. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffer);
  880. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffer, buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance vertex buffer");
  881. if (mesh->blend_shape_count > 0) {
  882. // Ping-pong buffers for processing blend shapes.
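// Each blend pass reads from one of these buffers and writes into the other via transform feedback,
// and update_mesh_instances() swaps them between passes; the final pass writes into s.vertex_buffer
// above, which is the buffer that actually gets rendered.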
  883. glGenBuffers(2, s.vertex_buffers);
  884. for (uint32_t i = 0; i < 2; i++) {
  885. glBindBuffer(GL_ARRAY_BUFFER, s.vertex_buffers[i]);
  886. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, s.vertex_buffers[i], buffer_size, nullptr, GL_DYNAMIC_DRAW, "MeshInstance process buffer[" + itos(i) + "]");
  887. }
  888. }
  889. glBindBuffer(GL_ARRAY_BUFFER, 0); //unbind
  890. }
  891. mi->surfaces.push_back(s);
  892. mi->dirty = true;
  893. }
  894. void MeshStorage::mesh_instance_check_for_update(RID p_mesh_instance) {
  895. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  896. bool needs_update = mi->dirty;
  897. if (mi->array_update_list.in_list()) {
  898. return;
  899. }
  900. if (!needs_update && mi->skeleton.is_valid()) {
  901. Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);
  902. if (sk && sk->version != mi->skeleton_version) {
  903. needs_update = true;
  904. }
  905. }
  906. if (needs_update) {
  907. dirty_mesh_instance_arrays.add(&mi->array_update_list);
  908. }
  909. }
  910. void MeshStorage::mesh_instance_set_canvas_item_transform(RID p_mesh_instance, const Transform2D &p_transform) {
  911. MeshInstance *mi = mesh_instance_owner.get_or_null(p_mesh_instance);
  912. mi->canvas_item_transform_2d = p_transform;
  913. }
  914. void MeshStorage::_blend_shape_bind_mesh_instance_buffer(MeshInstance *p_mi, uint32_t p_surface) {
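// Binds the instance's first ping-pong buffer (the output of the previous pass) as the vertex input for
// the next blend/skeleton pass. Positions are read as floats, while packed normals and tangents are read
// as 2-component integer attributes, matching the cached strides and offsets set up in
// _mesh_instance_add_surface().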
  915. glBindBuffer(GL_ARRAY_BUFFER, p_mi->surfaces[p_surface].vertex_buffers[0]);
  916. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_VERTEX))) {
  917. glEnableVertexAttribArray(RS::ARRAY_VERTEX);
  918. glVertexAttribPointer(RS::ARRAY_VERTEX, p_mi->surfaces[p_surface].vertex_size_cache, GL_FLOAT, GL_FALSE, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(0));
  919. } else {
  920. glDisableVertexAttribArray(RS::ARRAY_VERTEX);
  921. }
  922. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  923. glEnableVertexAttribArray(RS::ARRAY_NORMAL);
  924. glVertexAttribIPointer(RS::ARRAY_NORMAL, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_normal_offset_cache));
  925. } else {
  926. glDisableVertexAttribArray(RS::ARRAY_NORMAL);
  927. }
  928. if ((p_mi->surfaces[p_surface].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  929. glEnableVertexAttribArray(RS::ARRAY_TANGENT);
  930. glVertexAttribIPointer(RS::ARRAY_TANGENT, 2, GL_UNSIGNED_INT, p_mi->surfaces[p_surface].vertex_stride_cache, CAST_INT_TO_UCHAR_PTR(p_mi->surfaces[p_surface].vertex_tangent_offset_cache));
  931. } else {
  932. glDisableVertexAttribArray(RS::ARRAY_TANGENT);
  933. }
  934. }
  935. void MeshStorage::_compute_skeleton(MeshInstance *p_mi, Skeleton *p_sk, uint32_t p_surface) {
  936. // Add in the bones and weights.
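// Bone indices and weights come from the surface's skin buffer as groups of unsigned 16-bit values:
// 4 bones + 4 weights per vertex, or 8 + 8 when ARRAY_FLAG_USE_8_BONE_WEIGHTS is set (in which case the
// second group of bones and both weight groups are bound at attribute locations 11-13). Skeleton
// transforms are sampled from a texture, and the skinned result is captured into the instance's render
// vertex buffer via transform feedback.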
  937. glBindBuffer(GL_ARRAY_BUFFER, p_mi->mesh->surfaces[p_surface]->skin_buffer);
  938. bool use_8_weights = p_mi->surfaces[p_surface].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
  939. int skin_stride = sizeof(int16_t) * (use_8_weights ? 16 : 8);
  940. glEnableVertexAttribArray(RS::ARRAY_BONES);
  941. glVertexAttribIPointer(RS::ARRAY_BONES, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(0));
  942. if (use_8_weights) {
  943. glEnableVertexAttribArray(11);
  944. glVertexAttribIPointer(11, 4, GL_UNSIGNED_SHORT, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
  945. glEnableVertexAttribArray(12);
  946. glVertexAttribPointer(12, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(8 * sizeof(uint16_t)));
  947. glEnableVertexAttribArray(13);
  948. glVertexAttribPointer(13, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(12 * sizeof(uint16_t)));
  949. } else {
  950. glEnableVertexAttribArray(RS::ARRAY_WEIGHTS);
  951. glVertexAttribPointer(RS::ARRAY_WEIGHTS, 4, GL_UNSIGNED_SHORT, GL_TRUE, skin_stride, CAST_INT_TO_UCHAR_PTR(4 * sizeof(uint16_t)));
  952. }
  953. glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, p_mi->surfaces[p_surface].vertex_buffer);
  954. glActiveTexture(GL_TEXTURE0);
  955. glBindTexture(GL_TEXTURE_2D, p_sk->transforms_texture);
  956. glBeginTransformFeedback(GL_POINTS);
  957. glDrawArrays(GL_POINTS, 0, p_mi->mesh->surfaces[p_surface]->vertex_count);
  958. glEndTransformFeedback();
  959. glDisableVertexAttribArray(RS::ARRAY_BONES);
  960. glDisableVertexAttribArray(RS::ARRAY_WEIGHTS);
  961. glDisableVertexAttribArray(RS::ARRAY_BONES + 2);
  962. glDisableVertexAttribArray(RS::ARRAY_WEIGHTS + 2);
  963. glBindVertexArray(0);
  964. glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
  965. }
  966. void MeshStorage::update_mesh_instances() {
  967. if (dirty_mesh_instance_arrays.first() == nullptr) {
  968. return; //nothing to do
  969. }
  970. glEnable(GL_RASTERIZER_DISCARD);
  971. glBindFramebuffer(GL_FRAMEBUFFER, 0);
  972. // Process skeletons and blend shapes using transform feedback
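// Nothing is rasterized here: GL_RASTERIZER_DISCARD is enabled above and each surface's vertices are
// drawn as GL_POINTS purely so the skeleton shader runs once per vertex, with its outputs captured by
// transform feedback. For blend shapes the passes are: a base pass scaled by base_weight, one blend pass
// per non-zero intermediate shape (ping-ponging between the two scratch buffers), and a final pass for
// the last shape, which can also apply the skeleton.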
  973. while (dirty_mesh_instance_arrays.first()) {
  974. MeshInstance *mi = dirty_mesh_instance_arrays.first()->self();
  975. Skeleton *sk = skeleton_owner.get_or_null(mi->skeleton);
  976. // Precompute base weight if using blend shapes.
  977. float base_weight = 1.0;
  978. if (mi->mesh->blend_shape_count && mi->mesh->blend_shape_mode == RS::BLEND_SHAPE_MODE_NORMALIZED) {
  979. for (uint32_t i = 0; i < mi->mesh->blend_shape_count; i++) {
  980. base_weight -= mi->blend_weights[i];
  981. }
  982. }
  983. for (uint32_t i = 0; i < mi->surfaces.size(); i++) {
  984. if (mi->surfaces[i].vertex_buffer == 0) {
  985. continue;
  986. }
  987. bool array_is_2d = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_2D_VERTICES;
  988. bool can_use_skeleton = sk != nullptr && sk->use_2d == array_is_2d && (mi->surfaces[i].format_cache & RS::ARRAY_FORMAT_BONES);
  989. bool use_8_weights = mi->surfaces[i].format_cache & RS::ARRAY_FLAG_USE_8_BONE_WEIGHTS;
  990. // Always process blend shapes first.
  991. if (mi->mesh->blend_shape_count) {
  992. SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
  993. uint64_t specialization = 0;
  994. specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
  995. specialization |= SkeletonShaderGLES3::USE_BLEND_SHAPES;
  996. if (!array_is_2d) {
  997. if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  998. specialization |= SkeletonShaderGLES3::USE_NORMAL;
  999. }
  1000. if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  1001. specialization |= SkeletonShaderGLES3::USE_TANGENT;
  1002. }
  1003. }
  1004. bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
  1005. if (!success) {
  1006. continue;
  1007. }
  1008. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, base_weight, skeleton_shader.shader_version, variant, specialization);
  1009. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);
  1010. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1011. GLuint vertex_array_gl = 0;
uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
  1014. mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);
  1015. glBindVertexArray(vertex_array_gl);
  1016. glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[0]);
  1017. glBeginTransformFeedback(GL_POINTS);
  1018. glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
  1019. glEndTransformFeedback();
  1020. variant = SkeletonShaderGLES3::MODE_BLEND_PASS;
  1021. success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
  1022. if (!success) {
  1023. continue;
  1024. }
// Process all blend shapes except the last one; the last is handled separately below, as it can be combined with the skeleton pass.
for (uint32_t bs = 0; bs < mi->mesh->blend_shape_count - 1; bs++) {
float weight = mi->blend_weights[bs];
if (Math::is_zero_approx(weight)) {
// This blend shape has no effect, skip it.
continue;
}
  1032. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
  1033. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);
  1034. glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
  1035. _blend_shape_bind_mesh_instance_buffer(mi, i);
  1036. glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffers[1]);
  1037. glBeginTransformFeedback(GL_POINTS);
  1038. glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
  1039. glEndTransformFeedback();
  1040. SWAP(mi->surfaces[i].vertex_buffers[0], mi->surfaces[i].vertex_buffers[1]);
  1041. }
  1042. uint32_t bs = mi->mesh->blend_shape_count - 1;
  1043. float weight = mi->blend_weights[bs];
  1044. glBindVertexArray(mi->mesh->surfaces[i]->blend_shapes[bs].vertex_array);
  1045. _blend_shape_bind_mesh_instance_buffer(mi, i);
  1046. specialization |= can_use_skeleton ? SkeletonShaderGLES3::USE_SKELETON : 0;
  1047. specialization |= (can_use_skeleton && use_8_weights) ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
  1048. specialization |= SkeletonShaderGLES3::FINAL_PASS;
  1049. success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
  1050. if (!success) {
  1051. continue;
  1052. }
  1053. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_WEIGHT, weight, skeleton_shader.shader_version, variant, specialization);
  1054. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::BLEND_SHAPE_COUNT, float(mi->mesh->blend_shape_count), skeleton_shader.shader_version, variant, specialization);
  1055. if (can_use_skeleton) {
  1056. Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
  1057. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
  1058. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
  1059. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);
  1060. Transform2D inverse_transform = transform.affine_inverse();
  1061. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
  1062. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
  1063. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);
  1064. // Do last blendshape in the same pass as the Skeleton.
  1065. _compute_skeleton(mi, sk, i);
  1066. can_use_skeleton = false;
  1067. } else {
  1068. // Do last blendshape by itself and prepare vertex data for use by the renderer.
  1069. glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, mi->surfaces[i].vertex_buffer);
  1070. glBeginTransformFeedback(GL_POINTS);
  1071. glDrawArrays(GL_POINTS, 0, mi->mesh->surfaces[i]->vertex_count);
  1072. glEndTransformFeedback();
  1073. }
  1074. glBindVertexArray(0);
  1075. glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
  1076. }
  1077. // This branch should only execute when Skeleton is run by itself.
  1078. if (can_use_skeleton) {
  1079. SkeletonShaderGLES3::ShaderVariant variant = SkeletonShaderGLES3::MODE_BASE_PASS;
  1080. uint64_t specialization = 0;
  1081. specialization |= array_is_2d ? SkeletonShaderGLES3::MODE_2D : 0;
  1082. specialization |= SkeletonShaderGLES3::USE_SKELETON;
  1083. specialization |= SkeletonShaderGLES3::FINAL_PASS;
  1084. specialization |= use_8_weights ? SkeletonShaderGLES3::USE_EIGHT_WEIGHTS : 0;
  1085. if (!array_is_2d) {
  1086. if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_NORMAL))) {
  1087. specialization |= SkeletonShaderGLES3::USE_NORMAL;
  1088. }
  1089. if ((mi->surfaces[i].format_cache & (1ULL << RS::ARRAY_TANGENT))) {
  1090. specialization |= SkeletonShaderGLES3::USE_TANGENT;
  1091. }
  1092. }
  1093. bool success = skeleton_shader.shader.version_bind_shader(skeleton_shader.shader_version, variant, specialization);
  1094. if (!success) {
  1095. continue;
  1096. }
  1097. Transform2D transform = mi->canvas_item_transform_2d.affine_inverse() * sk->base_transform_2d;
  1098. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_X, transform[0], skeleton_shader.shader_version, variant, specialization);
  1099. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_Y, transform[1], skeleton_shader.shader_version, variant, specialization);
  1100. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::SKELETON_TRANSFORM_OFFSET, transform[2], skeleton_shader.shader_version, variant, specialization);
  1101. Transform2D inverse_transform = transform.affine_inverse();
  1102. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_X, inverse_transform[0], skeleton_shader.shader_version, variant, specialization);
  1103. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_Y, inverse_transform[1], skeleton_shader.shader_version, variant, specialization);
  1104. skeleton_shader.shader.version_set_uniform(SkeletonShaderGLES3::INVERSE_TRANSFORM_OFFSET, inverse_transform[2], skeleton_shader.shader_version, variant, specialization);
  1105. GLuint vertex_array_gl = 0;
uint64_t mask = RS::ARRAY_FORMAT_VERTEX | RS::ARRAY_FORMAT_NORMAL | RS::ARRAY_FORMAT_TANGENT;
uint64_t format = mi->mesh->surfaces[i]->format & mask; // Format should only have vertex, normal, tangent (as necessary).
  1108. mesh_surface_get_vertex_arrays_and_format(mi->mesh->surfaces[i], format, vertex_array_gl);
  1109. glBindVertexArray(vertex_array_gl);
  1110. _compute_skeleton(mi, sk, i);
  1111. }
  1112. }
  1113. mi->dirty = false;
  1114. if (sk) {
  1115. mi->skeleton_version = sk->version;
  1116. }
  1117. dirty_mesh_instance_arrays.remove(&mi->array_update_list);
  1118. }
  1119. glDisable(GL_RASTERIZER_DISCARD);
  1120. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1121. glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0);
  1122. }
  1123. /* MULTIMESH API */
  1124. RID MeshStorage::multimesh_allocate() {
  1125. return multimesh_owner.allocate_rid();
  1126. }
  1127. void MeshStorage::multimesh_initialize(RID p_rid) {
  1128. multimesh_owner.initialize_rid(p_rid, MultiMesh());
  1129. }
  1130. void MeshStorage::multimesh_free(RID p_rid) {
  1131. _update_dirty_multimeshes();
  1132. multimesh_allocate_data(p_rid, 0, RS::MULTIMESH_TRANSFORM_2D);
  1133. MultiMesh *multimesh = multimesh_owner.get_or_null(p_rid);
  1134. multimesh->dependency.deleted_notify(p_rid);
  1135. multimesh_owner.free(p_rid);
  1136. }
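// The instance buffer is tightly packed floats: 8 (2D) or 12 (3D) for the transform,
// followed by 2 floats each for the half-float packed color and custom data when either is present.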
  1137. void MeshStorage::multimesh_allocate_data(RID p_multimesh, int p_instances, RS::MultimeshTransformFormat p_transform_format, bool p_use_colors, bool p_use_custom_data) {
  1138. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1139. ERR_FAIL_NULL(multimesh);
  1140. if (multimesh->instances == p_instances && multimesh->xform_format == p_transform_format && multimesh->uses_colors == p_use_colors && multimesh->uses_custom_data == p_use_custom_data) {
  1141. return;
  1142. }
  1143. if (multimesh->buffer) {
  1144. GLES3::Utilities::get_singleton()->buffer_free_data(multimesh->buffer);
  1145. multimesh->buffer = 0;
  1146. }
  1147. if (multimesh->data_cache_dirty_regions) {
  1148. memdelete_arr(multimesh->data_cache_dirty_regions);
  1149. multimesh->data_cache_dirty_regions = nullptr;
  1150. multimesh->data_cache_used_dirty_regions = 0;
  1151. }
  1152. // If we have either color or custom data, reserve space for both to make data handling logic simpler.
  1153. // This way we can always treat them both as a single, compressed uvec4.
  1154. int color_and_custom_strides = (p_use_colors || p_use_custom_data) ? 2 : 0;
  1155. multimesh->instances = p_instances;
  1156. multimesh->xform_format = p_transform_format;
  1157. multimesh->uses_colors = p_use_colors;
  1158. multimesh->color_offset_cache = p_transform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
  1159. multimesh->uses_custom_data = p_use_custom_data;
  1160. multimesh->custom_data_offset_cache = multimesh->color_offset_cache + color_and_custom_strides;
  1161. multimesh->stride_cache = multimesh->custom_data_offset_cache + color_and_custom_strides;
  1162. multimesh->buffer_set = false;
  1163. multimesh->data_cache = Vector<float>();
  1164. multimesh->aabb = AABB();
  1165. multimesh->aabb_dirty = false;
  1166. multimesh->visible_instances = MIN(multimesh->visible_instances, multimesh->instances);
  1167. if (multimesh->instances) {
  1168. glGenBuffers(1, &multimesh->buffer);
  1169. glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
  1170. GLES3::Utilities::get_singleton()->buffer_allocate_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float), nullptr, GL_STATIC_DRAW, "MultiMesh buffer");
  1171. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1172. }
  1173. multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH);
  1174. }
  1175. int MeshStorage::multimesh_get_instance_count(RID p_multimesh) const {
  1176. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1177. ERR_FAIL_NULL_V(multimesh, 0);
  1178. return multimesh->instances;
  1179. }
  1180. void MeshStorage::multimesh_set_mesh(RID p_multimesh, RID p_mesh) {
  1181. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1182. ERR_FAIL_NULL(multimesh);
  1183. if (multimesh->mesh == p_mesh || p_mesh.is_null()) {
  1184. return;
  1185. }
  1186. multimesh->mesh = p_mesh;
  1187. if (multimesh->instances == 0) {
  1188. return;
  1189. }
  1190. if (multimesh->data_cache.size()) {
  1191. //we have a data cache, just mark it dirty
  1192. _multimesh_mark_all_dirty(multimesh, false, true);
  1193. } else if (multimesh->instances) {
  1194. // Need to re-create AABB. Unfortunately, calling this has a penalty.
  1195. if (multimesh->buffer_set) {
  1196. Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
  1197. const uint8_t *r = buffer.ptr();
  1198. const float *data = (const float *)r;
  1199. _multimesh_re_create_aabb(multimesh, data, multimesh->instances);
  1200. }
  1201. }
  1202. multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MESH);
  1203. }
  1204. #define MULTIMESH_DIRTY_REGION_SIZE 512
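// Copies the instance buffer from the GPU into a CPU-side cache so individual instances
// can be read and written without fetching from GPU memory on every access.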
  1205. void MeshStorage::_multimesh_make_local(MultiMesh *multimesh) const {
  1206. if (multimesh->data_cache.size() > 0 || multimesh->instances == 0) {
  1207. return; //already local
  1208. }
  1209. ERR_FAIL_COND(multimesh->data_cache.size() > 0);
  1210. // this means that the user wants to load/save individual elements,
  1211. // for this, the data must reside on CPU, so just copy it there.
  1212. multimesh->data_cache.resize(multimesh->instances * multimesh->stride_cache);
  1213. {
  1214. float *w = multimesh->data_cache.ptrw();
  1215. if (multimesh->buffer_set) {
  1216. Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
  1217. {
  1218. const uint8_t *r = buffer.ptr();
  1219. memcpy(w, r, buffer.size());
  1220. }
  1221. } else {
  1222. memset(w, 0, (size_t)multimesh->instances * multimesh->stride_cache * sizeof(float));
  1223. }
  1224. }
  1225. uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1226. multimesh->data_cache_dirty_regions = memnew_arr(bool, data_cache_dirty_region_count);
  1227. for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
  1228. multimesh->data_cache_dirty_regions[i] = false;
  1229. }
  1230. multimesh->data_cache_used_dirty_regions = 0;
  1231. }
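// Flags the dirty region containing p_index (and optionally the AABB) and links the multimesh into the dirty list so _update_dirty_multimeshes() uploads it.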
  1232. void MeshStorage::_multimesh_mark_dirty(MultiMesh *multimesh, int p_index, bool p_aabb) {
  1233. uint32_t region_index = p_index / MULTIMESH_DIRTY_REGION_SIZE;
  1234. #ifdef DEBUG_ENABLED
  1235. uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1236. ERR_FAIL_UNSIGNED_INDEX(region_index, data_cache_dirty_region_count); //bug
  1237. #endif
  1238. if (!multimesh->data_cache_dirty_regions[region_index]) {
  1239. multimesh->data_cache_dirty_regions[region_index] = true;
  1240. multimesh->data_cache_used_dirty_regions++;
  1241. }
  1242. if (p_aabb) {
  1243. multimesh->aabb_dirty = true;
  1244. }
  1245. if (!multimesh->dirty) {
  1246. multimesh->dirty_list = multimesh_dirty_list;
  1247. multimesh_dirty_list = multimesh;
  1248. multimesh->dirty = true;
  1249. }
  1250. }
  1251. void MeshStorage::_multimesh_mark_all_dirty(MultiMesh *multimesh, bool p_data, bool p_aabb) {
  1252. if (p_data) {
  1253. uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1254. for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
  1255. if (!multimesh->data_cache_dirty_regions[i]) {
  1256. multimesh->data_cache_dirty_regions[i] = true;
  1257. multimesh->data_cache_used_dirty_regions++;
  1258. }
  1259. }
  1260. }
  1261. if (p_aabb) {
  1262. multimesh->aabb_dirty = true;
  1263. }
  1264. if (!multimesh->dirty) {
  1265. multimesh->dirty_list = multimesh_dirty_list;
  1266. multimesh_dirty_list = multimesh;
  1267. multimesh->dirty = true;
  1268. }
  1269. }
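// Rebuilds the combined AABB by transforming the mesh AABB with each instance transform and merging the results.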
  1270. void MeshStorage::_multimesh_re_create_aabb(MultiMesh *multimesh, const float *p_data, int p_instances) {
  1271. ERR_FAIL_COND(multimesh->mesh.is_null());
  1272. AABB aabb;
  1273. AABB mesh_aabb = mesh_get_aabb(multimesh->mesh);
  1274. for (int i = 0; i < p_instances; i++) {
  1275. const float *data = p_data + multimesh->stride_cache * i;
  1276. Transform3D t;
  1277. if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
  1278. t.basis.rows[0][0] = data[0];
  1279. t.basis.rows[0][1] = data[1];
  1280. t.basis.rows[0][2] = data[2];
  1281. t.origin.x = data[3];
  1282. t.basis.rows[1][0] = data[4];
  1283. t.basis.rows[1][1] = data[5];
  1284. t.basis.rows[1][2] = data[6];
  1285. t.origin.y = data[7];
  1286. t.basis.rows[2][0] = data[8];
  1287. t.basis.rows[2][1] = data[9];
  1288. t.basis.rows[2][2] = data[10];
  1289. t.origin.z = data[11];
  1290. } else {
  1291. t.basis.rows[0][0] = data[0];
  1292. t.basis.rows[0][1] = data[1];
  1293. t.origin.x = data[3];
  1294. t.basis.rows[1][0] = data[4];
  1295. t.basis.rows[1][1] = data[5];
  1296. t.origin.y = data[7];
  1297. }
  1298. if (i == 0) {
  1299. aabb = t.xform(mesh_aabb);
  1300. } else {
  1301. aabb.merge_with(t.xform(mesh_aabb));
  1302. }
  1303. }
  1304. multimesh->aabb = aabb;
  1305. }
  1306. void MeshStorage::multimesh_instance_set_transform(RID p_multimesh, int p_index, const Transform3D &p_transform) {
  1307. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1308. ERR_FAIL_NULL(multimesh);
  1309. ERR_FAIL_INDEX(p_index, multimesh->instances);
  1310. ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D);
  1311. _multimesh_make_local(multimesh);
  1312. {
  1313. float *w = multimesh->data_cache.ptrw();
  1314. float *dataptr = w + p_index * multimesh->stride_cache;
  1315. dataptr[0] = p_transform.basis.rows[0][0];
  1316. dataptr[1] = p_transform.basis.rows[0][1];
  1317. dataptr[2] = p_transform.basis.rows[0][2];
  1318. dataptr[3] = p_transform.origin.x;
  1319. dataptr[4] = p_transform.basis.rows[1][0];
  1320. dataptr[5] = p_transform.basis.rows[1][1];
  1321. dataptr[6] = p_transform.basis.rows[1][2];
  1322. dataptr[7] = p_transform.origin.y;
  1323. dataptr[8] = p_transform.basis.rows[2][0];
  1324. dataptr[9] = p_transform.basis.rows[2][1];
  1325. dataptr[10] = p_transform.basis.rows[2][2];
  1326. dataptr[11] = p_transform.origin.z;
  1327. }
  1328. _multimesh_mark_dirty(multimesh, p_index, true);
  1329. }
  1330. void MeshStorage::multimesh_instance_set_transform_2d(RID p_multimesh, int p_index, const Transform2D &p_transform) {
  1331. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1332. ERR_FAIL_NULL(multimesh);
  1333. ERR_FAIL_INDEX(p_index, multimesh->instances);
  1334. ERR_FAIL_COND(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D);
  1335. _multimesh_make_local(multimesh);
  1336. {
  1337. float *w = multimesh->data_cache.ptrw();
  1338. float *dataptr = w + p_index * multimesh->stride_cache;
  1339. dataptr[0] = p_transform.columns[0][0];
  1340. dataptr[1] = p_transform.columns[1][0];
  1341. dataptr[2] = 0;
  1342. dataptr[3] = p_transform.columns[2][0];
  1343. dataptr[4] = p_transform.columns[0][1];
  1344. dataptr[5] = p_transform.columns[1][1];
  1345. dataptr[6] = 0;
  1346. dataptr[7] = p_transform.columns[2][1];
  1347. }
  1348. _multimesh_mark_dirty(multimesh, p_index, true);
  1349. }
  1350. void MeshStorage::multimesh_instance_set_color(RID p_multimesh, int p_index, const Color &p_color) {
  1351. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1352. ERR_FAIL_NULL(multimesh);
  1353. ERR_FAIL_INDEX(p_index, multimesh->instances);
  1354. ERR_FAIL_COND(!multimesh->uses_colors);
  1355. _multimesh_make_local(multimesh);
  1356. {
  1357. // Colors are packed into 2 floats.
  1358. float *w = multimesh->data_cache.ptrw();
  1359. float *dataptr = w + p_index * multimesh->stride_cache + multimesh->color_offset_cache;
  1360. uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
  1361. memcpy(dataptr, val, 2 * 4);
  1362. }
  1363. _multimesh_mark_dirty(multimesh, p_index, false);
  1364. }
  1365. void MeshStorage::multimesh_instance_set_custom_data(RID p_multimesh, int p_index, const Color &p_color) {
  1366. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1367. ERR_FAIL_NULL(multimesh);
  1368. ERR_FAIL_INDEX(p_index, multimesh->instances);
  1369. ERR_FAIL_COND(!multimesh->uses_custom_data);
  1370. _multimesh_make_local(multimesh);
  1371. {
  1372. float *w = multimesh->data_cache.ptrw();
  1373. float *dataptr = w + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;
  1374. uint16_t val[4] = { Math::make_half_float(p_color.r), Math::make_half_float(p_color.g), Math::make_half_float(p_color.b), Math::make_half_float(p_color.a) };
  1375. memcpy(dataptr, val, 2 * 4);
  1376. }
  1377. _multimesh_mark_dirty(multimesh, p_index, false);
  1378. }
  1379. RID MeshStorage::multimesh_get_mesh(RID p_multimesh) const {
  1380. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1381. ERR_FAIL_NULL_V(multimesh, RID());
  1382. return multimesh->mesh;
  1383. }
  1384. AABB MeshStorage::multimesh_get_aabb(RID p_multimesh) const {
  1385. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1386. ERR_FAIL_NULL_V(multimesh, AABB());
  1387. if (multimesh->aabb_dirty) {
  1388. const_cast<MeshStorage *>(this)->_update_dirty_multimeshes();
  1389. }
  1390. return multimesh->aabb;
  1391. }
  1392. Transform3D MeshStorage::multimesh_instance_get_transform(RID p_multimesh, int p_index) const {
  1393. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1394. ERR_FAIL_NULL_V(multimesh, Transform3D());
  1395. ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform3D());
  1396. ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_3D, Transform3D());
  1397. _multimesh_make_local(multimesh);
  1398. Transform3D t;
  1399. {
  1400. const float *r = multimesh->data_cache.ptr();
  1401. const float *dataptr = r + p_index * multimesh->stride_cache;
  1402. t.basis.rows[0][0] = dataptr[0];
  1403. t.basis.rows[0][1] = dataptr[1];
  1404. t.basis.rows[0][2] = dataptr[2];
  1405. t.origin.x = dataptr[3];
  1406. t.basis.rows[1][0] = dataptr[4];
  1407. t.basis.rows[1][1] = dataptr[5];
  1408. t.basis.rows[1][2] = dataptr[6];
  1409. t.origin.y = dataptr[7];
  1410. t.basis.rows[2][0] = dataptr[8];
  1411. t.basis.rows[2][1] = dataptr[9];
  1412. t.basis.rows[2][2] = dataptr[10];
  1413. t.origin.z = dataptr[11];
  1414. }
  1415. return t;
  1416. }
  1417. Transform2D MeshStorage::multimesh_instance_get_transform_2d(RID p_multimesh, int p_index) const {
  1418. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1419. ERR_FAIL_NULL_V(multimesh, Transform2D());
  1420. ERR_FAIL_INDEX_V(p_index, multimesh->instances, Transform2D());
  1421. ERR_FAIL_COND_V(multimesh->xform_format != RS::MULTIMESH_TRANSFORM_2D, Transform2D());
  1422. _multimesh_make_local(multimesh);
  1423. Transform2D t;
  1424. {
  1425. const float *r = multimesh->data_cache.ptr();
  1426. const float *dataptr = r + p_index * multimesh->stride_cache;
  1427. t.columns[0][0] = dataptr[0];
  1428. t.columns[1][0] = dataptr[1];
  1429. t.columns[2][0] = dataptr[3];
  1430. t.columns[0][1] = dataptr[4];
  1431. t.columns[1][1] = dataptr[5];
  1432. t.columns[2][1] = dataptr[7];
  1433. }
  1434. return t;
  1435. }
  1436. Color MeshStorage::multimesh_instance_get_color(RID p_multimesh, int p_index) const {
  1437. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1438. ERR_FAIL_NULL_V(multimesh, Color());
  1439. ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
  1440. ERR_FAIL_COND_V(!multimesh->uses_colors, Color());
  1441. _multimesh_make_local(multimesh);
  1442. Color c;
  1443. {
  1444. const float *r = multimesh->data_cache.ptr();
  1445. const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->color_offset_cache;
  1446. uint16_t raw_data[4];
  1447. memcpy(raw_data, dataptr, 2 * 4);
  1448. c.r = Math::half_to_float(raw_data[0]);
  1449. c.g = Math::half_to_float(raw_data[1]);
  1450. c.b = Math::half_to_float(raw_data[2]);
  1451. c.a = Math::half_to_float(raw_data[3]);
  1452. }
  1453. return c;
  1454. }
  1455. Color MeshStorage::multimesh_instance_get_custom_data(RID p_multimesh, int p_index) const {
  1456. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1457. ERR_FAIL_NULL_V(multimesh, Color());
  1458. ERR_FAIL_INDEX_V(p_index, multimesh->instances, Color());
  1459. ERR_FAIL_COND_V(!multimesh->uses_custom_data, Color());
  1460. _multimesh_make_local(multimesh);
  1461. Color c;
  1462. {
  1463. const float *r = multimesh->data_cache.ptr();
  1464. const float *dataptr = r + p_index * multimesh->stride_cache + multimesh->custom_data_offset_cache;
  1465. uint16_t raw_data[4];
  1466. memcpy(raw_data, dataptr, 2 * 4);
  1467. c.r = Math::half_to_float(raw_data[0]);
  1468. c.g = Math::half_to_float(raw_data[1]);
  1469. c.b = Math::half_to_float(raw_data[2]);
  1470. c.a = Math::half_to_float(raw_data[3]);
  1471. }
  1472. return c;
  1473. }
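// Receives the buffer in the uncompressed RenderingServer layout (full floats for color and custom data)
// and repacks color/custom data into half floats before uploading to the GPU.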
  1474. void MeshStorage::multimesh_set_buffer(RID p_multimesh, const Vector<float> &p_buffer) {
  1475. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1476. ERR_FAIL_NULL(multimesh);
  1477. if (multimesh->uses_colors || multimesh->uses_custom_data) {
  1478. // Color and custom need to be packed so copy buffer to data_cache and pack.
  1479. _multimesh_make_local(multimesh);
  1480. uint32_t old_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
  1481. old_stride += multimesh->uses_colors ? 4 : 0;
  1482. old_stride += multimesh->uses_custom_data ? 4 : 0;
  1483. ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)old_stride));
  1484. multimesh->data_cache = p_buffer;
  1485. float *w = multimesh->data_cache.ptrw();
  1486. for (int i = 0; i < multimesh->instances; i++) {
  1487. {
  1488. float *dataptr = w + i * old_stride;
  1489. float *newptr = w + i * multimesh->stride_cache;
  1490. float vals[8] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3], dataptr[4], dataptr[5], dataptr[6], dataptr[7] };
  1491. memcpy(newptr, vals, 8 * 4);
  1492. }
  1493. if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
  1494. float *dataptr = w + i * old_stride + 8;
  1495. float *newptr = w + i * multimesh->stride_cache + 8;
float vals[4] = { dataptr[0], dataptr[1], dataptr[2], dataptr[3] };
  1497. memcpy(newptr, vals, 4 * 4);
  1498. }
  1499. if (multimesh->uses_colors) {
  1500. float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
  1501. float *newptr = w + i * multimesh->stride_cache + multimesh->color_offset_cache;
  1502. uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
  1503. memcpy(newptr, val, 2 * 4);
  1504. }
  1505. if (multimesh->uses_custom_data) {
  1506. float *dataptr = w + i * old_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
  1507. float *newptr = w + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
  1508. uint16_t val[4] = { Math::make_half_float(dataptr[0]), Math::make_half_float(dataptr[1]), Math::make_half_float(dataptr[2]), Math::make_half_float(dataptr[3]) };
  1509. memcpy(newptr, val, 2 * 4);
  1510. }
  1511. }
  1512. multimesh->data_cache.resize(multimesh->instances * (int)multimesh->stride_cache);
  1513. const float *r = multimesh->data_cache.ptr();
  1514. glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
  1515. glBufferData(GL_ARRAY_BUFFER, multimesh->data_cache.size() * sizeof(float), r, GL_STATIC_DRAW);
  1516. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1517. } else {
  1518. // Only Transform is being used, so we can upload directly.
  1519. ERR_FAIL_COND(p_buffer.size() != (multimesh->instances * (int)multimesh->stride_cache));
  1520. const float *r = p_buffer.ptr();
  1521. glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
  1522. glBufferData(GL_ARRAY_BUFFER, p_buffer.size() * sizeof(float), r, GL_STATIC_DRAW);
  1523. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1524. }
  1525. multimesh->buffer_set = true;
if (multimesh->data_cache.size() || multimesh->uses_colors || multimesh->uses_custom_data) {
// A CPU-side data cache is in use; the GPU buffer already holds the latest data.
{
// Clear the dirty flags, since nothing is dirty anymore.
  1531. uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1532. for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
  1533. multimesh->data_cache_dirty_regions[i] = false;
  1534. }
  1535. multimesh->data_cache_used_dirty_regions = 0;
  1536. }
  1537. _multimesh_mark_all_dirty(multimesh, false, true); //update AABB
  1538. } else if (multimesh->mesh.is_valid()) {
  1539. //if we have a mesh set, we need to re-generate the AABB from the new data
  1540. const float *data = p_buffer.ptr();
  1541. _multimesh_re_create_aabb(multimesh, data, multimesh->instances);
  1542. multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
  1543. }
  1544. }
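// Returns the buffer in the uncompressed RenderingServer layout, expanding the packed half-float color and custom data back to full floats.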
  1545. Vector<float> MeshStorage::multimesh_get_buffer(RID p_multimesh) const {
  1546. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1547. ERR_FAIL_NULL_V(multimesh, Vector<float>());
  1548. Vector<float> ret;
  1549. if (multimesh->buffer == 0 || multimesh->instances == 0) {
  1550. return Vector<float>();
  1551. } else if (multimesh->data_cache.size()) {
  1552. ret = multimesh->data_cache;
  1553. } else {
  1554. // Buffer not cached, so fetch from GPU memory. This can be a stalling operation, avoid whenever possible.
  1555. Vector<uint8_t> buffer = Utilities::buffer_get_data(GL_ARRAY_BUFFER, multimesh->buffer, multimesh->instances * multimesh->stride_cache * sizeof(float));
  1556. ret.resize(multimesh->instances * multimesh->stride_cache);
  1557. {
  1558. float *w = ret.ptrw();
  1559. const uint8_t *r = buffer.ptr();
  1560. memcpy(w, r, buffer.size());
  1561. }
  1562. }
  1563. if (multimesh->uses_colors || multimesh->uses_custom_data) {
  1564. // Need to decompress buffer.
  1565. uint32_t new_stride = multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12;
  1566. new_stride += multimesh->uses_colors ? 4 : 0;
  1567. new_stride += multimesh->uses_custom_data ? 4 : 0;
  1568. Vector<float> decompressed;
  1569. decompressed.resize(multimesh->instances * (int)new_stride);
  1570. float *w = decompressed.ptrw();
  1571. const float *r = ret.ptr();
  1572. for (int i = 0; i < multimesh->instances; i++) {
  1573. {
  1574. float *newptr = w + i * new_stride;
  1575. const float *oldptr = r + i * multimesh->stride_cache;
  1576. float vals[8] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3], oldptr[4], oldptr[5], oldptr[6], oldptr[7] };
  1577. memcpy(newptr, vals, 8 * 4);
  1578. }
  1579. if (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_3D) {
  1580. float *newptr = w + i * new_stride + 8;
  1581. const float *oldptr = r + i * multimesh->stride_cache + 8;
float vals[4] = { oldptr[0], oldptr[1], oldptr[2], oldptr[3] };
  1583. memcpy(newptr, vals, 4 * 4);
  1584. }
  1585. if (multimesh->uses_colors) {
  1586. float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12);
  1587. const float *oldptr = r + i * multimesh->stride_cache + multimesh->color_offset_cache;
  1588. uint16_t raw_data[4];
  1589. memcpy(raw_data, oldptr, 2 * 4);
  1590. newptr[0] = Math::half_to_float(raw_data[0]);
  1591. newptr[1] = Math::half_to_float(raw_data[1]);
  1592. newptr[2] = Math::half_to_float(raw_data[2]);
  1593. newptr[3] = Math::half_to_float(raw_data[3]);
  1594. }
  1595. if (multimesh->uses_custom_data) {
  1596. float *newptr = w + i * new_stride + (multimesh->xform_format == RS::MULTIMESH_TRANSFORM_2D ? 8 : 12) + (multimesh->uses_colors ? 4 : 0);
  1597. const float *oldptr = r + i * multimesh->stride_cache + multimesh->custom_data_offset_cache;
  1598. uint16_t raw_data[4];
  1599. memcpy(raw_data, oldptr, 2 * 4);
  1600. newptr[0] = Math::half_to_float(raw_data[0]);
  1601. newptr[1] = Math::half_to_float(raw_data[1]);
  1602. newptr[2] = Math::half_to_float(raw_data[2]);
  1603. newptr[3] = Math::half_to_float(raw_data[3]);
  1604. }
  1605. }
  1606. return decompressed;
  1607. } else {
  1608. return ret;
  1609. }
  1610. }
  1611. void MeshStorage::multimesh_set_visible_instances(RID p_multimesh, int p_visible) {
  1612. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1613. ERR_FAIL_NULL(multimesh);
  1614. ERR_FAIL_COND(p_visible < -1 || p_visible > multimesh->instances);
  1615. if (multimesh->visible_instances == p_visible) {
  1616. return;
  1617. }
  1618. if (multimesh->data_cache.size()) {
  1619. // There is a data cache, but we may need to update some sections.
  1620. _multimesh_mark_all_dirty(multimesh, false, true);
  1621. int start = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;
  1622. for (int i = start; i < p_visible; i++) {
  1623. _multimesh_mark_dirty(multimesh, i, true);
  1624. }
  1625. }
  1626. multimesh->visible_instances = p_visible;
  1627. multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MULTIMESH_VISIBLE_INSTANCES);
  1628. }
  1629. int MeshStorage::multimesh_get_visible_instances(RID p_multimesh) const {
  1630. MultiMesh *multimesh = multimesh_owner.get_or_null(p_multimesh);
  1631. ERR_FAIL_NULL_V(multimesh, 0);
  1632. return multimesh->visible_instances;
  1633. }
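// Flushes dirty CPU-side caches to the GPU, uploading either whole buffers or individual dirty regions, and recomputes AABBs where needed.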
  1634. void MeshStorage::_update_dirty_multimeshes() {
  1635. while (multimesh_dirty_list) {
  1636. MultiMesh *multimesh = multimesh_dirty_list;
  1637. if (multimesh->data_cache.size()) { //may have been cleared, so only process if it exists
  1638. const float *data = multimesh->data_cache.ptr();
  1639. uint32_t visible_instances = multimesh->visible_instances >= 0 ? multimesh->visible_instances : multimesh->instances;
  1640. if (multimesh->data_cache_used_dirty_regions) {
  1641. uint32_t data_cache_dirty_region_count = (multimesh->instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1642. uint32_t visible_region_count = visible_instances == 0 ? 0 : (visible_instances - 1) / MULTIMESH_DIRTY_REGION_SIZE + 1;
  1643. GLint region_size = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * sizeof(float);
if (multimesh->data_cache_used_dirty_regions > 32 || multimesh->data_cache_used_dirty_regions > visible_region_count / 2) {
// If there are too many dirty regions, or they make up the majority of regions, copy the whole buffer; otherwise the per-region transfer cost piles up too much.
glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
glBufferSubData(GL_ARRAY_BUFFER, 0, MIN(visible_region_count * region_size, multimesh->instances * multimesh->stride_cache * sizeof(float)), data);
glBindBuffer(GL_ARRAY_BUFFER, 0);
} else {
// Only a few dirty regions, so update each one individually.
// TODO: profile the performance cost on low end
  1652. glBindBuffer(GL_ARRAY_BUFFER, multimesh->buffer);
  1653. for (uint32_t i = 0; i < visible_region_count; i++) {
  1654. if (multimesh->data_cache_dirty_regions[i]) {
  1655. GLint offset = i * region_size;
  1656. GLint size = multimesh->stride_cache * (uint32_t)multimesh->instances * (uint32_t)sizeof(float);
  1657. uint32_t region_start_index = multimesh->stride_cache * MULTIMESH_DIRTY_REGION_SIZE * i;
  1658. glBufferSubData(GL_ARRAY_BUFFER, offset, MIN(region_size, size - offset), &data[region_start_index]);
  1659. }
  1660. }
  1661. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1662. }
  1663. for (uint32_t i = 0; i < data_cache_dirty_region_count; i++) {
  1664. multimesh->data_cache_dirty_regions[i] = false;
  1665. }
  1666. multimesh->data_cache_used_dirty_regions = 0;
  1667. }
  1668. if (multimesh->aabb_dirty && multimesh->mesh.is_valid()) {
  1669. _multimesh_re_create_aabb(multimesh, data, visible_instances);
  1670. multimesh->aabb_dirty = false;
  1671. multimesh->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_AABB);
  1672. }
  1673. }
  1674. multimesh_dirty_list = multimesh->dirty_list;
  1675. multimesh->dirty_list = nullptr;
  1676. multimesh->dirty = false;
  1677. }
  1678. multimesh_dirty_list = nullptr;
  1679. }
  1680. /* SKELETON API */
  1681. RID MeshStorage::skeleton_allocate() {
  1682. return skeleton_owner.allocate_rid();
  1683. }
  1684. void MeshStorage::skeleton_initialize(RID p_rid) {
  1685. skeleton_owner.initialize_rid(p_rid, Skeleton());
  1686. }
  1687. void MeshStorage::skeleton_free(RID p_rid) {
  1688. _update_dirty_skeletons();
  1689. skeleton_allocate_data(p_rid, 0);
  1690. Skeleton *skeleton = skeleton_owner.get_or_null(p_rid);
  1691. skeleton->dependency.deleted_notify(p_rid);
  1692. skeleton_owner.free(p_rid);
  1693. }
  1694. void MeshStorage::_skeleton_make_dirty(Skeleton *skeleton) {
  1695. if (!skeleton->dirty) {
  1696. skeleton->dirty = true;
  1697. skeleton->dirty_list = skeleton_dirty_list;
  1698. skeleton_dirty_list = skeleton;
  1699. }
  1700. }
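// Bone transforms are stored in a 256-texel-wide RGBA32F texture: 3 texels per bone in 3D, 2 per bone in 2D.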
  1701. void MeshStorage::skeleton_allocate_data(RID p_skeleton, int p_bones, bool p_2d_skeleton) {
  1702. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1703. ERR_FAIL_NULL(skeleton);
  1704. ERR_FAIL_COND(p_bones < 0);
  1705. if (skeleton->size == p_bones && skeleton->use_2d == p_2d_skeleton) {
  1706. return;
  1707. }
  1708. skeleton->size = p_bones;
  1709. skeleton->use_2d = p_2d_skeleton;
  1710. skeleton->height = (p_bones * (p_2d_skeleton ? 2 : 3)) / 256;
  1711. if ((p_bones * (p_2d_skeleton ? 2 : 3)) % 256) {
  1712. skeleton->height++;
  1713. }
  1714. if (skeleton->transforms_texture != 0) {
  1715. GLES3::Utilities::get_singleton()->texture_free_data(skeleton->transforms_texture);
  1716. skeleton->transforms_texture = 0;
  1717. skeleton->data.clear();
  1718. }
  1719. if (skeleton->size) {
  1720. skeleton->data.resize(256 * skeleton->height * 4);
  1721. glGenTextures(1, &skeleton->transforms_texture);
  1722. glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
  1723. glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, nullptr);
  1724. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
  1725. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
  1726. glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  1727. glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  1728. glBindTexture(GL_TEXTURE_2D, 0);
  1729. GLES3::Utilities::get_singleton()->texture_allocated_data(skeleton->transforms_texture, skeleton->data.size() * sizeof(float), "Skeleton transforms texture");
  1730. memset(skeleton->data.ptrw(), 0, skeleton->data.size() * sizeof(float));
  1731. _skeleton_make_dirty(skeleton);
  1732. }
  1733. skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_DATA);
  1734. }
  1735. void MeshStorage::skeleton_set_base_transform_2d(RID p_skeleton, const Transform2D &p_base_transform) {
  1736. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1737. ERR_FAIL_NULL(skeleton);
  1738. ERR_FAIL_COND(!skeleton->use_2d);
  1739. skeleton->base_transform_2d = p_base_transform;
  1740. }
  1741. int MeshStorage::skeleton_get_bone_count(RID p_skeleton) const {
  1742. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1743. ERR_FAIL_NULL_V(skeleton, 0);
  1744. return skeleton->size;
  1745. }
  1746. void MeshStorage::skeleton_bone_set_transform(RID p_skeleton, int p_bone, const Transform3D &p_transform) {
  1747. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1748. ERR_FAIL_NULL(skeleton);
  1749. ERR_FAIL_INDEX(p_bone, skeleton->size);
  1750. ERR_FAIL_COND(skeleton->use_2d);
  1751. float *dataptr = skeleton->data.ptrw() + p_bone * 12;
  1752. dataptr[0] = p_transform.basis.rows[0][0];
  1753. dataptr[1] = p_transform.basis.rows[0][1];
  1754. dataptr[2] = p_transform.basis.rows[0][2];
  1755. dataptr[3] = p_transform.origin.x;
  1756. dataptr[4] = p_transform.basis.rows[1][0];
  1757. dataptr[5] = p_transform.basis.rows[1][1];
  1758. dataptr[6] = p_transform.basis.rows[1][2];
  1759. dataptr[7] = p_transform.origin.y;
  1760. dataptr[8] = p_transform.basis.rows[2][0];
  1761. dataptr[9] = p_transform.basis.rows[2][1];
  1762. dataptr[10] = p_transform.basis.rows[2][2];
  1763. dataptr[11] = p_transform.origin.z;
  1764. _skeleton_make_dirty(skeleton);
  1765. }
  1766. Transform3D MeshStorage::skeleton_bone_get_transform(RID p_skeleton, int p_bone) const {
  1767. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1768. ERR_FAIL_NULL_V(skeleton, Transform3D());
  1769. ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform3D());
  1770. ERR_FAIL_COND_V(skeleton->use_2d, Transform3D());
  1771. const float *dataptr = skeleton->data.ptr() + p_bone * 12;
  1772. Transform3D t;
  1773. t.basis.rows[0][0] = dataptr[0];
  1774. t.basis.rows[0][1] = dataptr[1];
  1775. t.basis.rows[0][2] = dataptr[2];
  1776. t.origin.x = dataptr[3];
  1777. t.basis.rows[1][0] = dataptr[4];
  1778. t.basis.rows[1][1] = dataptr[5];
  1779. t.basis.rows[1][2] = dataptr[6];
  1780. t.origin.y = dataptr[7];
  1781. t.basis.rows[2][0] = dataptr[8];
  1782. t.basis.rows[2][1] = dataptr[9];
  1783. t.basis.rows[2][2] = dataptr[10];
  1784. t.origin.z = dataptr[11];
  1785. return t;
  1786. }
  1787. void MeshStorage::skeleton_bone_set_transform_2d(RID p_skeleton, int p_bone, const Transform2D &p_transform) {
  1788. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1789. ERR_FAIL_NULL(skeleton);
  1790. ERR_FAIL_INDEX(p_bone, skeleton->size);
  1791. ERR_FAIL_COND(!skeleton->use_2d);
  1792. float *dataptr = skeleton->data.ptrw() + p_bone * 8;
  1793. dataptr[0] = p_transform.columns[0][0];
  1794. dataptr[1] = p_transform.columns[1][0];
  1795. dataptr[2] = 0;
  1796. dataptr[3] = p_transform.columns[2][0];
  1797. dataptr[4] = p_transform.columns[0][1];
  1798. dataptr[5] = p_transform.columns[1][1];
  1799. dataptr[6] = 0;
  1800. dataptr[7] = p_transform.columns[2][1];
  1801. _skeleton_make_dirty(skeleton);
  1802. }
  1803. Transform2D MeshStorage::skeleton_bone_get_transform_2d(RID p_skeleton, int p_bone) const {
  1804. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1805. ERR_FAIL_NULL_V(skeleton, Transform2D());
  1806. ERR_FAIL_INDEX_V(p_bone, skeleton->size, Transform2D());
  1807. ERR_FAIL_COND_V(!skeleton->use_2d, Transform2D());
  1808. const float *dataptr = skeleton->data.ptr() + p_bone * 8;
  1809. Transform2D t;
  1810. t.columns[0][0] = dataptr[0];
  1811. t.columns[1][0] = dataptr[1];
  1812. t.columns[2][0] = dataptr[3];
  1813. t.columns[0][1] = dataptr[4];
  1814. t.columns[1][1] = dataptr[5];
  1815. t.columns[2][1] = dataptr[7];
  1816. return t;
  1817. }
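// Re-uploads the bone data texture for every dirty skeleton and bumps its version so instances can detect the change.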
  1818. void MeshStorage::_update_dirty_skeletons() {
  1819. while (skeleton_dirty_list) {
  1820. Skeleton *skeleton = skeleton_dirty_list;
  1821. if (skeleton->size) {
  1822. glBindTexture(GL_TEXTURE_2D, skeleton->transforms_texture);
  1823. glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 256, skeleton->height, 0, GL_RGBA, GL_FLOAT, skeleton->data.ptr());
  1824. glBindTexture(GL_TEXTURE_2D, 0);
  1825. }
  1826. skeleton_dirty_list = skeleton->dirty_list;
  1827. skeleton->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_SKELETON_BONES);
  1828. skeleton->version++;
  1829. skeleton->dirty = false;
  1830. skeleton->dirty_list = nullptr;
  1831. }
  1832. skeleton_dirty_list = nullptr;
  1833. }
  1834. void MeshStorage::skeleton_update_dependency(RID p_skeleton, DependencyTracker *p_instance) {
  1835. Skeleton *skeleton = skeleton_owner.get_or_null(p_skeleton);
  1836. ERR_FAIL_NULL(skeleton);
  1837. p_instance->update_dependency(&skeleton->dependency);
  1838. }
  1839. #endif // GLES3_ENABLED