|
@@ -322,7 +322,40 @@ RID RenderingServer::get_white_texture() {
|
|
|
return white_texture;
|
|
|
}
|
|
|
|
|
|
-Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint32_t *p_offsets, uint32_t p_vertex_stride, uint32_t p_attrib_stride, uint32_t p_skin_stride, Vector<uint8_t> &r_vertex_array, Vector<uint8_t> &r_attrib_array, Vector<uint8_t> &r_skin_array, int p_vertex_array_len, Vector<uint8_t> &r_index_array, int p_index_array_len, AABB &r_aabb, Vector<AABB> &r_bone_aabb) {
|
|
|
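+// Helpers for ARRAY_FLAG_COMPRESS_ATTRIBUTES. The orthonormal {tangent, binormal, normal}
+// basis is treated as the rows of a rotation matrix and converted to axis-angle form: the
+// angle comes from the matrix trace (trace = 1 + 2 * cos(angle)) and the axis from the
+// skew-symmetric part. The angle is then remapped into 0-1 so that it also carries the
+// binormal sign (the tangent's w component): [0, 0.5) when w is negative, [0.5, 1.0] otherwise.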
+void _get_axis_angle(const Vector3 &p_normal, const Vector4 &p_tangent, float &r_angle, Vector3 &r_axis) {
|
|
|
+ Vector3 tangent = Vector3(p_tangent.x, p_tangent.y, p_tangent.z);
|
|
|
+ float d = p_tangent.w;
|
|
|
+ Vector3 binormal = p_normal.cross(tangent);
|
|
|
+
|
|
|
+ r_angle = Math::acos((tangent.x + binormal.y + p_normal.z - 1.0) / 2.0);
|
|
|
+ float denom = 2.0 * Math::sin(r_angle);
|
|
|
+ r_axis.x = (p_normal.y - binormal.z) / denom;
|
|
|
+ r_axis.y = (tangent.z - p_normal.x) / denom;
|
|
|
+ r_axis.z = (binormal.x - tangent.y) / denom;
|
|
|
+ r_axis.normalize();
|
|
|
+
|
|
|
+ if (d < 0.0) {
|
|
|
+ r_angle = CLAMP((1.0 - r_angle / Math_PI) * 0.5, 0.0, 0.49999);
|
|
|
+ } else {
|
|
|
+ r_angle = (r_angle / Math_PI) * 0.5 + 0.5;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// The inputs to this function should match the outputs of _get_axis_angle. I.e. p_axis is a normalized vector
|
|
|
+// and p_angle includes the binormal direction.
|
|
|
+void _get_tbn_from_axis_angle(const Vector3 &p_axis, float p_angle, Vector3 &r_normal, Vector4 &r_tangent) {
|
|
|
+ float binormal_sign = p_angle > 0.5 ? 1.0 : -1.0;
|
|
|
+ float angle = Math::abs(p_angle * 2.0 - 1.0) * Math_PI;
|
|
|
+ float c = cos(angle);
|
|
|
+ float s = sin(angle);
|
|
|
+ Vector3 omc_axis = (1.0 - c) * p_axis;
|
|
|
+ Vector3 s_axis = s * p_axis;
|
|
|
+ Vector3 tan = omc_axis.x * p_axis + Vector3(c, -s_axis.z, s_axis.y);
|
|
|
+ r_tangent = Vector4(tan.x, tan.y, tan.z, binormal_sign);
|
|
|
+ r_normal = omc_axis.z * p_axis + Vector3(-s_axis.y, s_axis.x, c);
|
|
|
+}
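+// Worked example (illustrative): tangent (0, -1, 0) with w = +1 and normal (0, 0, 1) give
+// binormal (1, 0, 0), i.e. a rotation of PI/2 about the +Z axis. _get_axis_angle() returns
+// axis (0, 0, 1) and a remapped angle of 0.75, and _get_tbn_from_axis_angle() with those
+// values reconstructs the same tangent (0, -1, 0, +1) and normal (0, 0, 1).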
|
|
|
+
|
|
|
+Error RenderingServer::_surface_set_data(Array p_arrays, uint64_t p_format, uint32_t *p_offsets, uint32_t p_vertex_stride, uint32_t p_normal_stride, uint32_t p_attrib_stride, uint32_t p_skin_stride, Vector<uint8_t> &r_vertex_array, Vector<uint8_t> &r_attrib_array, Vector<uint8_t> &r_skin_array, int p_vertex_array_len, Vector<uint8_t> &r_index_array, int p_index_array_len, AABB &r_aabb, Vector<AABB> &r_bone_aabb, Vector4 &r_uv_scale) {
|
|
|
uint8_t *vw = r_vertex_array.ptrw();
|
|
|
uint8_t *aw = r_attrib_array.ptrw();
|
|
|
uint8_t *sw = r_skin_array.ptrw();
|
|
@@ -334,8 +367,44 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint
|
|
|
|
|
|
int max_bone = 0;
|
|
|
|
|
|
+	// Preprocess UVs if compression is enabled.
|
|
|
+ if (p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES && ((p_format & RS::ARRAY_FORMAT_TEX_UV) || (p_format & RS::ARRAY_FORMAT_TEX_UV2))) {
|
|
|
+ const Vector2 *uv_src = nullptr;
|
|
|
+ if (p_format & RS::ARRAY_FORMAT_TEX_UV) {
|
|
|
+ Vector<Vector2> array = p_arrays[RS::ARRAY_TEX_UV];
|
|
|
+ uv_src = array.ptr();
|
|
|
+ }
|
|
|
+
|
|
|
+ const Vector2 *uv2_src = nullptr;
|
|
|
+ if (p_format & RS::ARRAY_FORMAT_TEX_UV2) {
|
|
|
+ Vector<Vector2> array = p_arrays[RS::ARRAY_TEX_UV2];
|
|
|
+ uv2_src = array.ptr();
|
|
|
+ }
|
|
|
+
|
|
|
+ Vector2 max_val = Vector2(0.0, 0.0);
|
|
|
+ Vector2 min_val = Vector2(0.0, 0.0);
|
|
|
+ Vector2 max_val2 = Vector2(0.0, 0.0);
|
|
|
+ Vector2 min_val2 = Vector2(0.0, 0.0);
|
|
|
+
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ if (p_format & RS::ARRAY_FORMAT_TEX_UV) {
|
|
|
+ max_val = max_val.max(uv_src[i]);
|
|
|
+ min_val = min_val.min(uv_src[i]);
|
|
|
+ }
|
|
|
+ if (p_format & RS::ARRAY_FORMAT_TEX_UV2) {
|
|
|
+ max_val2 = max_val2.max(uv2_src[i]);
|
|
|
+ min_val2 = min_val2.min(uv2_src[i]);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ max_val = max_val.abs().max(min_val.abs());
|
|
|
+ max_val2 = max_val2.abs().max(min_val2.abs());
|
|
|
+
|
|
|
+ r_uv_scale = Vector4(max_val.x, max_val.y, max_val2.x, max_val2.y) * Vector4(2.0, 2.0, 2.0, 2.0);
|
|
|
+ }
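+	// Example: UVs spanning -1.5 to 2.0 on an axis give max_val 2.0 and a uv_scale of 4.0 for
+	// that axis. Each UV is later stored as uv / uv_scale + 0.5 before 16-bit quantization, so
+	// 2.0 maps to 1.0, 0.0 maps to 0.5, and -2.0 would map to 0.0.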
|
|
|
+
|
|
|
for (int ai = 0; ai < RS::ARRAY_MAX; ai++) {
|
|
|
- if (!(p_format & (1 << ai))) { // No array
|
|
|
+ if (!(p_format & (1ULL << ai))) { // No array
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -375,7 +444,118 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint
|
|
|
// Setting vertices means regenerating the AABB.
|
|
|
AABB aabb;
|
|
|
|
|
|
- {
|
|
|
+ if (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ // First we need to generate the AABB for the entire surface.
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ if (i == 0) {
|
|
|
+ aabb = AABB(src[i], SMALL_VEC3);
|
|
|
+ } else {
|
|
|
+ aabb.expand_to(src[i]);
|
|
|
+ }
|
|
|
+ }
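+		// Positions are stored as unorm16 values relative to this AABB: a vertex at aabb.position
+		// maps to 0 and one at aabb.position + aabb.size maps to 65535, so the full AABB has to be
+		// known before any vertex is written.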
|
|
|
+
|
|
|
+ bool using_normals_tangents = (p_format & RS::ARRAY_FORMAT_NORMAL) && (p_format & RS::ARRAY_FORMAT_TANGENT);
|
|
|
+
|
|
|
+ if (!using_normals_tangents) {
|
|
|
+ // Early out if we are only setting vertex positions.
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ Vector3 pos = (src[i] - aabb.position) / aabb.size;
|
|
|
+ uint16_t vector[4] = {
|
|
|
+ (uint16_t)CLAMP(pos.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.y * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.z * 65535, 0, 65535),
|
|
|
+ (uint16_t)0
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, sizeof(uint16_t) * 4);
|
|
|
+ }
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Validate normal and tangent arrays.
|
|
|
+ ERR_FAIL_COND_V(p_arrays[RS::ARRAY_NORMAL].get_type() != Variant::PACKED_VECTOR3_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
+ Variant::Type tangent_type = p_arrays[RS::ARRAY_TANGENT].get_type();
|
|
|
+ ERR_FAIL_COND_V(tangent_type != Variant::PACKED_FLOAT32_ARRAY && tangent_type != Variant::PACKED_FLOAT64_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
+
|
|
|
+ Vector<Vector3> normal_array = p_arrays[RS::ARRAY_NORMAL];
|
|
|
+ ERR_FAIL_COND_V(normal_array.size() != p_vertex_array_len, ERR_INVALID_PARAMETER);
|
|
|
+ const Vector3 *normal_src = normal_array.ptr();
|
|
|
+
|
|
|
+ // We need a different version if using double precision tangents.
|
|
|
+ if (tangent_type == Variant::PACKED_FLOAT32_ARRAY) {
|
|
|
+ Vector<float> tangent_array = p_arrays[RS::ARRAY_TANGENT];
|
|
|
+ ERR_FAIL_COND_V(tangent_array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
+ const float *tangent_src = tangent_array.ptr();
|
|
|
+
|
|
|
+ // Set data for vertex, normal, and tangent.
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ float angle = 0.0;
|
|
|
+ Vector3 axis;
|
|
|
+ Vector4 tangent = Vector4(tangent_src[i * 4 + 0], tangent_src[i * 4 + 1], tangent_src[i * 4 + 2], tangent_src[i * 4 + 3]);
|
|
|
+ _get_axis_angle(normal_src[i], tangent, angle, axis);
|
|
|
+
|
|
|
+ // Store axis.
|
|
|
+ {
|
|
|
+ Vector2 res = axis.octahedron_encode();
|
|
|
+ uint16_t vector[2] = {
|
|
|
+ (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[RS::ARRAY_NORMAL] + i * p_normal_stride], vector, 4);
|
|
|
+ }
|
|
|
+
|
|
|
+ // Store vertex position + angle.
|
|
|
+ {
|
|
|
+ Vector3 pos = (src[i] - aabb.position) / aabb.size;
|
|
|
+ uint16_t vector[4] = {
|
|
|
+ (uint16_t)CLAMP(pos.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.y * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.z * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(angle * 65535, 0, 65535)
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, sizeof(uint16_t) * 4);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } else { // PACKED_FLOAT64_ARRAY
|
|
|
+ Vector<double> tangent_array = p_arrays[RS::ARRAY_TANGENT];
|
|
|
+ ERR_FAIL_COND_V(tangent_array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
+ const double *tangent_src = tangent_array.ptr();
|
|
|
+
|
|
|
+ // Set data for vertex, normal, and tangent.
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ float angle;
|
|
|
+ Vector3 axis;
|
|
|
+ Vector4 tangent = Vector4(tangent_src[i * 4 + 0], tangent_src[i * 4 + 1], tangent_src[i * 4 + 2], tangent_src[i * 4 + 3]);
|
|
|
+ _get_axis_angle(normal_src[i], tangent, angle, axis);
|
|
|
+
|
|
|
+ // Store axis.
|
|
|
+ {
|
|
|
+ Vector2 res = axis.octahedron_encode();
|
|
|
+ uint16_t vector[2] = {
|
|
|
+ (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[RS::ARRAY_NORMAL] + i * p_normal_stride], vector, 4);
|
|
|
+ }
|
|
|
+
|
|
|
+ // Store vertex position + angle.
|
|
|
+ {
|
|
|
+ Vector3 pos = (src[i] - aabb.position) / aabb.size;
|
|
|
+ uint16_t vector[4] = {
|
|
|
+ (uint16_t)CLAMP(pos.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.y * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(pos.z * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(angle * 65535, 0, 65535)
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, sizeof(uint16_t) * 4);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } else {
|
|
|
for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
float vector[3] = { (float)src[i].x, (float)src[i].y, (float)src[i].z };
|
|
|
|
|
@@ -394,55 +574,61 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint
|
|
|
|
|
|
} break;
|
|
|
case RS::ARRAY_NORMAL: {
|
|
|
- ERR_FAIL_COND_V(p_arrays[ai].get_type() != Variant::PACKED_VECTOR3_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
+			// If using compression, we store the normal while storing vertices.
|
|
|
+ if (!(p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
|
|
|
+ ERR_FAIL_COND_V(p_arrays[ai].get_type() != Variant::PACKED_VECTOR3_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
|
|
|
- Vector<Vector3> array = p_arrays[ai];
|
|
|
- ERR_FAIL_COND_V(array.size() != p_vertex_array_len, ERR_INVALID_PARAMETER);
|
|
|
-
|
|
|
- const Vector3 *src = array.ptr();
|
|
|
- for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
- Vector2 res = src[i].octahedron_encode();
|
|
|
- uint16_t vector[2] = {
|
|
|
- (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
- (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
- };
|
|
|
-
|
|
|
- memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, 4);
|
|
|
- }
|
|
|
- } break;
|
|
|
-
|
|
|
- case RS::ARRAY_TANGENT: {
|
|
|
- Variant::Type type = p_arrays[ai].get_type();
|
|
|
- ERR_FAIL_COND_V(type != Variant::PACKED_FLOAT32_ARRAY && type != Variant::PACKED_FLOAT64_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
- if (type == Variant::PACKED_FLOAT32_ARRAY) {
|
|
|
- Vector<float> array = p_arrays[ai];
|
|
|
- ERR_FAIL_COND_V(array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
- const float *src_ptr = array.ptr();
|
|
|
+ Vector<Vector3> array = p_arrays[ai];
|
|
|
+ ERR_FAIL_COND_V(array.size() != p_vertex_array_len, ERR_INVALID_PARAMETER);
|
|
|
|
|
|
+ const Vector3 *src = array.ptr();
|
|
|
for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
- const Vector3 src(src_ptr[i * 4 + 0], src_ptr[i * 4 + 1], src_ptr[i * 4 + 2]);
|
|
|
- Vector2 res = src.octahedron_tangent_encode(src_ptr[i * 4 + 3]);
|
|
|
+ Vector2 res = src[i].octahedron_encode();
|
|
|
uint16_t vector[2] = {
|
|
|
(uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
(uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
};
|
|
|
|
|
|
- memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, 4);
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_normal_stride], vector, 4);
|
|
|
}
|
|
|
- } else { // PACKED_FLOAT64_ARRAY
|
|
|
- Vector<double> array = p_arrays[ai];
|
|
|
- ERR_FAIL_COND_V(array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
- const double *src_ptr = array.ptr();
|
|
|
+ }
|
|
|
+ } break;
|
|
|
|
|
|
- for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
- const Vector3 src(src_ptr[i * 4 + 0], src_ptr[i * 4 + 1], src_ptr[i * 4 + 2]);
|
|
|
- Vector2 res = src.octahedron_tangent_encode(src_ptr[i * 4 + 3]);
|
|
|
- uint16_t vector[2] = {
|
|
|
- (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
- (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
- };
|
|
|
+ case RS::ARRAY_TANGENT: {
|
|
|
+			// If using compression, we store the tangent while storing vertices.
|
|
|
+ if (!(p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
|
|
|
+ Variant::Type type = p_arrays[ai].get_type();
|
|
|
+ ERR_FAIL_COND_V(type != Variant::PACKED_FLOAT32_ARRAY && type != Variant::PACKED_FLOAT64_ARRAY, ERR_INVALID_PARAMETER);
|
|
|
+ if (type == Variant::PACKED_FLOAT32_ARRAY) {
|
|
|
+ Vector<float> array = p_arrays[ai];
|
|
|
+ ERR_FAIL_COND_V(array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
+ const float *src_ptr = array.ptr();
|
|
|
+
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ const Vector3 src(src_ptr[i * 4 + 0], src_ptr[i * 4 + 1], src_ptr[i * 4 + 2]);
|
|
|
+ Vector2 res = src.octahedron_tangent_encode(src_ptr[i * 4 + 3]);
|
|
|
+ uint16_t vector[2] = {
|
|
|
+ (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_normal_stride], vector, 4);
|
|
|
+ }
|
|
|
+ } else { // PACKED_FLOAT64_ARRAY
|
|
|
+ Vector<double> array = p_arrays[ai];
|
|
|
+ ERR_FAIL_COND_V(array.size() != p_vertex_array_len * 4, ERR_INVALID_PARAMETER);
|
|
|
+ const double *src_ptr = array.ptr();
|
|
|
|
|
|
- memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, 4);
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ const Vector3 src(src_ptr[i * 4 + 0], src_ptr[i * 4 + 1], src_ptr[i * 4 + 2]);
|
|
|
+ Vector2 res = src.octahedron_tangent_encode(src_ptr[i * 4 + 3]);
|
|
|
+ uint16_t vector[2] = {
|
|
|
+ (uint16_t)CLAMP(res.x * 65535, 0, 65535),
|
|
|
+ (uint16_t)CLAMP(res.y * 65535, 0, 65535),
|
|
|
+ };
|
|
|
+
|
|
|
+ memcpy(&vw[p_offsets[ai] + i * p_normal_stride], vector, 4);
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
} break;
|
|
@@ -472,13 +658,20 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint
|
|
|
ERR_FAIL_COND_V(array.size() != p_vertex_array_len, ERR_INVALID_PARAMETER);
|
|
|
|
|
|
const Vector2 *src = array.ptr();
|
|
|
-
|
|
|
- for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
- float uv[2] = { (float)src[i].x, (float)src[i].y };
|
|
|
-
|
|
|
- memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 2 * 4);
|
|
|
+ if (p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ Vector2 vec = src[i];
|
|
|
+					// Remap into 0-1 from the possible range of -0.5 * uv_scale to 0.5 * uv_scale.
|
|
|
+ vec = vec / (Vector2(r_uv_scale.x, r_uv_scale.y)) + Vector2(0.5, 0.5);
|
|
|
+ uint16_t uv[2] = { (uint16_t)CLAMP(vec.x * 65535, 0, 65535), (uint16_t)CLAMP(vec.y * 65535, 0, 65535) };
|
|
|
+ memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 4);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ float uv[2] = { (float)src[i].x, (float)src[i].y };
|
|
|
+ memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 2 * 4);
|
|
|
+ }
|
|
|
}
|
|
|
-
|
|
|
} break;
|
|
|
|
|
|
case RS::ARRAY_TEX_UV2: {
|
|
@@ -490,9 +683,19 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint32_t p_format, uint
|
|
|
|
|
|
const Vector2 *src = array.ptr();
|
|
|
|
|
|
- for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
- float uv[2] = { (float)src[i].x, (float)src[i].y };
|
|
|
- memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 2 * 4);
|
|
|
+ if (p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ Vector2 vec = src[i];
|
|
|
+					// Remap into 0-1 from the possible range of -0.5 * uv_scale to 0.5 * uv_scale.
|
|
|
+ vec = vec / (Vector2(r_uv_scale.z, r_uv_scale.w)) + Vector2(0.5, 0.5);
|
|
|
+ uint16_t uv[2] = { (uint16_t)CLAMP(vec.x * 65535, 0, 65535), (uint16_t)CLAMP(vec.y * 65535, 0, 65535) };
|
|
|
+ memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 4);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for (int i = 0; i < p_vertex_array_len; i++) {
|
|
|
+ float uv[2] = { (float)src[i].x, (float)src[i].y };
|
|
|
+ memcpy(&aw[p_offsets[ai] + i * p_attrib_stride], uv, 2 * 4);
|
|
|
+ }
|
|
|
}
|
|
|
} break;
|
|
|
case RS::ARRAY_CUSTOM0:
|
|
@@ -707,9 +910,10 @@ uint32_t RenderingServer::mesh_surface_get_format_offset(BitField<ArrayFormat> p
|
|
|
p_format = int64_t(p_format) & ~ARRAY_FORMAT_INDEX;
|
|
|
uint32_t offsets[ARRAY_MAX];
|
|
|
uint32_t vstr;
|
|
|
+ uint32_t ntstr;
|
|
|
uint32_t astr;
|
|
|
uint32_t sstr;
|
|
|
- mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, astr, sstr);
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, ntstr, astr, sstr);
|
|
|
return offsets[p_array_index];
|
|
|
}
|
|
|
|
|
@@ -717,32 +921,48 @@ uint32_t RenderingServer::mesh_surface_get_format_vertex_stride(BitField<ArrayFo
|
|
|
p_format = int64_t(p_format) & ~ARRAY_FORMAT_INDEX;
|
|
|
uint32_t offsets[ARRAY_MAX];
|
|
|
uint32_t vstr;
|
|
|
+ uint32_t ntstr;
|
|
|
uint32_t astr;
|
|
|
uint32_t sstr;
|
|
|
- mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, astr, sstr);
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, ntstr, astr, sstr);
|
|
|
return vstr;
|
|
|
}
|
|
|
+
|
|
|
+uint32_t RenderingServer::mesh_surface_get_format_normal_tangent_stride(BitField<ArrayFormat> p_format, int p_vertex_len) const {
|
|
|
+ p_format = int64_t(p_format) & ~ARRAY_FORMAT_INDEX;
|
|
|
+ uint32_t offsets[ARRAY_MAX];
|
|
|
+ uint32_t vstr;
|
|
|
+ uint32_t ntstr;
|
|
|
+ uint32_t astr;
|
|
|
+ uint32_t sstr;
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, ntstr, astr, sstr);
|
|
|
+	return ntstr;
|
|
|
+}
|
|
|
+
|
|
|
uint32_t RenderingServer::mesh_surface_get_format_attribute_stride(BitField<ArrayFormat> p_format, int p_vertex_len) const {
|
|
|
p_format = int64_t(p_format) & ~ARRAY_FORMAT_INDEX;
|
|
|
uint32_t offsets[ARRAY_MAX];
|
|
|
uint32_t vstr;
|
|
|
+ uint32_t ntstr;
|
|
|
uint32_t astr;
|
|
|
uint32_t sstr;
|
|
|
- mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, astr, sstr);
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, ntstr, astr, sstr);
|
|
|
return astr;
|
|
|
}
|
|
|
uint32_t RenderingServer::mesh_surface_get_format_skin_stride(BitField<ArrayFormat> p_format, int p_vertex_len) const {
|
|
|
p_format = int64_t(p_format) & ~ARRAY_FORMAT_INDEX;
|
|
|
uint32_t offsets[ARRAY_MAX];
|
|
|
uint32_t vstr;
|
|
|
+ uint32_t ntstr;
|
|
|
uint32_t astr;
|
|
|
uint32_t sstr;
|
|
|
- mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, astr, sstr);
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, 0, offsets, vstr, ntstr, astr, sstr);
|
|
|
return sstr;
|
|
|
}
|
|
|
|
|
|
-void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, int p_vertex_len, int p_index_len, uint32_t *r_offsets, uint32_t &r_vertex_element_size, uint32_t &r_attrib_element_size, uint32_t &r_skin_element_size) const {
|
|
|
+void RenderingServer::mesh_surface_make_offsets_from_format(uint64_t p_format, int p_vertex_len, int p_index_len, uint32_t *r_offsets, uint32_t &r_vertex_element_size, uint32_t &r_normal_element_size, uint32_t &r_attrib_element_size, uint32_t &r_skin_element_size) const {
|
|
|
r_vertex_element_size = 0;
|
|
|
+ r_normal_element_size = 0;
|
|
|
r_attrib_element_size = 0;
|
|
|
r_skin_element_size = 0;
|
|
|
|
|
@@ -753,13 +973,15 @@ void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, i
|
|
|
|
|
|
if (i == RS::ARRAY_VERTEX) {
|
|
|
size_accum = &r_vertex_element_size;
|
|
|
+ } else if (i == RS::ARRAY_NORMAL) {
|
|
|
+ size_accum = &r_normal_element_size;
|
|
|
} else if (i == RS::ARRAY_COLOR) {
|
|
|
size_accum = &r_attrib_element_size;
|
|
|
} else if (i == RS::ARRAY_BONES) {
|
|
|
size_accum = &r_skin_element_size;
|
|
|
}
|
|
|
|
|
|
- if (!(p_format & (1 << i))) { // No array
|
|
|
+ if (!(p_format & (1ULL << i))) { // No array
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -770,7 +992,7 @@ void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, i
|
|
|
if (p_format & ARRAY_FLAG_USE_2D_VERTICES) {
|
|
|
elem_size = 2;
|
|
|
} else {
|
|
|
- elem_size = 3;
|
|
|
+ elem_size = (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) ? 2 : 3;
|
|
|
}
|
|
|
|
|
|
elem_size *= sizeof(float);
|
|
@@ -779,22 +1001,22 @@ void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, i
|
|
|
elem_size = 4;
|
|
|
} break;
|
|
|
case RS::ARRAY_TANGENT: {
|
|
|
- elem_size = 4;
|
|
|
+ elem_size = (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) ? 0 : 4;
|
|
|
} break;
|
|
|
case RS::ARRAY_COLOR: {
|
|
|
elem_size = 4;
|
|
|
} break;
|
|
|
case RS::ARRAY_TEX_UV: {
|
|
|
- elem_size = 8;
|
|
|
+ elem_size = (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) ? 4 : 8;
|
|
|
} break;
|
|
|
case RS::ARRAY_TEX_UV2: {
|
|
|
- elem_size = 8;
|
|
|
+ elem_size = (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) ? 4 : 8;
|
|
|
} break;
|
|
|
case RS::ARRAY_CUSTOM0:
|
|
|
case RS::ARRAY_CUSTOM1:
|
|
|
case RS::ARRAY_CUSTOM2:
|
|
|
case RS::ARRAY_CUSTOM3: {
|
|
|
- uint32_t format = (p_format >> (ARRAY_FORMAT_CUSTOM_BASE + (ARRAY_FORMAT_CUSTOM_BITS * (i - ARRAY_CUSTOM0)))) & ARRAY_FORMAT_CUSTOM_MASK;
|
|
|
+ uint64_t format = (p_format >> (ARRAY_FORMAT_CUSTOM_BASE + (ARRAY_FORMAT_CUSTOM_BITS * (i - ARRAY_CUSTOM0)))) & ARRAY_FORMAT_CUSTOM_MASK;
|
|
|
switch (format) {
|
|
|
case ARRAY_CUSTOM_RGBA8_UNORM: {
|
|
|
elem_size = 4;
|
|
@@ -852,6 +1074,9 @@ void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, i
|
|
|
|
|
|
if (size_accum != nullptr) {
|
|
|
r_offsets[i] = (*size_accum);
|
|
|
+ if (i == RS::ARRAY_NORMAL || i == RS::ARRAY_TANGENT) {
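+				// Normal/tangent data is stored in its own region after all of the vertex
+				// positions (positions first, then interleaved normal/tangent pairs), so the
+				// offset starts past the full position block.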
|
|
|
+ r_offsets[i] += r_vertex_element_size * p_vertex_len;
|
|
|
+ }
|
|
|
(*size_accum) += elem_size;
|
|
|
} else {
|
|
|
r_offsets[i] = 0;
|
|
@@ -859,11 +1084,11 @@ void RenderingServer::mesh_surface_make_offsets_from_format(uint32_t p_format, i
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surface_data, PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes, const Dictionary &p_lods, uint32_t p_compress_format) {
|
|
|
+Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surface_data, PrimitiveType p_primitive, const Array &p_arrays, const Array &p_blend_shapes, const Dictionary &p_lods, uint64_t p_compress_format) {
|
|
|
ERR_FAIL_INDEX_V(p_primitive, RS::PRIMITIVE_MAX, ERR_INVALID_PARAMETER);
|
|
|
ERR_FAIL_COND_V(p_arrays.size() != RS::ARRAY_MAX, ERR_INVALID_PARAMETER);
|
|
|
|
|
|
- uint32_t format = 0;
|
|
|
+ uint64_t format = 0;
|
|
|
|
|
|
// Validation
|
|
|
int index_array_len = 0;
|
|
@@ -874,7 +1099,7 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
continue;
|
|
|
}
|
|
|
|
|
|
- format |= (1 << i);
|
|
|
+ format |= (1ULL << i);
|
|
|
|
|
|
if (i == RS::ARRAY_VERTEX) {
|
|
|
switch (p_arrays[i].get_type()) {
|
|
@@ -930,7 +1155,7 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
|
|
|
for (uint32_t i = 0; i < RS::ARRAY_CUSTOM_COUNT; ++i) {
|
|
|
// Include custom array format type.
|
|
|
- if (format & (1 << (ARRAY_CUSTOM0 + i))) {
|
|
|
+ if (format & (1ULL << (ARRAY_CUSTOM0 + i))) {
|
|
|
format |= (RS::ARRAY_FORMAT_CUSTOM_MASK << (RS::ARRAY_FORMAT_CUSTOM_BASE + i * RS::ARRAY_FORMAT_CUSTOM_BITS)) & p_compress_format;
|
|
|
}
|
|
|
}
|
|
@@ -938,21 +1163,33 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
uint32_t offsets[RS::ARRAY_MAX];
|
|
|
|
|
|
uint32_t vertex_element_size;
|
|
|
+ uint32_t normal_element_size;
|
|
|
uint32_t attrib_element_size;
|
|
|
uint32_t skin_element_size;
|
|
|
|
|
|
- mesh_surface_make_offsets_from_format(format, array_len, index_array_len, offsets, vertex_element_size, attrib_element_size, skin_element_size);
|
|
|
-
|
|
|
- uint32_t mask = (1 << ARRAY_MAX) - 1;
|
|
|
+ uint64_t mask = (1ULL << ARRAY_MAX) - 1ULL;
|
|
|
format |= (~mask) & p_compress_format; // Make the full format.
|
|
|
|
|
|
+	// Force the format version to the current version, as this function always returns a surface in the current format.
|
|
|
+ format &= ~(ARRAY_FLAG_FORMAT_VERSION_MASK << ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+ format |= ARRAY_FLAG_FORMAT_CURRENT_VERSION & (ARRAY_FLAG_FORMAT_VERSION_MASK << ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+
|
|
|
+ mesh_surface_make_offsets_from_format(format, array_len, index_array_len, offsets, vertex_element_size, normal_element_size, attrib_element_size, skin_element_size);
|
|
|
+
|
|
|
if ((format & RS::ARRAY_FORMAT_VERTEX) == 0 && !(format & RS::ARRAY_FLAG_USES_EMPTY_VERTEX_ARRAY)) {
|
|
|
ERR_PRINT("Mesh created without vertex array. This mesh will not be visible with the default shader. If using an empty vertex array is intentional, create the mesh with the ARRAY_FLAG_USES_EMPTY_VERTEX_ARRAY flag to silence this error.");
|
|
|
// Set the flag here after warning to suppress errors down the pipeline.
|
|
|
format |= RS::ARRAY_FLAG_USES_EMPTY_VERTEX_ARRAY;
|
|
|
}
|
|
|
|
|
|
- int vertex_array_size = vertex_element_size * array_len;
|
|
|
+ if (format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES && ((format & RS::ARRAY_FORMAT_NORMAL) || (format & RS::ARRAY_FORMAT_TANGENT))) {
|
|
|
+ // If using normals or tangents, then we need all three.
|
|
|
+ ERR_FAIL_COND_V_MSG(!(format & RS::ARRAY_FORMAT_VERTEX), ERR_INVALID_PARAMETER, "Can't use compression flag 'ARRAY_FLAG_COMPRESS_ATTRIBUTES' while using normals or tangents without vertex array.");
|
|
|
+ ERR_FAIL_COND_V_MSG(!(format & RS::ARRAY_FORMAT_NORMAL), ERR_INVALID_PARAMETER, "Can't use compression flag 'ARRAY_FLAG_COMPRESS_ATTRIBUTES' while using tangents without normal array.");
|
|
|
+ ERR_FAIL_COND_V_MSG(!(format & RS::ARRAY_FORMAT_TANGENT), ERR_INVALID_PARAMETER, "Can't use compression flag 'ARRAY_FLAG_COMPRESS_ATTRIBUTES' while using normals without tangent array.");
|
|
|
+ }
|
|
|
+
|
|
|
+ int vertex_array_size = (vertex_element_size + normal_element_size) * array_len;
|
|
|
int attrib_array_size = attrib_element_size * array_len;
|
|
|
int skin_array_size = skin_element_size * array_len;
|
|
|
int index_array_size = offsets[RS::ARRAY_INDEX] * index_array_len;
|
|
@@ -972,7 +1209,9 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
AABB aabb;
|
|
|
Vector<AABB> bone_aabb;
|
|
|
|
|
|
- Error err = _surface_set_data(p_arrays, format, offsets, vertex_element_size, attrib_element_size, skin_element_size, vertex_array, attrib_array, skin_array, array_len, index_array, index_array_len, aabb, bone_aabb);
|
|
|
+ Vector4 uv_scale = Vector4(0.0, 0.0, 0.0, 0.0);
|
|
|
+
|
|
|
+ Error err = _surface_set_data(p_arrays, format, offsets, vertex_element_size, normal_element_size, attrib_element_size, skin_element_size, vertex_array, attrib_array, skin_array, array_len, index_array, index_array_len, aabb, bone_aabb, uv_scale);
|
|
|
ERR_FAIL_COND_V_MSG(err != OK, ERR_INVALID_DATA, "Invalid array format for surface.");
|
|
|
|
|
|
Vector<uint8_t> blend_shape_data;
|
|
@@ -987,7 +1226,8 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
Vector<uint8_t> noskin;
|
|
|
|
|
|
AABB laabb;
|
|
|
- Error err2 = _surface_set_data(p_blend_shapes[i], bs_format, offsets, vertex_element_size, 0, 0, vertex_array_shape, noattrib, noskin, array_len, noindex, 0, laabb, bone_aabb);
|
|
|
+ Vector4 bone_uv_scale; // Not used.
|
|
|
+ Error err2 = _surface_set_data(p_blend_shapes[i], bs_format, offsets, vertex_element_size, normal_element_size, 0, 0, vertex_array_shape, noattrib, noskin, array_len, noindex, 0, laabb, bone_aabb, bone_uv_scale);
|
|
|
aabb.merge_with(laabb);
|
|
|
ERR_FAIL_COND_V_MSG(err2 != OK, ERR_INVALID_DATA, "Invalid blend shape array format for surface.");
|
|
|
|
|
@@ -1048,6 +1288,7 @@ Error RenderingServer::mesh_create_surface_data_from_arrays(SurfaceData *r_surfa
|
|
|
surface_data.blend_shape_data = blend_shape_data;
|
|
|
surface_data.bone_aabbs = bone_aabb;
|
|
|
surface_data.lods = lods;
|
|
|
+ surface_data.uv_scale = uv_scale;
|
|
|
|
|
|
return OK;
|
|
|
}
|
|
@@ -1061,13 +1302,14 @@ void RenderingServer::mesh_add_surface_from_arrays(RID p_mesh, PrimitiveType p_p
|
|
|
mesh_add_surface(p_mesh, sd);
|
|
|
}
|
|
|
|
|
|
-Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t> p_vertex_data, Vector<uint8_t> p_attrib_data, Vector<uint8_t> p_skin_data, int p_vertex_len, Vector<uint8_t> p_index_data, int p_index_len) const {
|
|
|
+Array RenderingServer::_get_array_from_surface(uint64_t p_format, Vector<uint8_t> p_vertex_data, Vector<uint8_t> p_attrib_data, Vector<uint8_t> p_skin_data, int p_vertex_len, Vector<uint8_t> p_index_data, int p_index_len, const AABB &p_aabb) const {
|
|
|
uint32_t offsets[RS::ARRAY_MAX];
|
|
|
|
|
|
uint32_t vertex_elem_size;
|
|
|
+ uint32_t normal_elem_size;
|
|
|
uint32_t attrib_elem_size;
|
|
|
uint32_t skin_elem_size;
|
|
|
- mesh_surface_make_offsets_from_format(p_format, p_vertex_len, p_index_len, offsets, vertex_elem_size, attrib_elem_size, skin_elem_size);
|
|
|
+ mesh_surface_make_offsets_from_format(p_format, p_vertex_len, p_index_len, offsets, vertex_elem_size, normal_elem_size, attrib_elem_size, skin_elem_size);
|
|
|
|
|
|
Array ret;
|
|
|
ret.resize(RS::ARRAY_MAX);
|
|
@@ -1077,7 +1319,7 @@ Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t
|
|
|
const uint8_t *sr = p_skin_data.ptr();
|
|
|
|
|
|
for (int i = 0; i < RS::ARRAY_MAX; i++) {
|
|
|
- if (!(p_format & (1 << i))) {
|
|
|
+ if (!(p_format & (1ULL << i))) {
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -1104,9 +1346,54 @@ Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t
|
|
|
{
|
|
|
Vector3 *w = arr_3d.ptrw();
|
|
|
|
|
|
- for (int j = 0; j < p_vertex_len; j++) {
|
|
|
- const float *v = reinterpret_cast<const float *>(&r[j * vertex_elem_size + offsets[i]]);
|
|
|
- w[j] = Vector3(v[0], v[1], v[2]);
|
|
|
+ if (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ bool using_normals_tangents = (p_format & RS::ARRAY_FORMAT_NORMAL) && (p_format & RS::ARRAY_FORMAT_TANGENT);
|
|
|
+
|
|
|
+ // We only have vertices to read, so just read them and skip everything else.
|
|
|
+ if (!using_normals_tangents) {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint16_t *v = reinterpret_cast<const uint16_t *>(&r[j * vertex_elem_size + offsets[i]]);
|
|
|
+ Vector3 vec = Vector3(float(v[0]) / 65535.0, float(v[1]) / 65535.0, float(v[2]) / 65535.0);
|
|
|
+ w[j] = (vec * p_aabb.size) + p_aabb.position;
|
|
|
+ }
|
|
|
+ continue;
|
|
|
+ }
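+					// Each compressed vertex packs its position and TBN angle into four unorm16
+					// values, with the rotation axis octahedron-encoded in the normal region, so
+					// the normal and tangent arrays are rebuilt here alongside the positions.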
|
|
|
+
|
|
|
+ Vector<Vector3> normals;
|
|
|
+ normals.resize(p_vertex_len);
|
|
|
+ Vector3 *normalsw = normals.ptrw();
|
|
|
+
|
|
|
+ Vector<float> tangents;
|
|
|
+ tangents.resize(p_vertex_len * 4);
|
|
|
+ float *tangentsw = tangents.ptrw();
|
|
|
+
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint32_t n = *(const uint32_t *)&r[j * normal_elem_size + offsets[RS::ARRAY_NORMAL]];
|
|
|
+ Vector3 axis = Vector3::octahedron_decode(Vector2((n & 0xFFFF) / 65535.0, ((n >> 16) & 0xFFFF) / 65535.0));
|
|
|
+
|
|
|
+ const uint16_t *v = reinterpret_cast<const uint16_t *>(&r[j * vertex_elem_size + offsets[i]]);
|
|
|
+ Vector3 vec = Vector3(float(v[0]) / 65535.0, float(v[1]) / 65535.0, float(v[2]) / 65535.0);
|
|
|
+ float angle = float(v[3]) / 65535.0;
|
|
|
+ w[j] = (vec * p_aabb.size) + p_aabb.position;
|
|
|
+
|
|
|
+ Vector3 normal;
|
|
|
+ Vector4 tan;
|
|
|
+ _get_tbn_from_axis_angle(axis, angle, normal, tan);
|
|
|
+
|
|
|
+ normalsw[j] = normal;
|
|
|
+ tangentsw[j * 4 + 0] = tan.x;
|
|
|
+ tangentsw[j * 4 + 1] = tan.y;
|
|
|
+ tangentsw[j * 4 + 2] = tan.z;
|
|
|
+ tangentsw[j * 4 + 3] = tan.w;
|
|
|
+ }
|
|
|
+ ret[RS::ARRAY_NORMAL] = normals;
|
|
|
+				ret[RS::ARRAY_TANGENT] = tangents;
|
|
|
+
|
|
|
+ } else {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const float *v = reinterpret_cast<const float *>(&r[j * vertex_elem_size + offsets[i]]);
|
|
|
+ w[j] = Vector3(v[0], v[1], v[2]);
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1115,39 +1402,41 @@ Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t
|
|
|
|
|
|
} break;
|
|
|
case RS::ARRAY_NORMAL: {
|
|
|
- Vector<Vector3> arr;
|
|
|
- arr.resize(p_vertex_len);
|
|
|
-
|
|
|
- Vector3 *w = arr.ptrw();
|
|
|
+ if (!(p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
|
|
|
+ Vector<Vector3> arr;
|
|
|
+ arr.resize(p_vertex_len);
|
|
|
|
|
|
- for (int j = 0; j < p_vertex_len; j++) {
|
|
|
- const uint32_t v = *(const uint32_t *)&r[j * vertex_elem_size + offsets[i]];
|
|
|
+ Vector3 *w = arr.ptrw();
|
|
|
|
|
|
- w[j] = Vector3::octahedron_decode(Vector2((v & 0xFFFF) / 65535.0, ((v >> 16) & 0xFFFF) / 65535.0));
|
|
|
- }
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint32_t v = *(const uint32_t *)&r[j * normal_elem_size + offsets[i]];
|
|
|
|
|
|
- ret[i] = arr;
|
|
|
+ w[j] = Vector3::octahedron_decode(Vector2((v & 0xFFFF) / 65535.0, ((v >> 16) & 0xFFFF) / 65535.0));
|
|
|
+ }
|
|
|
|
|
|
+ ret[i] = arr;
|
|
|
+ }
|
|
|
} break;
|
|
|
|
|
|
case RS::ARRAY_TANGENT: {
|
|
|
- Vector<float> arr;
|
|
|
- arr.resize(p_vertex_len * 4);
|
|
|
-
|
|
|
- float *w = arr.ptrw();
|
|
|
+ if (!(p_format & RS::ARRAY_FLAG_COMPRESS_ATTRIBUTES)) {
|
|
|
+ Vector<float> arr;
|
|
|
+ arr.resize(p_vertex_len * 4);
|
|
|
|
|
|
- for (int j = 0; j < p_vertex_len; j++) {
|
|
|
- const uint32_t v = *(const uint32_t *)&r[j * vertex_elem_size + offsets[i]];
|
|
|
- float tangent_sign;
|
|
|
- Vector3 res = Vector3::octahedron_tangent_decode(Vector2((v & 0xFFFF) / 65535.0, ((v >> 16) & 0xFFFF) / 65535.0), &tangent_sign);
|
|
|
- w[j * 4 + 0] = res.x;
|
|
|
- w[j * 4 + 1] = res.y;
|
|
|
- w[j * 4 + 2] = res.z;
|
|
|
- w[j * 4 + 3] = tangent_sign;
|
|
|
- }
|
|
|
+ float *w = arr.ptrw();
|
|
|
|
|
|
- ret[i] = arr;
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint32_t v = *(const uint32_t *)&r[j * normal_elem_size + offsets[i]];
|
|
|
+ float tangent_sign;
|
|
|
+ Vector3 res = Vector3::octahedron_tangent_decode(Vector2((v & 0xFFFF) / 65535.0, ((v >> 16) & 0xFFFF) / 65535.0), &tangent_sign);
|
|
|
+ w[j * 4 + 0] = res.x;
|
|
|
+ w[j * 4 + 1] = res.y;
|
|
|
+ w[j * 4 + 2] = res.z;
|
|
|
+ w[j * 4 + 3] = tangent_sign;
|
|
|
+ }
|
|
|
|
|
|
+ ret[i] = arr;
|
|
|
+ }
|
|
|
} break;
|
|
|
case RS::ARRAY_COLOR: {
|
|
|
Vector<Color> arr;
|
|
@@ -1168,12 +1457,17 @@ Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t
|
|
|
arr.resize(p_vertex_len);
|
|
|
|
|
|
Vector2 *w = arr.ptrw();
|
|
|
-
|
|
|
- for (int j = 0; j < p_vertex_len; j++) {
|
|
|
- const float *v = reinterpret_cast<const float *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
- w[j] = Vector2(v[0], v[1]);
|
|
|
+ if (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint16_t *v = reinterpret_cast<const uint16_t *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
+ w[j] = Vector2(float(v[0]) / 65535.0, float(v[1]) / 65535.0);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const float *v = reinterpret_cast<const float *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
+ w[j] = Vector2(v[0], v[1]);
|
|
|
+ }
|
|
|
}
|
|
|
-
|
|
|
ret[i] = arr;
|
|
|
} break;
|
|
|
|
|
@@ -1183,9 +1477,16 @@ Array RenderingServer::_get_array_from_surface(uint32_t p_format, Vector<uint8_t
|
|
|
|
|
|
Vector2 *w = arr.ptrw();
|
|
|
|
|
|
- for (int j = 0; j < p_vertex_len; j++) {
|
|
|
- const float *v = reinterpret_cast<const float *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
- w[j] = Vector2(v[0], v[1]);
|
|
|
+ if (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const uint16_t *v = reinterpret_cast<const uint16_t *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
+ w[j] = Vector2(float(v[0]) / 65535.0, float(v[1]) / 65535.0);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for (int j = 0; j < p_vertex_len; j++) {
|
|
|
+ const float *v = reinterpret_cast<const float *>(&ar[j * attrib_elem_size + offsets[i]]);
|
|
|
+ w[j] = Vector2(v[0], v[1]);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
ret[i] = arr;
|
|
@@ -1358,12 +1659,13 @@ TypedArray<Array> RenderingServer::mesh_surface_get_blend_shape_arrays(RID p_mes
|
|
|
uint32_t bs_offsets[RS::ARRAY_MAX];
|
|
|
uint32_t bs_format = (sd.format & RS::ARRAY_FORMAT_BLEND_SHAPE_MASK);
|
|
|
uint32_t vertex_elem_size;
|
|
|
+ uint32_t normal_elem_size;
|
|
|
uint32_t attrib_elem_size;
|
|
|
uint32_t skin_elem_size;
|
|
|
+	// Compute the offsets and strides of the blend shape data from the masked format.
|
|
|
+ mesh_surface_make_offsets_from_format(bs_format, sd.vertex_count, 0, bs_offsets, vertex_elem_size, normal_elem_size, attrib_elem_size, skin_elem_size);
|
|
|
|
|
|
- mesh_surface_make_offsets_from_format(bs_format, sd.vertex_count, 0, bs_offsets, vertex_elem_size, attrib_elem_size, skin_elem_size);
|
|
|
-
|
|
|
- int divisor = vertex_elem_size * sd.vertex_count;
|
|
|
+ int divisor = (vertex_elem_size + normal_elem_size) * sd.vertex_count;
|
|
|
ERR_FAIL_COND_V((blend_shape_data.size() % divisor) != 0, Array());
|
|
|
|
|
|
uint32_t blend_shape_count = blend_shape_data.size() / divisor;
|
|
@@ -1375,7 +1677,7 @@ TypedArray<Array> RenderingServer::mesh_surface_get_blend_shape_arrays(RID p_mes
|
|
|
for (uint32_t i = 0; i < blend_shape_count; i++) {
|
|
|
Vector<uint8_t> bs_data = blend_shape_data.slice(i * divisor, (i + 1) * divisor);
|
|
|
Vector<uint8_t> unused;
|
|
|
- blend_shape_array.set(i, _get_array_from_surface(bs_format, bs_data, unused, unused, sd.vertex_count, unused, 0));
|
|
|
+ blend_shape_array.set(i, _get_array_from_surface(bs_format, bs_data, unused, unused, sd.vertex_count, unused, 0, sd.aabb));
|
|
|
}
|
|
|
|
|
|
return blend_shape_array;
|
|
@@ -1395,9 +1697,9 @@ Array RenderingServer::mesh_create_arrays_from_surface_data(const SurfaceData &p
|
|
|
Vector<uint8_t> index_data = p_data.index_data;
|
|
|
int index_len = p_data.index_count;
|
|
|
|
|
|
- uint32_t format = p_data.format;
|
|
|
+ uint64_t format = p_data.format;
|
|
|
|
|
|
- return _get_array_from_surface(format, vertex_data, attrib_data, skin_data, vertex_len, index_data, index_len);
|
|
|
+ return _get_array_from_surface(format, vertex_data, attrib_data, skin_data, vertex_len, index_data, index_len, p_data.aabb);
|
|
|
}
|
|
|
#if 0
|
|
|
Array RenderingServer::_mesh_surface_get_skeleton_aabb_bind(RID p_mesh, int p_surface) const {
|
|
@@ -1531,7 +1833,9 @@ static RS::SurfaceData _dict_to_surf(const Dictionary &p_dictionary) {
|
|
|
RS::SurfaceData sd;
|
|
|
|
|
|
sd.primitive = RS::PrimitiveType(int(p_dictionary["primitive"]));
|
|
|
sd.format = p_dictionary["format"];
|
|
|
sd.vertex_data = p_dictionary["vertex_data"];
|
|
|
if (p_dictionary.has("attribute_data")) {
|
|
|
sd.attribute_data = p_dictionary["attribute_data"];
|
|
@@ -1549,6 +1853,7 @@ static RS::SurfaceData _dict_to_surf(const Dictionary &p_dictionary) {
|
|
|
}
|
|
|
|
|
|
sd.aabb = p_dictionary["aabb"];
|
|
|
+	if (p_dictionary.has("uv_scale")) {
+		sd.uv_scale = p_dictionary["uv_scale"];
+	}
|
|
|
|
|
|
if (p_dictionary.has("lods")) {
|
|
|
Array lods = p_dictionary["lods"];
|
|
@@ -1610,6 +1915,7 @@ Dictionary RenderingServer::_mesh_get_surface(RID p_mesh, int p_idx) {
|
|
|
d["index_count"] = sd.index_count;
|
|
|
}
|
|
|
d["aabb"] = sd.aabb;
|
|
|
+ d["uv_scale"] = sd.uv_scale;
|
|
|
|
|
|
if (sd.lods.size()) {
|
|
|
Array lods;
|
|
@@ -1663,6 +1969,117 @@ void RenderingServer::_particles_set_trail_bind_poses(RID p_particles, const Typ
|
|
|
particles_set_trail_bind_poses(p_particles, tbposes);
|
|
|
}
|
|
|
|
|
|
+Vector<uint8_t> _convert_surface_version_1_to_surface_version_2(uint64_t p_format, Vector<uint8_t> p_vertex_data, uint32_t p_vertex_count, uint32_t p_old_stride, uint32_t p_vertex_size, uint32_t p_normal_size, uint32_t p_position_stride, uint32_t p_normal_tangent_stride) {
|
|
|
+ Vector<uint8_t> new_vertex_data;
|
|
|
+ new_vertex_data.resize(p_vertex_data.size());
|
|
|
+ uint8_t *dst_vertex_ptr = new_vertex_data.ptrw();
|
|
|
+
|
|
|
+ const uint8_t *src_vertex_ptr = p_vertex_data.ptr();
|
|
|
+
|
|
|
+ uint32_t position_size = p_position_stride * p_vertex_count;
|
|
|
+
|
|
|
+ for (uint32_t j = 0; j < RS::ARRAY_COLOR; j++) {
|
|
|
+ if (!(p_format & (1ULL << j))) {
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ switch (j) {
|
|
|
+ case RS::ARRAY_VERTEX: {
|
|
|
+ if (p_format & RS::ARRAY_FLAG_USE_2D_VERTICES) {
|
|
|
+ for (uint32_t i = 0; i < p_vertex_count; i++) {
|
|
|
+ const float *src = (const float *)&src_vertex_ptr[i * p_old_stride];
|
|
|
+ float *dst = (float *)&dst_vertex_ptr[i * p_position_stride];
|
|
|
+ dst[0] = src[0];
|
|
|
+ dst[1] = src[1];
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for (uint32_t i = 0; i < p_vertex_count; i++) {
|
|
|
+ const float *src = (const float *)&src_vertex_ptr[i * p_old_stride];
|
|
|
+ float *dst = (float *)&dst_vertex_ptr[i * p_position_stride];
|
|
|
+ dst[0] = src[0];
|
|
|
+ dst[1] = src[1];
|
|
|
+ dst[2] = src[2];
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } break;
|
|
|
+ case RS::ARRAY_NORMAL: {
|
|
|
+ for (uint32_t i = 0; i < p_vertex_count; i++) {
|
|
|
+ const uint16_t *src = (const uint16_t *)&src_vertex_ptr[i * p_old_stride + p_vertex_size];
|
|
|
+ uint16_t *dst = (uint16_t *)&dst_vertex_ptr[i * p_normal_tangent_stride + position_size];
|
|
|
+
|
|
|
+ dst[0] = src[0];
|
|
|
+ dst[1] = src[1];
|
|
|
+ }
|
|
|
+ } break;
|
|
|
+ case RS::ARRAY_TANGENT: {
|
|
|
+ for (uint32_t i = 0; i < p_vertex_count; i++) {
|
|
|
+ const uint16_t *src = (const uint16_t *)&src_vertex_ptr[i * p_old_stride + p_vertex_size + p_normal_size];
|
|
|
+ uint16_t *dst = (uint16_t *)&dst_vertex_ptr[i * p_normal_tangent_stride + position_size + p_normal_size];
|
|
|
+
|
|
|
+ dst[0] = src[0];
|
|
|
+ dst[1] = src[1];
|
|
|
+ }
|
|
|
+ } break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return new_vertex_data;
|
|
|
+}
|
|
|
+
|
|
|
+#ifndef DISABLE_DEPRECATED
|
|
|
+void RenderingServer::_fix_surface_compatibility(SurfaceData &p_surface) {
|
|
|
+ uint64_t surface_version = p_surface.format & (ARRAY_FLAG_FORMAT_VERSION_MASK << ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+ ERR_FAIL_COND_MSG(surface_version > ARRAY_FLAG_FORMAT_CURRENT_VERSION, "Cannot convert surface with version provided (" + itos((surface_version >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK) + ") to current version (" + itos((RS::ARRAY_FLAG_FORMAT_CURRENT_VERSION >> RS::ARRAY_FLAG_FORMAT_VERSION_SHIFT) & RS::ARRAY_FLAG_FORMAT_VERSION_MASK) + ")");
|
|
|
+
|
|
|
+ if (surface_version == ARRAY_FLAG_FORMAT_VERSION_1) {
|
|
|
+		// The only difference for now is that version 1 interleaves vertex positions with the normal/tangent data, while version 2 stores positions in their own contiguous block.
|
|
|
+ // I.e. PNTPNTPNT -> PPPNTNTNT.
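+		// For example, a surface with 3D positions, normals, and tangents goes from 20 bytes per
+		// vertex interleaved (12-byte position + 4-byte normal + 4-byte tangent) to a block of
+		// 12 * vertex_count position bytes followed by 8 * vertex_count bytes of interleaved
+		// normal/tangent pairs.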
|
|
|
+ WARN_PRINT_ED("Upgrading mesh from older surface format. Once saved again (or re-imported), this mesh will be incompatible with earlier versions of Godot.");
|
|
|
+
|
|
|
+ int vertex_size = 0;
|
|
|
+ int normal_size = 0;
|
|
|
+ int tangent_size = 0;
|
|
|
+ if (p_surface.format & ARRAY_FORMAT_VERTEX) {
|
|
|
+ if (p_surface.format & ARRAY_FLAG_USE_2D_VERTICES) {
|
|
|
+ vertex_size = sizeof(float) * 2;
|
|
|
+ } else {
|
|
|
+ vertex_size = sizeof(float) * 3;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (p_surface.format & ARRAY_FORMAT_NORMAL) {
|
|
|
+ normal_size += sizeof(uint16_t) * 2;
|
|
|
+ }
|
|
|
+ if (p_surface.format & ARRAY_FORMAT_TANGENT) {
|
|
|
+ tangent_size = sizeof(uint16_t) * 2;
|
|
|
+ }
|
|
|
+ int stride = p_surface.vertex_data.size() / p_surface.vertex_count;
|
|
|
+ int position_stride = vertex_size;
|
|
|
+ int normal_tangent_stride = normal_size + tangent_size;
|
|
|
+
|
|
|
+ p_surface.vertex_data = _convert_surface_version_1_to_surface_version_2(p_surface.format, p_surface.vertex_data, p_surface.vertex_count, stride, vertex_size, normal_size, position_stride, normal_tangent_stride);
|
|
|
+
|
|
|
+ if (p_surface.blend_shape_data.size() > 0) {
|
|
|
+ // The size of one blend shape.
|
|
|
+ int divisor = (vertex_size + normal_size + tangent_size) * p_surface.vertex_count;
|
|
|
+ ERR_FAIL_COND((p_surface.blend_shape_data.size() % divisor) != 0);
|
|
|
+
|
|
|
+ uint32_t blend_shape_count = p_surface.blend_shape_data.size() / divisor;
|
|
|
+
|
|
|
+ Vector<uint8_t> new_blend_shape_data;
|
|
|
+ for (uint32_t i = 0; i < blend_shape_count; i++) {
|
|
|
+ Vector<uint8_t> bs_data = p_surface.blend_shape_data.slice(i * divisor, (i + 1) * divisor);
|
|
|
+ Vector<uint8_t> blend_shape = _convert_surface_version_1_to_surface_version_2(p_surface.format, bs_data, p_surface.vertex_count, stride, vertex_size, normal_size, position_stride, normal_tangent_stride);
|
|
|
+ new_blend_shape_data.append_array(blend_shape);
|
|
|
+ }
|
|
|
+
|
|
|
+ ERR_FAIL_COND(p_surface.blend_shape_data.size() != new_blend_shape_data.size());
|
|
|
+
|
|
|
+ p_surface.blend_shape_data = new_blend_shape_data;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ p_surface.format &= ~(ARRAY_FLAG_FORMAT_VERSION_MASK << ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+ p_surface.format |= ARRAY_FLAG_FORMAT_CURRENT_VERSION & (ARRAY_FLAG_FORMAT_VERSION_MASK << ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+}
|
|
|
+#endif
|
|
|
+
|
|
|
void RenderingServer::_bind_methods() {
|
|
|
BIND_CONSTANT(NO_INDEX_ARRAY);
|
|
|
BIND_CONSTANT(ARRAY_WEIGHTS_SIZE);
|
|
@@ -1753,6 +2170,7 @@ void RenderingServer::_bind_methods() {
|
|
|
ClassDB::bind_method(D_METHOD("mesh_create"), &RenderingServer::mesh_create);
|
|
|
ClassDB::bind_method(D_METHOD("mesh_surface_get_format_offset", "format", "vertex_count", "array_index"), &RenderingServer::mesh_surface_get_format_offset);
|
|
|
ClassDB::bind_method(D_METHOD("mesh_surface_get_format_vertex_stride", "format", "vertex_count"), &RenderingServer::mesh_surface_get_format_vertex_stride);
|
|
|
+ ClassDB::bind_method(D_METHOD("mesh_surface_get_format_normal_tangent_stride", "format", "vertex_count"), &RenderingServer::mesh_surface_get_format_normal_tangent_stride);
|
|
|
ClassDB::bind_method(D_METHOD("mesh_surface_get_format_attribute_stride", "format", "vertex_count"), &RenderingServer::mesh_surface_get_format_attribute_stride);
|
|
|
ClassDB::bind_method(D_METHOD("mesh_surface_get_format_skin_stride", "format", "vertex_count"), &RenderingServer::mesh_surface_get_format_skin_stride);
|
|
|
ClassDB::bind_method(D_METHOD("mesh_add_surface", "mesh", "surface"), &RenderingServer::_mesh_add_surface);
|
|
@@ -1835,6 +2253,15 @@ void RenderingServer::_bind_methods() {
|
|
|
BIND_BITFIELD_FLAG(ARRAY_FLAG_USE_8_BONE_WEIGHTS);
|
|
|
BIND_BITFIELD_FLAG(ARRAY_FLAG_USES_EMPTY_VERTEX_ARRAY);
|
|
|
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_COMPRESS_ATTRIBUTES);
|
|
|
+
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_VERSION_BASE);
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_VERSION_SHIFT);
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_VERSION_1);
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_VERSION_2);
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_CURRENT_VERSION);
|
|
|
+ BIND_BITFIELD_FLAG(ARRAY_FLAG_FORMAT_VERSION_MASK);
|
|
|
+
|
|
|
BIND_ENUM_CONSTANT(PRIMITIVE_POINTS);
|
|
|
BIND_ENUM_CONSTANT(PRIMITIVE_LINES);
|
|
|
BIND_ENUM_CONSTANT(PRIMITIVE_LINE_STRIP);
|