
Updated meshoptimizer.

Бранимир Караџић, 7 months ago
Commit
37e43f6a9e

+ 6 - 5
3rdparty/meshoptimizer/src/meshoptimizer.h

@@ -684,15 +684,16 @@ MESHOPTIMIZER_EXPERIMENTAL struct meshopt_Bounds meshopt_computeSphereBounds(con
 
 /**
  * Experimental: Cluster partitioner
- * Partitions clusters into groups of similar size, prioritizing grouping clusters that share vertices.
+ * Partitions clusters into groups of similar size, prioritizing grouping clusters that share vertices or are close to each other.
  *
  * destination must contain enough space for the resulting partiotion data (cluster_count elements)
  * destination[i] will contain the partition id for cluster i, with the total number of partitions returned by the function
  * cluster_indices should have the vertex indices referenced by each cluster, stored sequentially
  * cluster_index_counts should have the number of indices in each cluster; sum of all cluster_index_counts must be equal to total_index_count
+ * vertex_positions should have float3 position in the first 12 bytes of each vertex (or can be NULL if not used)
  * target_partition_size is a target size for each partition, in clusters; the resulting partitions may be smaller or larger
  */
-MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, size_t vertex_count, size_t target_partition_size);
+MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_partition_size);
 
 /**
  * Spatial sorter
@@ -841,7 +842,7 @@ inline size_t meshopt_buildMeshletsSplit(meshopt_Meshlet* meshlets, unsigned int
 template <typename T>
 inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
 template <typename T>
-inline size_t meshopt_partitionClusters(unsigned int* destination, const T* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, size_t vertex_count, size_t target_partition_size);
+inline size_t meshopt_partitionClusters(unsigned int* destination, const T* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_partition_size);
 template <typename T>
 inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
 #endif
@@ -1278,11 +1279,11 @@ inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t inde
 }
 
 template <typename T>
-inline size_t meshopt_partitionClusters(unsigned int* destination, const T* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, size_t vertex_count, size_t target_partition_size)
+inline size_t meshopt_partitionClusters(unsigned int* destination, const T* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_partition_size)
 {
 	meshopt_IndexAdapter<T> in(NULL, cluster_indices, total_index_count);
 
-	return meshopt_partitionClusters(destination, in.data, total_index_count, cluster_index_counts, cluster_count, vertex_count, target_partition_size);
+	return meshopt_partitionClusters(destination, in.data, total_index_count, cluster_index_counts, cluster_count, vertex_positions, vertex_count, vertex_positions_stride, target_partition_size);
 }
 
 template <typename T>

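For reference, a minimal usage sketch of the updated meshopt_partitionClusters declaration (not part of this commit; the Vertex struct and buffer names are hypothetical, and per the header comment vertex_positions may be NULL to keep the previous shared-vertex-only behavior):

#include "meshoptimizer.h"
#include <vector>

// hypothetical vertex layout: float3 position in the first 12 bytes, as the header comment requires
struct Vertex { float px, py, pz; };

size_t partitionExample(const std::vector<unsigned int>& cluster_indices,
                        const std::vector<unsigned int>& cluster_index_counts,
                        const std::vector<Vertex>& vertices,
                        std::vector<unsigned int>& partition_ids)
{
	partition_ids.resize(cluster_index_counts.size());

	// positions let the partitioner also favor spatially close clusters;
	// a target_partition_size of 8 clusters per group is an arbitrary example value
	return meshopt_partitionClusters(
	    partition_ids.data(),
	    cluster_indices.data(), cluster_indices.size(),
	    cluster_index_counts.data(), cluster_index_counts.size(),
	    &vertices[0].px, vertices.size(), sizeof(Vertex),
	    8);
}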
+ 148 - 80
3rdparty/meshoptimizer/src/partition.cpp

@@ -15,26 +15,97 @@ struct ClusterAdjacency
 	unsigned int* shared;
 };
 
-static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, size_t cluster_count, unsigned char* used, size_t vertex_count, meshopt_Allocator& allocator)
+static void filterClusterIndices(unsigned int* data, unsigned int* offsets, const unsigned int* cluster_indices, const unsigned int* cluster_index_counts, size_t cluster_count, unsigned char* used, size_t vertex_count, size_t total_index_count)
 {
-	unsigned int* ref_offsets = allocator.allocate<unsigned int>(vertex_count + 1);
+	(void)vertex_count;
+	(void)total_index_count;
 
-	// compute number of clusters referenced by each vertex
-	memset(ref_offsets, 0, vertex_count * sizeof(unsigned int));
+	size_t cluster_start = 0;
+	size_t cluster_write = 0;
 
 	for (size_t i = 0; i < cluster_count; ++i)
 	{
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
+		offsets[i] = unsigned(cluster_write);
+
+		// copy cluster indices, skipping duplicates
+		for (size_t j = 0; j < cluster_index_counts[i]; ++j)
 		{
-			unsigned int v = cluster_indices[j];
+			unsigned int v = cluster_indices[cluster_start + j];
 			assert(v < vertex_count);
 
-			ref_offsets[v] += 1 - used[v];
+			data[cluster_write] = v;
+			cluster_write += 1 - used[v];
 			used[v] = 1;
 		}
 
+		// reset used flags for the next cluster
+		for (size_t j = offsets[i]; j < cluster_write; ++j)
+			used[data[j]] = 0;
+
+		cluster_start += cluster_index_counts[i];
+	}
+
+	assert(cluster_start == total_index_count);
+	assert(cluster_write <= total_index_count);
+	offsets[cluster_count] = unsigned(cluster_write);
+}
+
+static void computeClusterBounds(float* cluster_bounds, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, size_t cluster_count, const float* vertex_positions, size_t vertex_positions_stride)
+{
+	size_t vertex_stride_float = vertex_positions_stride / sizeof(float);
+
+	for (size_t i = 0; i < cluster_count; ++i)
+	{
+		float center[3] = {0, 0, 0};
+
+		// approximate center of the cluster by averaging all vertex positions
+		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
+		{
+			const float* p = vertex_positions + cluster_indices[j] * vertex_stride_float;
+
+			center[0] += p[0];
+			center[1] += p[1];
+			center[2] += p[2];
+		}
+
+		// note: technically clusters can't be empty per meshopt_partitionCluster but we check for a division by zero in case that changes
+		if (size_t cluster_size = cluster_offsets[i + 1] - cluster_offsets[i])
+		{
+			center[0] /= float(cluster_size);
+			center[1] /= float(cluster_size);
+			center[2] /= float(cluster_size);
+		}
+
+		// compute radius of the bounding sphere for each cluster
+		float radiussq = 0;
+
 		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-			used[cluster_indices[j]] = 0;
+		{
+			const float* p = vertex_positions + cluster_indices[j] * vertex_stride_float;
+
+			float d2 = (p[0] - center[0]) * (p[0] - center[0]) + (p[1] - center[1]) * (p[1] - center[1]) + (p[2] - center[2]) * (p[2] - center[2]);
+
+			radiussq = radiussq < d2 ? d2 : radiussq;
+		}
+
+		cluster_bounds[i * 4 + 0] = center[0];
+		cluster_bounds[i * 4 + 1] = center[1];
+		cluster_bounds[i * 4 + 2] = center[2];
+		cluster_bounds[i * 4 + 3] = sqrtf(radiussq);
+	}
+}
+
+static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, size_t cluster_count, size_t vertex_count, meshopt_Allocator& allocator)
+{
+	unsigned int* ref_offsets = allocator.allocate<unsigned int>(vertex_count + 1);
+
+	// compute number of clusters referenced by each vertex
+	memset(ref_offsets, 0, vertex_count * sizeof(unsigned int));
+
+	for (size_t i = 0; i < cluster_count; ++i)
+	{
+		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
+			ref_offsets[cluster_indices[j]]++;
 	}
 
 	// compute (worst-case) number of adjacent clusters for each cluster
@@ -43,21 +114,13 @@ static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned in
 	for (size_t i = 0; i < cluster_count; ++i)
 	{
 		size_t count = 0;
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-		{
-			unsigned int v = cluster_indices[j];
-			assert(v < vertex_count);
 
-			// worst case is every vertex has a disjoint cluster list
-			count += used[v] ? 0 : ref_offsets[v] - 1;
-			used[v] = 1;
-		}
+		// worst case is every vertex has a disjoint cluster list
+		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
+			count += ref_offsets[cluster_indices[j]] - 1;
 
 		// ... but only every other cluster can be adjacent in the end
 		total_adjacency += count < cluster_count - 1 ? count : cluster_count - 1;
-
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-			used[cluster_indices[j]] = 0;
 	}
 
 	// we can now allocate adjacency buffers
@@ -81,19 +144,7 @@ static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned in
 	for (size_t i = 0; i < cluster_count; ++i)
 	{
 		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-		{
-			unsigned int v = cluster_indices[j];
-			assert(v < vertex_count);
-
-			if (used[v])
-				continue;
-
-			ref_data[ref_offsets[v]++] = unsigned(i);
-			used[v] = 1;
-		}
-
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-			used[cluster_indices[j]] = 0;
+			ref_data[ref_offsets[cluster_indices[j]]++] = unsigned(i);
 	}
 
 	// after the previous pass, ref_offsets contain the end of the data for each vertex; shift it forward to get the start
@@ -112,10 +163,6 @@ static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned in
 		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
 		{
 			unsigned int v = cluster_indices[j];
-			assert(v < vertex_count);
-
-			if (used[v])
-				continue;
 
 			// merge the entire cluster list of each vertex into current list
 			for (size_t k = ref_offsets[v]; k < ref_offsets[v + 1]; ++k)
@@ -144,13 +191,8 @@ static void buildClusterAdjacency(ClusterAdjacency& adjacency, const unsigned in
 					count++;
 				}
 			}
-
-			used[v] = 1;
 		}
 
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-			used[cluster_indices[j]] = 0;
-
 		// mark the end of the adjacency list; the next cluster will start there as well
 		adjacency.offsets[i + 1] = adjacency.offsets[i] + unsigned(count);
 	}
@@ -223,29 +265,6 @@ static GroupOrder heapPop(GroupOrder* heap, size_t size)
 	return top;
 }
 
-static unsigned int countTotal(const ClusterGroup* groups, int id, const unsigned int* cluster_indices, const unsigned int* cluster_offsets, unsigned char* used)
-{
-	unsigned int total = 0;
-
-	for (int i = id; i >= 0; i = groups[i].next)
-	{
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-		{
-			unsigned int v = cluster_indices[j];
-			total += 1 - used[v];
-			used[v] = 1;
-		}
-	}
-
-	for (int i = id; i >= 0; i = groups[i].next)
-	{
-		for (size_t j = cluster_offsets[i]; j < cluster_offsets[i + 1]; ++j)
-			used[cluster_indices[j]] = 0;
-	}
-
-	return total;
-}
-
 static unsigned int countShared(const ClusterGroup* groups, int group1, int group2, const ClusterAdjacency& adjacency)
 {
 	unsigned int total = 0;
@@ -264,7 +283,41 @@ static unsigned int countShared(const ClusterGroup* groups, int group1, int grou
 	return total;
 }
 
-static int pickGroupToMerge(const ClusterGroup* groups, int id, const ClusterAdjacency& adjacency, size_t max_partition_size)
+static void mergeBounds(float* target, const float* source)
+{
+	float r1 = target[3], r2 = source[3];
+	float dx = source[0] - target[0], dy = source[1] - target[1], dz = source[2] - target[2];
+	float d = sqrtf(dx * dx + dy * dy + dz * dz);
+
+	if (d + r1 < r2)
+	{
+		memcpy(target, source, 4 * sizeof(float));
+		return;
+	}
+
+	if (d + r2 > r1)
+	{
+		float k = d > 0 ? (d + r2 - r1) / (2 * d) : 0.f;
+
+		target[0] += dx * k;
+		target[1] += dy * k;
+		target[2] += dz * k;
+		target[3] = (d + r2 + r1) / 2;
+	}
+}
+
+static float boundsScore(const float* target, const float* source)
+{
+	float r1 = target[3], r2 = source[3];
+	float dx = source[0] - target[0], dy = source[1] - target[1], dz = source[2] - target[2];
+	float d = sqrtf(dx * dx + dy * dy + dz * dz);
+
+	float mr = d + r1 < r2 ? r2 : (d + r2 < r1 ? r1 : (d + r2 + r1) / 2);
+
+	return mr > 0 ? r1 / mr : 0.f;
+}
+
+static int pickGroupToMerge(const ClusterGroup* groups, int id, const ClusterAdjacency& adjacency, size_t max_partition_size, const float* cluster_bounds)
 {
 	assert(groups[id].size > 0);
 
@@ -291,6 +344,10 @@ static int pickGroupToMerge(const ClusterGroup* groups, int id, const ClusterAdj
 			// normalize shared count by the expected boundary of each group (+ keeps scoring symmetric)
 			float score = float(int(shared)) * (group_rsqrt + other_rsqrt);
 
+			// incorporate spatial score to favor merging nearby groups
+			if (cluster_bounds)
+				score *= 1.f + 0.4f * boundsScore(&cluster_bounds[id * 4], &cluster_bounds[other * 4]);
+
 			if (score > best_score)
 			{
 				best_group = other;
@@ -304,10 +361,12 @@ static int pickGroupToMerge(const ClusterGroup* groups, int id, const ClusterAdj
 
 } // namespace meshopt
 
-size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, size_t vertex_count, size_t target_partition_size)
+size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int* cluster_indices, size_t total_index_count, const unsigned int* cluster_index_counts, size_t cluster_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_partition_size)
 {
 	using namespace meshopt;
 
+	assert((vertex_positions == NULL || vertex_positions_stride >= 12) && vertex_positions_stride <= 256);
+	assert(vertex_positions_stride % sizeof(float) == 0);
 	assert(target_partition_size > 0);
 
 	size_t max_partition_size = target_partition_size + target_partition_size * 3 / 8;
@@ -317,24 +376,25 @@ size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int*
 	unsigned char* used = allocator.allocate<unsigned char>(vertex_count);
 	memset(used, 0, vertex_count);
 
-	// build cluster index offsets as a prefix sum
+	unsigned int* cluster_newindices = allocator.allocate<unsigned int>(total_index_count);
 	unsigned int* cluster_offsets = allocator.allocate<unsigned int>(cluster_count + 1);
-	unsigned int cluster_nextoffset = 0;
 
-	for (size_t i = 0; i < cluster_count; ++i)
-	{
-		assert(cluster_index_counts[i] > 0);
+	// make new cluster index list that filters out duplicate indices
+	filterClusterIndices(cluster_newindices, cluster_offsets, cluster_indices, cluster_index_counts, cluster_count, used, vertex_count, total_index_count);
+	cluster_indices = cluster_newindices;
 
-		cluster_offsets[i] = cluster_nextoffset;
-		cluster_nextoffset += cluster_index_counts[i];
-	}
+	// compute bounding sphere for each cluster if positions are provided
+	float* cluster_bounds = NULL;
 
-	assert(cluster_nextoffset == total_index_count);
-	cluster_offsets[cluster_count] = unsigned(total_index_count);
+	if (vertex_positions)
+	{
+		cluster_bounds = allocator.allocate<float>(cluster_count * 4);
+		computeClusterBounds(cluster_bounds, cluster_indices, cluster_offsets, cluster_count, vertex_positions, vertex_positions_stride);
+	}
 
 	// build cluster adjacency along with edge weights (shared vertex count)
 	ClusterAdjacency adjacency = {};
-	buildClusterAdjacency(adjacency, cluster_indices, cluster_offsets, cluster_count, used, vertex_count, allocator);
+	buildClusterAdjacency(adjacency, cluster_indices, cluster_offsets, cluster_count, vertex_count, allocator);
 
 	ClusterGroup* groups = allocator.allocate<ClusterGroup>(cluster_count);
 
@@ -347,7 +407,8 @@ size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int*
 		groups[i].group = int(i);
 		groups[i].next = -1;
 		groups[i].size = 1;
-		groups[i].vertices = countTotal(groups, int(i), cluster_indices, cluster_offsets, used);
+		groups[i].vertices = cluster_offsets[i + 1] - cluster_offsets[i];
+		assert(groups[i].vertices > 0);
 
 		GroupOrder item = {};
 		item.id = unsigned(i);
@@ -376,7 +437,7 @@ size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int*
 		if (groups[top.id].size >= target_partition_size)
 			continue;
 
-		int best_group = pickGroupToMerge(groups, top.id, adjacency, max_partition_size);
+		int best_group = pickGroupToMerge(groups, top.id, adjacency, max_partition_size, cluster_bounds);
 
 		// we can't grow the group any more, emit as is
 		if (best_group == -1)
@@ -395,7 +456,7 @@ size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int*
 				break;
 			}
 
-		// update group sizes; note, the vertex update is an approximation which avoids recomputing the true size via countTotal
+		// update group sizes; note, the vertex update is a O(1) approximation which avoids recomputing the true size
 		groups[top.id].size += groups[best_group].size;
 		groups[top.id].vertices += groups[best_group].vertices;
 		groups[top.id].vertices = (groups[top.id].vertices > shared) ? groups[top.id].vertices - shared : 1;
@@ -403,6 +464,13 @@ size_t meshopt_partitionClusters(unsigned int* destination, const unsigned int*
 		groups[best_group].size = 0;
 		groups[best_group].vertices = 0;
 
+		// merge bounding spheres if bounds are available
+		if (cluster_bounds)
+		{
+			mergeBounds(&cluster_bounds[top.id * 4], &cluster_bounds[best_group * 4]);
+			memset(&cluster_bounds[best_group * 4], 0, 4 * sizeof(float));
+		}
+
 		// re-associate all clusters back to the merged group
 		for (int i = top.id; i >= 0; i = groups[i].next)
 			groups[i].group = int(top.id);
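As an aside, the sphere merge used by mergeBounds above is the standard bounding-sphere union: if one sphere contains the other the larger one is kept, otherwise the center slides along the axis between the two centers and the radius grows to span both. A standalone sketch of the same math for illustration (not part of the commit):

#include <cmath>
#include <cstdio>
#include <cstring>

// spheres stored as {x, y, z, r}, matching the cluster_bounds layout in partition.cpp
static void mergeSpheres(float* target, const float* source)
{
	float r1 = target[3], r2 = source[3];
	float dx = source[0] - target[0], dy = source[1] - target[1], dz = source[2] - target[2];
	float d = sqrtf(dx * dx + dy * dy + dz * dz);

	if (d + r1 < r2)
	{
		// target lies entirely inside source; the union is just source
		memcpy(target, source, 4 * sizeof(float));
		return;
	}

	if (d + r2 > r1)
	{
		// neither sphere contains the other: slide the center toward source and grow the radius
		float k = d > 0 ? (d + r2 - r1) / (2 * d) : 0.f;
		target[0] += dx * k;
		target[1] += dy * k;
		target[2] += dz * k;
		target[3] = (d + r2 + r1) / 2;
	}
	// otherwise source is already inside target and target stays as is
}

int main()
{
	float a[4] = {0, 0, 0, 1};
	float b[4] = {4, 0, 0, 1};
	mergeSpheres(a, b); // union of two unit spheres 4 apart: center (2, 0, 0), radius 3
	printf("center=(%g, %g, %g) radius=%g\n", a[0], a[1], a[2], a[3]);
	return 0;
}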