@@ -38,13 +38,10 @@ bool CallableCustomMethodPointerBase::compare_equal(const CallableCustom *p_a, const CallableCustom *p_b) {
 		return false;
 	}
 
-	for (uint32_t i = 0; i < a->comp_size; i++) {
-		if (a->comp_ptr[i] != b->comp_ptr[i]) {
-			return false;
-		}
-	}
-
-	return true;
+	// Avoid sorting by memory address proximity, which leads to unpredictable performance over time
+	// due to the reuse of old addresses for newer objects. Use byte-wise comparison to leverage the
+	// backwards encoding of little-endian systems as a way to decouple spatiality and time.
+	return memcmp(a->comp_ptr, b->comp_ptr, a->comp_size * 4) == 0;
 }
 
 bool CallableCustomMethodPointerBase::compare_less(const CallableCustom *p_a, const CallableCustom *p_b) {
@@ -55,15 +52,8 @@ bool CallableCustomMethodPointerBase::compare_less(const CallableCustom *p_a, const CallableCustom *p_b) {
 		return a->comp_size < b->comp_size;
 	}
 
-	for (uint32_t i = 0; i < a->comp_size; i++) {
-		if (a->comp_ptr[i] == b->comp_ptr[i]) {
-			continue;
-		}
-
-		return a->comp_ptr[i] < b->comp_ptr[i];
-	}
-
-	return false;
+	// See note in compare_equal().
+	return memcmp(a->comp_ptr, b->comp_ptr, a->comp_size * 4) < 0;
 }
 
 CallableCustom::CompareEqualFunc CallableCustomMethodPointerBase::get_compare_equal_func() const {
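
Note: both hunks replace an element-wise loop with a single memcmp over the comparison buffer. Below is a minimal standalone sketch of that ordering contract, assuming (as in this file) that comp_ptr points to comp_size 32-bit words, which is why comp_size * 4 bytes are compared. The CompBlob struct and the main() harness are hypothetical names for illustration only, not part of the Godot API.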
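// Hypothetical harness illustrating the memcmp-based comparison adopted above.
// CompBlob mirrors the (comp_size, comp_ptr) pair; it is not the Godot type.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct CompBlob {
	uint32_t comp_size; // number of 32-bit words pointed to by comp_ptr
	const uint32_t *comp_ptr;
};

static bool compare_equal(const CompBlob &a, const CompBlob &b) {
	if (a.comp_size != b.comp_size) {
		return false;
	}
	// Byte-wise equality over comp_size words of 4 bytes each.
	return std::memcmp(a.comp_ptr, b.comp_ptr, a.comp_size * 4) == 0;
}

static bool compare_less(const CompBlob &a, const CompBlob &b) {
	if (a.comp_size != b.comp_size) {
		return a.comp_size < b.comp_size;
	}
	// memcmp yields a strict weak ordering over the raw bytes, independent
	// of the numeric value of any addresses stored in the words.
	return std::memcmp(a.comp_ptr, b.comp_ptr, a.comp_size * 4) < 0;
}

int main() {
	uint32_t x[2] = { 0x1000, 0x2000 };
	uint32_t y[2] = { 0x1000, 0x2001 };
	CompBlob a = { 2, x };
	CompBlob b = { 2, y };
	// Prints "equal: 0, less: 1": the blobs differ in the final word.
	std::printf("equal: %d, less: %d\n", compare_equal(a, b), compare_less(a, b));
	return 0;
}

On a little-endian machine, memcmp sees the least significant byte of each word first, so two values that are numerically close can land far apart in the resulting order; that is the decoupling of spatiality and time the added comment describes.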