test_bytes_vec_alloc.rs

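//! These tests route every allocation through a bookkeeping global allocator
//! (`Ledger` below): `alloc` records each pointer's size, and `dealloc` panics
//! if the `Layout` it is handed does not match the recorded size. The `Bytes`
//! tests then advance, truncate, clone, and convert buffers back into `Vec`s,
//! so any path that frees the backing storage with the wrong size aborts the run.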
use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use bytes::{Buf, Bytes};

#[global_allocator]
static LEDGER: Ledger = Ledger::new();

const LEDGER_LENGTH: usize = 2048;

struct Ledger {
    alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
}

impl Ledger {
    const fn new() -> Self {
        const ELEM: (AtomicPtr<u8>, AtomicUsize) =
            (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
        let alloc_table = [ELEM; LEDGER_LENGTH];

        Self { alloc_table }
    }
    /// Iterate over our table until we find an open entry, then insert into said entry
    fn insert(&self, ptr: *mut u8, size: usize) {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // SeqCst is good enough here; we don't care about perf, I just want to be correct!
            if entry_ptr
                .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
            {
                entry_size.store(size, Ordering::SeqCst);
                break;
            }
        }
    }
    /// Find the entry recorded for `ptr`, tombstone it, and return the allocation size.
    fn remove(&self, ptr: *mut u8) -> usize {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // Replace the entry with a pointer that will never be deallocated, so that
            // we don't have any chance of a race condition.
            //
            // Don't worry, LEDGER_LENGTH is really long to compensate for us not reclaiming space.
            if entry_ptr
                .compare_exchange(
                    ptr,
                    invalid_ptr(usize::MAX),
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                return entry_size.load(Ordering::SeqCst);
            }
        }

        panic!("Couldn't find a matching entry for {:x?}", ptr);
    }
}
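
// `GlobalAlloc` requires that `dealloc` be called with the same `Layout` the block
// was allocated with, so a `Bytes` -> `Vec` conversion that rebuilt the vector with
// the wrong capacity would surface here as a size mismatch and panic.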
unsafe impl GlobalAlloc for Ledger {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let size = layout.size();
        let ptr = System.alloc(layout);
        self.insert(ptr, size);
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let orig_size = self.remove(ptr);

        if orig_size != layout.size() {
            panic!(
                "bad dealloc: alloc size was {}, dealloc size is {}",
                orig_size,
                layout.size()
            );
        } else {
            System.dealloc(ptr, layout);
        }
    }
}
#[test]
fn test_bytes_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.advance(1);
    drop(bytes);
}

#[test]
fn test_bytes_truncate() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    drop(bytes);
}

#[test]
fn test_bytes_truncate_and_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    bytes.advance(1);
    drop(bytes);
}
/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}
#[test]
fn test_bytes_into_vec() {
    let vec = vec![33u8; 1024];

    // Test case where kind == KIND_VEC
    let b1 = Bytes::from(vec.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test case where kind == KIND_ARC, ref_cnt == 1
    let b1 = Bytes::from(vec.clone());
    drop(b1.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test case where kind == KIND_ARC, ref_cnt == 2
    let b1 = Bytes::from(vec.clone());
    let b2 = b1.clone();
    assert_eq!(Vec::from(b1), vec);

    // Test case where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
    assert_eq!(Vec::from(b2), vec);

    // Test case where offset != 0
    let mut b1 = Bytes::from(vec.clone());
    let b2 = b1.split_off(20);

    assert_eq!(Vec::from(b2), vec[20..]);
    assert_eq!(Vec::from(b1), vec[..20]);
}
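
// Usage note: assuming this file lives under `tests/` in the bytes crate, it can be
// run on its own with `cargo test --test test_bytes_vec_alloc`.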