#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"
/*
 * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
 * to free up the low bits in the rtree leaf.
 */
#define EDATA_ALIGNMENT 128
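/*
 * Illustrative sketch (hypothetical; not the actual emap code): 128-byte
 * alignment guarantees that the low 7 bits of any edata_t pointer are zero,
 * so a 64-bit rtree-leaf word can carry small metadata alongside the
 * pointer, e.g.:
 *
 *	uintptr_t word = (uintptr_t)edata | (slab ? 1 : 0);
 *	edata_t *e = (edata_t *)(word & ~((uintptr_t)EDATA_ALIGNMENT - 1));
 */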
enum extent_state_e {
	extent_state_active = 0,
	extent_state_dirty = 1,
	extent_state_muzzy = 2,
	extent_state_retained = 3,
	/* This and subsequent states are intermediate. */
	extent_state_transition = 4,
	extent_state_merging = 5,
	extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;

enum extent_head_state_e {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;
/*
 * Which implementation of the page allocator interface (PAI, defined in
 * pai.h) owns the given extent?
 */
enum extent_pai_e {
	EXTENT_PAI_PAC = 0,
	EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;
struct e_prof_info_s {
	/* Time when this was allocated. */
	nstime_t e_prof_alloc_time;
	/* Allocation request size. */
	size_t e_prof_alloc_size;
	/* Points to a prof_tctx_t. */
	atomic_p_t e_prof_tctx;
	/*
	 * Points to a prof_recent_t for the allocation; NULL
	 * means the recent allocation record no longer exists.
	 * Protected by prof_recent_alloc_mtx.
	 */
	atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;
/*
 * The information about a particular edata that lives in an emap. Space is
 * more precious there (the information, plus the edata pointer, has to live
 * in a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access. You have one fewer pointer hop, since finding the
 *   edata_t associated with an item always requires accessing the rtree leaf
 *   in which this data is stored.
 * - It can be read unsynchronized, and without worrying about lifetime issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
	bool slab;
	szind_t szind;
};
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
	uint64_t sn;
	uintptr_t addr;
};

/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * p: pai
	 * z: zeroed
	 * g: guarded
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 *
	 * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions. This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up
	 *       via iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * pai: The pai flag is an extent_pai_t.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * guarded: The guarded flag is used by the sanitizer to track whether
	 *          the extent has page guards around it.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether
	 *        the extent is a slab. Extent size and usable size often
	 *        differ even for non-slabs, either due to sz_large_pad or
	 *        promotion of sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * bin_shard: The shard of the bin from which this extent came.
	 *
	 * (The is_head flag occupies the bit above bin_shard; see the
	 * EDATA_BITS_IS_HEAD_* definitions below.)
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
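/*
 * A quick worked example: MASK(3, 12) expands to
 * ((((uint64_t)0x1U << 3) - 1) << 12) == 0x7000, i.e. a run of three set
 * bits starting at bit 12.
 */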
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)

#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
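/*
 * Sanity sketch, assuming a typical 64-bit configuration matching the
 * diagram above: the packed fields consume 12 (arena_ind) + 1 + 1 + 1 + 1 +
 * 1 (flag bits) + 3 (state) + 8 (szind) + 10 (nfree) + 6 (bin_shard) +
 * 1 (is_head) = 45 bits, leaving the upper bits of e_bits unused.
 */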
	/* Pointer to the extent that this structure is responsible for. */
	void *e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different than the serial number for the extent
		 * at e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t e_bsize;
	};
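	/*
	 * Worked example, assuming 4 KiB pages: a 0x5000-byte extent whose
	 * structure has esn 7 stores e_size_esn == 0x5007; edata_size_get()
	 * masks off the low PAGE - 1 bits, and edata_esn_get() keeps only
	 * them.
	 */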
	/*
	 * If this edata is a user allocation from an HPA, it comes out of
	 * some pageslab (we don't yet support hugepage allocations that don't
	 * fit into pageslabs). This tracks it.
	 */
	hpdata_t *e_ps;

	/*
	 * Serial number. These are not necessarily unique; splitting an
	 * extent results in two extents with the same serial number.
	 */
	uint64_t e_sn;

	union {
		/*
		 * List linkage used when the edata_t is active; either in an
		 * arena's large allocations or a bin_t's slabs_full.
		 */
		ql_elm(edata_t) ql_link_active;
		/*
		 * Pairing heap linkage. Used whenever the extent is inactive
		 * (in the page allocators), or when it is active and in
		 * slabs_nonfull, or when the edata_t is unassociated with an
		 * extent and sitting in an edata_cache.
		 */
		union {
			edata_heap_link_t heap_link;
			edata_avail_link_t avail_link;
		};
	};

	union {
		/*
		 * List linkage used when the extent is inactive:
		 * - Stashed dirty extents
		 * - Ecache LRU functionality.
		 */
		ql_elm(edata_t) ql_link_inactive;
		/* Small region slab metadata. */
		slab_data_t e_slab_data;
		/* Profiling data, used for large objects. */
		e_prof_info_t e_prof_info;
	};
};
TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
	unsigned arena_ind = (unsigned)((edata->e_bits &
	    EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
	assert(arena_ind < MALLOCX_ARENA_LIMIT);
	return arena_ind;
}

static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
	szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
	    EDATA_BITS_SZIND_SHIFT);
	assert(szind <= SC_NSIZES);
	return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
	szind_t szind = edata_szind_get_maybe_invalid(edata);
	assert(szind < SC_NSIZES); /* Never call when "invalid". */
	return szind;
}

static inline size_t
edata_usize_get(const edata_t *edata) {
	return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
	unsigned binshard = (unsigned)((edata->e_bits &
	    EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	return binshard;
}

static inline uint64_t
edata_sn_get(const edata_t *edata) {
	return edata->e_sn;
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
	return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
	    EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_guarded_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
	    EDATA_BITS_GUARDED_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
	    EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
	    EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
	    EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
	    EDATA_BITS_SLAB_SHIFT);
}

static inline unsigned
edata_nfree_get(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
	    EDATA_BITS_NFREE_SHIFT);
}
static inline void *
edata_base_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return PAGE_ADDR2BASE(edata->e_addr);
}

static inline void *
edata_addr_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return edata->e_addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_SIZE_MASK);
}

static inline size_t
edata_esn_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_ESN_MASK);
}

static inline size_t
edata_bsize_get(const edata_t *edata) {
	return edata->e_bsize;
}

static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	return edata->e_ps;
}

static inline void *
edata_before_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}

static inline void *
edata_last_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata) - PAGE);
}

static inline void *
edata_past_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata));
}
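/*
 * Address-arithmetic sketch, with hypothetical values and 4 KiB pages: for
 * an extent with base 0x10000 and size 3 * PAGE, edata_before_get() returns
 * 0xf000 (the page preceding the extent), edata_last_get() returns 0x12000
 * (the extent's last page), and edata_past_get() returns 0x13000 (one byte
 * past the end).
 */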
static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
	return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
	    ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
	return &edata->e_prof_info.e_prof_alloc_time;
}

static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
	return edata->e_prof_info.e_prof_alloc_size;
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
	return (prof_recent_t *)atomic_load_p(
	    &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}
static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
	    ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
	edata->e_addr = addr;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
	assert((size & ~EDATA_SIZE_MASK) == 0);
	edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
	edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
	    EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
	edata->e_bsize = bsize;
}

static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	edata->e_ps = ps;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
	assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
	    ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
	assert(edata_slab_get(edata));
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits &
	    (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_inc(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
	assert(edata_slab_get(edata));
	edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
	edata->e_sn = sn;
}
static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
	    ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
	    ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
	    ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
	atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
	nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
	edata->e_prof_info.e_prof_alloc_size = size;
}

static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
	atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
	    ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
	    EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
	    ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

static inline bool
edata_state_in_transition(extent_state_t state) {
	return state >= extent_state_transition;
}
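/*
 * For example, edata_state_in_transition(extent_state_merging) is true,
 * while edata_state_in_transition(extent_state_dirty) is false.
 */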
/*
 * Because this function is implemented as a sequence of bitfield
 * modifications, even though each individual bit is properly initialized, we
 * technically read uninitialized data within it. This is mostly fine, since
 * most callers get their edatas from zeroing sources, but callers who make
 * stack edata_ts need to manually zero them.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	edata_arena_ind_set(edata, arena_ind);
	edata_addr_set(edata, addr);
	edata_size_set(edata, size);
	edata_slab_set(edata, slab);
	edata_szind_set(edata, szind);
	edata_sn_set(edata, sn);
	edata_state_set(edata, state);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, zeroed);
	edata_committed_set(edata, committed);
	edata_pai_set(edata, pai);
	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
	if (config_prof) {
		edata_prof_tctx_set(edata, NULL);
	}
}
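/*
 * Usage sketch for the caveat above (hypothetical caller): an edata_t built
 * on the stack must be zeroed first, since the setters only mask in their
 * own bits.
 *
 *	edata_t tmp;
 *	memset(&tmp, 0, sizeof(tmp));
 *	edata_init(&tmp, arena_ind, addr, size, false, SC_NSIZES, sn,
 *	    extent_state_active, false, true, EXTENT_PAI_PAC,
 *	    EXTENT_NOT_HEAD);
 */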
static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
	edata_addr_set(edata, addr);
	edata_bsize_set(edata, bsize);
	edata_slab_set(edata, false);
	edata_szind_set(edata, SC_NSIZES);
	edata_sn_set(edata, sn);
	edata_state_set(edata, extent_state_active);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, true);
	edata_committed_set(edata, true);
	/*
	 * This isn't strictly true, but base-allocated extents never get
	 * deallocated and can't be looked up in the emap, so there's no sense
	 * in wasting a state bit to encode this fact.
	 */
	edata_pai_set(edata, EXTENT_PAI_PAC);
}
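/*
 * The comparators below use the branchless (a > b) - (a < b) idiom, which
 * yields -1, 0, or 1 without the overflow risk of computing a - b directly.
 */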
static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
	size_t a_esn = edata_esn_get(a);
	size_t b_esn = edata_esn_get(b);

	return (a_esn > b_esn) - (a_esn < b_esn);
}

static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
	uintptr_t a_eaddr = (uintptr_t)a;
	uintptr_t b_eaddr = (uintptr_t)b;

	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
	return (edata_cmp_summary_t){edata_sn_get(edata),
	    (uintptr_t)edata_addr_get(edata)};
}

static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
	int ret;
	ret = (a.sn > b.sn) - (a.sn < b.sn);
	if (ret != 0) {
		return ret;
	}
	ret = (a.addr > b.addr) - (a.addr < b.addr);
	return ret;
}

static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
	edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
	edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);

	return edata_cmp_summary_comp(a_cmp, b_cmp);
}

static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
	int ret;

	ret = edata_esn_comp(a, b);
	if (ret != 0) {
		return ret;
	}
	ret = edata_ead_comp(a, b);
	return ret;
}
ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)

#endif /* JEMALLOC_INTERNAL_EDATA_H */