/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <stdio.h>

#ifdef GC_USE_ENTIRE_HEAP
  int GC_use_entire_heap = TRUE;
#else
  int GC_use_entire_heap = FALSE;
#endif

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black */
                /* listed block.  Must be >= HBLKSIZE.                */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list. */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a    */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single  */
        /* bin.                                                       */

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)
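        /* Worked example (values follow directly from the defaults   */
        /* above): with UNIQUE_THRESHOLD = 32, HUGE_THRESHOLD = 256   */
        /* and FL_COMPRESSION = 8, N_HBLK_FLS = (256-32)/8 + 32 = 60, */
        /* so the arrays below have 61 entries (indices 0..60).       */
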
#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
                /* List of completely empty heap blocks        */
                /* Linked through hb_next field of             */
                /* header structure associated with            */
                /* block.  Remains externally visible          */
                /* as used by GNU GCJ currently.               */

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.  Remains visible to GCJ. */

/* Return the largest n such that the number of free bytes on lists    */
/* n .. N_HBLK_FLS is greater or equal to GC_max_large_allocd_bytes    */
/* minus GC_large_allocd_bytes.  If there is no such n, return 0.      */
GC_INLINE int GC_enough_large_bytes_left(void)
{
    int n;
    word bytes = GC_large_allocd_bytes;

    GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
    for (n = N_HBLK_FLS; n >= 0; --n) {
        bytes += GC_free_bytes[n];
        if (bytes >= GC_max_large_allocd_bytes) return n;
    }
    return 0;
}
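/* Illustration of the computation above: suppose                      */
/* GC_max_large_allocd_bytes is 1 MB and GC_large_allocd_bytes is      */
/* 256 KB, so the shortfall is 768 KB.  If lists 41 .. N_HBLK_FLS      */
/* together hold 700 KB of free blocks and list 40 adds another        */
/* 100 KB, the loop stops at n = 40 (800 KB >= 768 KB, while 700 KB    */
/* alone is not enough).  GC_allochblk uses this value as its          */
/* split_limit.                                                        */
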
/* Map a number of blocks to the appropriate large block free list index. */
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
           + UNIQUE_THRESHOLD;
}
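/* Worked examples with the default thresholds:                        */
/*   3 blocks   -> list 3 (sizes up to UNIQUE_THRESHOLD get exact-fit  */
/*                 lists, so every block on list 3 is exactly 3 HBLKs),*/
/*   40 blocks  -> list (40 - 32)/8 + 32 = 33 (a compressed bin that   */
/*                 also receives 41..47-block requests),               */
/*   300 blocks -> list N_HBLK_FLS (all huge sizes share one list).    */
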
# define PHDR(hhdr) HDR((hhdr) -> hb_prev)
# define NHDR(hhdr) HDR((hhdr) -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else
#   define IS_MAPPED(hhdr) TRUE
# endif /* !USE_MUNMAP */

#if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
  /* Should return the same value as GC_large_free_bytes.      */
  GC_INNER word GC_compute_large_free_bytes(void)
  {
    word total_free = 0;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
        hhdr = HDR(h);
        total_free += hhdr->hb_sz;
      }
    }
    return total_free;
  }
#endif /* !NO_DEBUGGING || GC_ASSERTIONS */

# if !defined(NO_DEBUGGING)
  void GC_print_hblkfreelist(void)
  {
    unsigned i;
    word total;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h = GC_hblkfreelist[i];

      if (0 != h) GC_printf("Free list %u (total size %lu):\n",
                            i, (unsigned long)GC_free_bytes[i]);
      while (h != 0) {
        hdr * hhdr = HDR(h);

        GC_printf("\t%p size %lu %s black listed\n",
                  (void *)h, (unsigned long) hhdr -> hb_sz,
                  GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
                  GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
                                                              "not");
        h = hhdr -> hb_next;
      }
    }
    GC_printf("GC_large_free_bytes: %lu\n",
              (unsigned long)GC_large_free_bytes);

    if ((total = GC_compute_large_free_bytes()) != GC_large_free_bytes)
        GC_err_printf("GC_large_free_bytes INCONSISTENT!! Should be: %lu\n",
                      (unsigned long)total);
  }

  /* Return the free list index on which the block described by the    */
  /* header appears, or -1 if it appears nowhere.                      */
  static int free_list_index_of(hdr *wanted)
  {
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
      }
    }
    return -1;
  }

  GC_API void GC_CALL GC_dump_regions(void)
  {
    unsigned i;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        ptr_t start = GC_heap_sects[i].hs_start;
        size_t bytes = GC_heap_sects[i].hs_bytes;
        ptr_t end = start + bytes;
        ptr_t p;

        /* Merge in contiguous sections.        */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf("***Section from %p to %p\n", (void *)start, (void *)end);
        for (p = start; (word)p < (word)end; ) {
            hdr *hhdr = HDR(p);

            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf("\t%p Missing header!!(%p)\n",
                          (void *)p, (void *)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n",
                          (void *)p, (unsigned long)(hhdr -> hb_sz),
                          IS_MAPPED(hhdr) ? "" : " (unmapped)");
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf("\t\tBlock not on free list %d!!\n",
                              correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                              actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n",
                          (void *)p, (unsigned long)(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
  }
# endif /* !NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;

#   ifdef MARK_BIT_PER_GRANULE
      if (byte_sz > MAXOBJBYTES)
        flags |= LARGE_BLOCK;
#   endif
#   ifdef ENABLE_DISCLAIM
      if (GC_obj_kinds[kind].ok_disclaim_proc)
        flags |= HAS_DISCLAIM;
      if (GC_obj_kinds[kind].ok_mark_unconditionally)
        flags |= MARK_UNCONDITIONALLY;
#   endif

    /* Set size, kind and mark proc fields.     */
    hhdr -> hb_sz = byte_sz;
    hhdr -> hb_obj_kind = (unsigned char)kind;
    hhdr -> hb_flags = (unsigned char)flags;
    hhdr -> hb_block = block;
    descr = GC_obj_kinds[kind].ok_descriptor;
    if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
    hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                          */
      /* We set it to the smallest value such that sz * inv_sz >= 2**32. */
      /* This may be more precision than necessary.                      */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else /* 32-bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
#       ifdef INV_SZ_COMPUTATION_CHECK
          GC_ASSERT(((1ULL << 32) + byte_sz - 1) / byte_sz == inv_sz);
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
#   endif
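
    /* Note (illustrative; the actual use of hb_inv_sz lives in the    */
    /* mark code, not in this file): hb_inv_sz acts as a fixed-point   */
    /* reciprocal of the object size, so a byte offset can be turned   */
    /* into an object index with a multiply and a shift instead of a   */
    /* division.  For instance, with byte_sz = 48,                     */
    /* inv_sz = ceil(2**32 / 48) = 89478486, and an offset of 96 bytes */
    /* gives (96 * 89478486) >> 32 = 2, i.e. the third object.         */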
#   ifdef MARK_BIT_PER_GRANULE
      {
        size_t granules = BYTES_TO_GRANULES(byte_sz);

        if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
          /* Make it look like a valid block. */
          hhdr -> hb_sz = HBLKSIZE;
          hhdr -> hb_descr = 0;
          hhdr -> hb_flags |= LARGE_BLOCK;
          hhdr -> hb_map = 0;
          return FALSE;
        }
        hhdr -> hb_map = GC_obj_map[(hhdr -> hb_flags & LARGE_BLOCK) != 0 ?
                                    0 : granules];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

/* Remove hhdr from the free list (it is assumed to be on the free     */
/* list specified by index).                                           */
STATIC void GC_remove_from_fl_at(hdr *hhdr, int index)
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;

        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    /* We always need index to maintain free counts.    */
    GC_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    GC_free_bytes[index] -= hhdr -> hb_sz;
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;

        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/* Remove hhdr from the appropriate free list (we assume it is on the  */
/* size-appropriate free list).                                        */
GC_INLINE void GC_remove_from_fl(hdr *hhdr)
{
    GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
}

/* Return a pointer to the free block ending just before h, if any.    */
STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
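/* Illustration of the loop above (assuming the usual forwarding-      */
/* header convention from headers.c): the page just before h may be an */
/* interior page of a multi-block object, whose header slot holds a    */
/* small forwarding count rather than a real header.  FORWARDED_ADDR   */
/* hops backwards by that count until either a genuine header is found */
/* (then we check HBLK_IS_FREE) or the slot is NULL, in which case the */
/* GC_prev_block fallback is used.                                     */
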
/* Add hhdr to the appropriate free list.                              */
/* New blocks are pushed onto the head of the corresponding list.      */
STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];

#   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);

      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || (GC_heapsize & SIGNB) != 0);
                /* In the last case, blocks may be too large to merge. */
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
                || (GC_heapsize & SIGNB) != 0);
#   endif

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    GC_free_bytes[index] += hhdr -> hb_sz;
    GC_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      hdr * second_hdr;

      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}

#ifdef USE_MUNMAP

# ifndef MUNMAP_THRESHOLD
#   define MUNMAP_THRESHOLD 6
# endif

GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
GC_INNER void GC_unmap_old(void)
{
    int i;

    if (GC_unmap_threshold == 0)
      return; /* unmapping disabled */

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;

        /* Check that the interval is larger than the threshold (the   */
        /* truncated counter value wrapping is handled correctly).     */
        if ((unsigned short)(GC_gc_no - hhdr->hb_last_reclaimed) >
            (unsigned short)GC_unmap_threshold) {
          GC_unmap((ptr_t)h, (size_t)hhdr->hb_sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
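/* Worked example of the wrap-safe comparison above: with a threshold  */
/* of 6, suppose a block was last reclaimed at GC cycle 131070 (stored */
/* in the 16-bit hb_last_reclaimed field as 0xfffe) and GC_gc_no is    */
/* now 131074.  The raw word difference is 65540, but truncating it to */
/* unsigned short yields 4, the true interval, so the block is kept    */
/* mapped instead of being unmapped spuriously.                        */
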
# ifdef MPROTECT_VDB
    GC_INNER GC_bool GC_has_unmapped_memory(void)
    {
      int i;

      for (i = 0; i <= N_HBLK_FLS; ++i) {
        struct hblk * h;
        hdr * hhdr;

        for (h = GC_hblkfreelist[i]; h != NULL; h = hhdr -> hb_next) {
          hhdr = HDR(h);
          if (!IS_MAPPED(hhdr)) return TRUE;
        }
      }
      return FALSE;
    }
# endif /* MPROTECT_VDB */

/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
GC_INNER void GC_merge_unmapped(void)
{
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk *h = GC_hblkfreelist[i];

      while (h != 0) {
        struct hblk *next;
        hdr *hhdr, *nexthdr;
        word size, nextsize;

        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
            && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
               /* no pot. overflow */) {
            /* Note that we usually try to avoid adjacent free blocks  */
            /* that are either both mapped or both unmapped.  But that */
            /* isn't guaranteed to hold since we remap blocks when we  */
            /* split them, and don't merge at that point.  It may also */
            /* not hold if the merged block would be too big.          */
            if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
              /* make both consistent, so that we can merge */
              if (size > nextsize) {
                GC_remap((ptr_t)next, nextsize);
              } else {
                GC_unmap((ptr_t)h, size);
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
                hhdr -> hb_flags |= WAS_UNMAPPED;
              }
            } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
              /* Unmap any gap in the middle */
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl_at(hhdr, i);
            GC_remove_from_fl(nexthdr);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                       size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl_at(hhdr, index);
    if (total_size == bytes) return h;

    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* FIXME: This is likely to be very bad news ... */
        WARN("Header allocation failed: dropping block\n", 0);
        return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * That's not a disaster, since n is about to be allocated
 * by our caller.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                           hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
    GC_ASSERT(GC_free_bytes[index] > h_size);
    GC_free_bytes[index] -= h_size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}
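/* Sketch of the split performed above (addresses are illustrative):   */
/*                                                                     */
/*   before:  h [------------- total_size --------------]  (on list i) */
/*   after:   h [-- h_size --] n [-- total_size - h_size --]           */
/*                                                                     */
/* h goes back onto the free list matching h_size, while n takes h's   */
/* place on list i and is immediately handed to the caller, which is   */
/* why its header is only partially initialized here.                  */
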
STATIC struct hblk *
GC_allochblk_nth(size_t sz /* bytes */, int kind, unsigned flags, int n,
                 int may_split);
#define AVOID_SPLIT_REMAPPED 2

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
GC_INNER struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    struct hblk *result;
    int may_split;
    int split_limit; /* Highest index of free list whose blocks we split. */

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS_CHECKED(sz);
    if ((signed_word)(blocks * HBLKSIZE) < 0) {
      return 0;
    }
    start_list = GC_hblk_fl_from_blocks(blocks);
    /* Try for an exact match first. */
    result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
    if (0 != result) return result;

    may_split = TRUE;
    if (GC_use_entire_heap || GC_dont_gc
        || USED_HEAP_SIZE < GC_requested_heapsize
        || GC_incremental || !GC_should_collect()) {
        /* Should use more of the heap, even if it requires splitting. */
        split_limit = N_HBLK_FLS;
    } else if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
        /* If we are deallocating lots of memory from  */
        /* finalizers, fail and collect sooner rather  */
        /* than later.                                 */
        split_limit = 0;
    } else {
        /* If we have enough large blocks left to cover any    */
        /* previous request for large blocks, we go ahead      */
        /* and split.  Assuming a steady state, that should    */
        /* be safe.  It means that we can use the full         */
        /* heap if we allocate only small objects.             */
        split_limit = GC_enough_large_bytes_left();
#       ifdef USE_MUNMAP
          if (split_limit > 0)
            may_split = AVOID_SPLIT_REMAPPED;
#       endif
    }
    if (start_list < UNIQUE_THRESHOLD) {
      /* No reason to try start_list again, since all blocks are exact */
      /* matches.                                                      */
      ++start_list;
    }
    for (; start_list <= split_limit; ++start_list) {
        result = GC_allochblk_nth(sz, kind, flags, start_list, may_split);
        if (0 != result)
            break;
    }
    return result;
}
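/* Walk-through of the search order above: a request for 3 blocks      */
/* (with the default thresholds) first tries list 3 with splitting     */
/* disabled, hoping for an exact fit.  Failing that, it scans lists    */
/* 4 .. split_limit and splits the first sufficiently large block      */
/* found; split_limit is N_HBLK_FLS when the heap should be used       */
/* aggressively, and smaller when splitting big blocks might starve    */
/* future large allocations.                                           */
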
STATIC long GC_large_alloc_warn_suppressed = 0;
                        /* Number of warnings suppressed so far. */

/* The same, but with search restricted to nth free list.  Flags is    */
/* IGNORE_OFF_PAGE or zero.  sz is in bytes.  The may_split flag       */
/* indicates whether it is OK to split larger blocks (if set to        */
/* AVOID_SPLIT_REMAPPED then memory remapping followed by splitting    */
/* should be generally avoided).                                       */
STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp */
    signed_word size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
                                /* number of bytes in requested objects */

    /* search for a big enough block in free list */
    for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
      signed_word size_avail; /* bytes available in this block */

      if (NULL == hbp) return NULL;
      GET_HDR(hbp, hhdr); /* set hhdr value */
      size_avail = (signed_word)hhdr->hb_sz;
      if (size_avail < size_needed) continue;
      if (size_avail != size_needed) {
        if (!may_split) continue;
        /* If the next heap block is obviously better, go on.  */
        /* This prevents us from disassembling a single large  */
        /* block to get tiny blocks.                           */
        thishbp = hhdr -> hb_next;
        if (thishbp != 0) {
          signed_word next_size;

          GET_HDR(thishbp, thishdr);
          next_size = (signed_word)(thishdr -> hb_sz);
          if (next_size < size_avail
              && next_size >= size_needed
              && !GC_is_black_listed(thishbp, (word)size_needed)) {
            continue;
          }
        }
      }
      if (!IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
            || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
        struct hblk * lasthbp = hbp;
        ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
        signed_word orig_avail = size_avail;
        signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
                                        (signed_word)HBLKSIZE
                                        : size_needed;

        while ((word)lasthbp <= (word)search_end
               && (thishbp = GC_is_black_listed(lasthbp,
                                (word)eff_size_needed)) != 0) {
          lasthbp = thishbp;
        }
        size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
        thishbp = lasthbp;
        if (size_avail >= size_needed) {
          if (thishbp != hbp) {
#           ifdef USE_MUNMAP
              /* Avoid remapping followed by splitting. */
              if (may_split == AVOID_SPLIT_REMAPPED && !IS_MAPPED(hhdr))
                continue;
#           endif
            thishdr = GC_install_header(thishbp);
            if (0 != thishdr) {
              /* Make sure it's mapped before we mangle it. */
#             ifdef USE_MUNMAP
                if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
#             endif
              /* Split the block at thishbp */
              GC_split_block(hbp, hhdr, thishbp, thishdr, n);
              /* Advance to thishbp */
              hbp = thishbp;
              hhdr = thishdr;
              /* We must now allocate thishbp, since it may     */
              /* be on the wrong free list.                     */
            }
          }
        } else if (size_needed > (signed_word)BL_LIMIT
                   && orig_avail - size_needed
                      > (signed_word)BL_LIMIT) {
          /* Punt, since anything else risks unreasonable heap growth. */
          if (++GC_large_alloc_warn_suppressed
              >= GC_large_alloc_warn_interval) {
            WARN("Repeated allocation of very large block "
                 "(appr. size %" WARN_PRIdPTR "):\n"
                 "\tMay lead to memory leak and poor performance\n",
                 size_needed);
            GC_large_alloc_warn_suppressed = 0;
          }
          size_avail = orig_avail;
        } else if (size_avail == 0
                   && size_needed == (signed_word)HBLKSIZE
                   && IS_MAPPED(hhdr)) {
          if (!GC_find_leak) {
            static unsigned count = 0;

            /* The block is completely blacklisted.  We need        */
            /* to drop some such blocks, since otherwise we spend   */
            /* all our time traversing them if pointer-free         */
            /* blocks are unpopular.                                */
            /* A dropped block will be reconsidered at next GC.     */
            if ((++count & 3) == 0) {
              /* Allocate and drop the block in small chunks, to    */
              /* maximize the chance that we will recover some      */
              /* later.                                             */
              word total_size = hhdr -> hb_sz;
              struct hblk * limit = hbp + divHBLKSZ(total_size);
              struct hblk * h;
              struct hblk * prev = hhdr -> hb_prev;

              GC_large_free_bytes -= total_size;
              GC_bytes_dropped += total_size;
              GC_remove_from_fl_at(hhdr, n);
              for (h = hbp; (word)h < (word)limit; h++) {
                if (h != hbp) {
                  hhdr = GC_install_header(h);
                }
                if (NULL != hhdr) {
                  (void)setup_header(hhdr, h, HBLKSIZE, PTRFREE, 0);
                                                /* Can't fail. */
                  if (GC_debugging_started) {
                    BZERO(h, HBLKSIZE);
                  }
                }
              }
              /* Restore hbp to point at free block */
              hbp = prev;
              if (0 == hbp) {
                return GC_allochblk_nth(sz, kind, flags, n, may_split);
              }
              hhdr = HDR(hbp);
            }
          }
        }
      }
      if (size_avail >= size_needed) {
#       ifdef USE_MUNMAP
          if (!IS_MAPPED(hhdr)) {
            GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
            hhdr -> hb_flags &= ~WAS_UNMAPPED;
            /* Note: This may leave adjacent, mapped free blocks. */
          }
#       endif
        /* hbp may be on the wrong freelist; the parameter n is important. */
        hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
        break;
      }
    }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
    if (!setup_header(hhdr, hbp, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */
    }

#   ifndef GC_DISABLE_INCREMENTAL
      /* Notify virtual dirty bit implementation that we are about to  */
      /* write.  Ensure that pointer-free objects are not protected    */
      /* if it is avoidable.  This also ensures that newly allocated   */
      /* blocks are treated as dirty.  Necessary since we don't        */
      /* protect free blocks.                                          */
      GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
      GC_remove_protection(hbp, divHBLKSZ(size_needed),
                           (hhdr -> hb_descr == 0) /* pointer-free */);
#   endif
    /* We just successfully allocated a block.  Restart count of       */
    /* consecutive failures.                                           */
    GC_fail_count = 0;

    GC_large_free_bytes -= size_needed;
    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
GC_INNER void GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    word size;

    GET_HDR(hbp, hhdr);
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
    if ((signed_word)size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the    */
      /* address space at once.  If we don't catch it here, strange    */
      /* things happen later.                                          */
    GC_remove_counts(hbp, size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
    if (HBLK_IS_FREE(hhdr)) {
        ABORT_ARG1("Duplicate large block deallocation",
                   " of %p", (void *)hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((ptr_t)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
        && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
           /* no overflow */) {
        GC_remove_from_fl(nexthdr);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)
            && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
          GC_remove_from_fl(prevhdr);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#         endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
    }
    /* FIXME: It is not clear we really always want to do these merges */
    /* with USE_MUNMAP, since it updates ages and hence prevents       */
    /* unmapping.                                                      */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
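/* Example of the coalescing above (assuming all blocks involved are   */
/* mapped): if blocks A, B and C are adjacent in address order, A is   */
/* already free, and B is passed to GC_freehblk while C is also free,  */
/* then B first absorbs C (its successor) and the result is absorbed   */
/* into A, so a single block covering A..C ends up on the free list    */
/* chosen for the combined size.                                       */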