allchblk.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <stdio.h>

#ifdef GC_USE_ENTIRE_HEAP
  int GC_use_entire_heap = TRUE;
#else
  int GC_use_entire_heap = FALSE;
#endif

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                   */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list */

# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a   */
        /* single free list.                                          */

# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single */
        /* bin.                                                       */

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
        /* List of completely empty heap blocks.   */
        /* Linked through hb_next field of         */
        /* header structure associated with        */
        /* block.  Remains externally visible      */
        /* as used by GNU GCJ currently.           */

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.  Remains visible to GCJ. */

/* Return the largest n such that the number of free bytes on lists    */
/* n .. N_HBLK_FLS is greater than or equal to GC_max_large_allocd_bytes */
/* minus GC_large_allocd_bytes.  If there is no such n, return 0.      */
GC_INLINE int GC_enough_large_bytes_left(void)
{
    int n;
    word bytes = GC_large_allocd_bytes;

    GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
    for (n = N_HBLK_FLS; n >= 0; --n) {
      bytes += GC_free_bytes[n];
      if (bytes >= GC_max_large_allocd_bytes) return n;
    }
    return 0;
}

/* Map a number of blocks to the appropriate large block free list index. */
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                + UNIQUE_THRESHOLD;
}
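
/* Worked example of the mapping above, purely illustrative, using the */
/* default thresholds (UNIQUE_THRESHOLD = 32, FL_COMPRESSION = 8,      */
/* HUGE_THRESHOLD = 256, hence N_HBLK_FLS = (256-32)/8 + 32 = 60):     */
/*   1..31 blocks  -> lists 1..31 (one exact size per list),           */
/*   32..39 blocks -> list 32, 40..47 blocks -> list 33, and so on,    */
/*   248..255 blocks -> list 59,                                       */
/*   256 or more blocks -> list N_HBLK_FLS (60).                       */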

# define PHDR(hhdr) HDR((hhdr) -> hb_prev)
# define NHDR(hhdr) HDR((hhdr) -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else
#   define IS_MAPPED(hhdr) TRUE
# endif /* !USE_MUNMAP */

#if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
  /* Should return the same value as GC_large_free_bytes. */
  GC_INNER word GC_compute_large_free_bytes(void)
  {
    struct hblk * h;
    hdr * hhdr;
    word total_free = 0;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
        hhdr = HDR(h);
        total_free += hhdr->hb_sz;
      }
    }
    return total_free;
  }
#endif /* !NO_DEBUGGING || GC_ASSERTIONS */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist(void)
{
    struct hblk * h;
    hdr * hhdr;
    unsigned i;
    word total;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf("Free list %u (total size %lu):\n",
                            i, (unsigned long)GC_free_bytes[i]);
      while (h != 0) {
        hhdr = HDR(h);
        GC_printf("\t%p size %lu %s black listed\n",
                  (void *)h, (unsigned long) hhdr -> hb_sz,
                  GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
                  GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
                                                              "not");
        h = hhdr -> hb_next;
      }
    }
    GC_printf("GC_large_free_bytes: %lu\n",
              (unsigned long)GC_large_free_bytes);

    if ((total = GC_compute_large_free_bytes()) != GC_large_free_bytes)
      GC_err_printf("GC_large_free_bytes INCONSISTENT!! Should be: %lu\n",
                    (unsigned long)total);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
static int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions(void)
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
      start = GC_heap_sects[i].hs_start;
      bytes = GC_heap_sects[i].hs_bytes;
      end = start + bytes;
      /* Merge in contiguous sections.  */
      while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
        ++i;
        end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
      }
      GC_printf("***Section from %p to %p\n", start, end);
      for (p = start; (word)p < (word)end; ) {
        hhdr = HDR(p);
        if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
          GC_printf("\t%p Missing header!!(%p)\n", p, (void *)hhdr);
          p += HBLKSIZE;
          continue;
        }
        if (HBLK_IS_FREE(hhdr)) {
          int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
          int actual_index;

          GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
                    (unsigned long)(hhdr -> hb_sz),
                    IS_MAPPED(hhdr) ? "" : " (unmapped)");
          actual_index = free_list_index_of(hhdr);
          if (-1 == actual_index) {
            GC_printf("\t\tBlock not on free list %d!!\n",
                      correct_index);
          } else if (correct_index != actual_index) {
            GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                      actual_index, correct_index);
          }
          p += hhdr -> hb_sz;
        } else {
          GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
                    (unsigned long)(hhdr -> hb_sz));
          p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        }
      }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                     */
/* Return FALSE on failure.                                             */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;
#   ifndef MARK_BIT_PER_OBJ
      size_t granules;
#   endif

#   ifdef ENABLE_DISCLAIM
      if (GC_obj_kinds[kind].ok_disclaim_proc)
        flags |= HAS_DISCLAIM;
      if (GC_obj_kinds[kind].ok_mark_unconditionally)
        flags |= MARK_UNCONDITIONALLY;
#   endif

    /* Set size, kind and mark proc fields. */
    hhdr -> hb_sz = byte_sz;
    hhdr -> hb_obj_kind = (unsigned char)kind;
    hhdr -> hb_flags = (unsigned char)flags;
    hhdr -> hb_block = block;
    descr = GC_obj_kinds[kind].ok_descriptor;
    if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
    hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                         */
      /* We set it to the smallest value such that sz * inv_sz > 2**32. */
      /* This may be more precision than necessary.                     */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
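
      /* Illustrative example of the reciprocal trick above (not part  */
      /* of the original source): on a 64-bit target with byte_sz = 24, */
      /* 2**32/24 = 178956970 and 178956970*24 = 4294967280 < 2**32,    */
      /* so inv_sz is bumped to 178956971.  The intent is that an       */
      /* in-block displacement d can then be turned into an object      */
      /* index as (d * inv_sz) >> 32, avoiding a division.              */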
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

/* Remove hhdr from the free list (it is assumed to be on the list     */
/* specified by index).                                                 */
STATIC void GC_remove_from_fl_at(hdr *hhdr, int index)
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
      GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
      GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
      hdr *phdr;

      GET_HDR(hhdr -> hb_prev, phdr);
      phdr -> hb_next = hhdr -> hb_next;
    }
    /* We always need index to maintain free counts. */
    GC_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    GC_free_bytes[index] -= hhdr -> hb_sz;
    if (0 != hhdr -> hb_next) {
      hdr * nhdr;

      GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
      GET_HDR(hhdr -> hb_next, nhdr);
      nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/* Remove hhdr from the appropriate free list (we assume it is on the  */
/* size-appropriate free list).                                         */
GC_INLINE void GC_remove_from_fl(hdr *hhdr)
{
    GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
}

/* Return a pointer to the free block ending just before h, if any.    */
STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
      p = FORWARDED_ADDR(p,phdr);
      phdr = HDR(p);
    }
    if (0 != phdr) {
      if (HBLK_IS_FREE(phdr)) {
        return p;
      } else {
        return 0;
      }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}
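
/* The lookup above proceeds in two steps: it first chases any          */
/* forwarding headers for the block immediately preceding h (these are  */
/* the small counts installed for interior blocks of multi-block        */
/* chunks), which succeeds when that block lies inside a chunk with a   */
/* real header; failing that, it falls back to GC_prev_block() and      */
/* checks whether the preceding block is free and ends exactly at h.    */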

/* Add hhdr to the appropriate free list.               */
/* We maintain individual free lists sorted by address. */
STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);

      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || (signed_word)GC_heapsize < 0);
                /* In the last case, blocks may be too large to merge. */
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
                || (signed_word)GC_heapsize < 0);
#   endif

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    GC_free_bytes[index] += hhdr -> hb_sz;
    GC_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}

#ifdef USE_MUNMAP

# ifndef MUNMAP_THRESHOLD
#   define MUNMAP_THRESHOLD 6
# endif

GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                        */
GC_INNER void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    if (GC_unmap_threshold == 0)
      return; /* unmapping disabled */

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;

        if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
                (unsigned short)GC_unmap_threshold) {
          GC_unmap((ptr_t)h, hhdr -> hb_sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
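
/* Note: since hb_last_reclaimed is an unsigned short, the age          */
/* comparison above is effectively performed modulo 2**16 collections;  */
/* a block whose age wraps around may simply stay mapped for some       */
/* additional rounds, which only delays unmapping.                      */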

/* Merge all unmapped blocks that are adjacent to other free            */
/* blocks.  This may involve remapping, since all blocks are either     */
/* fully mapped or fully unmapped.                                       */
GC_INNER void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible. */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
            && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
               /* no pot. overflow */) {
          /* Note that we usually try to avoid adjacent free blocks   */
          /* that are either both mapped or both unmapped.  But that  */
          /* isn't guaranteed to hold since we remap blocks when we   */
          /* split them, and don't merge at that point.  It may also  */
          /* not hold if the merged block would be too big.           */
          if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* make both consistent, so that we can merge */
            if (size > nextsize) {
              GC_remap((ptr_t)next, nextsize);
            } else {
              GC_unmap((ptr_t)h, size);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
              hhdr -> hb_flags |= WAS_UNMAPPED;
            }
          } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
            if (size > nextsize) {
              GC_unmap((ptr_t)next, nextsize);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
            } else {
              GC_remap((ptr_t)h, size);
              hhdr -> hb_flags &= ~WAS_UNMAPPED;
              hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
            }
          } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* Unmap any gap in the middle */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
          }
          /* If they are both unmapped, we merge, but leave unmapped. */
          GC_remove_from_fl_at(hhdr, i);
          GC_remove_from_fl(nexthdr);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
          GC_add_to_fl(h, hhdr);
          /* Start over at beginning of list */
          h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
          h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                       size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl_at(hhdr, index);
    if (total_size == bytes) return h;

    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
      /* FIXME: This is likely to be very bad news ... */
      WARN("Header allocation failed: Dropping block.\n", 0);
      return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * That's not a disaster, since n is about to be allocated
 * by our caller.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                           hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
      HDR(prev) -> hb_next = n;
    } else {
      GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
      HDR(next) -> hb_prev = n;
    }
    GC_ASSERT(GC_free_bytes[index] > h_size);
    GC_free_bytes[index] -= h_size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}

STATIC struct hblk *
GC_allochblk_nth(size_t sz /* bytes */, int kind, unsigned flags, int n,
                 int may_split);
#define AVOID_SPLIT_REMAPPED 2

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
GC_INNER struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    struct hblk *result;
    int may_split;
    int split_limit; /* Highest index of free list whose blocks we split. */

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    if ((signed_word)(blocks * HBLKSIZE) < 0) {
      return 0;
    }
    start_list = GC_hblk_fl_from_blocks(blocks);
    /* Try for an exact match first. */
    result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
    if (0 != result) return result;

    may_split = TRUE;
    if (GC_use_entire_heap || GC_dont_gc
        || USED_HEAP_SIZE < GC_requested_heapsize
        || GC_incremental || !GC_should_collect()) {
      /* Should use more of the heap, even if it requires splitting. */
      split_limit = N_HBLK_FLS;
    } else if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
      /* If we are deallocating lots of memory from         */
      /* finalizers, fail and collect sooner rather         */
      /* than later.                                        */
      split_limit = 0;
    } else {
      /* If we have enough large blocks left to cover any   */
      /* previous request for large blocks, we go ahead     */
      /* and split.  Assuming a steady state, that should   */
      /* be safe.  It means that we can use the full        */
      /* heap if we allocate only small objects.            */
      split_limit = GC_enough_large_bytes_left();
#     ifdef USE_MUNMAP
        if (split_limit > 0)
          may_split = AVOID_SPLIT_REMAPPED;
#     endif
    }
    if (start_list < UNIQUE_THRESHOLD) {
      /* No reason to try start_list again, since all blocks are exact */
      /* matches.                                                       */
      ++start_list;
    }
    for (; start_list <= split_limit; ++start_list) {
      result = GC_allochblk_nth(sz, kind, flags, start_list, may_split);
      if (0 != result)
        break;
    }
    return result;
}

STATIC long GC_large_alloc_warn_suppressed = 0;
                        /* Number of warnings suppressed so far. */

/* The same, but with search restricted to nth free list.  Flags is    */
/* IGNORE_OFF_PAGE or zero.  sz is in bytes.  The may_split flag        */
/* indicates whether it is OK to split larger blocks (if set to         */
/* AVOID_SPLIT_REMAPPED then memory remapping followed by splitting     */
/* should be generally avoided).                                         */
STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
    for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
      if (NULL == hbp) return NULL;
      GET_HDR(hbp, hhdr); /* set hhdr value */
      size_avail = hhdr->hb_sz;
      if (size_avail < size_needed) continue;

      if (size_avail != size_needed) {
        signed_word next_size;

        if (!may_split) continue;
        /* If the next heap block is obviously better, go on.    */
        /* This prevents us from disassembling a single large    */
        /* block to get tiny blocks.                              */
        thishbp = hhdr -> hb_next;
        if (thishbp != 0) {
          GET_HDR(thishbp, thishdr);
          next_size = (signed_word)(thishdr -> hb_sz);
          if (next_size < size_avail
              && next_size >= size_needed
              && !GC_is_black_listed(thishbp, (word)size_needed)) {
            continue;
          }
        }
      }

      if (!IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
            || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
        struct hblk * lasthbp = hbp;
        ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
        signed_word orig_avail = size_avail;
        signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
                                        (signed_word)HBLKSIZE
                                        : size_needed;

        while ((word)lasthbp <= (word)search_end
               && (thishbp = GC_is_black_listed(lasthbp,
                                    (word)eff_size_needed)) != 0) {
          lasthbp = thishbp;
        }
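
        /* On exit from the loop, lasthbp is either past search_end    */
        /* (every candidate start within this block is black listed)   */
        /* or it points to a position within the block at which an     */
        /* allocation of eff_size_needed bytes is not black listed.    */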
        size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
        thishbp = lasthbp;
        if (size_avail >= size_needed) {
          if (thishbp != hbp) {
#           ifdef USE_MUNMAP
              /* Avoid remapping followed by splitting. */
              if (may_split == AVOID_SPLIT_REMAPPED && !IS_MAPPED(hhdr))
                continue;
#           endif
            thishdr = GC_install_header(thishbp);
            if (0 != thishdr) {
              /* Make sure it's mapped before we mangle it. */
#             ifdef USE_MUNMAP
                if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
#             endif
              /* Split the block at thishbp */
              GC_split_block(hbp, hhdr, thishbp, thishdr, n);
              /* Advance to thishbp */
              hbp = thishbp;
              hhdr = thishdr;
              /* We must now allocate thishbp, since it may     */
              /* be on the wrong free list.                      */
            }
          }
        } else if (size_needed > (signed_word)BL_LIMIT
                   && orig_avail - size_needed
                      > (signed_word)BL_LIMIT) {
          /* Punt, since anything else risks unreasonable heap growth. */
          if (++GC_large_alloc_warn_suppressed
              >= GC_large_alloc_warn_interval) {
            WARN("Repeated allocation of very large block "
                 "(appr. size %" WARN_PRIdPTR "):\n"
                 "\tMay lead to memory leak and poor performance.\n",
                 size_needed);
            GC_large_alloc_warn_suppressed = 0;
          }
          size_avail = orig_avail;
        } else if (size_avail == 0 && size_needed == HBLKSIZE
                   && IS_MAPPED(hhdr)) {
          if (!GC_find_leak) {
            static unsigned count = 0;

            /* The block is completely blacklisted.  We need         */
            /* to drop some such blocks, since otherwise we spend    */
            /* all our time traversing them if pointer-free          */
            /* blocks are unpopular.                                  */
            /* A dropped block will be reconsidered at next GC.       */
            if ((++count & 3) == 0) {
              /* Allocate and drop the block in small chunks, to     */
              /* maximize the chance that we will recover some       */
              /* later.                                               */
              word total_size = hhdr -> hb_sz;
              struct hblk * limit = hbp + divHBLKSZ(total_size);
              struct hblk * h;
              struct hblk * prev = hhdr -> hb_prev;

              GC_large_free_bytes -= total_size;
              GC_bytes_dropped += total_size;
              GC_remove_from_fl_at(hhdr, n);
              for (h = hbp; (word)h < (word)limit; h++) {
                if (h != hbp) {
                  hhdr = GC_install_header(h);
                }
                if (NULL != hhdr) {
                  (void)setup_header(hhdr, h, HBLKSIZE, PTRFREE, 0);
                                                /* Can't fail. */
                  if (GC_debugging_started) {
                    BZERO(h, HBLKSIZE);
                  }
                }
              }
              /* Restore hbp to point at free block */
              hbp = prev;
              if (0 == hbp) {
                return GC_allochblk_nth(sz, kind, flags, n, may_split);
              }
              hhdr = HDR(hbp);
            }
          }
        }
      }

      if (size_avail >= size_needed) {
#       ifdef USE_MUNMAP
          if (!IS_MAPPED(hhdr)) {
            GC_remap((ptr_t)hbp, hhdr -> hb_sz);
            hhdr -> hb_flags &= ~WAS_UNMAPPED;
            /* Note: This may leave adjacent, mapped free blocks. */
          }
#       endif
        /* hbp may be on the wrong freelist; the parameter n is important. */
        hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
        break;
      }
    }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
    if (!setup_header(hhdr, hbp, sz, kind, flags)) {
      GC_remove_counts(hbp, (word)size_needed);
      return(0); /* ditto */
    }

#   ifndef GC_DISABLE_INCREMENTAL
      /* Notify virtual dirty bit implementation that we are about to  */
      /* write.  Ensure that pointer-free objects are not protected    */
      /* if it is avoidable.  This also ensures that newly allocated   */
      /* blocks are treated as dirty.  Necessary since we don't        */
      /* protect free blocks.                                          */
      GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
      GC_remove_protection(hbp, divHBLKSZ(size_needed),
                           (hhdr -> hb_descr == 0) /* pointer-free */);
#   endif

    /* We just successfully allocated a block.  Restart count of       */
    /* consecutive failures.                                            */
    GC_fail_count = 0;

    GC_large_free_bytes -= size_needed;
    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
GC_INNER void GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    word size;

    GET_HDR(hbp, hhdr);
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
    if ((signed_word)size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the address */
      /* space at once.  If we don't catch it here, strange things happen   */
      /* later.                                                              */
    GC_remove_counts(hbp, size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
    if (HBLK_IS_FREE(hhdr)) {
      ABORT_ARG1("Duplicate large block deallocation",
                 " of %p", (void *)hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((ptr_t)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
        && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
           /* no overflow */) {
      GC_remove_from_fl(nexthdr);
      hhdr -> hb_sz += nexthdr -> hb_sz;
      GC_remove_header(next);
    }

    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
      prevhdr = HDR(prev);
      if (IS_MAPPED(prevhdr)
          && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
        GC_remove_from_fl(prevhdr);
        prevhdr -> hb_sz += hhdr -> hb_sz;
#       ifdef USE_MUNMAP
          prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#       endif
        GC_remove_header(hbp);
        hbp = prev;
        hhdr = prevhdr;
      }
    }
    /* FIXME: It is not clear we really always want to do these merges */
    /* with USE_MUNMAP, since it updates ages and hence prevents       */
    /* unmapping.                                                       */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}