/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "private/gc_priv.h"

#include <stdio.h>
#if !defined(MACOS) && !defined(MSWINCE)
# include <signal.h>
# if !defined(__CC_ARM)
#   include <sys/types.h>
# endif
#endif

/*
 * Separate free lists are maintained for different sized objects
 * up to MAXOBJBYTES.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list.  It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *   opp = &(GC_objfreelist[i]);
 *   if (*opp == 0) GC_allocobj(i, NORMAL);
 *   ptr = *opp;
 *   *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */

/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
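
/*
 * For example (an illustrative sketch, not code from this file): the public
 * GC_base() call relies on these block headers to map an arbitrary address
 * back to the base of the object containing it, returning 0 for addresses
 * that do not point into the GC heap:
 *
 *   void *base = GC_base(p);
 *   if (base != 0) {
 *     ... p points into a collector-managed object starting at base ...
 *   }
 */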
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

#ifndef GC_DISABLE_INCREMENTAL
  GC_INNER int GC_incremental = 0;      /* By default, stop the world. */
#endif

#ifdef THREADS
  int GC_parallel = FALSE;              /* By default, parallel GC is off. */
#endif

#ifndef GC_FULL_FREQ
# define GC_FULL_FREQ 19        /* Every 20th collection is a full      */
                                /* collection, whether we need it       */
                                /* or not.                              */
#endif

int GC_full_freq = GC_FULL_FREQ;

STATIC GC_bool GC_need_full_gc = FALSE;
                                /* Need full GC due to heap growth.     */

#ifdef THREAD_LOCAL_ALLOC
  GC_INNER GC_bool GC_world_stopped = FALSE;
#endif

STATIC word GC_used_heap_size_after_full = 0;

/* GC_copyright symbol is externally visible. */
char * const GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

/* Version macros are now defined in gc_version.h, which is included by */
/* gc.h, which is included by gc_priv.h.                                */
#ifndef GC_NO_VERSION_VAR
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                               (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
#endif

GC_API unsigned GC_CALL GC_get_version(void)
{
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
          GC_VERSION_MICRO;
}
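
/*
 * The packed value can be decoded by a client as follows (an illustrative
 * sketch matching the encoding above):
 *
 *   unsigned v = GC_get_version();
 *   unsigned major = v >> 16;
 *   unsigned minor = (v >> 8) & 0xff;
 *   unsigned micro = v & 0xff;
 */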
/* some more variables */

#ifdef GC_DONT_EXPAND
  int GC_dont_expand = TRUE;
#else
  int GC_dont_expand = FALSE;
#endif

#ifndef GC_FREE_SPACE_DIVISOR
# define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
#endif
word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;

GC_INNER int GC_CALLBACK GC_never_stop_func(void)
{
  return(0);
}

#ifndef GC_TIME_LIMIT
# define GC_TIME_LIMIT 50  /* We try to keep pause times from exceeding  */
                           /* this by much.  In milliseconds.            */
#endif
unsigned long GC_time_limit = GC_TIME_LIMIT;

#ifndef NO_CLOCK
  STATIC CLOCK_TYPE GC_start_time = 0;
                                /* Time at which we stopped world.      */
                                /* used only in GC_timeout_stop_func.   */
#endif

STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
                                /* collection within GC_time_limit.     */

STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
                                /* accessed holding the lock.           */

GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
{
    DCL_LOCK_STATE;
    GC_ASSERT(stop_func != 0);
    LOCK();
    GC_default_stop_func = stop_func;
    UNLOCK();
}

GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
{
    GC_stop_func stop_func;
    DCL_LOCK_STATE;
    LOCK();
    stop_func = GC_default_stop_func;
    UNLOCK();
    return stop_func;
}

#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_default_stop_func
#else
  STATIC int GC_CALLBACK GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((*GC_default_stop_func)())
      return(1);

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
      GC_COND_LOG_PRINTF(
          "Abandoning stopped marking after %lu msecs (attempt %d)\n",
          time_diff, GC_n_attempts);
      return(1);
    }
    return(0);
  }
#endif /* !GC_DISABLE_INCREMENTAL */

#ifdef THREADS
  GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
#endif

/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.  Should be non-zero.    */
static word min_bytes_allocd(void)
{
    word result;
#   ifdef STACK_GROWS_UP
      word stack_size = GC_approx_sp() - GC_stackbottom;
            /* GC_stackbottom is used only for a single-threaded case. */
#   else
      word stack_size = GC_stackbottom - GC_approx_sp();
#   endif

    word total_root_size;       /* includes double stack size,  */
                                /* since the stack is expensive */
                                /* to scan.                     */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

#   ifdef THREADS
      if (GC_need_to_lock) {
        /* We are multi-threaded... */
        stack_size = GC_total_stacksize;
        /* For now, we just use the value computed during the latest GC. */
#       ifdef DEBUG_THREADS
          GC_log_printf("Total stacks size: %lu\n",
                        (unsigned long)stack_size);
#       endif
      }
#   endif

    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    result = scan_size / GC_free_space_divisor;
    if (GC_incremental) {
      result /= 2;
    }
    return result > 0 ? result : 1;
}
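
/*
 * A rough worked example of the heuristic above (illustrative numbers only):
 * with a total_root_size of about 1 MiB, 8 MiB of composite data and 4 MiB
 * of atomic data in use, scan_size is roughly 2*8 + 4/4 + 1 = 18 MiB; with
 * the default GC_free_space_divisor of 3 that amortizes to about 6 MiB of
 * allocation between collections (halved if incremental GC is enabled).
 */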
STATIC word GC_non_gc_bytes_at_gc = 0;
                /* Number of explicitly managed bytes of storage        */
                /* at last collection.                                  */

/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc..  This number is used in deciding when to trigger   */
/* collections.                                                         */
STATIC word GC_adj_bytes_allocd(void)
{
    signed_word result;
    signed_word expl_managed = (signed_word)GC_non_gc_bytes
                                - (signed_word)GC_non_gc_bytes_at_gc;

    /* Don't count what was explicitly freed, or newly allocated for    */
    /* explicit management.  Note that deallocating an explicitly       */
    /* managed object should not alter result, assuming the client      */
    /* is playing by the rules.                                         */
    result = (signed_word)GC_bytes_allocd
             + (signed_word)GC_bytes_dropped
             - (signed_word)GC_bytes_freed
             + (signed_word)GC_finalizer_bytes_freed
             - expl_managed;
    if (result > (signed_word)GC_bytes_allocd) {
        result = GC_bytes_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_bytes_finalized;
        /* We count objects enqueued for finalization as though they    */
        /* had been reallocated this round.  Finalization is user       */
        /* visible progress.  And if we don't count this, we have       */
        /* stability problems for programs that finalize all objects.   */
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_bytes_allocd >> 3);
    } else {
        return(result);
    }
}

/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
#   ifndef CLEAR_NWORDS
#     define CLEAR_NWORDS 64
#   endif
    volatile word frames[CLEAR_NWORDS];
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
}

/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
STATIC word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_INNER GC_bool GC_should_collect(void)
{
    static word last_min_bytes_allocd;
    static word last_gc_no;
    if (last_gc_no != GC_gc_no) {
      last_gc_no = GC_gc_no;
      last_min_bytes_allocd = min_bytes_allocd();
    }
    return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
           || GC_heapsize >= GC_collect_at_heapsize);
}

/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
                        /* Called at start of full collections.         */
                        /* Not called if 0.  Called with the allocation */
                        /* lock held.  Not used by GC itself.           */

GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
{
    DCL_LOCK_STATE;
    LOCK();
    GC_start_call_back = fn;
    UNLOCK();
}

GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
{
    GC_start_callback_proc fn;
    DCL_LOCK_STATE;
    LOCK();
    fn = GC_start_call_back;
    UNLOCK();
    return fn;
}

GC_INLINE void GC_notify_full_gc(void)
{
    if (GC_start_call_back != 0) {
        (*GC_start_call_back)();
    }
}

STATIC GC_bool GC_is_full_gc = FALSE;

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);

/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously between partial, full, and stop-world collections.
 */
STATIC void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* FIXME: If possible, GC_default_stop_func should be used here */
            GC_try_to_collect_inner(GC_never_stop_func);
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            if (GC_parallel)
              GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            GC_COND_LOG_PRINTF(
                "***>Full mark for collection #%lu after %lu allocd bytes\n",
                (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_notify_full_gc();
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        /* FIXME: If possible, GC_default_stop_func should be           */
        /* used instead of GC_never_stop_func here.                     */
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
                GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}
/*
 * Stop-the-world garbage collection.  Assumes lock held.  If stop_func is
 * not GC_never_stop_func then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif
    ASSERT_CANCEL_DISABLED();
    if (GC_dont_gc || (*stop_func)()) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      GC_COND_LOG_PRINTF(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      /* Just finish collection already in progress. */
      while(GC_collection_in_progress()) {
        if ((*stop_func)()) return(FALSE);
        GC_collect_a_little_inner(1);
      }
    }
    GC_notify_full_gc();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf("Initiating full world-stop collection!\n");
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
      /* Aborted.  So far everything is still consistent. */
      return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack. */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
      GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work.  Reestablish invariants as cheaply as          */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll  */
        /* finish incrementally.                                        */
      return(FALSE);
    }
    GC_finish_collection();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    return(TRUE);
}
/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
#ifndef GC_RATE
# define GC_RATE 10
#endif

#ifndef MAX_PRIOR_ATTEMPTS
# define MAX_PRIOR_ATTEMPTS 1
#endif
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */

STATIC int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
                           /* that we have made.                        */

GC_INNER void GC_collect_a_little_inner(int n)
{
    int i;
    IF_CANCEL(int cancel_state;)

    if (GC_dont_gc) return;
    DISABLE_CANCEL(cancel_state);
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                    GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                    if (GC_parallel)
                      GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
#                 ifndef NO_CLOCK
                    GET_TIME(GC_start_time);
#                 endif
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  /* FIXME: If possible, GC_default_stop_func should be */
                  /* used here.                                         */
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
    RESTORE_CANCEL(cancel_state);
}

GC_INNER void (*GC_check_heap)(void) = 0;
GC_INNER void (*GC_print_all_smashed)(void) = 0;

GC_API int GC_CALL GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
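
/*
 * An illustrative client usage pattern (not part of this file): when
 * incremental collection is enabled, an application can donate idle time
 * to the collector by repeatedly calling GC_collect_a_little() until it
 * reports that no collection is in progress, e.g.
 *
 *   while (application_is_idle() && GC_collect_a_little())
 *     continue;
 *
 * where application_is_idle() is a hypothetical application-side check.
 */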
#ifndef SMALL_CONFIG
  /* Variables for world-stop average delay time statistic computation. */
  /* "divisor" is incremented every world-stop and halved when it       */
  /* reaches its maximum (or upon "total_time" overflow).               */
  static unsigned world_stopped_total_time = 0;
  static unsigned world_stopped_total_divisor = 0;
# ifndef MAX_TOTAL_TIME_DIVISOR
    /* We shall not use big values here (so "outdated" delay time       */
    /* values would have less impact on "average" delay time value than */
    /* newer ones).                                                     */
#   define MAX_TOTAL_TIME_DIVISOR 1000
# endif
#endif

#ifdef USE_MUNMAP
# define IF_USE_MUNMAP(x) x
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
#else
# define IF_USE_MUNMAP(x) /* empty */
# define COMMA_IF_USE_MUNMAP(x) /* empty */
#endif

/*
 * Assumes lock is held.  We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif

#   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
        GC_add_current_malloc_heap();
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
        GC_cond_register_dynamic_libraries();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_PRINT_STATS_FLAG)
        GET_TIME(start_time);
#   endif

    STOP_WORLD();
#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = TRUE;
#   endif
    /* Output blank line for convenience here */
    GC_COND_LOG_PRINTF(
              "\n--> Marking for collection #%lu after %lu allocated bytes\n",
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots. */
    /* Minimize junk left in my registers and on the stack */
    GC_clear_a_few_frames();
    GC_noop6(0,0,0,0,0,0);

    GC_initiate_gc();
    for (i = 0;;i++) {
      if ((*stop_func)()) {
        GC_COND_LOG_PRINTF("Abandoned stopped marking after"
                           " %u iterations\n", i);
        GC_deficit = i;     /* Give the mutator a chance. */
#       ifdef THREAD_LOCAL_ALLOC
          GC_world_stopped = FALSE;
#       endif
        START_WORLD();
        return(FALSE);
      }
      if (GC_mark_some(GC_approx_sp())) break;
    }

    GC_gc_no++;
    GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB"
                     IF_USE_MUNMAP(" (+ %lu KiB unmapped)") "\n",
                     (unsigned long)GC_gc_no, (long)GC_bytes_found,
                     TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
                     COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)));

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
      (*GC_check_heap)();
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = FALSE;
#   endif
    START_WORLD();
#   ifndef SMALL_CONFIG
      if (GC_PRINT_STATS_FLAG) {
        unsigned long time_diff;
        unsigned total_time, divisor;
        GET_TIME(current_time);
        time_diff = MS_TIME_DIFF(current_time,start_time);

        /* Compute new world-stop delay total time */
        total_time = world_stopped_total_time;
        divisor = world_stopped_total_divisor;
        if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
          /* Halve values if overflow occurs */
          total_time >>= 1;
          divisor >>= 1;
        }
        total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
        /* Update old world_stopped_total_time and its divisor */
        world_stopped_total_time = total_time;
        world_stopped_total_divisor = ++divisor;

        GC_ASSERT(divisor != 0);
        GC_log_printf(
                "World-stopped marking took %lu msecs (%u in average)\n",
                time_diff, total_time / divisor);
      }
#   endif
    return(TRUE);
}
/* Set all mark bits for the free list whose first entry is q. */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
    struct hblk *h, *last_h;
    hdr *hhdr;
    IF_PER_OBJ(size_t sz;)
    unsigned bit_no;

    if (q != NULL) {
      h = HBLKPTR(q);
      last_h = h;
      hhdr = HDR(h);
      IF_PER_OBJ(sz = hhdr->hb_sz;)

      for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
      }
    }
}

#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  /* Check that all mark bits for the free list whose first entry is    */
  /* (*pfreelist) are set.  The check is skipped if the pointer holds   */
  /* a special value.                                                   */
  void GC_check_fl_marks(void **pfreelist)
  {
#   ifdef AO_HAVE_load_acquire_read
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
            /* Atomic operations are used because the world is running. */
      AO_t *prev;
      AO_t *p;

      if ((word)list <= HBLKSIZE) return;

      prev = (AO_t *)pfreelist;
      for (p = list; p != NULL;) {
        AO_t *next;

        if (!GC_is_marked(p)) {
          ABORT_ARG2("Unmarked local free list entry",
                     ": object %p on list %p", (void *)p, (void *)list);
        }

        /* While traversing the free-list, it re-reads the pointer to   */
        /* the current node before accepting its next pointer and       */
        /* bails out if the latter has changed.  That way, it won't     */
        /* try to follow a pointer which might have been modified       */
        /* after the object was returned to the client.  It might       */
        /* perform the mark-check on the just allocated object but      */
        /* that should be harmless.                                     */
        next = (AO_t *)AO_load_acquire_read(p);
        if (AO_load(prev) != (AO_t)p)
          break;
        prev = p;
        p = next;
      }
#   else
      /* FIXME: Not implemented (just skipped). */
      (void)pfreelist;
#   endif
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
/* Clear all mark bits for the free list whose first entry is q.        */
/* Decrement GC_bytes_found by number of bytes on free list.            */
STATIC void GC_clear_fl_marks(ptr_t q)
{
    struct hblk *h, *last_h;
    hdr *hhdr;
    size_t sz;
    unsigned bit_no;

    if (q != NULL) {
      h = HBLKPTR(q);
      last_h = h;
      hhdr = HDR(h);
      sz = hhdr->hb_sz;  /* Normally set only once. */

      for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks || !GC_parallel) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        GC_bytes_found -= sz;

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;
        }
      }
    }
}

#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  void GC_check_tls(void);
#endif

GC_on_heap_resize_proc GC_on_heap_resize = 0;

/* Used for logging only. */
GC_INLINE int GC_compute_heap_usage_percent(void)
{
  word used = GC_composite_in_use + GC_atomic_in_use;
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
  return used >= heap_sz ? 0 : used < ((word)-1) / 100 ?
         (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
}
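
/*
 * Note on the expression above (explanatory, not from the original source):
 * when "used" is small enough that used * 100 cannot overflow a word, the
 * more precise (used * 100) / heap_sz form is taken; otherwise the division
 * happens first, as used / (heap_sz / 100), trading a little precision for
 * overflow safety.  E.g. with used = 300 MiB and heap_sz = 400 MiB on a
 * 64-bit word, the first form applies and yields 75.
 */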
/* Finish up a collection.  Assumes mark bits are consistent, lock is   */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE finalize_time = 0;
      CLOCK_TYPE done_time;
#   endif

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        /* Check that we marked some of our own data.  */
        /* FIXME: Add more checks.                     */
        GC_check_tls();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
      if (GC_bytes_found > 0)
        GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
#   endif
    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
        if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
          GC_print_address_map();
        }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
      word size;        /* current object size */
      unsigned kind;
      ptr_t q;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_set_fl_marks(q);
        }
      }
      GC_start_reclaim(TRUE);
        /* The above just checks; it doesn't really reclaim anything.   */
    }

#   ifndef GC_NO_FINALIZATION
      GC_finalize();
#   endif
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     elif !defined(SMALL_CONFIG)
        GC_err_printf("Back height not available: "
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }

    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
                          (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n",
                     GC_compute_heap_usage_percent(),
                     TO_KiB_UL(GC_composite_in_use),
                     TO_KiB_UL(GC_atomic_in_use));
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
                            > min_bytes_allocd();
    }

    GC_VERBOSE_LOG_PRINTF("Immediately reclaimed %ld bytes, heapsize:"
                          " %lu bytes" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
                          (long)GC_bytes_found,
                          (unsigned long)GC_heapsize /*, */
                          COMMA_IF_USE_MUNMAP((unsigned long)
                                              GC_unmapped_bytes));

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_dropped = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;
    IF_USE_MUNMAP(GC_unmap_old());

#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(done_time);
#       ifndef GC_NO_FINALIZATION
          /* A convenient place to output finalization statistics. */
          GC_print_finalization_stats();
#       endif
        GC_log_printf("Finalize plus initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
      }
#   endif
}
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
                                         GC_bool force_unmap GC_ATTR_UNUSED)
{
    GC_bool result;
    IF_USE_MUNMAP(int old_unmap_threshold;)
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    DISABLE_CANCEL(cancel_state);
#   ifdef USE_MUNMAP
      old_unmap_threshold = GC_unmap_threshold;
      if (force_unmap ||
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
        GC_unmap_threshold = 1; /* unmap as much as possible */
#   endif
    ENTER_GC();
    /* Minimize junk left in my registers */
    GC_noop6(0,0,0,0,0,0);
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
                                     GC_default_stop_func);
    EXIT_GC();
    IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    if (result) {
      if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
    }
    return(result);
}

/* Externally callable routines to invoke full, stop-the-world collection. */
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
{
    GC_ASSERT(stop_func != 0);
    return (int)GC_try_to_collect_general(stop_func, FALSE);
}

GC_API void GC_CALL GC_gcollect(void)
{
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
    /* while holding the allocation lock (to prevent data races).       */
    (void)GC_try_to_collect_general(0, FALSE);
    if (GC_have_errors) GC_print_all_errors();
}

GC_API void GC_CALL GC_gcollect_and_unmap(void)
{
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
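
/*
 * Illustrative client usage (not part of this file): GC_gcollect() forces a
 * full collection unconditionally, GC_gcollect_and_unmap() additionally
 * returns unused pages to the OS, and GC_try_to_collect() lets the caller
 * bound the pause with its own stop predicate, e.g.
 *
 *   static int my_deadline_reached(void) { return elapsed_ms() > 10; }
 *   ...
 *   if (!GC_try_to_collect(my_deadline_reached))
 *     handle_abandoned_collection();
 *
 * where my_deadline_reached, elapsed_ms and handle_abandoned_collection are
 * hypothetical application-side helpers.
 */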
GC_INNER word GC_n_heap_sects = 0;
                        /* Number of sections currently in heap. */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER word GC_n_memory = 0;
                        /* Number of GET_MEM allocated memory sections. */
#endif

#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory.    */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.           */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    while ((word)p <= HBLKSIZE) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (endp <= (word)p) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely.  Can't add it.  This will        */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((word)p <= (word)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if ((word)p + bytes >= (word)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
}

#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
              (unsigned long)GC_heapsize /*, */
              COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %lu/%lu blacklisted\n",
                i, start, start + len,
                (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;

GC_INLINE word GC_max(word x, word y)
{
    return(x > y? x : y);
}

GC_INLINE word GC_min(word x, word y)
{
    return(x < y? x : y);
}

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}
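
/*
 * Illustrative client usage (not part of this file): a process that must
 * never let the collected heap grow past, say, 256 MiB can set a hard limit
 * before allocating:
 *
 *   GC_set_max_heap_size(256 * 1024 * 1024);
 *
 * Once the limit would be exceeded, heap expansion fails and allocations
 * may return NULL after the collector has retried (see GC_max_retries and
 * GC_collect_or_expand below).
 */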
GC_word GC_max_retries = 0;

/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (tiny values are rounded up).   */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
      {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
      }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    GC_add_to_our_memory((ptr_t)space, bytes);
    if (space == 0) {
        WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n", bytes);
        return(FALSE);
    }
    GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                      TO_KiB_UL(GC_heapsize + bytes),
                      (unsigned long)GC_bytes_allocd);
    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0
            && (word)GC_last_heap_addr < (word)space)) {
        /* Assume the heap is growing up */
        word new_limit = (word)space + bytes + expansion_slop;
        if (new_limit > (word)space) {
          GC_greatest_plausible_heap_addr =
            (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                           (word)new_limit);
        }
    } else {
        /* Heap is growing down */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space) {
          GC_least_plausible_heap_addr =
            (void *)GC_min((word)GC_least_plausible_heap_addr,
                           (word)space - expansion_slop);
        }
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;

    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
    GC_collect_at_heapsize =
        GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
    if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
        GC_collect_at_heapsize = (word)(-1);
    if (GC_on_heap_resize)
        (*GC_on_heap_resize)(GC_heapsize);

    return(TRUE);
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Argument is in bytes.  Includes GC_init() call.                        */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
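
/*
 * Illustrative client usage (not part of this file): an application that
 * knows it will need a large heap can pre-expand it at startup to avoid
 * repeated grow-and-collect cycles, e.g.
 *
 *   if (!GC_expand_hp(32 * 1024 * 1024))
 *     fprintf(stderr, "could not pre-expand the GC heap\n");
 *
 * The byte count is converted to whole heap blocks (HBLKSIZE units) and
 * rounded as described in GC_expand_hp_inner above.
 */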
word GC_fo_entries = 0; /* used also in extra/MacOS.c */

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

static word last_fo_entries = 0;
static word last_bytes_finalized = 0;

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.                     */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0)
         || (GC_fo_entries > (last_fo_entries + 500)
             && (last_bytes_finalized | GC_bytes_finalized) != 0)
         || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        last_fo_entries = GC_fo_entries;
        last_bytes_finalized = GC_bytes_finalized;
        RESTORE_CANCEL(cancel_state);
        return(TRUE);
      }
    }

    blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                        + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if (ignore_off_page) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
    }

    if (!GC_expand_hp_inner(blocks_to_get)
        && !GC_expand_hp_inner(needed_blocks)) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning). */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory! Trying to continue ...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
          WARN("Out of Memory! Heap size: %" WARN_PRIdPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return(FALSE);
      }
    } else if (GC_fail_count) {
      GC_COND_LOG_PRINTF("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return(TRUE);
}

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    if (gran == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
      if (TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
      GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand(1, FALSE, retry)) {
            EXIT_GC();
            return(0);
          }
          retry = TRUE;
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count. */
    GC_fail_count = 0;

    return(*flh);
}