
Update to bdwgc 7.7.0.d76816e.

woollybah 7 years ago
parent
commit
6679b2836c
73 changed files with 2687 additions and 2340 deletions
  1. appstub.mod/appstub.linux.c (+ 2 - 0)
  2. blitz.mod/bdwgc/allchblk.c (+ 6 - 2)
  3. blitz.mod/bdwgc/alloc.c (+ 66 - 16)
  4. blitz.mod/bdwgc/backgraph.c (+ 121 - 57)
  5. blitz.mod/bdwgc/blacklst.c (+ 2 - 2)
  6. blitz.mod/bdwgc/checksums.c (+ 1 - 42)
  7. blitz.mod/bdwgc/darwin_stop_world.c (+ 5 - 5)
  8. blitz.mod/bdwgc/dbg_mlc.c (+ 75 - 175)
  9. blitz.mod/bdwgc/dyn_load.c (+ 60 - 31)
  10. blitz.mod/bdwgc/finalize.c (+ 149 - 91)
  11. blitz.mod/bdwgc/gc_cpp.cc (+ 99 - 0)
  12. blitz.mod/bdwgc/gc_cpp.cpp (+ 2 - 0)
  13. blitz.mod/bdwgc/gcj_mlc.c (+ 22 - 12)
  14. blitz.mod/bdwgc/headers.c (+ 40 - 31)
  15. blitz.mod/bdwgc/ia64_save_regs_in_stack.s (+ 11 - 0)
  16. blitz.mod/bdwgc/include/ec.h (+ 1 - 1)
  17. blitz.mod/bdwgc/include/gc.h (+ 70 - 43)
  18. blitz.mod/bdwgc/include/gc_alloc_ptrs.h (+ 40 - 0)
  19. blitz.mod/bdwgc/include/gc_allocator.h (+ 102 - 65)
  20. blitz.mod/bdwgc/include/gc_config_macros.h (+ 5 - 4)
  21. blitz.mod/bdwgc/include/gc_cpp.h (+ 90 - 37)
  22. blitz.mod/bdwgc/include/gc_inline.h (+ 28 - 12)
  23. blitz.mod/bdwgc/include/gc_mark.h (+ 1 - 1)
  24. blitz.mod/bdwgc/include/include.am (+ 2 - 4)
  25. blitz.mod/bdwgc/include/new_gc_alloc.h (+ 60 - 28)
  26. blitz.mod/bdwgc/include/private/darwin_stop_world.h (+ 5 - 6)
  27. blitz.mod/bdwgc/include/private/dbg_mlc.h (+ 15 - 11)
  28. blitz.mod/bdwgc/include/private/gc_atomic_ops.h (+ 12 - 0)
  29. blitz.mod/bdwgc/include/private/gc_hdrs.h (+ 10 - 15)
  30. blitz.mod/bdwgc/include/private/gc_locks.h (+ 17 - 37)
  31. blitz.mod/bdwgc/include/private/gc_pmark.h (+ 67 - 40)
  32. blitz.mod/bdwgc/include/private/gc_priv.h (+ 119 - 108)
  33. blitz.mod/bdwgc/include/private/gcconfig.h (+ 159 - 279)
  34. blitz.mod/bdwgc/include/private/pthread_stop_world.h (+ 2 - 7)
  35. blitz.mod/bdwgc/include/private/pthread_support.h (+ 13 - 13)
  36. blitz.mod/bdwgc/include/private/specific.h (+ 16 - 11)
  37. blitz.mod/bdwgc/include/private/thread_local_alloc.h (+ 18 - 20)
  38. blitz.mod/bdwgc/include/weakpointer.h (+ 0 - 217)
  39. blitz.mod/bdwgc/libatomic_ops/.travis.yml (+ 36 - 26)
  40. blitz.mod/bdwgc/libatomic_ops/AUTHORS (+ 2 - 0)
  41. blitz.mod/bdwgc/libatomic_ops/COPYING (+ 20 - 21)
  42. blitz.mod/bdwgc/libatomic_ops/ChangeLog (+ 12 - 0)
  43. blitz.mod/bdwgc/libatomic_ops/Makefile.am (+ 3 - 3)
  44. blitz.mod/bdwgc/libatomic_ops/README.md (+ 1 - 1)
  45. blitz.mod/bdwgc/libatomic_ops/configure.ac (+ 18 - 5)
  46. blitz.mod/bdwgc/libatomic_ops/src/Makefile.am (+ 1 - 1)
  47. blitz.mod/bdwgc/libatomic_ops/src/atomic_ops.h (+ 21 - 37)
  48. blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/mips.h (+ 12 - 3)
  49. blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/nios2.h (+ 0 - 14)
  50. blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/riscv.h (+ 22 - 0)
  51. blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/x86.h (+ 9 - 1)
  52. blitz.mod/bdwgc/mach_dep.c (+ 14 - 8)
  53. blitz.mod/bdwgc/malloc.c (+ 8 - 23)
  54. blitz.mod/bdwgc/mallocx.c (+ 39 - 10)
  55. blitz.mod/bdwgc/mark.c (+ 43 - 72)
  56. blitz.mod/bdwgc/mark_rts.c (+ 102 - 18)
  57. blitz.mod/bdwgc/misc.c (+ 57 - 23)
  58. blitz.mod/bdwgc/os_dep.c (+ 257 - 276)
  59. blitz.mod/bdwgc/pthread_start.c (+ 1 - 0)
  60. blitz.mod/bdwgc/pthread_stop_world.c (+ 207 - 174)
  61. blitz.mod/bdwgc/pthread_support.c (+ 49 - 54)
  62. blitz.mod/bdwgc/ptr_chck.c (+ 8 - 6)
  63. blitz.mod/bdwgc/reclaim.c (+ 15 - 15)
  64. blitz.mod/bdwgc/sparc_mach_dep.S (+ 61 - 0)
  65. blitz.mod/bdwgc/sparc_netbsd_mach_dep.s (+ 34 - 0)
  66. blitz.mod/bdwgc/sparc_sunos4_mach_dep.s (+ 32 - 0)
  67. blitz.mod/bdwgc/specific.c (+ 21 - 11)
  68. blitz.mod/bdwgc/stubborn.c (+ 0 - 56)
  69. blitz.mod/bdwgc/thread_local_alloc.c (+ 15 - 20)
  70. blitz.mod/bdwgc/typd_mlc.c (+ 25 - 21)
  71. blitz.mod/bdwgc/win32_threads.c (+ 27 - 17)
  72. blitz.mod/blitz.bmx (+ 0 - 1)
  73. blitz.mod/blitz_gc.c (+ 2 - 0)

+ 2 - 0
appstub.mod/appstub.linux.c

@@ -22,6 +22,7 @@ int SDL_main( int argc,char *argv[] ){
 	return 0;
 }
 
+#ifndef __EMSCRIPTEN__
 size_t bmx_process_vm_readv(size_t dataSize, void * pointer, void * buffer) {
 
 	struct iovec local;
@@ -38,3 +39,4 @@ size_t bmx_process_vm_readv(size_t dataSize, void * pointer, void * buffer) {
 	
 	return result;
 }
+#endif

+ 6 - 2
blitz.mod/bdwgc/allchblk.c

@@ -250,7 +250,7 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
 
 #   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                          */
-     /* We set it to the smallest value such that sz * inv_sz > 2**32   */
+     /* We set it to the smallest value such that sz * inv_sz >= 2**32  */
      /* This may be more precision than necessary.                      */
       if (byte_sz > MAXOBJBYTES) {
          hhdr -> hb_inv_sz = LARGE_INV_SZ;
@@ -265,6 +265,9 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
           inv_sz = ((unsigned)1 << 31)/byte_sz;
           inv_sz *= 2;
           while (inv_sz*byte_sz > byte_sz) ++inv_sz;
+#       endif
+#       ifdef INV_SZ_COMPUTATION_CHECK
+          GC_ASSERT(((1ULL << 32) + byte_sz - 1) / byte_sz == inv_sz);
 #       endif
         hhdr -> hb_inv_sz = inv_sz;
       }
@@ -754,7 +757,8 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
                   GC_large_alloc_warn_suppressed = 0;
                 }
                 size_avail = orig_avail;
-              } else if (size_avail == 0 && size_needed == HBLKSIZE
+              } else if (size_avail == 0
+                         && size_needed == (signed_word)HBLKSIZE
                          && IS_MAPPED(hhdr)) {
                 if (!GC_find_leak) {
                   static unsigned count = 0;
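
Side note (not part of the diff): the INV_SZ_COMPUTATION_CHECK assertion added above checks that hb_inv_sz ends up as the smallest value with byte_sz * inv_sz >= 2**32, i.e. ceil(2**32 / byte_sz). A minimal standalone C sketch of that property, with byte_sz = 48 picked only as an example size:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned byte_sz = 48;  /* arbitrary example object size */
    /* Smallest inv_sz with byte_sz * inv_sz >= 2**32, the same value the
       new assertion computes as ((1ULL << 32) + byte_sz - 1) / byte_sz.  */
    unsigned long long inv_sz = ((1ULL << 32) + byte_sz - 1) / byte_sz;

    assert(byte_sz * inv_sz >= (1ULL << 32));       /* large enough     */
    assert(byte_sz * (inv_sz - 1) < (1ULL << 32));  /* and the smallest */
    printf("byte_sz=%u -> inv_sz=%llu\n", byte_sz, inv_sz);
    return 0;
}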

+ 66 - 16
blitz.mod/bdwgc/alloc.c

@@ -32,13 +32,12 @@
  * The call GC_allocobj(i,k) ensures that the freelist for
  * kind k objects of size i points to a non-empty
  * free list. It returns a pointer to the first entry on the free list.
- * If not using thread-local allocation, GC_allocobj may be called to
- * allocate an object of small size lb (and NORMAL kind) as follows
+ * In a single-threaded world, GC_allocobj may be called to allocate
+ * an object of small size lb (and NORMAL kind) as follows
  * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
  * fills in GC_size_map if needed):
  *
  *   lg = GC_size_map[lb];
- *   LOCK();
  *   op = GC_objfreelist[lg];
  *   if (NULL == op) {
  *     op = GC_generic_malloc_inner(lb, NORMAL);
@@ -46,7 +45,6 @@
  *     GC_objfreelist[lg] = obj_link(op);
  *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  *   }
- *   UNLOCK();
  *
  * Note that this is very fast if the free list is non-empty; it should
  * only involve the execution of 4 or 5 simple instructions.
@@ -117,6 +115,9 @@ STATIC GC_bool GC_need_full_gc = FALSE;
 STATIC word GC_used_heap_size_after_full = 0;
 
 /* GC_copyright symbol is externally visible. */
+EXTERN_C_BEGIN
+extern const char * const GC_copyright[];
+EXTERN_C_END
 const char * const GC_copyright[] =
 {"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
 "Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
@@ -129,6 +130,9 @@ const char * const GC_copyright[] =
 /* Version macros are now defined in gc_version.h, which is included by */
 /* gc.h, which is included by gc_priv.h.                                */
 #ifndef GC_NO_VERSION_VAR
+  EXTERN_C_BEGIN
+  extern const unsigned GC_version;
+  EXTERN_C_END
   const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                         (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
 #endif
@@ -226,6 +230,20 @@ GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
   GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
 #endif
 
+static size_t min_bytes_allocd_minimum = 1;
+                        /* The lowest value returned by min_bytes_allocd(). */
+
+GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
+{
+    GC_ASSERT(value > 0);
+    min_bytes_allocd_minimum = value;
+}
+
+GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
+{
+    return min_bytes_allocd_minimum;
+}
+
 /* Return the minimum number of bytes that must be allocated between    */
 /* collections to amortize the collection cost.  Should be non-zero.    */
 static word min_bytes_allocd(void)
@@ -266,7 +284,8 @@ static word min_bytes_allocd(void)
     if (GC_incremental) {
       result /= 2;
     }
-    return result > 0 ? result : 1;
+    return result > min_bytes_allocd_minimum
+            ? result : min_bytes_allocd_minimum;
 }
 
 STATIC word GC_non_gc_bytes_at_gc = 0;
@@ -550,7 +569,7 @@ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
 
 /*
  * Perform n units of garbage collection work.  A unit is intended to touch
- * roughly GC_RATE pages.  Every once in a while, we do more than that.
+ * roughly GC_rate pages.  Every once in a while, we do more than that.
  * This needs to be a fairly large number with our current incremental
  * GC strategy, since otherwise we allocate too much during GC, and the
  * cleanup gets expensive.
@@ -558,6 +577,7 @@ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
 #ifndef GC_RATE
 # define GC_RATE 10
 #endif
+
 #ifndef MAX_PRIOR_ATTEMPTS
 # define MAX_PRIOR_ATTEMPTS 1
 #endif
@@ -569,16 +589,45 @@ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
 STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some  */
                           /* that we have made.                         */
 
+STATIC int GC_rate = GC_RATE;
+
+GC_API void GC_CALL GC_set_rate(int value)
+{
+    GC_ASSERT(value > 0);
+    GC_rate = value;
+}
+
+GC_API int GC_CALL GC_get_rate(void)
+{
+    return GC_rate;
+}
+
+static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
+
+GC_API void GC_CALL GC_set_max_prior_attempts(int value)
+{
+    GC_ASSERT(value >= 0);
+    max_prior_attempts = value;
+}
+
+GC_API int GC_CALL GC_get_max_prior_attempts(void)
+{
+    return max_prior_attempts;
+}
+
 GC_INNER void GC_collect_a_little_inner(int n)
 {
     IF_CANCEL(int cancel_state;)
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (GC_dont_gc) return;
+
     DISABLE_CANCEL(cancel_state);
     if (GC_incremental && GC_collection_in_progress()) {
         int i;
+        int max_deficit = GC_rate * n;
 
-        for (i = GC_deficit; i < GC_RATE*n; i++) {
+        for (i = GC_deficit; i < max_deficit; i++) {
             if (GC_mark_some((ptr_t)0)) {
                 /* Need to finish a collection */
 #               ifdef SAVE_CALL_CHAIN
@@ -588,7 +637,7 @@ GC_INNER void GC_collect_a_little_inner(int n)
                     if (GC_parallel)
                       GC_wait_for_reclaim();
 #               endif
-                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
+                if (GC_n_attempts < max_prior_attempts
                     && GC_time_limit != GC_TIME_UNLIMITED) {
 #                 ifndef NO_CLOCK
                     GET_TIME(GC_start_time);
@@ -606,8 +655,11 @@ GC_INNER void GC_collect_a_little_inner(int n)
                 break;
             }
         }
-        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
-        if (GC_deficit < 0) GC_deficit = 0;
+        if (GC_deficit > 0) {
+            GC_deficit -= max_deficit;
+            if (GC_deficit < 0)
+                GC_deficit = 0;
+        }
     } else {
         GC_maybe_gc();
     }
@@ -653,7 +705,7 @@ GC_API int GC_CALL GC_collect_a_little(void)
 #endif
 
 /*
- * Assumes lock is held.  We stop the world and mark from all roots.
+ * We stop the world and mark from all roots.
  * If stop_func() ever returns TRUE, we may fail and return FALSE.
  * Increment GC_gc_no if we succeed.
  */
@@ -664,6 +716,7 @@ STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
       CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
 #   endif
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
         GC_add_current_malloc_heap();
 #   endif
@@ -942,6 +995,7 @@ STATIC void GC_finish_collection(void)
       CLOCK_TYPE finalize_time = 0;
 #   endif
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   if defined(GC_ASSERTIONS) \
        && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
         /* Check that we marked some of our own data.           */
@@ -988,10 +1042,6 @@ STATIC void GC_finish_collection(void)
 #   ifndef GC_NO_FINALIZATION
       GC_finalize();
 #   endif
-#   ifdef STUBBORN_ALLOC
-      GC_clean_changing_list();
-#   endif
-
 #   ifndef NO_CLOCK
       if (GC_print_stats)
         GET_TIME(finalize_time);
@@ -1456,7 +1506,6 @@ GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
  * Make sure the object free list for size gran (in granules) is not empty.
  * Return a pointer to the first object on the free list.
  * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
- * Assumes we hold the allocator lock.
  */
 GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
 {
@@ -1464,6 +1513,7 @@ GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
     GC_bool tried_minor = FALSE;
     GC_bool retry = FALSE;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (gran == 0) return(0);
 
     while (*flh == 0) {
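
Usage note (illustrative, not from the commit): GC_set_rate, GC_set_max_prior_attempts and GC_set_min_bytes_allocd introduced above are new public tuning knobs. Assuming their prototypes are exported through gc.h by this update, a client sketch could look like:

#include "gc.h"

int main(void)
{
    GC_INIT();
    GC_enable_incremental();        /* GC_rate only matters for incremental GC  */
    GC_set_rate(20);                /* ~20 pages per GC_collect_a_little unit   */
    GC_set_max_prior_attempts(2);   /* extra timed attempts before ignoring the */
                                    /* time limit                               */
    GC_set_min_bytes_allocd(64 * 1024); /* floor for min_bytes_allocd()         */
    /* ... allocate with GC_MALLOC() as usual ... */
    return 0;
}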

+ 121 - 57
blitz.mod/bdwgc/backgraph.c

@@ -181,30 +181,6 @@ GC_INLINE void pop_in_progress(ptr_t p GC_ATTR_UNUSED)
                 (ptr_t)GC_REVEAL_POINTER(((oh *)(p)) -> oh_bg_ptr)
 #define SET_OH_BG_PTR(p,q) (((oh *)(p)) -> oh_bg_ptr = GC_HIDE_POINTER(q))
 
-/* Execute s once for each predecessor q of p in the points-to graph.   */
-/* s should be a bracketed statement.  We declare q.                    */
-#define FOR_EACH_PRED(q, p, s) \
-  do { \
-    ptr_t q = GET_OH_BG_PTR(p); \
-    if (!((word)q & FLAG_MANY)) { \
-      if (q && !((word)q & 1)) s \
-              /* !((word)q & 1) checks for a misinterpreted freelist link */ \
-    } else { \
-      back_edges *orig_be_ = (back_edges *)((word)q & ~FLAG_MANY); \
-      back_edges *be_ = orig_be_; \
-      int local_; \
-      word total_; \
-      word n_edges_ = be_ -> n_edges; \
-      for (total_ = 0, local_ = 0; total_ < n_edges_; ++local_, ++total_) { \
-          if (local_ == MAX_IN) { \
-              be_ = be_ -> cont; \
-              local_ = 0; \
-          } \
-          q = be_ -> edges[local_]; s \
-      } \
-    } \
-  } while (0)
-
 /* Ensure that p has a back_edges structure associated with it. */
 static void ensure_struct(ptr_t p)
 {
@@ -230,7 +206,7 @@ static void ensure_struct(ptr_t p)
 /* q are pointers to the object base, i.e. pointers to an oh.           */
 static void add_edge(ptr_t p, ptr_t q)
 {
-    ptr_t old_back_ptr = GET_OH_BG_PTR(q);
+    ptr_t pred = GET_OH_BG_PTR(q);
     back_edges * be, *be_cont;
     word i;
 
@@ -240,7 +216,7 @@ static void add_edge(ptr_t p, ptr_t q)
       /* a pointer to a free list.  Don't overwrite it!                 */
       return;
     }
-    if (0 == old_back_ptr) {
+    if (NULL == pred) {
       static unsigned random_number = 13;
 #     define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
         /* A not very random number we use to occasionally allocate a   */
@@ -253,11 +229,37 @@ static void add_edge(ptr_t p, ptr_t q)
         if (GOT_LUCKY_NUMBER) ensure_struct(q);
         return;
     }
+
     /* Check whether it was already in the list of predecessors. */
-      FOR_EACH_PRED(pred, q, { if (p == pred) return; });
+    {
+      back_edges *e = (back_edges *)((word)pred & ~FLAG_MANY);
+      word n_edges;
+      word total;
+      int local = 0;
+
+      if (((word)pred & FLAG_MANY) != 0) {
+        n_edges = e -> n_edges;
+      } else if (pred != NULL && ((word)pred & 1) == 0) {
+        /* A misinterpreted freelist link.      */
+        n_edges = 1;
+        local = -1;
+      } else {
+        n_edges = 0;
+      }
+      for (total = 0; total < n_edges; ++total) {
+        if (local == MAX_IN) {
+          e = e -> cont;
+          local = 0;
+        }
+        if (local >= 0)
+          pred = e -> edges[local++];
+        if (pred == p)
+          return;
+      }
+    }
+
     ensure_struct(q);
-    old_back_ptr = GET_OH_BG_PTR(q);
-    be = (back_edges *)((word)old_back_ptr & ~FLAG_MANY);
+    be = (back_edges *)((word)GET_OH_BG_PTR(q) & ~FLAG_MANY);
     for (i = be -> n_edges, be_cont = be; i > MAX_IN; i -= MAX_IN)
         be_cont = be_cont -> cont;
     if (i == MAX_IN) {
@@ -357,6 +359,7 @@ static void add_back_edges(ptr_t p, size_t n_bytes, word gc_descr)
 /* Does not examine mark bits.  Can be called before GC.                */
 GC_INNER void GC_build_back_graph(void)
 {
+  GC_ASSERT(I_HOLD_LOCK());
   GC_apply_to_each_object(add_back_edges);
 }
 
@@ -366,39 +369,68 @@ GC_INNER void GC_build_back_graph(void)
 static word backwards_height(ptr_t p)
 {
   word result;
-  ptr_t back_ptr = GET_OH_BG_PTR(p);
+  ptr_t pred = GET_OH_BG_PTR(p);
   back_edges *be;
 
-  if (0 == back_ptr) return 1;
-  if (!((word)back_ptr & FLAG_MANY)) {
+  if (NULL == pred)
+    return 1;
+  if (((word)pred & FLAG_MANY) == 0) {
     if (is_in_progress(p)) return 0; /* DFS back edge, i.e. we followed */
                                      /* an edge to an object already    */
                                      /* on our stack: ignore            */
     push_in_progress(p);
-    result = backwards_height(back_ptr)+1;
+    result = backwards_height(pred) + 1;
     pop_in_progress(p);
     return result;
   }
-  be = (back_edges *)((word)back_ptr & ~FLAG_MANY);
+  be = (back_edges *)((word)pred & ~FLAG_MANY);
   if (be -> height >= 0 && be -> height_gc_no == (unsigned short)GC_gc_no)
       return be -> height;
   /* Ignore back edges in DFS */
     if (be -> height == HEIGHT_IN_PROGRESS) return 0;
   result = (be -> height > 0? be -> height : 1);
   be -> height = HEIGHT_IN_PROGRESS;
-  FOR_EACH_PRED(q, p, {
-    word this_height;
-    if (GC_is_marked(q) && !(FLAG_MANY & (word)GET_OH_BG_PTR(p))) {
-      GC_COND_LOG_PRINTF("Found bogus pointer from %p to %p\n",
-                         (void *)q, (void *)p);
-        /* Reachable object "points to" unreachable one.                */
-        /* Could be caused by our lax treatment of GC descriptors.      */
-      this_height = 1;
-    } else {
-        this_height = backwards_height(q);
-    }
-    if (this_height >= result) result = this_height + 1;
-  });
+
+  {
+      back_edges *e = be;
+      word n_edges;
+      word total;
+      int local = 0;
+
+      if (((word)pred & FLAG_MANY) != 0) {
+        n_edges = e -> n_edges;
+      } else if (pred != NULL && ((word)pred & 1) == 0) {
+        /* A misinterpreted freelist link.      */
+        n_edges = 1;
+        local = -1;
+      } else {
+        n_edges = 0;
+      }
+      for (total = 0; total < n_edges; ++total) {
+        word this_height;
+        if (local == MAX_IN) {
+          e = e -> cont;
+          local = 0;
+        }
+        if (local >= 0)
+          pred = e -> edges[local++];
+
+        /* Execute the following once for each predecessor pred of p    */
+        /* in the points-to graph.                                      */
+        if (GC_is_marked(pred) && ((word)GET_OH_BG_PTR(p) & FLAG_MANY) == 0) {
+          GC_COND_LOG_PRINTF("Found bogus pointer from %p to %p\n",
+                             (void *)pred, (void *)p);
+            /* Reachable object "points to" unreachable one.            */
+            /* Could be caused by our lax treatment of GC descriptors.  */
+          this_height = 1;
+        } else {
+          this_height = backwards_height(pred);
+        }
+        if (this_height >= result)
+          result = this_height + 1;
+      }
+  }
+
   be -> height = result;
   be -> height_gc_no = (unsigned short)GC_gc_no;
   return result;
@@ -430,17 +462,43 @@ static void update_max_height(ptr_t p, size_t n_bytes GC_ATTR_UNUSED,
       be = (back_edges *)((word)back_ptr & ~FLAG_MANY);
       if (be -> height != HEIGHT_UNKNOWN) p_height = be -> height;
     }
-    FOR_EACH_PRED(q, p, {
-      if (!GC_is_marked(q) && GC_HAS_DEBUG_INFO(q)) {
-        word q_height;
-
-        q_height = backwards_height(q);
-        if (q_height > p_height) {
-          p_height = q_height;
-          p_deepest_obj = q;
+
+    {
+      ptr_t pred = GET_OH_BG_PTR(p);
+      back_edges *e = (back_edges *)((word)pred & ~FLAG_MANY);
+      word n_edges;
+      word total;
+      int local = 0;
+
+      if (((word)pred & FLAG_MANY) != 0) {
+        n_edges = e -> n_edges;
+      } else if (pred != NULL && ((word)pred & 1) == 0) {
+        /* A misinterpreted freelist link.      */
+        n_edges = 1;
+        local = -1;
+      } else {
+        n_edges = 0;
+      }
+      for (total = 0; total < n_edges; ++total) {
+        if (local == MAX_IN) {
+          e = e -> cont;
+          local = 0;
+        }
+        if (local >= 0)
+          pred = e -> edges[local++];
+
+        /* Execute the following once for each predecessor pred of p    */
+        /* in the points-to graph.                                      */
+        if (!GC_is_marked(pred) && GC_HAS_DEBUG_INFO(pred)) {
+          word this_height = backwards_height(pred);
+          if (this_height > p_height) {
+            p_height = this_height;
+            p_deepest_obj = pred;
+          }
         }
       }
-    });
+    }
+
     if (p_height > 0) {
       /* Remember the height for next time. */
         if (be == 0) {
@@ -463,6 +521,7 @@ STATIC word GC_max_max_height = 0;
 
 GC_INNER void GC_traverse_back_graph(void)
 {
+  GC_ASSERT(I_HOLD_LOCK());
   GC_max_height = 0;
   GC_apply_to_each_object(update_max_height);
   if (0 != GC_deepest_obj)
@@ -471,14 +530,19 @@ GC_INNER void GC_traverse_back_graph(void)
 
 void GC_print_back_graph_stats(void)
 {
+  GC_ASSERT(I_HOLD_LOCK());
   GC_printf("Maximum backwards height of reachable objects at GC %lu is %lu\n",
             (unsigned long) GC_gc_no, (unsigned long)GC_max_height);
   if (GC_max_height > GC_max_max_height) {
+    ptr_t obj = GC_deepest_obj;
+
     GC_max_max_height = GC_max_height;
+    UNLOCK();
     GC_err_printf(
             "The following unreachable object is last in a longest chain "
             "of unreachable objects:\n");
-    GC_print_heap_obj(GC_deepest_obj);
+    GC_print_heap_obj(obj);
+    LOCK();
   }
   GC_COND_LOG_PRINTF("Needed max total of %d back-edge structs\n",
                      GC_n_back_edge_structs);
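
Related note (illustrative, not from the commit): the back-graph code above is only compiled into collectors built with MAKE_BACK_GRAPH, and the associated back-pointer queries are exposed via gc_backptr.h. A minimal sketch, assuming a debug-enabled build (KEEP_BACK_PTRS):

#define GC_DEBUG
#include "gc.h"
#include "gc_backptr.h"

int main(void)
{
    void *p;

    GC_INIT();
    p = GC_MALLOC(64);          /* debug-allocated object               */
    GC_gcollect();              /* refresh back-pointer information     */
    GC_print_backtrace(p);      /* print a chain of objects that keep p */
    return 0;
}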

+ 2 - 2
blitz.mod/bdwgc/blacklst.c

@@ -263,7 +263,7 @@ struct hblk * GC_is_black_listed(struct hblk *h, word len)
 STATIC word GC_number_stack_black_listed(struct hblk *start,
                                          struct hblk *endp1)
 {
-    register struct hblk * h;
+    struct hblk * h;
     word result = 0;
 
     for (h = start; (word)h < (word)endp1; h++) {
@@ -277,7 +277,7 @@ STATIC word GC_number_stack_black_listed(struct hblk *start,
 /* Return the total number of (stack) black-listed bytes. */
 static word total_stack_black_listed(void)
 {
-    register unsigned i;
+    unsigned i;
     word total = 0;
 
     for (i = 0; i < GC_n_heap_sects; i++) {

+ 1 - 42
blitz.mod/bdwgc/checksums.c

@@ -17,9 +17,6 @@
 
 /* This is debugging code intended to verify the results of dirty bit   */
 /* computations. Works only in a single threaded environment.           */
-/* We assume that stubborn objects are changed only when they are       */
-/* enabled for writing.  (Certain kinds of writing are actually         */
-/* safe under other conditions.)                                        */
 #define NSUMS 10000
 
 #define OFFSET 0x10000
@@ -73,26 +70,8 @@ STATIC word GC_checksum(struct hblk *h)
     return(result | 0x80000000 /* doesn't look like pointer */);
 }
 
-#ifdef STUBBORN_ALLOC
-  /* Check whether a stubborn object from the given block appears on    */
-  /* the appropriate free list.                                         */
-  STATIC GC_bool GC_on_free_list(struct hblk *h)
-  {
-    hdr * hhdr = HDR(h);
-    word sz = BYTES_TO_WORDS(hhdr -> hb_sz);
-    ptr_t p;
-
-    if (sz > MAXOBJWORDS) return(FALSE);
-    for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
-        if (HBLKPTR(p) == h) return(TRUE);
-    }
-    return(FALSE);
-  }
-#endif
-
 int GC_n_dirty_errors = 0;
 int GC_n_faulted_dirty_errors = 0;
-int GC_n_changed_errors = 0;
 unsigned long GC_n_clean = 0;
 unsigned long GC_n_dirty = 0;
 
@@ -128,16 +107,6 @@ STATIC void GC_update_check_page(struct hblk *h, int index)
             /* Set breakpoint here */GC_n_dirty_errors++;
             if (was_faulted) GC_n_faulted_dirty_errors++;
         }
-#       ifdef STUBBORN_ALLOC
-          if (!HBLK_IS_FREE(hhdr)
-            && hhdr -> hb_obj_kind == STUBBORN
-            && !GC_page_was_changed(h)
-            && !GC_on_free_list(h)) {
-            /* if GC_on_free_list(h) then reclaim may have touched it   */
-            /* without any allocations taking place.                    */
-            /* Set breakpoint here */GC_n_changed_errors++;
-          }
-#       endif
     }
     pe -> new_valid = TRUE;
     pe -> block = h + OFFSET;
@@ -168,7 +137,7 @@ STATIC void GC_check_blocks(void)
     }
 }
 
-/* Should be called immediately after GC_read_dirty and GC_read_changed. */
+/* Should be called immediately after GC_read_dirty.    */
 void GC_check_dirty(void)
 {
     int index;
@@ -180,7 +149,6 @@ void GC_check_dirty(void)
 
     GC_n_dirty_errors = 0;
     GC_n_faulted_dirty_errors = 0;
-    GC_n_changed_errors = 0;
     GC_n_clean = 0;
     GC_n_dirty = 0;
 
@@ -201,15 +169,6 @@ out:
         GC_err_printf("Found %d dirty bit errors (%d were faulted)\n",
                       GC_n_dirty_errors, GC_n_faulted_dirty_errors);
     }
-    if (GC_n_changed_errors > 0) {
-        GC_err_printf("Found %d changed bit errors\n", GC_n_changed_errors);
-        GC_err_printf(
-                "These may be benign (provoked by nonpointer changes)\n");
-#       ifdef THREADS
-          GC_err_printf(
-            "Also expect 1 per thread currently allocating a stubborn obj\n");
-#       endif
-    }
     for (i = 0; i < GC_n_faulted; ++i) {
         GC_faulted[i] = 0; /* Don't expose block pointers to GC */
     }

+ 5 - 5
blitz.mod/bdwgc/darwin_stop_world.c

@@ -208,7 +208,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
       ABORT("thread_get_state failed");
 
 #   if defined(I386)
-      lo = (void *)state.THREAD_FLD(esp);
+      lo = (ptr_t)state.THREAD_FLD(esp);
 #     ifndef DARWIN_DONT_PARSE_STACK
         *phi = GC_FindTopOfStack(state.THREAD_FLD(esp));
 #     endif
@@ -221,7 +221,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
       GC_push_one(state.THREAD_FLD(ebp));
 
 #   elif defined(X86_64)
-      lo = (void *)state.THREAD_FLD(rsp);
+      lo = (ptr_t)state.THREAD_FLD(rsp);
 #     ifndef DARWIN_DONT_PARSE_STACK
         *phi = GC_FindTopOfStack(state.THREAD_FLD(rsp));
 #     endif
@@ -243,7 +243,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
       GC_push_one(state.THREAD_FLD(r15));
 
 #   elif defined(POWERPC)
-      lo = (void *)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);
+      lo = (ptr_t)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);
 #     ifndef DARWIN_DONT_PARSE_STACK
         *phi = GC_FindTopOfStack(state.THREAD_FLD(r1));
 #     endif
@@ -280,7 +280,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
       GC_push_one(state.THREAD_FLD(r31));
 
 #   elif defined(ARM32)
-      lo = (void *)state.THREAD_FLD(sp);
+      lo = (ptr_t)state.THREAD_FLD(sp);
 #     ifndef DARWIN_DONT_PARSE_STACK
         *phi = GC_FindTopOfStack(state.THREAD_FLD(r[7])); /* fp */
 #     endif
@@ -296,7 +296,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
       GC_push_one(state.THREAD_FLD(lr));
 
 #   elif defined(AARCH64)
-      lo = (void *)state.THREAD_FLD(sp);
+      lo = (ptr_t)state.THREAD_FLD(sp);
 #     ifndef DARWIN_DONT_PARSE_STACK
         *phi = GC_FindTopOfStack(state.THREAD_FLD(fp));
 #     endif

+ 75 - 175
blitz.mod/bdwgc/dbg_mlc.c

@@ -90,7 +90,12 @@
   GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest)
   {
     if (GC_HAS_DEBUG_INFO(dest)) {
-      ((oh *)dest) -> oh_back_ptr = HIDE_BACK_PTR(source);
+#     ifdef PARALLEL_MARK
+        AO_store((volatile AO_t *)&((oh *)dest)->oh_back_ptr,
+                 (AO_t)HIDE_BACK_PTR(source));
+#     else
+        ((oh *)dest) -> oh_back_ptr = HIDE_BACK_PTR(source);
+#     endif
     }
   }
 
@@ -119,7 +124,7 @@
       if (!hdr) ABORT("Invalid GC_get_back_ptr_info argument");
 #   endif
     if (!GC_HAS_DEBUG_INFO((ptr_t) hdr)) return GC_NO_SPACE;
-    bp = GC_REVEAL_POINTER(hdr -> oh_back_ptr);
+    bp = (ptr_t)GC_REVEAL_POINTER(hdr -> oh_back_ptr);
     if (MARKED_FOR_FINALIZATION == bp) return GC_FINALIZER_REFD;
     if (MARKED_FROM_REGISTER == bp) return GC_REFD_FROM_REG;
     if (NOT_MARKED == bp) return GC_UNREFERENCED;
@@ -190,7 +195,7 @@
     ptr_t result;
     ptr_t base;
     do {
-      result = GC_generate_random_heap_address();
+      result = (ptr_t)GC_generate_random_heap_address();
       base = (ptr_t)GC_base(result);
     } while (NULL == base || !GC_is_marked(base));
     return result;
@@ -205,7 +210,7 @@
     size_t offset;
     void *base;
 
-    GC_print_heap_obj(GC_base(current));
+    GC_print_heap_obj((ptr_t)GC_base(current));
 
     for (i = 0; ; ++i) {
       source = GC_get_back_ptr_info(current, &base, &offset);
@@ -231,7 +236,7 @@
         case GC_REFD_FROM_HEAP:
           GC_err_printf("offset %ld in object:\n", (long)offset);
           /* Take GC_base(base) to get real base, i.e. header. */
-          GC_print_heap_obj(GC_base(base));
+          GC_print_heap_obj((ptr_t)GC_base(base));
           break;
         default:
           GC_err_printf("INTERNAL ERROR: UNEXPECTED SOURCE!!!!\n");
@@ -267,13 +272,12 @@
 # define CROSSES_HBLK(p, sz) \
         (((word)((p) + sizeof(oh) + (sz) - 1) ^ (word)(p)) >= HBLKSIZE)
 
-/* Store debugging info into p.  Return displaced pointer.         */
-/* This version assumes we do hold the allocation lock.            */
-STATIC void *GC_store_debug_info_inner(void *p, word sz GC_ATTR_UNUSED,
-                                       const char *string, int linenum)
+GC_INNER void *GC_store_debug_info_inner(void *p, word sz GC_ATTR_UNUSED,
+                                         const char *string, int linenum)
 {
     word * result = (word *)((oh *)p + 1);
 
+    GC_ASSERT(I_HOLD_LOCK());
     GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
     GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK((ptr_t)p, sz)));
 #   ifdef KEEP_BACK_PTRS
@@ -293,14 +297,24 @@ STATIC void *GC_store_debug_info_inner(void *p, word sz GC_ATTR_UNUSED,
     return result;
 }
 
-GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string,
-                                   int linenum)
+/* Check the allocation is successful, store debugging info into p,     */
+/* start the debugging mode (if not yet), and return displaced pointer. */
+static void *store_debug_info(void *p, size_t lb,
+                              const char *fn, GC_EXTRA_PARAMS)
 {
-    ptr_t result;
+    void *result;
     DCL_LOCK_STATE;
 
+    if (NULL == p) {
+        GC_err_printf("%s(%lu) returning NULL (%s:%d)\n",
+                      fn, (unsigned long)lb, s, i);
+        return NULL;
+    }
     LOCK();
-    result = (ptr_t)GC_store_debug_info_inner(p, sz, string, linenum);
+    if (!GC_debugging_started)
+        GC_start_debugging_inner();
+    ADD_CALL_CHAIN(p, ra);
+    result = GC_store_debug_info_inner(p, (word)lb, s, i);
     UNLOCK();
     return result;
 }
@@ -392,9 +406,6 @@ STATIC void GC_print_obj(ptr_t p)
               kind_str = "ATOMIC_UNCOLLECTABLE";
               break;
 #         endif
-          case STUBBORN:
-            kind_str = "STUBBORN";
-            break;
           default:
             kind_str = NULL;
                 /* The alternative is to use snprintf(buffer) but it is */
@@ -480,20 +491,6 @@ GC_INNER void GC_start_debugging_inner(void)
   GC_register_displacement_inner((word)sizeof(oh));
 }
 
-#ifdef THREADS
-  STATIC void GC_start_debugging(void)
-  {
-    DCL_LOCK_STATE;
-
-    LOCK();
-    if (!GC_debugging_started)
-      GC_start_debugging_inner();
-    UNLOCK();
-  }
-#else
-# define GC_start_debugging GC_start_debugging_inner
-#endif /* !THREADS */
-
 size_t GC_debug_header_size = sizeof(oh);
 
 GC_API void GC_CALL GC_debug_register_displacement(size_t offset)
@@ -541,16 +538,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc(size_t lb,
         GC_caller_func_offset(ra, &s, &i);
       }
 #   endif
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc(%lu) returning NULL (%s:%d)\n",
-                      (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb, "GC_debug_malloc", OPT_RA s, i);
 }
 
 GC_API GC_ATTR_MALLOC void * GC_CALL
@@ -558,16 +546,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
 {
     void * result = GC_malloc_ignore_off_page(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_ignore_off_page(%lu)"
-                      " returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb, "GC_debug_malloc_ignore_off_page",
+                            OPT_RA s, i);
 }
 
 GC_API GC_ATTR_MALLOC void * GC_CALL
@@ -576,47 +556,30 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
     void * result = GC_malloc_atomic_ignore_off_page(
                                 SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
-                      " returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb,
+                            "GC_debug_malloc_atomic_ignore_off_page",
+                            OPT_RA s, i);
 }
 
 STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 {
     void * result = GC_generic_malloc(SIZET_SAT_ADD(lb, DEBUG_BYTES), knd);
 
-    if (NULL == result) {
-        GC_err_printf(
-                "GC_debug_generic_malloc(%lu, %d) returning NULL (%s:%d)\n",
-                (unsigned long)lb, knd, s, i);
-        return NULL;
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb, "GC_debug_generic_malloc",
+                            OPT_RA s, i);
 }
 
 #ifdef DBG_HDRS_ALL
   /* An allocation function for internal use.  Normally internally      */
   /* allocated objects do not have debug information.  But in this      */
   /* case, we need to make sure that all objects have debug headers.    */
-  /* We assume debugging was started in collector initialization, and   */
-  /* we already hold the GC lock.                                       */
   GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k)
   {
-    void * result = GC_generic_malloc_inner(
-                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+    void * result;
 
-    if (result == 0) {
+    GC_ASSERT(I_HOLD_LOCK());
+    result = GC_generic_malloc_inner(SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+    if (NULL == result) {
         GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
                        (unsigned long) lb);
         return(0);
@@ -631,10 +594,12 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
   GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
                                                                 int k)
   {
-    void * result = GC_generic_malloc_inner_ignore_off_page(
-                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+    void * result;
 
-    if (result == 0) {
+    GC_ASSERT(I_HOLD_LOCK());
+    result = GC_generic_malloc_inner_ignore_off_page(
+                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+    if (NULL == result) {
         GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
                        (unsigned long) lb);
         return(0);
@@ -647,85 +612,38 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
   }
 #endif /* DBG_HDRS_ALL */
 
-#ifdef STUBBORN_ALLOC
-  GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_stubborn(size_t lb,
-                                                        GC_EXTRA_PARAMS)
-  {
-    void * result = GC_malloc_stubborn(SIZET_SAT_ADD(lb, DEBUG_BYTES));
-
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_stubborn(%lu)"
-                      " returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
-  }
-
-  GC_API void GC_CALL GC_debug_change_stubborn(const void *p)
-  {
-    const void * q = GC_base_C(p);
-    hdr * hhdr;
+GC_API void * GC_CALL GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
+{
+    return GC_debug_malloc(lb, OPT_RA s, i);
+}
 
-    if (q == 0) {
-        ABORT_ARG1("GC_debug_change_stubborn: bad arg", ": %p", p);
-    }
-    hhdr = HDR(q);
-    if (hhdr -> hb_obj_kind != STUBBORN) {
-        ABORT_ARG1("GC_debug_change_stubborn: arg not stubborn", ": %p", p);
-    }
-    GC_change_stubborn(q);
-  }
+GC_API void GC_CALL GC_debug_change_stubborn(
+                                const void * p GC_ATTR_UNUSED) {}
 
-  GC_API void GC_CALL GC_debug_end_stubborn_change(const void *p)
-  {
+GC_API void GC_CALL GC_debug_end_stubborn_change(const void *p)
+{
     const void * q = GC_base_C(p);
-    hdr * hhdr;
 
-    if (q == 0) {
+    if (NULL == q) {
         ABORT_ARG1("GC_debug_end_stubborn_change: bad arg", ": %p", p);
     }
-    hhdr = HDR(q);
-    if (hhdr -> hb_obj_kind != STUBBORN) {
-        ABORT_ARG1("GC_debug_end_stubborn_change: arg not stubborn",
-                   ": %p", p);
-    }
     GC_end_stubborn_change(q);
-  }
-
-#else /* !STUBBORN_ALLOC */
-
-  GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_stubborn(size_t lb,
-                                                        GC_EXTRA_PARAMS)
-  {
-    return GC_debug_malloc(lb, OPT_RA s, i);
-  }
-
-  GC_API void GC_CALL GC_debug_change_stubborn(
-                                const void * p GC_ATTR_UNUSED) {}
+}
 
-  GC_API void GC_CALL GC_debug_end_stubborn_change(
-                                const void * p GC_ATTR_UNUSED) {}
-#endif /* !STUBBORN_ALLOC */
+GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void *p, const void *q)
+{
+    *(void **)GC_is_visible(p) = GC_is_valid_displacement((void *)q);
+    GC_debug_end_stubborn_change(p);
+    REACHABLE_AFTER_DIRTY(q);
+}
 
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_atomic(size_t lb,
                                                             GC_EXTRA_PARAMS)
 {
     void * result = GC_malloc_atomic(SIZET_SAT_ADD(lb, DEBUG_BYTES));
 
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_atomic(%lu) returning NULL (%s:%d)\n",
-                      (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb, "GC_debug_malloc_atomic",
+                            OPT_RA s, i);
 }
 
 GC_API GC_ATTR_MALLOC char * GC_CALL GC_debug_strdup(const char *str,
@@ -796,16 +714,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_uncollectable(size_t lb,
     void * result = GC_malloc_uncollectable(
                                 SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
 
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_uncollectable(%lu)"
-                      " returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb, "GC_debug_malloc_uncollectable",
+                            OPT_RA s, i);
 }
 
 #ifdef GC_ATOMIC_UNCOLLECTABLE
@@ -815,16 +725,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_uncollectable(size_t lb,
     void * result = GC_malloc_atomic_uncollectable(
                                 SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
 
-    if (result == 0) {
-        GC_err_printf("GC_debug_malloc_atomic_uncollectable(%lu)"
-                      " returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
-        return(0);
-    }
-    if (!GC_debugging_started) {
-        GC_start_debugging();
-    }
-    ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    return store_debug_info(result, lb,
+                            "GC_debug_malloc_atomic_uncollectable",
+                            OPT_RA s, i);
   }
 #endif /* GC_ATOMIC_UNCOLLECTABLE */
 
@@ -896,11 +799,17 @@ GC_API void GC_CALL GC_debug_free(void * p)
         GC_free(base);
       } else {
         word i;
-        word obj_sz = BYTES_TO_WORDS(hhdr->hb_sz - sizeof(oh));
+        word sz = hhdr -> hb_sz;
+        word obj_sz = BYTES_TO_WORDS(sz - sizeof(oh));
 
         for (i = 0; i < obj_sz; ++i)
           ((word *)p)[i] = GC_FREED_MEM_MARKER;
-        GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
+        GC_ASSERT((word *)p + i == (word *)(base + sz));
+        /* Update the counter even though the real deallocation */
+        /* is deferred.                                         */
+        LOCK();
+        GC_bytes_freed += sz;
+        UNLOCK();
       }
     } /* !GC_find_leak */
 }
@@ -952,11 +861,6 @@ GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
     }
     hhdr = HDR(base);
     switch (hhdr -> hb_obj_kind) {
-#    ifdef STUBBORN_ALLOC
-      case STUBBORN:
-        result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
-        break;
-#    endif
       case NORMAL:
         result = GC_debug_malloc(lb, OPT_RA s, i);
         break;
@@ -994,10 +898,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
     GC_debug_generic_or_special_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
 {
     switch (knd) {
-#     ifdef STUBBORN_ALLOC
-        case STUBBORN:
-            return GC_debug_malloc_stubborn(lb, OPT_RA s, i);
-#     endif
         case PTRFREE:
             return GC_debug_malloc_atomic(lb, OPT_RA s, i);
         case NORMAL:
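
Usage note (illustrative, not from the commit): GC_debug_ptr_store_and_dirty defined above combines a checked pointer store with a dirty-page notification. Assuming its prototype is visible through gc.h, a sketch using a hypothetical struct node type:

#define GC_DEBUG
#include "gc.h"

struct node { struct node *next; };

int main(void)
{
    struct node *a, *b;

    GC_INIT();
    a = (struct node *)GC_MALLOC(sizeof(struct node));
    b = (struct node *)GC_MALLOC(sizeof(struct node));
    /* Equivalent to a->next = b, plus pointer validity checks and marking
       the stored-to location dirty for incremental/generational modes.   */
    GC_debug_ptr_store_and_dirty(&a->next, b);
    return 0;
}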

+ 60 - 31
blitz.mod/bdwgc/dyn_load.c

@@ -131,7 +131,10 @@ STATIC GC_has_static_roots_func GC_has_static_roots = 0;
       };
 #   endif
 # else
+    EXTERN_C_BEGIN      /* Workaround missing extern "C" around _DYNAMIC */
+                        /* symbol in link.h of some Linux hosts.         */
 #   include <link.h>
+    EXTERN_C_END
 # endif
 #endif
 
@@ -161,10 +164,13 @@ STATIC GC_has_static_roots_func GC_has_static_roots = 0;
 
 #if defined(SOLARISDL) && !defined(USE_PROC_FOR_LIBRARIES)
 
-STATIC struct link_map *
-GC_FirstDLOpenedLinkMap(void)
-{
-    extern ElfW(Dyn) _DYNAMIC;
+  EXTERN_C_BEGIN
+  extern ElfW(Dyn) _DYNAMIC;
+  EXTERN_C_END
+
+  STATIC struct link_map *
+  GC_FirstDLOpenedLinkMap(void)
+  {
     ElfW(Dyn) *dp;
     static struct link_map * cachedResult = 0;
     static ElfW(Dyn) *dynStructureAddr = 0;
@@ -202,7 +208,7 @@ GC_FirstDLOpenedLinkMap(void)
         }
     }
     return cachedResult;
-}
+  }
 
 #endif /* SOLARISDL ... */
 
@@ -240,11 +246,7 @@ GC_INNER void GC_register_dynamic_libraries(void)
               {
                 if( !(p->p_flags & PF_W) ) break;
                 start = ((char *)(p->p_vaddr)) + offset;
-                GC_add_roots_inner(
-                  start,
-                  start + p->p_memsz,
-                  TRUE
-                );
+                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
               }
               break;
             default:
@@ -303,7 +305,7 @@ static void sort_heap_sects(struct HeapSect *base, size_t number_of_elements)
     }
 }
 
-STATIC word GC_register_map_entries(char *maps)
+STATIC void GC_register_map_entries(char *maps)
 {
     char *prot;
     char *buf_ptr = maps;
@@ -321,7 +323,8 @@ STATIC word GC_register_map_entries(char *maps)
     for (;;) {
         buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, &prot,
                                      &maj_dev, 0);
-        if (buf_ptr == NULL) return 1;
+        if (NULL == buf_ptr)
+            break;
         if (prot[1] == 'w') {
             /* This is a writable mapping.  Add it to           */
             /* the root set unless it is already otherwise      */
@@ -360,7 +363,7 @@ STATIC word GC_register_map_entries(char *maps)
             if ((word)end <= (word)least_ha
                 || (word)start >= (word)greatest_ha) {
               /* The easy case; just trace entire segment */
-              GC_add_roots_inner((char *)start, (char *)end, TRUE);
+              GC_add_roots_inner(start, end, TRUE);
               continue;
             }
             /* Add sections that don't belong to us. */
@@ -378,23 +381,29 @@ STATIC word GC_register_map_entries(char *maps)
                      && (word)GC_our_memory[i].hs_start < (word)end
                      && (word)start < (word)end) {
                   if ((word)start < (word)GC_our_memory[i].hs_start)
-                    GC_add_roots_inner((char *)start,
+                    GC_add_roots_inner(start,
                                        GC_our_memory[i].hs_start, TRUE);
                   start = GC_our_memory[i].hs_start
                           + GC_our_memory[i].hs_bytes;
                   ++i;
               }
               if ((word)start < (word)end)
-                  GC_add_roots_inner((char *)start, (char *)end, TRUE);
+                  GC_add_roots_inner(start, end, TRUE);
+        } else if (prot[0] == '-' && prot[1] == '-' && prot[2] == '-') {
+            /* Even roots added statically might disappear partially    */
+            /* (e.g. the roots added by INCLUDE_LINUX_THREAD_DESCR).    */
+            GC_remove_roots_subregion(start, end);
         }
     }
-    return 1;
 }
 
 GC_INNER void GC_register_dynamic_libraries(void)
 {
-    if (!GC_register_map_entries(GC_get_maps()))
+    char *maps = GC_get_maps();
+
+    if (NULL == maps)
         ABORT("Failed to read /proc for library registration");
+    GC_register_map_entries(maps);
 }
 
 /* We now take care of the main data segment ourselves: */
@@ -419,12 +428,16 @@ GC_INNER GC_bool GC_register_main_static_data(void)
 # endif
 # ifdef HOST_ANDROID
     /* Android headers might have no such definition for some targets.  */
-    int dl_iterate_phdr(int (*cb)(struct dl_phdr_info *, size_t, void *),
-                        void *data);
+    EXTERN_C_BEGIN
+    extern int dl_iterate_phdr(int (*cb)(struct dl_phdr_info *,
+                                         size_t, void *),
+                               void *data);
+    EXTERN_C_END
 # endif
 #endif /* __GLIBC__ >= 2 || HOST_ANDROID */
 
-#if (defined(FREEBSD) && __FreeBSD__ >= 7) || defined(__DragonFly__)
+#if defined(__DragonFly__) || defined(__FreeBSD_kernel__) \
+    || (defined(FREEBSD) && __FreeBSD__ >= 7)
   /* On the FreeBSD system, any target system at major version 7 shall   */
   /* have dl_iterate_phdr; therefore, we need not make it weak as below. */
 # ifndef HAVE_DL_ITERATE_PHDR
@@ -435,7 +448,9 @@ GC_INNER GC_bool GC_register_main_static_data(void)
   /* We have the header files for a glibc that includes dl_iterate_phdr.*/
   /* It may still not be available in the library on the target system. */
   /* Thus we also treat it as a weak symbol.                            */
+  EXTERN_C_BEGIN
 # pragma weak dl_iterate_phdr
+  EXTERN_C_END
 #endif
 
 #if defined(HAVE_DL_ITERATE_PHDR)
@@ -602,8 +617,7 @@ STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
       }
 #   endif
   } else {
-      char *datastart;
-      char *dataend;
+      ptr_t datastart, dataend;
 #     ifdef DATASTART_IS_FUNC
         static ptr_t datastart_cached = (ptr_t)(word)-1;
 
@@ -611,7 +625,7 @@ STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
         if (datastart_cached == (ptr_t)(word)-1) {
           datastart_cached = DATASTART;
         }
-        datastart = (char *)datastart_cached;
+        datastart = datastart_cached;
 #     else
         datastart = DATASTART;
 #     endif
@@ -622,7 +636,7 @@ STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
           if (dataend_cached == 0) {
             dataend_cached = DATAEND;
           }
-          dataend = (char *)dataend_cached;
+          dataend = dataend_cached;
         }
 #     else
         dataend = DATAEND;
@@ -682,10 +696,12 @@ STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
 
 #endif /* !HAVE_DL_ITERATE_PHDR */
 
+EXTERN_C_BEGIN
 #ifdef __GNUC__
 # pragma weak _DYNAMIC
 #endif
 extern ElfW(Dyn) _DYNAMIC[];
+EXTERN_C_END
 
 STATIC struct link_map *
 GC_FirstDLOpenedLinkMap(void)
@@ -875,7 +891,7 @@ GC_INNER void GC_register_dynamic_libraries(void)
                                         /* Known irrelevant map entries */
             static int n_irr = 0;
             struct stat buf;
-            register int j;
+            int j;
 
             for (j = 0; j < n_irr; j++) {
                 if (map_irr[j] == start) goto irrelevant;
@@ -1016,7 +1032,9 @@ GC_INNER void GC_register_dynamic_libraries(void)
             protect = buf.Protect;
             if (buf.State == MEM_COMMIT
                 && (protect == PAGE_EXECUTE_READWRITE
-                    || protect == PAGE_READWRITE)
+                    || protect == PAGE_EXECUTE_WRITECOPY
+                    || protect == PAGE_READWRITE
+                    || protect == PAGE_WRITECOPY)
                 && (buf.Type == MEM_IMAGE
 #                   ifdef GC_REGISTER_MEM_PRIVATE
                       || (protect == PAGE_READWRITE && buf.Type == MEM_PRIVATE)
@@ -1051,9 +1069,11 @@ GC_INNER void GC_register_dynamic_libraries(void)
 
 #include <loader.h>
 
+EXTERN_C_BEGIN
 extern char *sys_errlist[];
 extern int sys_nerr;
 extern int errno;
+EXTERN_C_END
 
 GC_INNER void GC_register_dynamic_libraries(void)
 {
@@ -1139,8 +1159,10 @@ GC_INNER void GC_register_dynamic_libraries(void)
 #include <errno.h>
 #include <dl.h>
 
+EXTERN_C_BEGIN
 extern char *sys_errlist[];
 extern int sys_nerr;
+EXTERN_C_END
 
 GC_INNER void GC_register_dynamic_libraries(void)
 {
@@ -1247,7 +1269,7 @@ GC_INNER void GC_register_dynamic_libraries(void)
 /*#define DARWIN_DEBUG*/
 
 /* Writable sections generally available on Darwin.     */
-STATIC const struct {
+STATIC const struct dyld_sections_s {
     const char *seg;
     const char *sect;
 } GC_dyld_sections[] = {
@@ -1355,8 +1377,10 @@ STATIC void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr,
     }
   }
 
-# ifdef DARWIN_DEBUG
+# if defined(DARWIN_DEBUG) && !defined(NO_DEBUGGING)
+    LOCK();
     GC_print_static_roots();
+    UNLOCK();
 # endif
 }
 
@@ -1367,6 +1391,9 @@ STATIC void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
   unsigned long start, end;
   unsigned i, j;
   const struct GC_MACH_SECTION *sec;
+# if defined(DARWIN_DEBUG) && !defined(NO_DEBUGGING)
+    DCL_LOCK_STATE;
+# endif
 
   for (i = 0; i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]); i++) {
     sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
@@ -1408,8 +1435,10 @@ STATIC void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
     }
   }
 
-# ifdef DARWIN_DEBUG
+# if defined(DARWIN_DEBUG) && !defined(NO_DEBUGGING)
+    LOCK();
     GC_print_static_roots();
+    UNLOCK();
 # endif
 }
 
@@ -1527,8 +1556,8 @@ GC_INNER GC_bool GC_register_main_static_data(void)
       for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
         if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
             == PCR_IL_SegFlags_Traced_on) {
-          GC_add_roots_inner((char *)(q -> ls_addr),
-                             (char *)(q -> ls_addr) + q -> ls_bytes, TRUE);
+          GC_add_roots_inner((ptr_t)q->ls_addr,
+                             (ptr_t)q->ls_addr + q->ls_bytes, TRUE);
         }
       }
     }
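
For reference (a sketch of the assumed convention, not copied from this commit): the EXTERN_C_BEGIN / EXTERN_C_END brackets added throughout this file are expected to expand to a C-linkage block under C++ and to nothing under plain C, roughly:

#ifdef __cplusplus
# define EXTERN_C_BEGIN extern "C" {
# define EXTERN_C_END }
#else
# define EXTERN_C_BEGIN /* empty */
# define EXTERN_C_END /* empty */
#endif

EXTERN_C_BEGIN
extern int some_platform_symbol;   /* hypothetical declaration */
EXTERN_C_END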

+ 149 - 91
blitz.mod/bdwgc/finalize.c

@@ -17,6 +17,7 @@
 #include "private/gc_pmark.h"
 
 #ifndef GC_NO_FINALIZATION
+# include "javaxfc.h" /* to get GC_finalize_all() as extern "C" */
 
 /* Type of mark procedure used for marking from finalizable object.     */
 /* This procedure normally does not mark the object, only its           */
@@ -71,33 +72,48 @@ struct finalizable_object {
 
 static signed_word log_fo_table_size = -1;
 
-STATIC struct {
+STATIC struct fnlz_roots_s {
   struct finalizable_object **fo_head;
   /* List of objects that should be finalized now: */
   struct finalizable_object *finalize_now;
 } GC_fnlz_roots = { NULL, NULL };
 
+#ifdef AO_HAVE_store
+  /* Update finalize_now atomically as GC_should_invoke_finalizers does */
+  /* not acquire the allocation lock.                                   */
+# define SET_FINALIZE_NOW(fo) \
+            AO_store((volatile AO_t *)&GC_fnlz_roots.finalize_now, (AO_t)(fo))
+#else
+# define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
+#endif /* !THREADS */
+
 GC_API void GC_CALL GC_push_finalizer_structures(void)
 {
-  GC_ASSERT((word)&GC_dl_hashtbl.head % sizeof(word) == 0);
-  GC_ASSERT((word)&GC_fnlz_roots % sizeof(word) == 0);
+  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
+  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
 # ifndef GC_LONG_REFS_NOT_NEEDED
-    GC_ASSERT((word)&GC_ll_hashtbl.head % sizeof(word) == 0);
+    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
     GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
 # endif
   GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
   GC_PUSH_ALL_SYM(GC_fnlz_roots);
 }
 
-/* Double the size of a hash table. *size_ptr is the log of its current */
-/* size.  May be a no-op.                                               */
+/* Threshold of log_size to initiate full collection before growing     */
+/* a hash table.                                                        */
+#ifndef GC_ON_GROW_LOG_SIZE_MIN
+# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
+#endif
+
+/* Double the size of a hash table. *log_size_ptr is the log of its     */
+/* current size.  May be a no-op.                                       */
 /* *table is a pointer to an array of hash headers.  If we succeed, we  */
 /* update both *table and *log_size_ptr.  Lock is held.                 */
 STATIC void GC_grow_table(struct hash_chain_entry ***table,
-                          signed_word *log_size_ptr)
+                          signed_word *log_size_ptr, word *entries_ptr)
 {
-    register word i;
-    register struct hash_chain_entry *p;
+    word i;
+    struct hash_chain_entry *p;
     signed_word log_old_size = *log_size_ptr;
     signed_word log_new_size = log_old_size + 1;
     word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
@@ -106,6 +122,19 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
     struct hash_chain_entry **new_table;
 
     GC_ASSERT(I_HOLD_LOCK());
+    /* Avoid growing the table in case of at least 25% of entries can   */
+    /* be deleted by enforcing a collection.  Ignored for small tables. */
+    if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN) {
+      IF_CANCEL(int cancel_state;)
+
+      DISABLE_CANCEL(cancel_state);
+      (void)GC_try_to_collect_inner(GC_never_stop_func);
+      RESTORE_CANCEL(cancel_state);
+      /* GC_finalize might decrease entries value.  */
+      if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
+        return;
+    }
+
     new_table = (struct hash_chain_entry **)
                     GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                         (size_t)new_size * sizeof(struct hash_chain_entry *),
@@ -125,12 +154,14 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
         size_t new_hash = HASH3(real_key, new_size, log_new_size);
 
         p -> next = new_table[new_hash];
+        GC_dirty(p);
         new_table[new_hash] = p;
         p = next;
       }
     }
     *log_size_ptr = log_new_size;
     *table = new_table;
+    GC_dirty(new_table); /* entire object */
 }
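As a concrete illustration of the skip-grow test added above: with log_old_size = 10 the table has 1024 buckets, so doubling is only attempted once the entry count exceeds 1024; if the forced collection then drops entries to, say, 800, the check 800 < 1024 - (800 >> 2) = 824 holds and the table keeps its current size.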
 
 GC_API int GC_CALL GC_register_disappearing_link(void * * link)
@@ -158,7 +189,7 @@ STATIC int GC_register_disappearing_link_inner(
     if (dl_hashtbl -> log_size == -1
         || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
         GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
-                      &dl_hashtbl -> log_size);
+                      &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
 #       ifdef LINT2
           if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
 #       endif
@@ -205,8 +236,10 @@ STATIC int GC_register_disappearing_link_inner(
     new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
     new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
     dl_set_next(new_dl, dl_hashtbl -> head[index]);
+    GC_dirty(new_dl);
     dl_hashtbl -> head[index] = new_dl;
     dl_hashtbl -> entries++;
+    GC_dirty(dl_hashtbl->head + index);
     UNLOCK();
     return GC_SUCCESS;
 }
@@ -227,7 +260,6 @@ GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
 #endif
 
 /* Unregisters given link and returns the link entry to free.   */
-/* Assume the lock is held.                                     */
 GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                 struct dl_hashtbl_s *dl_hashtbl, void **link)
 {
@@ -235,6 +267,7 @@ GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
     struct disappearing_link *prev_dl = NULL;
     size_t index;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (dl_hashtbl->log_size == -1)
         return NULL; /* prevent integer shift by a negative amount */
 
@@ -245,8 +278,10 @@ GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
             /* Remove found entry from the table. */
             if (NULL == prev_dl) {
                 dl_hashtbl -> head[index] = dl_next(curr_dl);
+                GC_dirty(dl_hashtbl->head + index);
             } else {
                 dl_set_next(prev_dl, dl_next(curr_dl));
+                GC_dirty(prev_dl);
             }
             dl_hashtbl -> entries--;
             break;
@@ -288,6 +323,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
   {
     int i;
     int new_size = 0;
+    GC_bool needs_barrier = FALSE;
 
     GC_ASSERT(I_HOLD_LOCK());
     for (i = 0; i < GC_toggleref_array_size; ++i) {
@@ -305,6 +341,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
         break;
       case GC_TOGGLE_REF_STRONG:
         GC_toggleref_arr[new_size++].strong_ref = obj;
+        needs_barrier = TRUE;
         break;
       case GC_TOGGLE_REF_WEAK:
         GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
@@ -319,6 +356,8 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
             (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
       GC_toggleref_array_size = new_size;
     }
+    if (needs_barrier)
+      GC_dirty(GC_toggleref_arr); /* entire object */
   }
 
   STATIC void GC_normal_finalize_mark_proc(ptr_t);
@@ -390,6 +429,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
   static GC_bool ensure_toggleref_capacity(int capacity_inc)
   {
     GC_ASSERT(capacity_inc >= 0);
+    GC_ASSERT(I_HOLD_LOCK());
     if (NULL == GC_toggleref_arr) {
       GC_toggleref_array_capacity = 32; /* initial capacity */
       GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
@@ -433,8 +473,11 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
       if (!ensure_toggleref_capacity(1)) {
         res = GC_NO_MEMORY;
       } else {
-        GC_toggleref_arr[GC_toggleref_array_size++].strong_ref =
+        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                         is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
+        if (is_strong_ref)
+          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
+        GC_toggleref_array_size++;
       }
     }
     UNLOCK();
@@ -501,6 +544,7 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
     word curr_hidden_link;
     word new_hidden_link;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (dl_hashtbl->log_size == -1)
         return GC_NOT_FOUND; /* prevent integer shift by a negative amount */
 
@@ -539,10 +583,13 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
       dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
     } else {
       dl_set_next(prev_dl, dl_next(curr_dl));
+      GC_dirty(prev_dl);
     }
     curr_dl -> dl_hidden_link = new_hidden_link;
     dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
     dl_hashtbl -> head[new_index] = curr_dl;
+    GC_dirty(curr_dl);
+    GC_dirty(dl_hashtbl->head); /* entire object */
     return GC_SUCCESS;
   }
 
@@ -587,10 +634,8 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
 /* overflow is handled by the caller, and is not a disaster.            */
 STATIC void GC_normal_finalize_mark_proc(ptr_t p)
 {
-    hdr * hhdr = HDR(p);
-
-    PUSH_OBJ(p, hhdr, GC_mark_stack_top,
-             &(GC_mark_stack[GC_mark_stack_size]));
+    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
+                                    GC_mark_stack + GC_mark_stack_size);
 }
 
 /* This only pays very partial attention to the mark descriptor.        */
@@ -654,7 +699,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
     if (log_fo_table_size == -1
         || GC_fo_entries > ((word)1 << log_fo_table_size)) {
         GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
-                      &log_fo_table_size);
+                      &log_fo_table_size, &GC_fo_entries);
 #       ifdef LINT2
           if (log_fo_table_size < 0) ABORT("log_size is negative");
 #       endif
@@ -681,6 +726,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
             GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
           } else {
             fo_set_next(prev_fo, fo_next(curr_fo));
+            GC_dirty(prev_fo);
           }
           if (fn == 0) {
             GC_fo_entries--;
@@ -694,14 +740,18 @@ STATIC void GC_register_finalizer_inner(void * obj,
             curr_fo -> fo_fn = fn;
             curr_fo -> fo_client_data = (ptr_t)cd;
             curr_fo -> fo_mark_proc = mp;
+            GC_dirty(curr_fo);
             /* Reinsert it.  We deleted it first to maintain    */
             /* consistency in the event of a signal.            */
             if (prev_fo == 0) {
               GC_fnlz_roots.fo_head[index] = curr_fo;
             } else {
               fo_set_next(prev_fo, curr_fo);
+              GC_dirty(prev_fo);
             }
           }
+          if (NULL == prev_fo)
+            GC_dirty(GC_fnlz_roots.fo_head + index);
           UNLOCK();
 #         ifndef DBG_HDRS_ALL
             if (EXPECT(new_fo != 0, FALSE)) {
@@ -762,8 +812,10 @@ STATIC void GC_register_finalizer_inner(void * obj,
     new_fo -> fo_object_size = hhdr -> hb_sz;
     new_fo -> fo_mark_proc = mp;
     fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
+    GC_dirty(new_fo);
     GC_fo_entries++;
     GC_fnlz_roots.fo_head[index] = new_fo;
+    GC_dirty(GC_fnlz_roots.fo_head + index);
     UNLOCK();
 }
 
@@ -886,68 +938,53 @@ GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
   }
 #endif /* THREADS */
 
-#define ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr_dl, prev_dl) \
-  { \
-    size_t i; \
-    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
-                                (size_t)1 << dl_hashtbl->log_size; \
-    for (i = 0; i < dl_size; i++) { \
-      struct disappearing_link *prev_dl = NULL; \
-      curr_dl = dl_hashtbl -> head[i]; \
-      while (curr_dl) {
-
-#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
-        prev_dl = curr_dl; \
-        curr_dl = dl_next(curr_dl); \
-      } \
-    } \
-  }
-
-#define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
-  { \
-    next_dl = dl_next(curr_dl); \
-    if (NULL == prev_dl) { \
-        dl_hashtbl -> head[i] = next_dl; \
-    } else { \
-        dl_set_next(prev_dl, next_dl); \
-    } \
-    GC_clear_mark_bit(curr_dl); \
-    dl_hashtbl -> entries--; \
-    curr_dl = next_dl; \
-    continue; \
-  }
-
 GC_INLINE void GC_make_disappearing_links_disappear(
-                                struct dl_hashtbl_s* dl_hashtbl)
+                                        struct dl_hashtbl_s* dl_hashtbl,
+                                        GC_bool is_remove_dangling)
 {
-    struct disappearing_link *curr, *next;
+  size_t i;
+  size_t dl_size = dl_hashtbl->log_size == -1 ? 0
+                        : (size_t)1 << dl_hashtbl->log_size;
+  GC_bool needs_barrier = FALSE;
+
+  GC_ASSERT(I_HOLD_LOCK());
+  for (i = 0; i < dl_size; i++) {
+    struct disappearing_link *curr_dl, *next_dl;
+    struct disappearing_link *prev_dl = NULL;
 
-    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
-        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr->dl_hidden_obj);
-        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr->dl_hidden_link);
+    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
+      next_dl = dl_next(curr_dl);
+      if (is_remove_dangling) {
+        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
+                                                curr_dl->dl_hidden_link));
 
-        if (!GC_is_marked(real_ptr)) {
-            *(word *)real_link = 0;
-            GC_clear_mark_bit(curr);
-            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
+        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
+          prev_dl = curr_dl;
+          continue;
         }
-    ITERATE_DL_HASHTBL_END(curr, prev)
-}
-
-GC_INLINE void GC_remove_dangling_disappearing_links(
-                                struct dl_hashtbl_s* dl_hashtbl)
-{
-    struct disappearing_link *curr, *next;
-
-    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
-        ptr_t real_link =
-                (ptr_t)GC_base(GC_REVEAL_POINTER(curr->dl_hidden_link));
-
-        if (NULL != real_link && !GC_is_marked(real_link)) {
-            GC_clear_mark_bit(curr);
-            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
+      } else {
+        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
+                                        curr_dl->dl_hidden_obj)), TRUE)) {
+          prev_dl = curr_dl;
+          continue;
         }
-    ITERATE_DL_HASHTBL_END(curr, prev)
+        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
+      }
+
+      /* Delete curr_dl entry from dl_hashtbl.  */
+      if (NULL == prev_dl) {
+        dl_hashtbl -> head[i] = next_dl;
+        needs_barrier = TRUE;
+      } else {
+        dl_set_next(prev_dl, next_dl);
+        GC_dirty(prev_dl);
+      }
+      GC_clear_mark_bit(curr_dl);
+      dl_hashtbl -> entries--;
+    }
+  }
+  if (needs_barrier)
+    GC_dirty(dl_hashtbl -> head); /* entire object */
 }
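For reference, a small client-side sketch (not part of this diff) of the API serviced by the routine above; Node, weak_ref and remember are made-up names.

    #include "gc.h"

    struct Node { int payload; };

    static void *weak_ref;              // word-aligned, non-collectible slot

    static void remember(void)
    {
        struct Node *n = GC_NEW(struct Node);    // collectible object
        weak_ref = n;
        // Once n becomes unreachable, the collector stores NULL into weak_ref
        // (handled by GC_make_disappearing_links_disappear during GC_finalize).
        GC_general_register_disappearing_link(&weak_ref, n);
    }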
 
 /* Called with held lock (but the world is running).                    */
@@ -960,7 +997,9 @@ GC_INNER void GC_finalize(void)
     size_t i;
     size_t fo_size = log_fo_table_size == -1 ? 0 :
                                 (size_t)1 << log_fo_table_size;
+    GC_bool needs_barrier = FALSE;
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifndef SMALL_CONFIG
       /* Save current GC_[dl/ll]_entries value for stats printing */
       GC_old_dl_entries = GC_dl_hashtbl.entries;
@@ -972,7 +1011,7 @@ GC_INNER void GC_finalize(void)
 #   ifndef GC_TOGGLE_REFS_NOT_NEEDED
       GC_mark_togglerefs();
 #   endif
-    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);
+    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);
 
   /* Mark all objects reachable via chains of 1 or more pointers        */
   /* from finalizable objects.                                          */
@@ -1007,8 +1046,14 @@ GC_INNER void GC_finalize(void)
               next_fo = fo_next(curr_fo);
               if (NULL == prev_fo) {
                 GC_fnlz_roots.fo_head[i] = next_fo;
+                if (GC_object_finalized_proc) {
+                  GC_dirty(GC_fnlz_roots.fo_head + i);
+                } else {
+                  needs_barrier = TRUE;
+                }
               } else {
                 fo_set_next(prev_fo, next_fo);
+                GC_dirty(prev_fo);
               }
               GC_fo_entries--;
               if (GC_object_finalized_proc)
@@ -1016,7 +1061,8 @@ GC_INNER void GC_finalize(void)
 
             /* Add to list of objects awaiting finalization.    */
               fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
-              GC_fnlz_roots.finalize_now = curr_fo;
+              GC_dirty(curr_fo);
+              SET_FINALIZE_NOW(curr_fo);
               /* unhide object pointer so any future collections will   */
               /* see it.                                                */
               curr_fo -> fo_hidden_base =
@@ -1066,9 +1112,10 @@ GC_INNER void GC_finalize(void)
               GC_set_mark_bit(real_ptr);
             } else {
               if (NULL == prev_fo) {
-                GC_fnlz_roots.finalize_now = next_fo;
+                SET_FINALIZE_NOW(next_fo);
               } else {
                 fo_set_next(prev_fo, next_fo);
+                GC_dirty(prev_fo);
               }
               curr_fo -> fo_hidden_base =
                                 GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
@@ -1077,9 +1124,11 @@ GC_INNER void GC_finalize(void)
 
               i = HASH2(real_ptr, log_fo_table_size);
               fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
+              GC_dirty(curr_fo);
               GC_fo_entries++;
               GC_fnlz_roots.fo_head[i] = curr_fo;
               curr_fo = prev_fo;
+              needs_barrier = TRUE;
             }
           }
           prev_fo = curr_fo;
@@ -1087,14 +1136,18 @@ GC_INNER void GC_finalize(void)
         }
       }
   }
+  if (needs_barrier)
+    GC_dirty(GC_fnlz_roots.fo_head); /* entire object */
+
+  /* Remove dangling disappearing links. */
+  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);
 
-  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
     GC_clear_togglerefs();
 # endif
 # ifndef GC_LONG_REFS_NOT_NEEDED
-    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
-    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
+    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
+    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
 # endif
 
   if (GC_fail_count) {
@@ -1110,21 +1163,23 @@ GC_INNER void GC_finalize(void)
 
 #ifndef JAVA_FINALIZATION_NOT_NEEDED
 
-  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
+  /* Enqueue all remaining finalizers to be run.        */
   STATIC void GC_enqueue_all_finalizers(void)
   {
-    struct finalizable_object * curr_fo, * next_fo;
-    ptr_t real_ptr;
+    struct finalizable_object * next_fo;
     int i;
     int fo_size;
 
+    GC_ASSERT(I_HOLD_LOCK());
     fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
     GC_bytes_finalized = 0;
     for (i = 0; i < fo_size; i++) {
-      curr_fo = GC_fnlz_roots.fo_head[i];
+      struct finalizable_object * curr_fo = GC_fnlz_roots.fo_head[i];
+
       GC_fnlz_roots.fo_head[i] = NULL;
       while (curr_fo != NULL) {
-          real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
+          ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
+
           GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
           GC_set_mark_bit(real_ptr);
 
@@ -1132,7 +1187,8 @@ GC_INNER void GC_finalize(void)
 
           /* Add to list of objects awaiting finalization.      */
           fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
-          GC_fnlz_roots.finalize_now = curr_fo;
+          GC_dirty(curr_fo);
+          SET_FINALIZE_NOW(curr_fo);
 
           /* unhide object pointer so any future collections will       */
           /* see it.                                                    */
@@ -1141,7 +1197,7 @@ GC_INNER void GC_finalize(void)
           GC_bytes_finalized +=
                 curr_fo -> fo_object_size + sizeof(struct finalizable_object);
           curr_fo = next_fo;
-        }
+      }
     }
     GC_fo_entries = 0;  /* all entries deleted from the hash table */
   }
@@ -1183,10 +1239,13 @@ GC_INNER void GC_finalize(void)
 /* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
 /* finalizers can only be called from some kind of "safe state" and     */
 /* getting into that safe state is expensive.)                          */
-GC_ATTR_NO_SANITIZE_THREAD
 GC_API int GC_CALL GC_should_invoke_finalizers(void)
 {
-  return GC_fnlz_roots.finalize_now != NULL;
+# ifdef AO_HAVE_load
+    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
+# else
+    return GC_fnlz_roots.finalize_now != NULL;
+# endif /* AO_HAVE_load */
 }
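A typical client-side pairing for the lock-free check above (sketch only; the wrapper name is invented):

    #include "gc.h"

    static void run_pending_finalizers(void)
    {
        if (GC_should_invoke_finalizers())   // cheap, takes no lock (see above)
            (void)GC_invoke_finalizers();    // returns the count of finalizers run
    }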
 
 /* Invoke finalizers for all objects that are ready to be finalized.    */
@@ -1209,7 +1268,8 @@ GC_API int GC_CALL GC_invoke_finalizers(void)
         }
         curr_fo = GC_fnlz_roots.finalize_now;
 #       ifdef THREADS
-            if (curr_fo != 0) GC_fnlz_roots.finalize_now = fo_next(curr_fo);
+            if (curr_fo != NULL)
+                SET_FINALIZE_NOW(fo_next(curr_fo));
             UNLOCK();
             if (curr_fo == 0) break;
 #       else
@@ -1280,9 +1340,7 @@ GC_INNER void GC_notify_or_invoke_finalizers(void)
 #       endif
 #       ifdef MAKE_BACK_GRAPH
           if (GC_print_back_height) {
-            UNLOCK();
             GC_print_back_graph_stats();
-            LOCK();
           }
 #       endif
       }

+ 99 - 0
blitz.mod/bdwgc/gc_cpp.cc

@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this code for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+/*************************************************************************
+This implementation module for gc_cpp.h provides an implementation of
+the global operators "new" and "delete" that call the Boehm
+allocator.  All objects allocated by this implementation will be
+uncollectible but part of the root set of the collector.
+
+You should ensure (using implementation-dependent techniques) that the
+linker finds this module before the library that defines the default
+built-in "new" and "delete".
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifndef GC_BUILD
+# define GC_BUILD
+#endif
+
+#define GC_DONT_INCL_WINDOWS_H
+#include "gc.h"
+
+#include <new> // for bad_alloc, precedes include of gc_cpp.h
+
+#include "gc_cpp.h" // for GC_OPERATOR_NEW_ARRAY, GC_NOEXCEPT
+
+#if defined(GC_NEW_ABORTS_ON_OOM) || defined(_LIBCPP_NO_EXCEPTIONS)
+# define GC_ALLOCATOR_THROW_OR_ABORT() GC_abort_on_oom()
+#else
+# define GC_ALLOCATOR_THROW_OR_ABORT() throw std::bad_alloc()
+#endif
+
+GC_API void GC_CALL GC_throw_bad_alloc() {
+  GC_ALLOCATOR_THROW_OR_ABORT();
+}
+
+#if !defined(_MSC_VER) && !defined(__DMC__)
+
+# if !defined(GC_NEW_DELETE_THROW_NOT_NEEDED) \
+    && !defined(GC_NEW_DELETE_NEED_THROW) && GC_GNUC_PREREQ(4, 2) \
+    && (__cplusplus < 201103L || defined(__clang__))
+#   define GC_NEW_DELETE_NEED_THROW
+# endif
+
+# ifdef GC_NEW_DELETE_NEED_THROW
+#   define GC_DECL_NEW_THROW throw(std::bad_alloc)
+# else
+#   define GC_DECL_NEW_THROW /* empty */
+# endif
+
+  void* operator new(size_t size) GC_DECL_NEW_THROW {
+    void* obj = GC_MALLOC_UNCOLLECTABLE(size);
+    if (0 == obj)
+      GC_ALLOCATOR_THROW_OR_ABORT();
+    return obj;
+  }
+
+  void operator delete(void* obj) GC_NOEXCEPT {
+    GC_FREE(obj);
+  }
+
+# if defined(GC_OPERATOR_NEW_ARRAY) && !defined(CPPCHECK)
+    void* operator new[](size_t size) GC_DECL_NEW_THROW {
+      void* obj = GC_MALLOC_UNCOLLECTABLE(size);
+      if (0 == obj)
+        GC_ALLOCATOR_THROW_OR_ABORT();
+      return obj;
+    }
+
+    void operator delete[](void* obj) GC_NOEXCEPT {
+      GC_FREE(obj);
+    }
+# endif // GC_OPERATOR_NEW_ARRAY
+
+# if __cplusplus > 201103L // C++14
+    void operator delete(void* obj, size_t size) GC_NOEXCEPT {
+      (void)size; // size is ignored
+      GC_FREE(obj);
+    }
+
+#   if defined(GC_OPERATOR_NEW_ARRAY) && !defined(CPPCHECK)
+      void operator delete[](void* obj, size_t size) GC_NOEXCEPT {
+        (void)size;
+        GC_FREE(obj);
+      }
+#   endif
+# endif // C++14
+
+#endif // !_MSC_VER
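A rough usage sketch (assuming gc_cpp.h and that the array forms above are compiled in; Blob is a made-up type) contrasting the global operators defined above with the collectible class gc from gc_cpp.h:

    #include "gc_cpp.h"

    class Blob : public gc {        // instances come from GC_MALLOC (collectible)
    public:
        int data[16];
    };

    int main() {
        Blob *b = new Blob;         // collected automatically once unreachable
        int *raw = new int[100];    // global operator new[] above:
                                    // GC_MALLOC_UNCOLLECTABLE, scanned as a root
        delete[] raw;               // must still be freed explicitly
        (void)b;
        return 0;
    }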

+ 2 - 0
blitz.mod/bdwgc/gc_cpp.cpp

@@ -0,0 +1,2 @@
+// Visual C++ seems to prefer a .cpp extension to .cc
+#include "gc_cpp.cc"

+ 22 - 12
blitz.mod/bdwgc/gcj_mlc.c

@@ -67,7 +67,9 @@ STATIC struct GC_ms_entry * GC_gcj_fake_mark_proc(word * addr GC_ATTR_UNUSED,
 GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
                                        void * /* really GC_mark_proc */mp)
 {
-    GC_bool ignore_gcj_info;
+#   ifndef GC_IGNORE_GCJ_INFO
+      GC_bool ignore_gcj_info;
+#   endif
     DCL_LOCK_STATE;
 
     if (mp == 0)        /* In case GC_DS_PROC is unused.        */
@@ -82,7 +84,7 @@ GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
     GC_gcj_malloc_initialized = TRUE;
 #   ifdef GC_IGNORE_GCJ_INFO
       /* This is useful for debugging on platforms with missing getenv(). */
-      ignore_gcj_info = 1;
+#     define ignore_gcj_info TRUE
 #   else
       ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO"));
 #   endif
@@ -119,6 +121,7 @@ GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
                                 FALSE, TRUE);
       }
     UNLOCK();
+#   undef ignore_gcj_info
 }
 
 #define GENERAL_MALLOC_INNER(lb,k) \
@@ -163,9 +166,10 @@ static void maybe_finalize(void)
 
     GC_DBG_COLLECT_AT_MALLOC(lb);
     if(SMALL_OBJ(lb)) {
-        word lg = GC_size_map[lb];
+        word lg;
 
         LOCK();
+        lg = GC_size_map[lb];
         op = GC_gcjobjfreelist[lg];
         if(EXPECT(0 == op, FALSE)) {
             maybe_finalize();
@@ -179,9 +183,7 @@ static void maybe_finalize(void)
             GC_gcjobjfreelist[lg] = (ptr_t)obj_link(op);
             GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
         }
-        *(void **)op = ptr_to_struct_containing_descr;
         GC_ASSERT(((void **)op)[1] == 0);
-        UNLOCK();
     } else {
         LOCK();
         maybe_finalize();
@@ -191,10 +193,12 @@ static void maybe_finalize(void)
             UNLOCK();
             return((*oom_fn)(lb));
         }
-        *(void **)op = ptr_to_struct_containing_descr;
-        UNLOCK();
     }
-    return((void *) op);
+    *(void **)op = ptr_to_struct_containing_descr;
+    UNLOCK();
+    GC_dirty(op);
+    REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
+    return (void *)op;
 }
 
 /* Similar to GC_gcj_malloc, but add debug info.  This is allocated     */
@@ -222,9 +226,12 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_gcj_malloc(size_t lb,
     if (!GC_debugging_started) {
         GC_start_debugging_inner();
     }
-    UNLOCK();
     ADD_CALL_CHAIN(result, ra);
-    return GC_store_debug_info((ptr_t)result, (word)lb, s, i);
+    result = GC_store_debug_info_inner(result, (word)lb, s, i);
+    UNLOCK();
+    GC_dirty(result);
+    REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
+    return result;
 }
 
 /* There is no THREAD_LOCAL_ALLOC for GC_gcj_malloc_ignore_off_page().  */
@@ -236,9 +243,10 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
 
     GC_DBG_COLLECT_AT_MALLOC(lb);
     if(SMALL_OBJ(lb)) {
-        word lg = GC_size_map[lb];
+        word lg;
 
         LOCK();
+        lg = GC_size_map[lb];
         op = GC_gcjobjfreelist[lg];
         if (EXPECT(0 == op, FALSE)) {
             maybe_finalize();
@@ -264,7 +272,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
     }
     *(void **)op = ptr_to_struct_containing_descr;
     UNLOCK();
-    return((void *) op);
+    GC_dirty(op);
+    REACHABLE_AFTER_DIRTY(ptr_to_struct_containing_descr);
+    return (void *)op;
 }
 
 #endif  /* GC_GCJ_SUPPORT */

+ 40 - 31
blitz.mod/bdwgc/headers.c

@@ -25,12 +25,12 @@
  */
 
 STATIC bottom_index * GC_all_bottom_indices = 0;
-                                /* Pointer to first (lowest addr) */
-                                /* bottom_index.                  */
+                        /* Pointer to the first (lowest address)        */
+                        /* bottom_index.  Assumes the lock is held.     */
 
 STATIC bottom_index * GC_all_bottom_indices_end = 0;
-                                /* Pointer to last (highest addr) */
-                                /* bottom_index.                  */
+                        /* Pointer to the last (highest address)        */
+                        /* bottom_index.  Assumes the lock is held.     */
 
 /* Non-macro version of header location routine */
 GC_INNER hdr * GC_find_header(ptr_t h)
@@ -167,9 +167,9 @@ static hdr * hdr_free_list = 0;
 /* Return an uninitialized header */
 static hdr * alloc_hdr(void)
 {
-    register hdr * result;
+    hdr * result;
 
-    if (hdr_free_list == 0) {
+    if (NULL == hdr_free_list) {
         result = (hdr *)GC_scratch_alloc(sizeof(hdr));
     } else {
         result = hdr_free_list;
@@ -192,7 +192,7 @@ GC_INLINE void free_hdr(hdr * hhdr)
 
 GC_INNER void GC_init_headers(void)
 {
-    register unsigned i;
+    unsigned i;
 
     GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
     if (GC_all_nils == NULL) {
@@ -205,7 +205,7 @@ GC_INNER void GC_init_headers(void)
     }
 }
 
-/* Make sure that there is a bottom level index block for address addr  */
+/* Make sure that there is a bottom level index block for address addr. */
 /* Return FALSE on failure.                                             */
 static GC_bool get_index(word addr)
 {
@@ -213,30 +213,32 @@ static GC_bool get_index(word addr)
     bottom_index * r;
     bottom_index * p;
     bottom_index ** prev;
-    bottom_index *pi;
+    bottom_index *pi; /* old_p */
+    word i;
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifdef HASH_TL
-      word i = TL_HASH(hi);
-      bottom_index * old;
+      i = TL_HASH(hi);
 
-      old = p = GC_top_index[i];
+      pi = p = GC_top_index[i];
       while(p != GC_all_nils) {
           if (p -> key == hi) return(TRUE);
           p = p -> hash_link;
       }
-      r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
-      if (r == 0) return(FALSE);
-      BZERO(r, sizeof (bottom_index));
-      r -> hash_link = old;
-      GC_top_index[i] = r;
 #   else
-      if (GC_top_index[hi] != GC_all_nils) return(TRUE);
-      r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
-      if (r == 0) return(FALSE);
-      GC_top_index[hi] = r;
-      BZERO(r, sizeof (bottom_index));
+      if (GC_top_index[hi] != GC_all_nils)
+        return TRUE;
+      i = hi;
 #   endif
+    r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
+    if (EXPECT(NULL == r, FALSE))
+      return FALSE;
+    BZERO(r, sizeof(bottom_index));
     r -> key = hi;
+#   ifdef HASH_TL
+      r -> hash_link = pi;
+#   endif
+
     /* Add it to the list of bottom indices */
       prev = &GC_all_bottom_indices;    /* pointer to p */
       pi = 0;                           /* bottom_index preceding p */
@@ -252,6 +254,8 @@ static GC_bool get_index(word addr)
       }
       r -> asc_link = p;
       *prev = r;
+
+      GC_top_index[i] = r;
     return(TRUE);
 }
 
@@ -302,14 +306,16 @@ GC_INNER void GC_remove_header(struct hblk *h)
 /* Remove forwarding counts for h */
 GC_INNER void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
 {
-    register struct hblk * hbp;
+    struct hblk * hbp;
+
     for (hbp = h+1; (word)hbp < (word)h + sz; hbp += 1) {
         SET_HDR(hbp, 0);
     }
 }
 
-/* Apply fn to all allocated blocks */
-/*VARARGS1*/
+/* Apply fn to all allocated blocks.  It is the caller's responsibility */
+/* to avoid data races during the function execution (e.g. by holding   */
+/* the allocation lock).                                                */
 void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
                             word client_data)
 {
@@ -340,12 +346,14 @@ void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
 /* Return 0 if there is none.                           */
 GC_INNER struct hblk * GC_next_used_block(struct hblk *h)
 {
-    register bottom_index * bi;
-    register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+    REGISTER bottom_index * bi;
+    REGISTER word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
 
+    GC_ASSERT(I_HOLD_LOCK());
     GET_BI(h, bi);
     if (bi == GC_all_nils) {
-        register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+        REGISTER word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+
         bi = GC_all_bottom_indices;
         while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
         j = 0;
@@ -376,12 +384,13 @@ GC_INNER struct hblk * GC_next_used_block(struct hblk *h)
 /* Unlike the above, this may return a free block.              */
 GC_INNER struct hblk * GC_prev_block(struct hblk *h)
 {
-    register bottom_index * bi;
-    register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+    bottom_index * bi;
+    signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
 
+    GC_ASSERT(I_HOLD_LOCK());
     GET_BI(h, bi);
     if (bi == GC_all_nils) {
-        register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+        word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
         bi = GC_all_bottom_indices_end;
         while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
         j = BOTTOM_SZ - 1;

+ 11 - 0
blitz.mod/bdwgc/ia64_save_regs_in_stack.s

@@ -0,0 +1,11 @@
+        .text
+        .align 16
+        .global GC_save_regs_in_stack
+        .proc GC_save_regs_in_stack
+GC_save_regs_in_stack:
+        .body
+        flushrs
+        ;;
+        mov r8=ar.bsp
+        br.ret.sptk.few rp
+        .endp GC_save_regs_in_stack

+ 1 - 1
blitz.mod/bdwgc/include/ec.h

@@ -47,7 +47,7 @@ typedef struct CORD_ec_struct {
 /* This structure represents the concatenation of ec_cord with  */
 /* ec_buf[0 ... (ec_bufptr-ec_buf-1)]                           */
 
-/* Flush the buffer part of the extended chord into ec_cord.    */
+/* Flush the buffer part of the extended cord into ec_cord.     */
 /* Note that this is almost the only real function, and it is   */
 /* implemented in 6 lines in cordxtra.c                         */
 void CORD_ec_flush_buf(CORD_ec x);

+ 70 - 43
blitz.mod/bdwgc/include/gc.h

@@ -427,6 +427,22 @@ GC_API void GC_CALL GC_set_pages_executable(int);
 /* use or need synchronization (i.e. acquiring the allocator lock).     */
 GC_API int GC_CALL GC_get_pages_executable(void);
 
+/* The setter and getter of the minimum value returned by the internal  */
+/* min_bytes_allocd().  The value should not be zero; the default value */
+/* is one.  Not synchronized.                                           */
+GC_API void GC_CALL GC_set_min_bytes_allocd(size_t);
+GC_API size_t GC_CALL GC_get_min_bytes_allocd(void);
+
+/* Set/get the size in pages of units operated by GC_collect_a_little.  */
+/* The value should not be zero.  Not synchronized.                     */
+GC_API void GC_CALL GC_set_rate(int);
+GC_API int GC_CALL GC_get_rate(void);
+
+/* Set/get the maximum number of prior attempts at the world-stop       */
+/* marking.  Not synchronized.                                          */
+GC_API void GC_CALL GC_set_max_prior_attempts(int);
+GC_API int GC_CALL GC_get_max_prior_attempts(void);
+
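A hypothetical tuning sketch using the new knobs declared above (the chosen values are arbitrary, not recommendations):

    #include "gc.h"

    int main() {
        GC_set_rate(20);               // units processed per GC_collect_a_little
        GC_set_max_prior_attempts(2);  // bound on prior world-stop marking attempts
        GC_set_min_bytes_allocd(1);    // keep the documented default
        GC_INIT();
        void *p = GC_MALLOC(64);
        (void)p;
        return 0;
    }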
 /* Overrides the default handle-fork mode.  Non-zero value means GC     */
 /* should install proper pthread_atfork handlers.  Has effect only if   */
 /* called before GC_INIT.  Clients should invoke GC_set_handle_fork     */
@@ -468,16 +484,12 @@ GC_API void GC_CALL GC_deinit(void);
 /* General purpose allocation routines, with roughly malloc calling     */
 /* conv.  The atomic versions promise that no relevant pointers are     */
 /* contained in the object.  The non-atomic versions guarantee that the */
-/* new object is cleared.  GC_malloc_stubborn promises that no changes  */
-/* to the object will occur after GC_end_stubborn_change has been       */
-/* called on the result of GC_malloc_stubborn.  GC_malloc_uncollectable */
-/* allocates an object that is scanned for pointers to collectible      */
+/* new object is cleared.  GC_malloc_uncollectable allocates            */
+/* an object that is scanned for pointers to collectible                */
 /* objects, but is not itself collectible.  The object is scanned even  */
 /* if it does not appear to be reachable.  GC_malloc_uncollectable and  */
 /* GC_free called on the resulting object implicitly update             */
 /* GC_non_gc_bytes appropriately.                                       */
-/* Note that the GC_malloc_stubborn support doesn't really exist        */
-/* anymore.  MANUAL_VDB provides comparable functionality.              */
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
         GC_malloc(size_t /* size_in_bytes */);
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
@@ -487,8 +499,7 @@ GC_API GC_ATTR_MALLOC char * GC_CALL
         GC_strndup(const char *, size_t) GC_ATTR_NONNULL(1);
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
         GC_malloc_uncollectable(size_t /* size_in_bytes */);
-GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
-        GC_malloc_stubborn(size_t /* size_in_bytes */);
+GC_API GC_ATTR_DEPRECATED void * GC_CALL GC_malloc_stubborn(size_t);
 
 /* GC_memalign() is not well tested.                                    */
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
@@ -498,27 +509,25 @@ GC_API int GC_CALL GC_posix_memalign(void ** /* memptr */, size_t /* align */,
 
 /* Explicitly deallocate an object.  Dangerous if used incorrectly.     */
 /* Requires a pointer to the base of an object.                         */
-/* If the argument is stubborn, it should not be changeable when freed. */
 /* An object should not be enabled for finalization (and it should not  */
 /* contain registered disappearing links of any kind) when it is        */
 /* explicitly deallocated.                                              */
 /* GC_free(0) is a no-op, as required by ANSI C for free.               */
 GC_API void GC_CALL GC_free(void *);
 
-/* Stubborn objects may be changed only if the collector is explicitly  */
-/* informed.  The collector is implicitly informed of coming change     */
-/* when such an object is first allocated.  The following routines      */
-/* inform the collector that an object will no longer be changed, or    */
-/* that it will once again be changed.  Only non-NULL pointer stores    */
-/* into the object are considered to be changes.  The argument to       */
-/* GC_end_stubborn_change must be exactly the value returned by         */
-/* GC_malloc_stubborn or passed to GC_change_stubborn.  (In the second  */
-/* case, it may be an interior pointer within 512 bytes of the          */
-/* beginning of the objects.)  There is a performance penalty for       */
-/* allowing more than one stubborn object to be changed at once, but it */
-/* is acceptable to do so.  The same applies to dropping stubborn       */
-/* objects that are still changeable.                                   */
-GC_API void GC_CALL GC_change_stubborn(const void *) GC_ATTR_NONNULL(1);
+/* The "stubborn" objects allocation is not supported anymore.  Exists  */
+/* only for the backward compatibility.                                 */
+#define GC_MALLOC_STUBBORN(sz)  GC_MALLOC(sz)
+#define GC_NEW_STUBBORN(t)      GC_NEW(t)
+#define GC_CHANGE_STUBBORN(p)   GC_change_stubborn(p)
+GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void *);
+
+/* Inform the collector that the object has been changed.               */
+/* Only non-NULL pointer stores into the object are considered to be    */
+/* changes.  Matters only if the library has been compiled with         */
+/* MANUAL_VDB defined (otherwise the function does nothing).            */
+/* Should be followed typically by GC_reachable_here called for each    */
+/* of the stored pointers.                                              */
 GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1);
 
 /* Return a pointer to the base (lowest address) of an object given     */
@@ -550,7 +559,6 @@ GC_API size_t GC_CALL GC_size(const void * /* obj_addr */) GC_ATTR_NONNULL(1);
 /* or with the standard C library, your code is broken.  In my          */
 /* opinion, it shouldn't have been invented, but now we're stuck. -HB   */
 /* The resulting object has the same kind as the original.              */
-/* If the argument is stubborn, the result will have changes enabled.   */
 /* It is an error to have changes enabled for the original object.      */
 /* It does not change the content of the object from its beginning to   */
 /* the minimum of old size and new_size_in_bytes; the content above in  */
@@ -773,7 +781,9 @@ GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *,
 /* Get the element value (converted to bytes) at a given index of       */
 /* size_map table which provides requested-to-actual allocation size    */
 /* mapping.  Assumes the collector is initialized.  Returns -1 if the   */
-/* index is out of size_map table bounds. Does not use synchronization. */
+/* index is out of size_map table bounds. Does not use synchronization, */
+/* thus clients should typically call it via GC_call_with_alloc_lock    */
+/* to avoid data races on multiprocessors.                              */
 GC_API size_t GC_CALL GC_get_size_map_at(int i);
 
 /* Count total memory use in bytes by all allocated blocks.  Acquires   */
@@ -879,7 +889,7 @@ GC_API GC_ATTR_MALLOC char * GC_CALL
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
         GC_debug_malloc_uncollectable(size_t /* size_in_bytes */,
                                       GC_EXTRA_PARAMS);
-GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
+GC_API GC_ATTR_DEPRECATED void * GC_CALL
         GC_debug_malloc_stubborn(size_t /* size_in_bytes */, GC_EXTRA_PARAMS);
 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
         GC_debug_malloc_ignore_off_page(size_t /* size_in_bytes */,
@@ -891,7 +901,11 @@ GC_API void GC_CALL GC_debug_free(void *);
 GC_API void * GC_CALL GC_debug_realloc(void * /* old_object */,
                         size_t /* new_size_in_bytes */, GC_EXTRA_PARAMS)
                         /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2);
-GC_API void GC_CALL GC_debug_change_stubborn(const void *) GC_ATTR_NONNULL(1);
+GC_API
+#if !defined(CPPCHECK)
+  GC_ATTR_DEPRECATED
+#endif
+void GC_CALL GC_debug_change_stubborn(const void *);
 GC_API void GC_CALL GC_debug_end_stubborn_change(const void *)
                                                         GC_ATTR_NONNULL(1);
 
@@ -945,9 +959,8 @@ GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
       GC_debug_register_finalizer_no_order(p, f, d, of, od)
 # define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
       GC_debug_register_finalizer_unreachable(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
-# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
 # define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# define GC_PTR_STORE_AND_DIRTY(p, q) GC_debug_ptr_store_and_dirty(p, q)
 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
       GC_general_register_disappearing_link(link, \
                                         GC_base((/* no const */ void *)(obj)))
@@ -973,9 +986,8 @@ GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
       GC_register_finalizer_no_order(p, f, d, of, od)
 # define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
       GC_register_finalizer_unreachable(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
-# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
 # define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# define GC_PTR_STORE_AND_DIRTY(p, q) GC_ptr_store_and_dirty(p, q)
 # define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
       GC_general_register_disappearing_link(link, obj)
 # define GC_REGISTER_LONG_LINK(link, obj) \
@@ -990,7 +1002,6 @@ GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
 /* may return NULL (if out of memory).                                  */
 #define GC_NEW(t)               ((t*)GC_MALLOC(sizeof(t)))
 #define GC_NEW_ATOMIC(t)        ((t*)GC_MALLOC_ATOMIC(sizeof(t)))
-#define GC_NEW_STUBBORN(t)      ((t*)GC_MALLOC_STUBBORN(sizeof(t)))
 #define GC_NEW_UNCOLLECTABLE(t) ((t*)GC_MALLOC_UNCOLLECTABLE(sizeof(t)))
 
 #ifdef GC_REQUIRE_WCSDUP
@@ -1336,6 +1347,9 @@ typedef void (GC_CALLBACK * GC_abort_func)(const char * /* msg */);
 GC_API void GC_CALL GC_set_abort_func(GC_abort_func) GC_ATTR_NONNULL(1);
 GC_API GC_abort_func GC_CALL GC_get_abort_func(void);
 
+/* A portable way to abort the application because of not enough memory.*/
+GC_API void GC_CALL GC_abort_on_oom(void);
+
 /* The following is intended to be used by a higher level       */
 /* (e.g. Java-like) finalization facility.  It is expected      */
 /* that finalization code will arrange for hidden pointers to   */
@@ -1564,12 +1578,17 @@ GC_API void * GC_CALL GC_is_valid_displacement(void * /* p */);
 /* Explicitly dump the GC state.  This is most often called from the    */
 /* debugger, or by setting the GC_DUMP_REGULARLY environment variable,  */
 /* but it may be useful to call it from client code during debugging.   */
-/* If name is specified (and non-NULL), it is printed to help           */
+/* The current collection number is printed in the header of the dump.  */
+/* Acquires the GC lock to avoid data races.                            */
+/* Defined only if the library has been compiled without NO_DEBUGGING.  */
+GC_API void GC_CALL GC_dump(void);
+
+/* The same as GC_dump but allows one to specify the dump name and does */
+/* not acquire the lock.  If name is non-NULL, it is printed to help    */
 /* identifying individual dumps.  Otherwise the current collection      */
 /* number is used as the name.                                          */
 /* Defined only if the library has been compiled without NO_DEBUGGING.  */
 GC_API void GC_CALL GC_dump_named(const char * /* name */);
-GC_API void GC_CALL GC_dump(void); /* = GC_dump_named(NULL) */
 
 /* Dump information about each block of every GC memory section.        */
 /* Defined only if the library has been compiled without NO_DEBUGGING.  */
@@ -1595,10 +1614,10 @@ GC_API void GC_CALL GC_dump_finalization(void);
         ((type_of_result)GC_pre_incr((void **)(&(x)), (n)*sizeof(*x)))
 # define GC_POST_INCR3(x, n, type_of_result) \
         ((type_of_result)GC_post_incr((void **)(&(x)), (n)*sizeof(*x)))
-# define GC_PTR_ADD(x, n) GC_PTR_ADD3(x, n, typeof(x))
-# define GC_PRE_INCR(x, n) GC_PRE_INCR3(x, n, typeof(x))
-# define GC_POST_INCR(x) GC_POST_INCR3(x, 1, typeof(x))
-# define GC_POST_DECR(x) GC_POST_INCR3(x, -1, typeof(x))
+# define GC_PTR_ADD(x, n) GC_PTR_ADD3(x, n, __typeof__(x))
+# define GC_PRE_INCR(x, n) GC_PRE_INCR3(x, n, __typeof__(x))
+# define GC_POST_INCR(x) GC_POST_INCR3(x, 1, __typeof__(x))
+# define GC_POST_DECR(x) GC_POST_INCR3(x, -1, __typeof__(x))
 #else /* !GC_DEBUG || !__GNUC__ */
   /* We can't do this right without typeof, which ANSI decided was not    */
   /* sufficiently useful.  Without it we resort to the non-debug version. */
@@ -1612,11 +1631,20 @@ GC_API void GC_CALL GC_dump_finalization(void);
 /* Safer assignment of a pointer to a non-stack location.       */
 #ifdef GC_DEBUG
 # define GC_PTR_STORE(p, q) \
-        (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
+        (*(void **)GC_is_visible((void *)(p)) = \
+                    GC_is_valid_displacement((void *)(q)))
 #else
-# define GC_PTR_STORE(p, q) (*(p) = (q))
+# define GC_PTR_STORE(p, q) (*(void **)(p) = (void *)(q))
 #endif
 
+/* GC_PTR_STORE_AND_DIRTY(p,q) is equivalent to GC_PTR_STORE(p,q)       */
+/* followed by GC_END_STUBBORN_CHANGE(p) and GC_reachable_here(q)       */
+/* (assuming p and q do not have side effects).                         */
+GC_API void GC_CALL GC_ptr_store_and_dirty(void * /* p */,
+                                           const void * /* q */);
+GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void * /* p */,
+                                                 const void * /* q */);
+
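A client-side sketch of the equivalence stated in the comment above (Node, n and child are hypothetical GC-allocated objects):

    #include "gc.h"

    struct Node { struct Node *next; };

    static void link_nodes(struct Node *n, struct Node *child)
    {
        GC_PTR_STORE(&n->next, child);    // checked store under GC_DEBUG
        GC_END_STUBBORN_CHANGE(&n->next); // record the change (MANUAL_VDB builds)
        GC_reachable_here(child);         // keep child live up to this point
        // ...or, equivalently, the single call:
        // GC_PTR_STORE_AND_DIRTY(&n->next, child);
    }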
 /* Functions called to report pointer checking errors */
 GC_API void (GC_CALLBACK * GC_same_obj_print_proc)(void * /* p */,
                                                    void * /* q */);
@@ -1673,7 +1701,8 @@ GC_API void GC_CALL GC_register_has_static_roots_callback(
                 /* Note: for Cygwin and pthreads-win32, this is skipped */
                 /* unless windows.h is included before gc.h.            */
 
-# if !defined(GC_NO_THREAD_DECLS) || defined(GC_BUILD)
+# if (!defined(GC_NO_THREAD_DECLS) || defined(GC_BUILD)) \
+     && !defined(GC_DONT_INCL_WINDOWS_H)
 
 #   ifdef __cplusplus
       } /* Including windows.h in an extern "C" context no longer works. */
@@ -2004,8 +2033,6 @@ GC_API void GC_CALL GC_win32_free_heap(void);
         (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic)
 # define GC_malloc_uncollectable(a) \
         (*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable)
-# define GC_malloc_stubborn(a) \
-        (*GC_amiga_allocwrapper_do)(a,GC_malloc_stubborn)
 # define GC_malloc_atomic_uncollectable(a) \
         (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable)
 # define GC_malloc_ignore_off_page(a) \

+ 40 - 0
blitz.mod/bdwgc/include/gc_alloc_ptrs.h

@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* This file should never be included by clients directly.      */
+
+#ifndef GC_ALLOC_PTRS_H
+#define GC_ALLOC_PTRS_H
+
+#include "gc.h"
+
+#ifdef __cplusplus
+  extern "C" {
+#endif
+
+GC_API void ** const GC_objfreelist_ptr;
+GC_API void ** const GC_aobjfreelist_ptr;
+GC_API void ** const GC_uobjfreelist_ptr;
+
+#ifdef GC_ATOMIC_UNCOLLECTABLE
+  GC_API void ** const GC_auobjfreelist_ptr;
+#endif
+
+GC_API void GC_CALL GC_incr_bytes_allocd(size_t bytes);
+GC_API void GC_CALL GC_incr_bytes_freed(size_t bytes);
+
+#ifdef __cplusplus
+  } /* extern "C" */
+#endif
+
+#endif /* GC_ALLOC_PTRS_H */

+ 102 - 65
blitz.mod/bdwgc/include/gc_allocator.h

@@ -38,11 +38,10 @@
  */
 
 #ifndef GC_ALLOCATOR_H
-
 #define GC_ALLOCATOR_H
 
 #include "gc.h"
-#include <new> // for placement new
+#include <new> // for placement new and bad_alloc
 
 #ifndef GC_ATTR_EXPLICIT
 # if (__cplusplus >= 201103L) || defined(CPPCHECK)
@@ -52,6 +51,33 @@
 # endif
 #endif
 
+#if !defined(GC_NO_MEMBER_TEMPLATES) && defined(_MSC_VER) && _MSC_VER <= 1200
+  // MSVC++ 6.0 does not support member templates.
+# define GC_NO_MEMBER_TEMPLATES
+#endif
+
+#ifndef GC_NOEXCEPT
+# if defined(__DMC__) || (defined(__BORLANDC__) \
+        && (defined(_RWSTD_NO_EXCEPTIONS) || defined(_RWSTD_NO_EX_SPEC))) \
+     || (defined(_MSC_VER) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) \
+     || (defined(__WATCOMC__) && !defined(_CPPUNWIND))
+#   define GC_NOEXCEPT /* empty */
+#   ifndef GC_NEW_ABORTS_ON_OOM
+#     define GC_NEW_ABORTS_ON_OOM
+#   endif
+# elif __cplusplus >= 201103L
+#   define GC_NOEXCEPT noexcept
+# else
+#   define GC_NOEXCEPT throw()
+# endif
+#endif // !GC_NOEXCEPT
+
+#if defined(GC_NEW_ABORTS_ON_OOM) || defined(_LIBCPP_NO_EXCEPTIONS)
+# define GC_ALLOCATOR_THROW_OR_ABORT() GC_abort_on_oom()
+#else
+# define GC_ALLOCATOR_THROW_OR_ABORT() throw std::bad_alloc()
+#endif
+
 /* First some helpers to allow us to dispatch on whether or not a type
  * is known to be pointer-free.
  * These are private, except that the client may invoke the
@@ -87,23 +113,31 @@ GC_DECLARE_PTRFREE(long double);
 // pointer-free object.
 template <class GC_Tp>
 inline void * GC_selective_alloc(size_t n, GC_Tp, bool ignore_off_page) {
-    return ignore_off_page?GC_MALLOC_IGNORE_OFF_PAGE(n):GC_MALLOC(n);
+    void *obj = ignore_off_page ? GC_MALLOC_IGNORE_OFF_PAGE(n) : GC_MALLOC(n);
+    if (0 == obj)
+      GC_ALLOCATOR_THROW_OR_ABORT();
+    return obj;
 }
 
-template <>
-inline void * GC_selective_alloc<GC_true_type>(size_t n, GC_true_type,
-                                               bool ignore_off_page) {
-    return ignore_off_page? GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(n)
-                          : GC_MALLOC_ATOMIC(n);
-}
+#if !defined(__WATCOMC__)
+  /* Note: template-id not supported in this context by Watcom compiler. */
+  template <>
+  inline void * GC_selective_alloc<GC_true_type>(size_t n, GC_true_type,
+                                                 bool ignore_off_page) {
+    void * obj = ignore_off_page ? GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(n)
+                                 : GC_MALLOC_ATOMIC(n);
+    if (0 == obj)
+      GC_ALLOCATOR_THROW_OR_ABORT();
+    return obj;
+  }
+#endif
 
-/* Now the public gc_allocator<T> class:
- */
+// Now the public gc_allocator<T> class.
 template <class GC_Tp>
 class gc_allocator {
 public:
-  typedef size_t     size_type;
-  typedef ptrdiff_t  difference_type;
+  typedef size_t       size_type;
+  typedef ptrdiff_t    difference_type;
   typedef GC_Tp*       pointer;
   typedef const GC_Tp* const_pointer;
   typedef GC_Tp&       reference;
@@ -114,14 +148,13 @@ public:
     typedef gc_allocator<GC_Tp1> other;
   };
 
-  gc_allocator()  {}
-    gc_allocator(const gc_allocator&) throw() {}
-# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
-  // MSVC++ 6.0 do not support member templates
-  template <class GC_Tp1> GC_ATTR_EXPLICIT
-    gc_allocator(const gc_allocator<GC_Tp1>&) throw() {}
+  gc_allocator() GC_NOEXCEPT {}
+  gc_allocator(const gc_allocator&) GC_NOEXCEPT {}
+# ifndef GC_NO_MEMBER_TEMPLATES
+    template <class GC_Tp1> GC_ATTR_EXPLICIT
+    gc_allocator(const gc_allocator<GC_Tp1>&) GC_NOEXCEPT {}
 # endif
-  ~gc_allocator() throw() {}
+  ~gc_allocator() GC_NOEXCEPT {}
 
   pointer address(reference GC_x) const { return &GC_x; }
   const_pointer address(const_reference GC_x) const { return &GC_x; }
@@ -130,16 +163,15 @@ public:
   // the return value is when GC_n == 0.
   GC_Tp* allocate(size_type GC_n, const void* = 0) {
     GC_type_traits<GC_Tp> traits;
-    return static_cast<GC_Tp *>
-            (GC_selective_alloc(GC_n * sizeof(GC_Tp),
-                                traits.GC_is_ptr_free, false));
+    return static_cast<GC_Tp *>(GC_selective_alloc(GC_n * sizeof(GC_Tp),
+                                                   traits.GC_is_ptr_free,
+                                                   false));
   }
 
-  // __p is not permitted to be a null pointer.
-  void deallocate(pointer __p, size_type /* GC_n */)
+  void deallocate(pointer __p, size_type /* GC_n */) GC_NOEXCEPT
     { GC_FREE(__p); }
 
-  size_type max_size() const throw()
+  size_type max_size() const GC_NOEXCEPT
     { return size_t(-1) / sizeof(GC_Tp); }
 
   void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
@@ -161,25 +193,26 @@ class gc_allocator<void> {
 
 
 template <class GC_T1, class GC_T2>
-inline bool operator==(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
+inline bool operator==(const gc_allocator<GC_T1>&,
+                       const gc_allocator<GC_T2>&) GC_NOEXCEPT
 {
   return true;
 }
 
 template <class GC_T1, class GC_T2>
-inline bool operator!=(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
+inline bool operator!=(const gc_allocator<GC_T1>&,
+                       const gc_allocator<GC_T2>&) GC_NOEXCEPT
 {
   return false;
 }
 
 
-/* Now the public gc_allocator_ignore_off_page<T> class:
- */
+// Now the public gc_allocator_ignore_off_page<T> class.
 template <class GC_Tp>
 class gc_allocator_ignore_off_page {
 public:
-  typedef size_t     size_type;
-  typedef ptrdiff_t  difference_type;
+  typedef size_t       size_type;
+  typedef ptrdiff_t    difference_type;
   typedef GC_Tp*       pointer;
   typedef const GC_Tp* const_pointer;
   typedef GC_Tp&       reference;
@@ -190,15 +223,15 @@ public:
     typedef gc_allocator_ignore_off_page<GC_Tp1> other;
   };
 
-  gc_allocator_ignore_off_page()  {}
-    gc_allocator_ignore_off_page(const gc_allocator_ignore_off_page&) throw() {}
-# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
-  // MSVC++ 6.0 do not support member templates
-  template <class GC_Tp1> GC_ATTR_EXPLICIT
+  gc_allocator_ignore_off_page() GC_NOEXCEPT {}
+  gc_allocator_ignore_off_page(const gc_allocator_ignore_off_page&)
+    GC_NOEXCEPT {}
+# ifndef GC_NO_MEMBER_TEMPLATES
+    template <class GC_Tp1> GC_ATTR_EXPLICIT
     gc_allocator_ignore_off_page(const gc_allocator_ignore_off_page<GC_Tp1>&)
-        throw() {}
+      GC_NOEXCEPT {}
 # endif
-  ~gc_allocator_ignore_off_page() throw() {}
+  ~gc_allocator_ignore_off_page() GC_NOEXCEPT {}
 
   pointer address(reference GC_x) const { return &GC_x; }
   const_pointer address(const_reference GC_x) const { return &GC_x; }
@@ -207,16 +240,15 @@ public:
   // the return value is when GC_n == 0.
   GC_Tp* allocate(size_type GC_n, const void* = 0) {
     GC_type_traits<GC_Tp> traits;
-    return static_cast<GC_Tp *>
-            (GC_selective_alloc(GC_n * sizeof(GC_Tp),
-                                traits.GC_is_ptr_free, true));
+    return static_cast<GC_Tp *>(GC_selective_alloc(GC_n * sizeof(GC_Tp),
+                                                   traits.GC_is_ptr_free,
+                                                   true));
   }
 
-  // __p is not permitted to be a null pointer.
-  void deallocate(pointer __p, size_type /* GC_n */)
+  void deallocate(pointer __p, size_type /* GC_n */) GC_NOEXCEPT
     { GC_FREE(__p); }
 
-  size_type max_size() const throw()
+  size_type max_size() const GC_NOEXCEPT
     { return size_t(-1) / sizeof(GC_Tp); }
 
   void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
@@ -237,24 +269,26 @@ class gc_allocator_ignore_off_page<void> {
 };
 
 template <class GC_T1, class GC_T2>
-inline bool operator==(const gc_allocator_ignore_off_page<GC_T1>&, const gc_allocator_ignore_off_page<GC_T2>&)
+inline bool operator==(const gc_allocator_ignore_off_page<GC_T1>&,
+                       const gc_allocator_ignore_off_page<GC_T2>&) GC_NOEXCEPT
 {
   return true;
 }
 
 template <class GC_T1, class GC_T2>
-inline bool operator!=(const gc_allocator_ignore_off_page<GC_T1>&, const gc_allocator_ignore_off_page<GC_T2>&)
+inline bool operator!=(const gc_allocator_ignore_off_page<GC_T1>&,
+                       const gc_allocator_ignore_off_page<GC_T2>&) GC_NOEXCEPT
 {
   return false;
 }
 
-/*
- * And the public traceable_allocator class.
+// And the public traceable_allocator class.
+
+/* Note that we currently don't specialize the pointer-free case, since a
+ * pointer-free traceable container doesn't make that much sense,
+ * though it could become an issue due to abstraction boundaries.
  */
 
-// Note that we currently don't specialize the pointer-free case, since a
-// pointer-free traceable container doesn't make that much sense,
-// though it could become an issue due to abstraction boundaries.
 template <class GC_Tp>
 class traceable_allocator {
 public:
@@ -270,14 +304,13 @@ public:
     typedef traceable_allocator<GC_Tp1> other;
   };
 
-  traceable_allocator() throw() {}
-    traceable_allocator(const traceable_allocator&) throw() {}
-# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
-  // MSVC++ 6.0 do not support member templates
-  template <class GC_Tp1> GC_ATTR_EXPLICIT
-    traceable_allocator(const traceable_allocator<GC_Tp1>&) throw() {}
+  traceable_allocator() GC_NOEXCEPT {}
+  traceable_allocator(const traceable_allocator&) GC_NOEXCEPT {}
+# ifndef GC_NO_MEMBER_TEMPLATES
+    template <class GC_Tp1> GC_ATTR_EXPLICIT
+    traceable_allocator(const traceable_allocator<GC_Tp1>&) GC_NOEXCEPT {}
 # endif
-  ~traceable_allocator() throw() {}
+  ~traceable_allocator() GC_NOEXCEPT {}
 
   pointer address(reference GC_x) const { return &GC_x; }
   const_pointer address(const_reference GC_x) const { return &GC_x; }
@@ -285,14 +318,16 @@ public:
   // GC_n is permitted to be 0.  The C++ standard says nothing about what
   // the return value is when GC_n == 0.
   GC_Tp* allocate(size_type GC_n, const void* = 0) {
-    return static_cast<GC_Tp*>(GC_MALLOC_UNCOLLECTABLE(GC_n * sizeof(GC_Tp)));
+    void * obj = GC_MALLOC_UNCOLLECTABLE(GC_n * sizeof(GC_Tp));
+    if (0 == obj)
+      GC_ALLOCATOR_THROW_OR_ABORT();
+    return static_cast<GC_Tp*>(obj);
   }
 
-  // __p is not permitted to be a null pointer.
-  void deallocate(pointer __p, size_type /* GC_n */)
+  void deallocate(pointer __p, size_type /* GC_n */) GC_NOEXCEPT
     { GC_FREE(__p); }
 
-  size_type max_size() const throw()
+  size_type max_size() const GC_NOEXCEPT
     { return size_t(-1) / sizeof(GC_Tp); }
 
   void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
@@ -314,13 +349,15 @@ class traceable_allocator<void> {
 
 
 template <class GC_T1, class GC_T2>
-inline bool operator==(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
+inline bool operator==(const traceable_allocator<GC_T1>&,
+                       const traceable_allocator<GC_T2>&) GC_NOEXCEPT
 {
   return true;
 }
 
 template <class GC_T1, class GC_T2>
-inline bool operator!=(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
+inline bool operator!=(const traceable_allocator<GC_T1>&,
+                       const traceable_allocator<GC_T2>&) GC_NOEXCEPT
 {
   return false;
 }

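For context, a minimal usage sketch of the allocators touched above, assuming only that gc.h/gc_allocator.h are on the include path and the program links against libgc; the Node element type is hypothetical.

// Hedged sketch: standard containers backed by the collector's allocators.
#include <vector>
#include "gc.h"
#include "gc_allocator.h"

struct Node { Node *next; };   // hypothetical pointer-containing payload

int main()
{
    GC_INIT();

    // Backing store is collectible and scanned, so Node* elements keep
    // their targets alive while the vector itself is reachable.
    std::vector<Node*, gc_allocator<Node*> > traced;

    // Backing store is uncollectible but still scanned; suitable when the
    // vector object itself lives outside the GC heap.
    std::vector<Node*, traceable_allocator<Node*> > off_heap;

    traced.push_back(static_cast<Node*>(GC_MALLOC(sizeof(Node))));
    off_heap.push_back(traced.back());
    return 0;
}
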
+ 5 - 4
blitz.mod/bdwgc/include/gc_config_macros.h

@@ -91,8 +91,8 @@
 #   define GC_HAIKU_THREADS
 # elif defined(__OpenBSD__)
 #   define GC_OPENBSD_THREADS
-# elif (defined(__FreeBSD__) && !defined(SN_TARGET_ORBIS)) \
-       || defined(__DragonFly__)
+# elif defined(__DragonFly__) || defined(__FreeBSD_kernel__) \
+       || (defined(__FreeBSD__) && !defined(SN_TARGET_ORBIS))
 #   define GC_FREEBSD_THREADS
 # elif defined(__NetBSD__)
 #   define GC_NETBSD_THREADS
@@ -332,8 +332,9 @@
 /* of compilers.                                                        */
 /* This may also be desirable if it is possible but expensive to        */
 /* retrieve the call chain.                                             */
-#if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
-     || defined(__FreeBSD__) || defined(__DragonFly__) \
+#if (defined(__linux__) || defined(__DragonFly__) || defined(__FreeBSD__) \
+     || defined(__FreeBSD_kernel__) || defined(__HAIKU__) \
+     || defined(__NetBSD__) || defined(__OpenBSD__) \
      || defined(HOST_ANDROID) || defined(__ANDROID__)) \
     && !defined(GC_CAN_SAVE_CALL_STACKS)
 # define GC_ADD_CALLER

+ 90 - 37
blitz.mod/bdwgc/include/gc_cpp.h

@@ -173,6 +173,34 @@ by UseGC.  GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
 # define GC_PLACEMENT_DELETE
 #endif
 
+#ifndef GC_NOEXCEPT
+# if defined(__DMC__) || (defined(__BORLANDC__) \
+        && (defined(_RWSTD_NO_EXCEPTIONS) || defined(_RWSTD_NO_EX_SPEC))) \
+     || (defined(_MSC_VER) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) \
+     || (defined(__WATCOMC__) && !defined(_CPPUNWIND))
+#   define GC_NOEXCEPT /* empty */
+#   ifndef GC_NEW_ABORTS_ON_OOM
+#     define GC_NEW_ABORTS_ON_OOM
+#   endif
+# elif __cplusplus >= 201103L
+#   define GC_NOEXCEPT noexcept
+# else
+#   define GC_NOEXCEPT throw()
+# endif
+#endif // !GC_NOEXCEPT
+
+#if defined(GC_NEW_ABORTS_ON_OOM) || defined(_LIBCPP_NO_EXCEPTIONS)
+# define GC_OP_NEW_OOM_CHECK(obj) \
+                do { if (!(obj)) GC_abort_on_oom(); } while (0)
+#elif defined(GC_INCLUDE_NEW)
+# include <new> // for bad_alloc
+# define GC_OP_NEW_OOM_CHECK(obj) if (obj) {} else throw std::bad_alloc()
+#else
+  // "new" header is not included, so bad_alloc cannot be thrown directly.
+  GC_API void GC_CALL GC_throw_bad_alloc();
+# define GC_OP_NEW_OOM_CHECK(obj) if (obj) {} else GC_throw_bad_alloc()
+#endif // !GC_NEW_ABORTS_ON_OOM && !GC_INCLUDE_NEW
+
 #ifdef GC_NAMESPACE
 namespace boehmgc
 {
@@ -200,25 +228,25 @@ class gc
 public:
   inline void* operator new(size_t size);
   inline void* operator new(size_t size, GCPlacement gcp);
-  inline void* operator new(size_t size, void* p);
+  inline void* operator new(size_t size, void* p) GC_NOEXCEPT;
     // Must be redefined here, since the other overloadings hide
     // the global definition.
-  inline void operator delete(void* obj);
+  inline void operator delete(void* obj) GC_NOEXCEPT;
 
 # ifdef GC_PLACEMENT_DELETE
-    inline void operator delete(void*, GCPlacement);
+    inline void operator delete(void*, GCPlacement) GC_NOEXCEPT;
       // Called if construction fails.
-    inline void operator delete(void*, void*);
+    inline void operator delete(void*, void*) GC_NOEXCEPT;
 # endif // GC_PLACEMENT_DELETE
 
 # ifdef GC_OPERATOR_NEW_ARRAY
     inline void* operator new[](size_t size);
     inline void* operator new[](size_t size, GCPlacement gcp);
-    inline void* operator new[](size_t size, void* p);
-    inline void operator delete[](void* obj);
+    inline void* operator new[](size_t size, void* p) GC_NOEXCEPT;
+    inline void operator delete[](void* obj) GC_NOEXCEPT;
 #   ifdef GC_PLACEMENT_DELETE
-      inline void operator delete[](void*, GCPlacement);
-      inline void operator delete[](void*, void*);
+      inline void operator delete[](void*, GCPlacement) GC_NOEXCEPT;
+      inline void operator delete[](void*, void*) GC_NOEXCEPT;
 #   endif
 # endif // GC_OPERATOR_NEW_ARRAY
 };
@@ -251,6 +279,9 @@ extern "C" {
   // Disable warning that "no matching operator delete found; memory will
   // not be freed if initialization throws an exception"
 # pragma warning(disable:4291)
+  // TODO: "non-member operator new or delete may not be declared inline"
+  // warning is disabled for now.
+# pragma warning(disable:4595)
 #endif
 
 inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
@@ -271,10 +302,11 @@ inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
 
 #ifdef GC_PLACEMENT_DELETE
   inline void operator delete(void*, GC_NS_QUALIFY(GCPlacement),
-                              GC_NS_QUALIFY(GCCleanUpFunc), void*);
+                              GC_NS_QUALIFY(GCCleanUpFunc),
+                              void*) GC_NOEXCEPT;
 #endif
 
-#ifdef _MSC_VER
+#if defined(_MSC_VER) || defined(__DMC__)
   // The following ensures that the system default operator new[] does not
   // get undefined, which is what seems to happen on VC++ 6 for some reason
   // if we define a multi-argument operator new[].
@@ -283,13 +315,15 @@ inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
   // Inlining done to avoid mix up of new and delete operators by VC++ 9 (due
   // to arbitrary ordering during linking).
 
-# if _MSC_VER > 1020
+# ifdef GC_OPERATOR_NEW_ARRAY
     inline void* operator new[](size_t size)
     {
-      return GC_MALLOC_UNCOLLECTABLE(size);
+      void* obj = GC_MALLOC_UNCOLLECTABLE(size);
+      GC_OP_NEW_OOM_CHECK(obj);
+      return obj;
     }
 
-    inline void operator delete[](void* obj)
+    inline void operator delete[](void* obj) GC_NOEXCEPT
     {
       GC_FREE(obj);
     }
@@ -297,10 +331,12 @@ inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
 
   inline void* operator new(size_t size)
   {
-    return GC_MALLOC_UNCOLLECTABLE(size);
+    void* obj = GC_MALLOC_UNCOLLECTABLE(size);
+    GC_OP_NEW_OOM_CHECK(obj);
+    return obj;
   }
 
-  inline void operator delete(void* obj)
+  inline void operator delete(void* obj) GC_NOEXCEPT
   {
     GC_FREE(obj);
   }
@@ -310,17 +346,21 @@ inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
     inline void* operator new(size_t size, int /* nBlockUse */,
                               const char* szFileName, int nLine)
     {
-      return GC_debug_malloc_uncollectable(size, szFileName, nLine);
+      void* obj = GC_debug_malloc_uncollectable(size, szFileName, nLine);
+      GC_OP_NEW_OOM_CHECK(obj);
+      return obj;
     }
 # else
     inline void* operator new(size_t size, int /* nBlockUse */,
                               const char* /* szFileName */, int /* nLine */)
     {
-      return GC_malloc_uncollectable(size);
+      void* obj = GC_malloc_uncollectable(size);
+      GC_OP_NEW_OOM_CHECK(obj);
+      return obj;
     }
 # endif /* !GC_DEBUG */
 
-# if _MSC_VER > 1020
+# ifdef GC_OPERATOR_NEW_ARRAY
     // This new operator is used by VC++ 7+ in Debug builds:
     inline void* operator new[](size_t size, int nBlockUse,
                                 const char* szFileName, int nLine)
@@ -347,40 +387,48 @@ namespace boehmgc
 
 inline void* gc::operator new(size_t size)
 {
-  return GC_MALLOC(size);
+  void* obj = GC_MALLOC(size);
+  GC_OP_NEW_OOM_CHECK(obj);
+  return obj;
 }
 
 inline void* gc::operator new(size_t size, GCPlacement gcp)
 {
+  void* obj;
   switch (gcp) {
   case UseGC:
-    return GC_MALLOC(size);
+    obj = GC_MALLOC(size);
+    break;
   case PointerFreeGC:
-    return GC_MALLOC_ATOMIC(size);
+    obj = GC_MALLOC_ATOMIC(size);
+    break;
 # ifdef GC_ATOMIC_UNCOLLECTABLE
     case PointerFreeNoGC:
-      return GC_MALLOC_ATOMIC_UNCOLLECTABLE(size);
+      obj = GC_MALLOC_ATOMIC_UNCOLLECTABLE(size);
+      break;
 # endif
   case NoGC:
   default:
-    return GC_MALLOC_UNCOLLECTABLE(size);
+    obj = GC_MALLOC_UNCOLLECTABLE(size);
   }
+  GC_OP_NEW_OOM_CHECK(obj);
+  return obj;
 }
 
-inline void* gc::operator new(size_t /* size */, void* p)
+inline void* gc::operator new(size_t /* size */, void* p) GC_NOEXCEPT
 {
   return p;
 }
 
-inline void gc::operator delete(void* obj)
+inline void gc::operator delete(void* obj) GC_NOEXCEPT
 {
   GC_FREE(obj);
 }
 
 #ifdef GC_PLACEMENT_DELETE
-  inline void gc::operator delete(void*, void*) {}
+  inline void gc::operator delete(void*, void*) GC_NOEXCEPT {}
 
-  inline void gc::operator delete(void* p, GCPlacement /* gcp */)
+  inline void gc::operator delete(void* p, GCPlacement /* gcp */) GC_NOEXCEPT
   {
     GC_FREE(p);
   }
@@ -397,20 +445,21 @@ inline void gc::operator delete(void* obj)
     return gc::operator new(size, gcp);
   }
 
-  inline void* gc::operator new[](size_t /* size */, void* p)
+  inline void* gc::operator new[](size_t /* size */, void* p) GC_NOEXCEPT
   {
     return p;
   }
 
-  inline void gc::operator delete[](void* obj)
+  inline void gc::operator delete[](void* obj) GC_NOEXCEPT
   {
     gc::operator delete(obj);
   }
 
 # ifdef GC_PLACEMENT_DELETE
-    inline void gc::operator delete[](void*, void*) {}
+    inline void gc::operator delete[](void*, void*) GC_NOEXCEPT {}
 
-    inline void gc::operator delete[](void* p, GCPlacement /* gcp */)
+    inline void gc::operator delete[](void* p,
+                                      GCPlacement /* gcp */) GC_NOEXCEPT
     {
       gc::operator delete(p);
     }
@@ -458,26 +507,30 @@ inline void* operator new(size_t size, GC_NS_QUALIFY(GCPlacement) gcp,
   switch (gcp) {
   case GC_NS_QUALIFY(UseGC):
     obj = GC_MALLOC(size);
-    if (cleanup != 0) {
+    if (cleanup != 0 && obj != 0) {
       GC_REGISTER_FINALIZER_IGNORE_SELF(obj, cleanup, clientData, 0, 0);
     }
-    return obj;
+    break;
   case GC_NS_QUALIFY(PointerFreeGC):
-    return GC_MALLOC_ATOMIC(size);
+    obj = GC_MALLOC_ATOMIC(size);
+    break;
 # ifdef GC_ATOMIC_UNCOLLECTABLE
     case GC_NS_QUALIFY(PointerFreeNoGC):
-      return GC_MALLOC_ATOMIC_UNCOLLECTABLE(size);
+      obj = GC_MALLOC_ATOMIC_UNCOLLECTABLE(size);
+      break;
 # endif
   case GC_NS_QUALIFY(NoGC):
   default:
-    return GC_MALLOC_UNCOLLECTABLE(size);
+    obj = GC_MALLOC_UNCOLLECTABLE(size);
   }
+  GC_OP_NEW_OOM_CHECK(obj);
+  return obj;
 }
 
 #ifdef GC_PLACEMENT_DELETE
   inline void operator delete(void* p, GC_NS_QUALIFY(GCPlacement) /* gcp */,
                               GC_NS_QUALIFY(GCCleanUpFunc) /* cleanup */,
-                              void* /* clientData */)
+                              void* /* clientData */) GC_NOEXCEPT
   {
     GC_FREE(p);
   }

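For context, a short sketch of client code against gc_cpp.h under the new out-of-memory behaviour (operator new now throws std::bad_alloc or aborts rather than returning NULL). Class names are illustrative; it assumes GC_NAMESPACE is not defined and that the program links against libgc and libgccpp.

// Hedged sketch of gc_cpp.h usage.
#include "gc_cpp.h"

class Point : public gc {            // collectible, no cleanup
public:
    int x, y;
    Point() : x(0), y(0) {}
};

class Handle : public gc_cleanup {   // destructor runs when reclaimed
public:
    ~Handle() { /* release a non-GC resource here */ }
};

int main()
{
    Point  *p = new Point;                  // GC_MALLOC under the hood
    Point  *q = new (PointerFreeGC) Point;  // atomic: object is not scanned
    Handle *h = new Handle;                 // finalizer registered for ~Handle
    (void)p; (void)q; (void)h;
    return 0;   // unreachable objects are reclaimed by later collections
}
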
+ 28 - 12
blitz.mod/bdwgc/include/gc_inline.h

@@ -83,6 +83,15 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
 # define GC_malloc_kind_global GC_malloc_kind
 #endif
 
+/* An internal macro to update the free list pointer atomically (if     */
+/* the AO primitives are available) to avoid race with the marker.      */
+#if defined(GC_THREADS) && defined(AO_HAVE_store)
+# define GC_FAST_M_AO_STORE(my_fl, next) \
+                AO_store((volatile AO_t *)(my_fl), (AO_t)(next))
+#else
+# define GC_FAST_M_AO_STORE(my_fl, next) (void)(*(my_fl) = (next))
+#endif
+
 /* The ultimately general inline allocation macro.  Allocate an object  */
 /* of size granules, putting the resulting pointer in result.  Tiny_fl  */
 /* is a "tiny" free list array, which will be used first, if the size   */
@@ -116,9 +125,13 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
                           > (num_direct) + GC_TINY_FREELISTS + 1, 1)) { \
                 next = *(void **)(my_entry); \
                 result = (void *)my_entry; \
-                *my_fl = next; \
+                GC_FAST_M_AO_STORE(my_fl, next); \
                 init; \
                 GC_PREFETCH_FOR_WRITE(next); \
+                if ((kind) != GC_I_PTRFREE) { \
+                    GC_end_stubborn_change(my_fl); \
+                    GC_reachable_here(next); \
+                } \
                 GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
                 GC_ASSERT((kind) == GC_I_PTRFREE \
                           || ((GC_word *)result)[1] == 0); \
@@ -129,7 +142,8 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
                     /* (GC_word)my_entry <= (num_direct) */ \
                     && my_entry != NULL) { \
                 /* Small counter value, not NULL */ \
-                *my_fl = (char *)my_entry + (granules) + 1; \
+                GC_FAST_M_AO_STORE(my_fl, (char *)my_entry \
+                                          + (granules) + 1); \
                 result = (default_expr); \
                 break; \
             } else { \
@@ -157,11 +171,11 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
 /* the caller is responsible for supplying a cleared tiny_fl            */
 /* free list array.  For single-threaded applications, this may be      */
 /* a global array.                                                      */
-# define GC_MALLOC_WORDS_KIND(result,n,tiny_fl,k,init) \
+# define GC_MALLOC_WORDS_KIND(result,n,tiny_fl,kind,init) \
     do { \
-      size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
-      GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, k, \
-                           GC_malloc_kind(grans * GC_GRANULE_BYTES, k), \
+      size_t granules = GC_WORDS_TO_WHOLE_GRANULES(n); \
+      GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, 0, kind, \
+                           GC_malloc_kind(granules*GC_GRANULE_BYTES, kind), \
                            init); \
     } while (0)
 
@@ -175,12 +189,14 @@ GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
 /* And once more for two word initialized objects: */
 # define GC_CONS(result, first, second, tiny_fl) \
     do { \
-      size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
-      GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, GC_I_NORMAL, \
-                           GC_malloc_kind(grans * GC_GRANULE_BYTES, \
-                                          GC_I_NORMAL), \
-                           *(void **)(result) = (void *)(first)); \
-      ((void **)(result))[1] = (void *)(second); \
+      void *l = (void *)(first); \
+      void *r = (void *)(second); \
+      GC_MALLOC_WORDS_KIND(result, 2, tiny_fl, GC_I_NORMAL, (void)0); \
+      if ((result) != NULL) { \
+        *(void **)(result) = l; \
+        GC_PTR_STORE_AND_DIRTY((void **)(result) + 1, r); \
+        GC_reachable_here(l); \
+      } \
     } while (0)
 
 GC_API void GC_CALL GC_print_free_list(int /* kind */,

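For context, a single-threaded sketch of the inline allocation macros adjusted above; the tiny free-list array must start out zeroed and, in threaded builds, would normally be per-thread rather than global.

// Hedged sketch of GC_CONS / GC_MALLOC_WORDS usage.
#include "gc.h"
#include "gc_inline.h"

static void *tiny_fl[GC_TINY_FREELISTS] = { 0 };

int main()
{
    void *pair = 0;
    void *words = 0;

    GC_INIT();
    void *car = GC_MALLOC_ATOMIC(8);
    void *cdr = GC_MALLOC_ATOMIC(8);

    // Two-word object with both pointer fields initialized (and, with the
    // updated macro, properly dirtied for incremental collection).
    GC_CONS(pair, car, cdr, tiny_fl);

    // Four-word pointer-containing object; word 0 is cleared by the macro.
    GC_MALLOC_WORDS(words, 4, tiny_fl);
    return (pair != 0 && words != 0) ? 0 : 1;
}
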
+ 1 - 1
blitz.mod/bdwgc/include/gc_mark.h

@@ -184,7 +184,7 @@ GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc);
 GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc);
 
 /* Allocate an object of a given kind.  By default, there are only      */
-/* a few kinds: composite (pointer-free), atomic, uncollectible, etc.   */
+/* a few kinds: composite (pointerful), atomic, uncollectible, etc.     */
 /* We claim it is possible for clever client code that understands the  */
 /* GC internals to add more, e.g. to communicate object layout          */
 /* information to the collector.  Note that in the multi-threaded       */

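For context, a hedged sketch of adding a client-defined object kind, as the comment above describes; it assumes the usual public entry points GC_new_free_list, GC_new_kind and GC_generic_malloc declared in gc_mark.h.

// Hedged sketch: a custom kind scanned like a normal (pointerful) object
// but allocated from its own free lists.
#include "gc.h"
#include "gc_mark.h"

int main()
{
    GC_INIT();

    // GC_DS_LENGTH with add_size_to_descriptor != 0 means "scan the whole
    // object"; clear_new_objects != 0 zero-fills fresh allocations.
    int my_kind = (int)GC_new_kind(GC_new_free_list(),
                                   GC_DS_LENGTH, 1 /* add size */,
                                   1 /* clear */);

    void *obj = GC_generic_malloc(64, my_kind);
    return obj != NULL ? 0 : 1;
}
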
+ 2 - 4
blitz.mod/bdwgc/include/include.am

@@ -14,10 +14,8 @@
 #
 pkginclude_HEADERS += \
         include/gc.h \
-        include/gc_allocator.h \
         include/gc_backptr.h \
         include/gc_config_macros.h \
-        include/gc_disclaim.h \
         include/gc_gcj.h \
         include/gc_inline.h \
         include/gc_mark.h \
@@ -26,8 +24,7 @@ pkginclude_HEADERS += \
         include/gc_typed.h \
         include/gc_version.h \
         include/javaxfc.h \
-        include/leak_detector.h \
-        include/weakpointer.h
+        include/leak_detector.h
 
 # headers which are not installed
 #
@@ -35,6 +32,7 @@ dist_noinst_HEADERS += \
         include/cord.h \
         include/cord_pos.h \
         include/ec.h \
+        include/gc_alloc_ptrs.h \
         include/new_gc_alloc.h \
         include/private/darwin_semaphore.h \
         include/private/darwin_stop_world.h \

+ 60 - 28
blitz.mod/bdwgc/include/new_gc_alloc.h

@@ -72,22 +72,13 @@
 #include <stddef.h>
 #include <string.h>
 
-// The following need to match collector data structures.
 // We can't include gc_priv.h, since that pulls in way too much stuff.
-// This should eventually be factored out into another include file.
+#include "gc_alloc_ptrs.h"
 
-extern "C" {
-    GC_API void ** const GC_objfreelist_ptr;
-    GC_API void ** const GC_aobjfreelist_ptr;
-    GC_API void ** const GC_uobjfreelist_ptr;
-    GC_API void ** const GC_auobjfreelist_ptr;
+#define GC_generic_malloc_words_small(lw, k) \
+                        GC_generic_malloc((lw) * sizeof(GC_word), k)
 
-    GC_API void GC_CALL GC_incr_bytes_allocd(size_t bytes);
-    GC_API void GC_CALL GC_incr_bytes_freed(size_t bytes);
-
-    GC_API char * GC_CALL GC_generic_malloc_words_small(size_t word, int kind);
-                /* FIXME: Doesn't exist anymore.        */
-}
+#define GC_ALLOCATOR_THROW_OR_ABORT() GC_abort_on_oom()
 
 // Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
 // AUNCOLLECTABLE in gc_priv.h.
@@ -159,6 +150,10 @@ size_t GC_aux_template<dummy>::GC_uncollectable_bytes_recently_freed = 0;
 template <int dummy>
 void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
 {
+    void * op = GC_generic_malloc_words_small(nwords, kind);
+    if (0 == op)
+        GC_ALLOCATOR_THROW_OR_ABORT();
+
     GC_bytes_recently_allocd += GC_uncollectable_bytes_recently_allocd;
     GC_non_gc_bytes +=
                 GC_uncollectable_bytes_recently_allocd;
@@ -173,8 +168,7 @@ void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
 
     GC_incr_bytes_freed(GC_bytes_recently_freed);
     GC_bytes_recently_freed = 0;
-
-    return GC_generic_malloc_words_small(nwords, kind);
+    return op;
 }
 
 typedef GC_aux_template<0> GC_aux;
@@ -192,7 +186,12 @@ class single_client_gc_alloc_template {
             void ** flh;
             void * op;
 
-            if (n > GC_max_fast_bytes) return GC_malloc(n);
+            if (n > GC_max_fast_bytes) {
+                op = GC_malloc(n);
+                if (0 == op)
+                    GC_ALLOCATOR_THROW_OR_ABORT();
+                return op;
+            }
             flh = &GC_objfreelist_ptr[nwords];
             op = *flh;
             if (0 == op) {
@@ -208,7 +207,12 @@ class single_client_gc_alloc_template {
             void ** flh;
             void * op;
 
-            if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
+            if (n > GC_max_fast_bytes) {
+                op = GC_malloc_atomic(n);
+                if (0 == op)
+                    GC_ALLOCATOR_THROW_OR_ABORT();
+                return op;
+            }
             flh = &GC_aobjfreelist_ptr[nwords];
             op = *flh;
             if (0 == op) {
@@ -260,7 +264,12 @@ class single_client_traceable_alloc_template {
             void ** flh;
             void * op;
 
-            if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
+            if (n > GC_max_fast_bytes) {
+                op = GC_malloc_uncollectable(n);
+                if (0 == op)
+                    GC_ALLOCATOR_THROW_OR_ABORT();
+                return op;
+            }
             flh = &GC_uobjfreelist_ptr[nwords];
             op = *flh;
             if (0 == op) {
@@ -277,7 +286,12 @@ class single_client_traceable_alloc_template {
             void ** flh;
             void * op;
 
-            if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
+            if (n > GC_max_fast_bytes) {
+                op = GC_malloc_atomic_uncollectable(n);
+                if (0 == op)
+                    GC_ALLOCATOR_THROW_OR_ABORT();
+                return op;
+            }
             flh = &GC_auobjfreelist_ptr[nwords];
             op = *flh;
             if (0 == op) {
@@ -323,9 +337,18 @@ typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
 template < int dummy >
 class gc_alloc_template {
     public:
-        static void * allocate(size_t n) { return GC_malloc(n); }
-        static void * ptr_free_allocate(size_t n)
-                { return GC_malloc_atomic(n); }
+        static void * allocate(size_t n) {
+            void * op = GC_malloc(n);
+            if (0 == op)
+                GC_ALLOCATOR_THROW_OR_ABORT();
+            return op;
+        }
+        static void * ptr_free_allocate(size_t n) {
+            void * op = GC_malloc_atomic(n);
+            if (0 == op)
+                GC_ALLOCATOR_THROW_OR_ABORT();
+            return op;
+        }
         static void deallocate(void *, size_t) { }
         static void ptr_free_deallocate(void *, size_t) { }
 };
@@ -335,9 +358,18 @@ typedef gc_alloc_template < 0 > gc_alloc;
 template < int dummy >
 class traceable_alloc_template {
     public:
-        static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
-        static void * ptr_free_allocate(size_t n)
-                { return GC_malloc_atomic_uncollectable(n); }
+        static void * allocate(size_t n) {
+            void * op = GC_malloc_uncollectable(n);
+            if (0 == op)
+                GC_ALLOCATOR_THROW_OR_ABORT();
+            return op;
+        }
+        static void * ptr_free_allocate(size_t n) {
+            void * op = GC_malloc_atomic_uncollectable(n);
+            if (0 == op)
+                GC_ALLOCATOR_THROW_OR_ABORT();
+            return op;
+        }
         static void deallocate(void *p, size_t) { GC_free(p); }
         static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
 };
@@ -354,12 +386,12 @@ typedef traceable_alloc_template < 0 > traceable_alloc;
   class simple_alloc<T, alloc> { \
   public: \
     static T *allocate(size_t n) \
-        { return 0 == n? 0 : \
-            reinterpret_cast<T*>(alloc::ptr_free_allocate(n * sizeof(T))); } \
+        { return reinterpret_cast<T*>(alloc::ptr_free_allocate(0 == n ? 1 \
+                                                           : n * sizeof(T))); } \
+        { return reinterpret_cast<T*>(alloc::ptr_free_allocate(0 == n ? 1 \
+                                                           : n * sizeof(T))); } \
     static T *allocate(void) \
         { return reinterpret_cast<T*>(alloc::ptr_free_allocate(sizeof(T))); } \
     static void deallocate(T *p, size_t n) \
-        { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof(T)); } \
+        { alloc::ptr_free_deallocate(p, 0 == n ? 1 : n * sizeof(T)); } \
     static void deallocate(T *p) \
         { alloc::ptr_free_deallocate(p, sizeof(T)); } \
   };

+ 5 - 6
blitz.mod/bdwgc/include/private/darwin_stop_world.h

@@ -25,9 +25,7 @@
 #include <mach/mach.h>
 #include <mach/thread_act.h>
 
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 struct thread_stop_info {
   mach_port_t mach_thread;
@@ -41,14 +39,15 @@ struct thread_stop_info {
 #ifdef MPROTECT_VDB
   GC_INNER void GC_mprotect_stop(void);
   GC_INNER void GC_mprotect_resume(void);
+# ifndef GC_NO_THREADS_DISCOVERY
+    GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread);
+# endif
 #endif
 
 #if defined(PARALLEL_MARK) && !defined(GC_NO_THREADS_DISCOVERY)
   GC_INNER GC_bool GC_is_mach_marker(thread_act_t);
 #endif
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif

+ 15 - 11
blitz.mod/bdwgc/include/private/dbg_mlc.h

@@ -30,9 +30,7 @@
 # include "gc_backptr.h"
 #endif
 
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 #if CPP_WORDSZ == 32
 # define START_FLAG (word)0xfedcedcb
@@ -125,8 +123,7 @@ typedef struct {
 #define SIMPLE_ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
 
 /* ADD_CALL_CHAIN stores a (partial) call chain into an object  */
-/* header.  It may be called with or without the allocation     */
-/* lock.                                                        */
+/* header; it should be called with the allocation lock held.   */
 /* PRINT_CALL_CHAIN prints the call chain stored in an object   */
 /* to stderr.  It requires that we do not hold the lock.        */
 #if defined(SAVE_CALL_CHAIN)
@@ -165,14 +162,21 @@ typedef struct {
 #   error Non-ptr stored in object results in GC_HAS_DEBUG_INFO malfunction
     /* We may mistakenly conclude that p has a debugging wrapper.       */
 # endif
-# define GC_HAS_DEBUG_INFO(p) \
-        ((*((word *)p) & 1) && GC_has_other_debug_info(p) > 0)
+# if defined(PARALLEL_MARK) && defined(KEEP_BACK_PTRS)
+#   define GC_HAS_DEBUG_INFO(p) \
+                ((AO_load((volatile AO_t *)(p)) & 1) != 0 \
+                 && GC_has_other_debug_info(p) > 0)
+                        /* Atomic load is used as GC_store_back_pointer */
+                        /* stores oh_back_ptr atomically (p might point */
+                        /* to the field); this prevents a TSan warning. */
+# else
+#   define GC_HAS_DEBUG_INFO(p) \
+                ((*(word *)(p) & 1) && GC_has_other_debug_info(p) > 0)
+# endif
 #else
 # define GC_HAS_DEBUG_INFO(p) (GC_has_other_debug_info(p) > 0)
-#endif
+#endif /* !KEEP_BACK_PTRS && !MAKE_BACK_GRAPH */
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* _DBG_MLC_H */

+ 12 - 0
blitz.mod/bdwgc/include/private/gc_atomic_ops.h

@@ -61,15 +61,23 @@
 # define AO_HAVE_or
 
 # define AO_load(p) __atomic_load_n(p, __ATOMIC_RELAXED)
+# define AO_HAVE_load
 # define AO_load_acquire(p) __atomic_load_n(p, __ATOMIC_ACQUIRE)
 # define AO_HAVE_load_acquire
 # define AO_load_acquire_read(p) AO_load_acquire(p)
 # define AO_HAVE_load_acquire_read
 
 # define AO_store(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+# define AO_HAVE_store
 # define AO_store_release(p, v) __atomic_store_n(p, v, __ATOMIC_RELEASE)
 # define AO_HAVE_store_release
 # define AO_store_release_write(p, v) AO_store_release(p, v)
+# define AO_HAVE_store_release_write
+
+# define AO_char_load(p) __atomic_load_n(p, __ATOMIC_RELAXED)
+# define AO_HAVE_char_load
+# define AO_char_store(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+# define AO_HAVE_char_store
 
 # ifdef AO_REQUIRE_CAS
     AO_INLINE int
@@ -101,6 +109,10 @@
   /* only if AO_REQUIRE_CAS is defined (or if the corresponding         */
   /* AO_HAVE_x macro is defined).  x86/x64 targets have AO_nop_full,    */
   /* AO_load_acquire, AO_store_release, at least.                       */
+# if !defined(AO_HAVE_load) || !defined(AO_HAVE_store)
+#   error AO_load or AO_store is missing; probably old version of atomic_ops
+# endif
+
 #endif /* !GC_BUILTIN_ATOMIC */
 
 #endif /* GC_ATOMIC_OPS_H */

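For context, what the newly advertised AO_char_load/AO_char_store shims expand to when the GCC/Clang __atomic builtins are used (relaxed, i.e. unordered, accesses); the flag name below is illustrative.

// Hedged illustration of the relaxed byte-sized atomic shims.
static volatile unsigned char collecting_flag;

static void set_flag(void)
{
    // Equivalent of AO_char_store(&collecting_flag, 1):
    __atomic_store_n(&collecting_flag, (unsigned char)1, __ATOMIC_RELAXED);
}

static int flag_is_set(void)
{
    // Equivalent of AO_char_load(&collecting_flag):
    return __atomic_load_n(&collecting_flag, __ATOMIC_RELAXED) != 0;
}

int main()
{
    set_flag();
    return flag_is_set() ? 0 : 1;
}
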
+ 10 - 15
blitz.mod/bdwgc/include/private/gc_hdrs.h

@@ -15,16 +15,14 @@
 #ifndef GC_HEADERS_H
 #define GC_HEADERS_H
 
-#ifdef __cplusplus
-  extern "C" {
-#endif
-
-typedef struct hblkhdr hdr;
-
 #if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
 # error Get a real machine
 #endif
 
+EXTERN_C_BEGIN
+
+typedef struct hblkhdr hdr;
+
 /*
  * The 2 level tree data structure that is used to find block headers.
  * If there are more than 32 bits in a pointer, the top level is a hash
@@ -176,28 +174,27 @@ typedef struct bi {
   /* Set bottom_indx to point to the bottom index for address p */
 # define GET_BI(p, bottom_indx) \
         do { \
-          register word hi = \
-              (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
-          register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+          REGISTER word hi = (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+          REGISTER bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
           while (_bi -> key != hi && _bi != GC_all_nils) \
               _bi = _bi -> hash_link; \
           (bottom_indx) = _bi; \
         } while (0)
 # define GET_HDR_ADDR(p, ha) \
         do { \
-          register bottom_index * bi; \
+          REGISTER bottom_index * bi; \
           GET_BI(p, bi); \
           (ha) = &HDR_FROM_BI(bi, p); \
         } while (0)
 # define GET_HDR(p, hhdr) \
         do { \
-          register hdr ** _ha; \
+          REGISTER hdr ** _ha; \
           GET_HDR_ADDR(p, _ha); \
           (hhdr) = *_ha; \
         } while (0)
 # define SET_HDR(p, hhdr) \
         do { \
-          register hdr ** _ha; \
+          REGISTER hdr ** _ha; \
           GET_HDR_ADDR(p, _ha); \
           *_ha = (hhdr); \
         } while (0)
@@ -212,8 +209,6 @@ typedef struct bi {
 /* h.  Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr).             */
 #define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* GC_HEADERS_H */

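For context, a standalone toy model (not collector code) of the two-level lookup that GET_BI/GET_HDR implement: the top level is a hash table keyed on the high address bits, and each entry chains bottom_index pages that map individual heap blocks to their headers. All sizes below are illustrative.

// Hedged toy model of the block-header lookup.
#include <stdint.h>
#include <stddef.h>

#define LOG_HBLKSIZE  12u          /* 4 KiB heap blocks (example value)  */
#define LOG_BOTTOM_SZ 10u          /* heap blocks per bottom_index       */
#define TOP_SZ        (1u << 11)   /* top-level hash table size          */

struct hdr_s;                      /* opaque block header                */

struct bottom_index {
    uintptr_t            key;        /* high address bits covered        */
    struct bottom_index *hash_link;  /* collision chain                  */
    struct hdr_s        *index[1u << LOG_BOTTOM_SZ];
};

static struct bottom_index *top_index[TOP_SZ];
static struct bottom_index  all_nils;        /* shared "empty" sentinel  */

static struct hdr_s *lookup_hdr(void *p)
{
    uintptr_t addr = (uintptr_t)p;
    uintptr_t hi = addr >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
    struct bottom_index *bi = top_index[hi & (TOP_SZ - 1)];

    while (bi->key != hi && bi != &all_nils)
        bi = bi->hash_link;                  /* walk the hash chain      */
    return bi->index[(addr >> LOG_HBLKSIZE) & ((1u << LOG_BOTTOM_SZ) - 1)];
}

int main()
{
    for (unsigned i = 0; i < TOP_SZ; ++i)
        top_index[i] = &all_nils;            /* empty table everywhere   */
    return lookup_hdr((void *)0x12345000) == NULL ? 0 : 1;
}
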
+ 17 - 37
blitz.mod/bdwgc/include/private/gc_locks.h

@@ -28,19 +28,12 @@
  */
 # ifdef THREADS
 
-#  if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) \
-      && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
-#    include "gc_atomic_ops.h"
-#  endif
-
 #  ifdef PCR
 #    include <base/PCR_Base.h>
 #    include <th/PCR_Th.h>
 #  endif
 
-#  ifdef __cplusplus
-     extern "C" {
-#  endif
+   EXTERN_C_BEGIN
 
 #  ifdef PCR
      GC_EXTERN PCR_Th_ML GC_allocate_ml;
@@ -62,17 +55,13 @@
 #  endif
 
 #  if defined(GC_WIN32_THREADS) && !defined(USE_PTHREAD_LOCKS)
-#    ifdef __cplusplus
-       } /* extern "C" */
-#    endif
 #    ifndef WIN32_LEAN_AND_MEAN
 #      define WIN32_LEAN_AND_MEAN 1
 #    endif
 #    define NOSERVICE
+     EXTERN_C_END
 #    include <windows.h>
-#    ifdef __cplusplus
-       extern "C" {
-#    endif
+     EXTERN_C_BEGIN
 #    define NO_THREAD (DWORD)(-1)
      GC_EXTERN CRITICAL_SECTION GC_allocate_ml;
 #    ifdef GC_ASSERTIONS
@@ -99,13 +88,9 @@
 #      define UNCOND_UNLOCK() LeaveCriticalSection(&GC_allocate_ml)
 #    endif /* !GC_ASSERTIONS */
 #  elif defined(GC_PTHREADS)
-#    ifdef __cplusplus
-       } /* extern "C" */
-#    endif
+     EXTERN_C_END
 #    include <pthread.h>
-#    ifdef __cplusplus
-       extern "C" {
-#    endif
+     EXTERN_C_BEGIN
      /* Posix allows pthread_t to be a struct, though it rarely is.     */
      /* Unfortunately, we need to use a pthread_t to index a data       */
      /* structure.  It also helps if comparisons don't involve a        */
@@ -141,13 +126,9 @@
                 /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
 
 #    ifdef SN_TARGET_PSP2
-#      ifdef __cplusplus
-         } /* extern "C" */
-#      endif
+       EXTERN_C_END
 #      include "psp2-support.h"
-#      ifdef __cplusplus
-         extern "C" {
-#      endif
+       EXTERN_C_BEGIN
        GC_EXTERN WapiMutex GC_allocate_ml_PSP2;
 #      define UNCOND_LOCK() { int res; GC_ASSERT(I_DONT_HOLD_LOCK()); \
                               res = PSP2_MutexLock(&GC_allocate_ml_PSP2); \
@@ -191,13 +172,9 @@
 #      endif
 #    endif /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
 #    ifdef USE_PTHREAD_LOCKS
-#      ifdef __cplusplus
-         } /* extern "C" */
-#      endif
+       EXTERN_C_END
 #      include <pthread.h>
-#      ifdef __cplusplus
-         extern "C" {
-#      endif
+       EXTERN_C_BEGIN
        GC_EXTERN pthread_mutex_t GC_allocate_ml;
 #      ifdef GC_ASSERTIONS
 #        define UNCOND_LOCK() { GC_ASSERT(I_DONT_HOLD_LOCK()); \
@@ -234,8 +211,13 @@
 #    endif /* GC_ASSERTIONS */
 #    ifndef GC_WIN32_THREADS
        GC_EXTERN volatile GC_bool GC_collecting;
-#      define ENTER_GC() (void)(GC_collecting = TRUE)
-#      define EXIT_GC() (void)(GC_collecting = FALSE)
+#      ifdef AO_HAVE_char_store
+#        define ENTER_GC() AO_char_store((unsigned char*)&GC_collecting, TRUE)
+#        define EXIT_GC() AO_char_store((unsigned char*)&GC_collecting, FALSE)
+#      else
+#        define ENTER_GC() (void)(GC_collecting = TRUE)
+#        define EXIT_GC() (void)(GC_collecting = FALSE)
+#      endif
 #    endif
      GC_INNER void GC_lock(void);
 #  endif /* GC_PTHREADS */
@@ -263,9 +245,7 @@
 #    endif
 #  endif
 
-#  ifdef __cplusplus
-     } /* extern "C" */
-#  endif
+   EXTERN_C_END
 
 # else /* !THREADS */
 #   define LOCK() (void)0

+ 67 - 40
blitz.mod/bdwgc/include/private/gc_pmark.h

@@ -48,9 +48,7 @@
 # include "gc_priv.h"
 #endif
 
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 /* The real declarations of the following is in gc_priv.h, so that      */
 /* we can avoid scanning the following table.                           */
@@ -123,20 +121,23 @@ GC_EXTERN size_t GC_mark_stack_size;
 GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
 
 /* Push the object obj with corresponding heap block header hhdr onto   */
-/* the mark stack.                                                      */
-#define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
-  do { \
-    register word _descr = (hhdr) -> hb_descr; \
-    GC_ASSERT(!HBLK_IS_FREE(hhdr)); \
-    if (_descr != 0) { \
-        mark_stack_top++; \
-        if ((word)mark_stack_top >= (word)(mark_stack_limit)) { \
-          mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
-        } \
-        mark_stack_top -> mse_start = (obj); \
-        mark_stack_top -> mse_descr.w = _descr; \
-    } \
-  } while (0)
+/* the mark stack.  Returns the updated mark_stack_top value.           */
+GC_INLINE mse * GC_push_obj(ptr_t obj, hdr * hhdr,  mse * mark_stack_top,
+                            mse * mark_stack_limit)
+{
+  word descr = hhdr -> hb_descr;
+
+  GC_ASSERT(!HBLK_IS_FREE(hhdr));
+  if (descr != 0) {
+    mark_stack_top++;
+    if ((word)mark_stack_top >= (word)mark_stack_limit) {
+      mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
+    }
+    mark_stack_top -> mse_start = obj;
+    mark_stack_top -> mse_descr.w = descr;
+  }
+  return mark_stack_top;
+}
 
 /* Push the contents of current onto the mark stack if it is a valid    */
 /* ptr to a currently unmarked object.  Mark it.                        */
@@ -150,21 +151,45 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
 
 /* Set mark bit, exit (using "break" statement) if it is already set.   */
 #ifdef USE_MARK_BYTES
-  /* There is a race here, and we may set                               */
-  /* the bit twice in the concurrent case.  This can result in the      */
-  /* object being pushed twice.  But that's only a performance issue.   */
-# define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
-    { /* cannot use do-while(0) here */ \
-        char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
+# if defined(PARALLEL_MARK) && defined(AO_HAVE_char_store) \
+     && !defined(AO_USE_PTHREAD_DEFS)
+    /* There is a race here, and we may set the bit twice in the        */
+    /* concurrent case.  This can result in the object being pushed     */
+    /* twice.  But that is only a performance issue.                    */
+#   define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
+      { /* cannot use do-while(0) here */ \
+        volatile unsigned char * mark_byte_addr = \
+                        (unsigned char *)(hhdr)->hb_marks + (bit_no); \
+        /* Unordered atomic load and store are sufficient here. */ \
+        if (AO_char_load(mark_byte_addr) != 0) \
+          break; /* go to the enclosing loop end */ \
+        AO_char_store(mark_byte_addr, 1); \
+      }
+# else
+#   define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
+      { /* cannot use do-while(0) here */ \
+        char * mark_byte_addr = (char *)(hhdr)->hb_marks + (bit_no); \
         if (*mark_byte_addr != 0) break; /* go to the enclosing loop end */ \
         *mark_byte_addr = 1; \
-    }
+      }
+# endif /* !PARALLEL_MARK */
 #else
 # ifdef PARALLEL_MARK
     /* This is used only if we explicitly set USE_MARK_BITS.            */
     /* The following may fail to exit even if the bit was already set.  */
     /* For our uses, that's benign:                                     */
-#   define OR_WORD_EXIT_IF_SET(addr, bits) \
+#   ifdef THREAD_SANITIZER
+#     define OR_WORD_EXIT_IF_SET(addr, bits) \
+        { /* cannot use do-while(0) here */ \
+          if (!((word)AO_load((volatile AO_t *)(addr)) & (bits))) { \
+                /* Atomic load is just to avoid TSan false positive. */ \
+            AO_or((volatile AO_t *)(addr), (AO_t)(bits)); \
+          } else { \
+            break; /* go to the enclosing loop end */ \
+          } \
+        }
+#   else
+#     define OR_WORD_EXIT_IF_SET(addr, bits) \
         { /* cannot use do-while(0) here */ \
           if (!(*(addr) & (bits))) { \
             AO_or((volatile AO_t *)(addr), (AO_t)(bits)); \
@@ -172,6 +197,7 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
             break; /* go to the enclosing loop end */ \
           } \
         }
+#   endif /* !THREAD_SANITIZER */
 # else
 #   define OR_WORD_EXIT_IF_SET(addr, bits) \
         { /* cannot use do-while(0) here */ \
@@ -184,7 +210,7 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
 # endif /* !PARALLEL_MARK */
 # define SET_MARK_BIT_EXIT_IF_SET(hhdr, bit_no) \
     { /* cannot use do-while(0) here */ \
-        word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(bit_no); \
+        word * mark_word_addr = (hhdr)->hb_marks + divWORDSZ(bit_no); \
         OR_WORD_EXIT_IF_SET(mark_word_addr, \
                 (word)1 << modWORDSZ(bit_no)); /* contains "break" */ \
     }
@@ -293,7 +319,8 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
                       (unsigned)GC_gc_no, (void *)base, (void *)(source))); \
     INCR_MARKS(hhdr); \
     GC_STORE_BACK_PTR((ptr_t)(source), base); \
-    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
+    mark_stack_top = GC_push_obj(base, hhdr, mark_stack_top, \
+                                 mark_stack_limit); \
   } while (0)
 #endif /* MARK_BIT_PER_GRANULE */
 
@@ -302,14 +329,10 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
                            source, hhdr, do_offset_check) \
   do { \
     size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
-    unsigned32 low_prod, high_prod; \
+    unsigned32 high_prod; \
     unsigned32 inv_sz = hhdr -> hb_inv_sz; \
-    ptr_t base = (ptr_t)(current); \
-    LONG_MULT(high_prod, low_prod, (unsigned32)displ, inv_sz); \
-    /* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
-    if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
-      /* FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 */ \
-        if (inv_sz == LARGE_INV_SZ) { \
+    ptr_t base; \
+    if (EXPECT(inv_sz == LARGE_INV_SZ, FALSE)) { \
           size_t obj_displ; \
           base = (ptr_t)(hhdr -> hb_block); \
           obj_displ = (ptr_t)(current) - base; \
@@ -325,8 +348,13 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
           } \
           GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
                     hhdr -> hb_block == HBLKPTR(current)); \
-          GC_ASSERT((word)hhdr->hb_block < (word)(current)); \
-        } else { \
+          GC_ASSERT((word)(hhdr)->hb_block <= (word)(current)); \
+          high_prod = 0; \
+    } else { \
+        unsigned32 low_prod; \
+        base = (ptr_t)(current); \
+        LONG_MULT(high_prod, low_prod, (unsigned32)displ, inv_sz); \
+        if ((low_prod >> 16) != 0) { \
           size_t obj_displ; \
           /* Accurate enough if HBLKSIZE <= 2**15.      */ \
           GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
@@ -351,7 +379,8 @@ GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
                       (unsigned)GC_gc_no, (void *)base, (void *)(source))); \
     INCR_MARKS(hhdr); \
     GC_STORE_BACK_PTR((ptr_t)(source), base); \
-    PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
+    mark_stack_top = GC_push_obj(base, hhdr, mark_stack_top, \
+                                 mark_stack_limit); \
   } while (0)
 #endif /* MARK_BIT_PER_OBJ */
 
@@ -482,8 +511,6 @@ typedef int mark_state_t;       /* Current state of marking, as follows:*/
 
 GC_EXTERN mark_state_t GC_mark_state;
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif  /* GC_PMARK_H */

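For context, the mark-byte fast path introduced above reduced to its essentials: an unordered atomic load followed by an unordered atomic store. Two marker threads may both observe zero and both store one, so an object can occasionally be pushed twice; that costs time, never correctness. A C++11 rendering of the same idea:

// Hedged illustration of the benign-race mark-byte check.
#include <atomic>

static std::atomic<unsigned char> mark_byte(0);

// Returns 1 if this call marked the object, 0 if it was already marked.
static int mark_if_unmarked()
{
    if (mark_byte.load(std::memory_order_relaxed) != 0)
        return 0;                                   // already marked
    mark_byte.store(1, std::memory_order_relaxed);  // benign race
    return 1;
}

int main() { return mark_if_unmarked() ? 0 : 1; }
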
+ 119 - 108
blitz.mod/bdwgc/include/private/gc_priv.h

@@ -157,6 +157,15 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
   /* The corresponding variable definition must start with GC_INNER.    */
 #endif /* !GC_INNER */
 
+#ifdef __cplusplus
+  /* Register storage specifier is deprecated in C++11. */
+# define REGISTER /* empty */
+#else
+  /* Used only for several local variables in the performance-critical  */
+  /* functions.  Should not be used for new code.                       */
+# define REGISTER register
+#endif
+
 #ifndef HEADERS_H
 # include "gc_hdrs.h"
 #endif
@@ -241,6 +250,10 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 # define GC_API_PRIV GC_API
 #endif
 
+#if defined(THREADS) && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
+# include "gc_atomic_ops.h"
+#endif
+
 #ifndef GC_LOCKS_H
 # include "gc_locks.h"
 #endif
@@ -281,10 +294,6 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 /*                               */
 /*********************************/
 
-/* #define STUBBORN_ALLOC */
-                    /* Enable stubborn allocation, and thus a limited   */
-                    /* form of incremental collection w/o dirty bits.   */
-
 /* #define ALL_INTERIOR_POINTERS */
                     /* Forces all pointers into the interior of an      */
                     /* object to be considered valid.  Also causes the  */
@@ -313,10 +322,7 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
                     /* This is now really controlled at startup,        */
                     /* through GC_all_interior_pointers.                */
 
-/* Note: never put extern "C" around an #include.                       */
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 #ifndef GC_NO_FINALIZATION
 # define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
@@ -407,9 +413,7 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
   GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
 #endif
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 /*********************************/
 /*                               */
@@ -447,14 +451,10 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 # define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
 #elif defined(NN_PLATFORM_CTR)
 # define CLOCK_TYPE long long
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
   CLOCK_TYPE n3ds_get_system_tick(void);
   CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick);
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
+  EXTERN_C_END
 # define GET_TIME(x) (void)(x = n3ds_get_system_tick())
 # define MS_TIME_DIFF(a,b) ((long)n3ds_convert_tick_to_ms((a)-(b)))
 #else /* !BSD_TIME && !NN_PLATFORM_CTR && !MSWIN32 && !MSWINCE */
@@ -512,9 +512,7 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 # include "th/PCR_ThCtl.h"
 #endif
 
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 /*
  * Stop and restart mutator threads.
@@ -592,17 +590,18 @@ typedef char * ptr_t;   /* A generic pointer to which we can add        */
 /* should match their format specifiers.                                */
 #define ABORT_ARG1(C_msg, C_fmt, arg1) \
                 do { \
-                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt, arg1); \
+                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
                   ABORT(C_msg); \
                 } while (0)
 #define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
                 do { \
-                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt, arg1, arg2); \
+                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
                   ABORT(C_msg); \
                 } while (0)
 #define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
                 do { \
-                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt, arg1, arg2, arg3); \
+                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", \
+                                    arg1, arg2, arg3); \
                   ABORT(C_msg); \
                 } while (0)
 
@@ -659,9 +658,7 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
 # define GETENV(name) getenv(name)
 #endif
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #if defined(DARWIN)
 # include <mach/thread_status.h>
@@ -744,25 +741,16 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
 # endif
 #endif /* DARWIN */
 
-#ifdef PARALLEL_MARK
-# include "gc_atomic_ops.h"
-# define counter_t volatile AO_t
-#else
-  typedef size_t counter_t;
-# if defined(THREADS) && (defined(MPROTECT_VDB) || defined(THREAD_SANITIZER) \
-                || (defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)))
-#   include "gc_atomic_ops.h"
-# endif
-#endif /* !PARALLEL_MARK */
-
 #include "../gc_tiny_fl.h"
 
 #include <setjmp.h>
 
-#ifdef __cplusplus
-  extern "C" {
+#if __STDC_VERSION__ >= 201112L
+# include <assert.h> /* for static_assert */
 #endif
 
+EXTERN_C_BEGIN
+
 /*********************************/
 /*                               */
 /* Word-size-dependent defines   */
@@ -1080,7 +1068,8 @@ struct hblkhdr {
                                 /* mod BYTES_TO_GRANULES(hb_sz), except */
                                 /* for large blocks.  See GC_obj_map.   */
 #   endif
-    counter_t hb_n_marks;       /* Number of set mark bits, excluding   */
+#   ifdef PARALLEL_MARK
+      volatile AO_t hb_n_marks; /* Number of set mark bits, excluding   */
                                 /* the one always set at the end.       */
                                 /* Currently it is concurrently         */
                                 /* updated and hence only approximate.  */
@@ -1098,8 +1087,10 @@ struct hblkhdr {
                                 /* The count may also be too high if    */
                                 /* multiple mark threads mark the       */
                                 /* same object due to a race.           */
-                                /* Without parallel marking, the count  */
+#   else
+      size_t hb_n_marks;        /* Without parallel marking, the count  */
                                 /* is accurate.                         */
+#   endif
 #   ifdef USE_MARK_BYTES
 #     define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
         /* Unlike the other case, this is in units of bytes.            */
@@ -1353,12 +1344,8 @@ struct _GC_arrays {
 # endif
   size_t _size_map[MAXOBJBYTES+1];
         /* Number of granules to allocate when asked for a certain      */
-        /* number of bytes.                                             */
-# ifdef STUBBORN_ALLOC
-#   define GC_sobjfreelist GC_arrays._sobjfreelist
-    ptr_t _sobjfreelist[MAXOBJGRANULES+1];
-                          /* Free list for immutable objects.   */
-# endif
+        /* number of bytes.  Should be accessed with the allocation     */
+        /* lock held.                                                   */
 # ifdef MARK_BIT_PER_GRANULE
 #   define GC_obj_map GC_arrays._obj_map
     unsigned short * _obj_map[MAXOBJGRANULES + 1];
@@ -1378,16 +1365,6 @@ struct _GC_arrays {
   char _valid_offsets[VALID_OFFSET_SZ];
                                 /* GC_valid_offsets[i] == TRUE ==> i    */
                                 /* is registered as a displacement.     */
-# ifdef STUBBORN_ALLOC
-#   define GC_changed_pages GC_arrays._changed_pages
-    page_hash_table _changed_pages;
-        /* Stubborn object pages that were changes since last call to   */
-        /* GC_read_changed.                                             */
-#   define GC_prev_changed_pages GC_arrays._prev_changed_pages
-    page_hash_table _prev_changed_pages;
-        /* Stubborn object pages that were changes before last call to  */
-        /* GC_read_changed.                                             */
-# endif
 # if defined(PROC_VDB) || defined(MPROTECT_VDB) \
      || defined(GWW_VDB) || defined(MANUAL_VDB)
 #   define GC_grungy_pages GC_arrays._grungy_pages
@@ -1530,11 +1507,11 @@ GC_EXTERN struct obj_kind {
 #define UNCOLLECTABLE 2
 #ifdef GC_ATOMIC_UNCOLLECTABLE
 # define AUNCOLLECTABLE 3
-# define STUBBORN 4
 # define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
+# define GC_N_KINDS_INITIAL_VALUE 4
 #else
-# define STUBBORN 3
 # define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
+# define GC_N_KINDS_INITIAL_VALUE 3
 #endif
 
 GC_EXTERN unsigned GC_n_kinds;
@@ -1576,7 +1553,7 @@ GC_EXTERN word GC_black_list_spacing;
 
 #ifdef GC_GCJ_SUPPORT
   extern struct hblk * GC_hblkfreelist[];
-                                        /* Remains visible to GNU GCJ. */
+  extern word GC_free_bytes[];  /* Both remain visible to GNU GCJ.      */
 #endif
 
 #ifdef GC_DISABLE_INCREMENTAL
@@ -1735,6 +1712,13 @@ GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);
                                     /* As GC_push_all but consider      */
                                     /* interior pointers as valid.      */
 
+#if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
+  /* GC_mark_local does not handle memory protection faults yet.  So,   */
+  /* the static data regions are scanned immediately by GC_push_roots.  */
+  GC_INNER void GC_push_conditional_eager(void *bottom, void *top,
+                                          GC_bool all);
+#endif
+
   /* In the threads case, we push part of the current thread stack      */
   /* with GC_push_all_eager when we push the registers.  This gets the  */
   /* callee-save registers that may disappear.  The remainder of the    */
@@ -1804,6 +1788,9 @@ GC_INNER void GC_set_fl_marks(ptr_t p);
                                     /* set.  Abort if not.              */
 #endif
 void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
+#ifdef USE_PROC_FOR_LIBRARIES
+  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e);
+#endif
 GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish);
 #if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
     || defined(CYGWIN32) || defined(PCR)
@@ -1829,7 +1816,10 @@ void GC_register_data_segments(void);
   GC_INNER GC_bool GC_is_static_root(void *p);
                 /* Is the address p in one of the registered static     */
                 /* root sections?                                       */
-#endif
+# ifdef TRACE_BUF
+    void GC_add_trace_entry(char *kind, word arg1, word arg2);
+# endif
+#endif /* !THREADS */
 
 /* Black listing: */
 #ifdef PRINT_BLACK_LIST
@@ -1965,6 +1955,13 @@ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f);
 #define GC_gcollect_inner() \
                 (void)GC_try_to_collect_inner(GC_never_stop_func)
 
+#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
+  GC_EXTERN GC_bool GC_in_thread_creation;
+        /* We may currently be in thread creation or destruction.       */
+        /* Only set to TRUE while allocation lock is held.              */
+        /* When set, it is OK to run GC from unknown thread.            */
+#endif
+
 GC_EXTERN GC_bool GC_is_initialized; /* GC_init() has been run. */
 
 GC_INNER void GC_collect_a_little_inner(int n);
@@ -1986,6 +1983,9 @@ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k);
                                 /* object is live.                      */
 #endif
 
+GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
+                                      GC_bool ignore_off_page, GC_bool retry);
+
 GC_INNER ptr_t GC_allocobj(size_t sz, int kind);
                                 /* Make the indicated                   */
                                 /* free list nonempty, and return its   */
@@ -2191,23 +2191,23 @@ GC_EXTERN GC_bool GC_print_back_height;
                 /* GC_enable_incremental once more).                    */
 #endif /* !GC_DISABLE_INCREMENTAL */
 
+#ifdef MANUAL_VDB
+  GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */
+# define GC_dirty(p) (GC_incremental ? GC_dirty_inner(p) : (void)0)
+# define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p)
+#else
+# define GC_dirty(p) (void)(p)
+# define REACHABLE_AFTER_DIRTY(p) (void)(p)
+#endif
+
 /* Same as GC_base but excepts and returns a pointer to const object.   */
 #define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p)))
 
-/* Stubborn objects: */
-void GC_read_changed(void); /* Analogous to GC_read_dirty */
-GC_bool GC_page_was_changed(struct hblk * h);
-                                /* Analogous to GC_page_was_dirty */
-void GC_clean_changing_list(void);
-                                /* Collect obsolete changing list entries */
-void GC_stubborn_init(void);
-
 /* Debugging print routines: */
 void GC_print_block_list(void);
 void GC_print_hblkfreelist(void);
 void GC_print_heap_sects(void);
 void GC_print_static_roots(void);
-/* void GC_dump(void); - declared in gc.h */
 
 extern word GC_fo_entries; /* should be visible in extra/MacOS.c */
 
@@ -2350,17 +2350,30 @@ GC_EXTERN signed_word GC_bytes_found;
   GC_EXTERN ptr_t * GC_gcjobjfreelist;
 #endif
 
-#if defined(GWW_VDB) && defined(MPROTECT_VDB)
-  GC_INNER GC_bool GC_gww_dirty_init(void);
-  /* Defined in os_dep.c.  Returns TRUE if GetWriteWatch is available.  */
-  /* May be called repeatedly.                                          */
-#endif
+#ifdef MPROTECT_VDB
+# ifdef GWW_VDB
+    GC_INNER GC_bool GC_gww_dirty_init(void);
+                        /* Returns TRUE if GetWriteWatch is available.  */
+                        /* May be called repeatedly.                    */
+# endif
+# ifdef USE_MUNMAP
+    GC_INNER GC_bool GC_mprotect_dirty_init(void);
+    GC_INNER GC_bool GC_has_unmapped_memory(void);
+# endif
+#endif /* MPROTECT_VDB */
 
 #if defined(CHECKSUMS) || defined(PROC_VDB)
   GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h);
                         /* Could the page contain valid heap pointers?  */
 #endif
 
+#ifdef CHECKSUMS
+# if defined(MPROTECT_VDB) && !defined(DARWIN)
+    void GC_record_fault(struct hblk * h);
+# endif
+  void GC_check_dirty(void);
+#endif
+
 GC_INNER void GC_default_print_heap_obj_proc(ptr_t p);
 
 GC_INNER void GC_setpagesize(void);
@@ -2374,9 +2387,9 @@ GC_INNER void GC_start_debugging_inner(void);   /* defined in dbg_mlc.c. */
                         /* Should not be called if GC_debugging_started. */
 
 /* Store debugging info into p.  Return displaced pointer.      */
-/* Assumes we don't hold allocation lock.                       */
-GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
-                                   int linenum);
+/* Assumes we hold the allocation lock.                         */
+GC_INNER void *GC_store_debug_info_inner(void *p, word sz, const char *str,
+                                         int linenum);
 
 #ifdef REDIRECT_MALLOC
 # ifdef GC_LINUX_THREADS
@@ -2405,9 +2418,13 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 #ifdef GC_WIN32_THREADS
   GC_INNER void GC_get_next_stack(char *start, char * limit, char **lo,
                                   char **hi);
-# ifdef MPROTECT_VDB
+# if defined(MPROTECT_VDB) && !defined(CYGWIN32)
     GC_INNER void GC_set_write_fault_handler(void);
 # endif
+# if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS)
+    GC_INNER GC_bool GC_started_thread_while_stopped(void);
+        /* Did we invalidate mark phase with an unexpected thread start? */
+# endif
 #endif /* GC_WIN32_THREADS */
 
 #ifdef THREADS
@@ -2432,10 +2449,12 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 
 #ifdef SEARCH_FOR_DATA_START
   GC_INNER void GC_init_linux_data_start(void);
+  ptr_t GC_find_limit(ptr_t, GC_bool);
 #endif
 
 #if defined(NETBSD) && defined(__ELF__)
   GC_INNER void GC_init_netbsd_elf(void);
+  ptr_t GC_find_limit(ptr_t, GC_bool);
 #endif
 
 #ifdef UNIX_LIKE
@@ -2447,6 +2466,10 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
     GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
                                       char **prot, unsigned int *maj_dev,
                                       char **mapping_name);
+# endif
+# if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
+    GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
+                                          ptr_t *startp, ptr_t *endp);
 # endif
   GC_INNER char *GC_get_maps(void); /* from os_dep.c */
 #endif /* NEED_PROC_MAPS */
@@ -2467,8 +2490,11 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 #endif
 
 /* Check a compile time assertion at compile time.      */
-#if defined(static_assert) && (__STDC_VERSION__ >= 201112L)
-# define GC_STATIC_ASSERT(expr) static_assert(expr, "")
+#if _MSC_VER >= 1700
+# define GC_STATIC_ASSERT(expr) \
+                static_assert(expr, "static assertion failed: " #expr)
+#elif defined(static_assert) && __STDC_VERSION__ >= 201112L
+# define GC_STATIC_ASSERT(expr) static_assert(expr, #expr)
 #elif defined(mips) && !defined(__GNUC__)
 /* DOB: MIPSPro C gets an internal error taking the sizeof an array type.
    This code works correctly (ugliness is to avoid "unused var" warnings) */
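
Note on the revised GC_STATIC_ASSERT above: with MSVC 2012+ (_MSC_VER >= 1700) or a C11 compiler it now forwards the asserted expression text into the diagnostic, so a failure is self-describing. A self-contained sketch of the C11 branch in use (the typedefs are stand-ins for the GC-internal types and the function name is invented):

    #include <assert.h>   /* provides static_assert in C11 */
    #include <stddef.h>

    typedef size_t word;          /* stand-ins for the GC-internal types;   */
    typedef char *ptr_t;          /* the real ones live in gc_priv.h        */

    #define GC_STATIC_ASSERT(expr) static_assert(expr, #expr) /* C11 branch */

    void example_checks(void)     /* function name invented for the sketch  */
    {
        GC_STATIC_ASSERT(sizeof(ptr_t) == sizeof(word));
    }
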
@@ -2496,8 +2522,9 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 #ifndef NO_DEBUGGING
   GC_EXTERN GC_bool GC_dump_regularly;
                                 /* Generate regular debugging dumps.    */
-# define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) GC_dump(); \
-                        else COND_DUMP_CHECKS
+# define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) { \
+                        GC_dump_named(NULL); \
+                   } else COND_DUMP_CHECKS
 #else
 # define COND_DUMP COND_DUMP_CHECKS
 #endif
@@ -2584,13 +2611,9 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 #if (defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))) \
     && !defined(GC_NO_SIGSETJMP)
 # if defined(SUNOS5SIGS) && !defined(FREEBSD) && !defined(LINUX)
-#   ifdef __cplusplus
-      } /* extern "C" */
-#   endif
+    EXTERN_C_END
 #   include <sys/siginfo.h>
-#   ifdef __cplusplus
-      extern "C" {
-#   endif
+    EXTERN_C_BEGIN
 # endif
   /* Define SETJMP and friends to be the version that restores  */
   /* the signal mask.                                           */
@@ -2609,27 +2632,17 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 
 /* Do we need the GC_find_limit machinery to find the end of a  */
 /* data segment?                                                */
-#if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
-# define NEED_FIND_LIMIT
-#endif
-
-#if !defined(STACKBOTTOM) && defined(HEURISTIC2)
-# define NEED_FIND_LIMIT
-#endif
-
-#if (defined(SVR4) || defined(AUX) || defined(DGUX) \
-    || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
+#if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START) \
+    || (!defined(STACKBOTTOM) && defined(HEURISTIC2)) \
+    || ((defined(SVR4) || defined(AIX) || defined(DGUX) \
+         || (defined(LINUX) && defined(SPARC))) && !defined(PCR))
 # define NEED_FIND_LIMIT
 #endif
 
 #if defined(DATASTART_USES_BSDGETDATASTART)
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
+  EXTERN_C_END
 # include <machine/trap.h>
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
 # if !defined(PCR)
 #   define NEED_FIND_LIMIT
 # endif
@@ -2688,8 +2701,6 @@ GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
 # define ASSERT_CANCEL_DISABLED() (void)0
 #endif /* !CANCEL_SAFE */
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* GC_PRIVATE_H */
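
Note on the EXTERN_C_BEGIN/EXTERN_C_END substitutions throughout this header: the idea is to close the C-linkage block before each system #include and reopen it afterwards, instead of repeating the #ifdef __cplusplus guard pairs. A self-contained sketch of the idiom, using the same macro definitions that gcconfig.h introduces below (the declaration names are invented):

    #ifdef __cplusplus
    # define EXTERN_C_BEGIN extern "C" {
    # define EXTERN_C_END } /* extern "C" */
    #else
    # define EXTERN_C_BEGIN /* empty */
    # define EXTERN_C_END /* empty */
    #endif

    EXTERN_C_BEGIN
    void gc_internal_decl_1(void);   /* GC-internal declarations get C linkage */
    EXTERN_C_END
    #include <stddef.h>              /* system headers stay outside the block  */
    EXTERN_C_BEGIN
    void gc_internal_decl_2(void);
    EXTERN_C_END
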

+ 159 - 279
blitz.mod/bdwgc/include/private/gcconfig.h

@@ -37,9 +37,15 @@
 /* around #includes.  Types and macros do not need such wrapping, only  */
 /* the declared global data and functions.                              */
 #ifdef __cplusplus
-  extern "C" {
+# define EXTERN_C_BEGIN extern "C" {
+# define EXTERN_C_END } /* extern "C" */
+#else
+# define EXTERN_C_BEGIN /* empty */
+# define EXTERN_C_END /* empty */
 #endif
 
+EXTERN_C_BEGIN
+
 /* Convenient internal macro to test version of Clang.  */
 #if defined(__clang__) && defined(__clang_major__)
 # define GC_CLANG_PREREQ(major, minor) \
@@ -125,13 +131,9 @@
 /* And one for Darwin: */
 # if defined(macosx) || (defined(__APPLE__) && defined(__MACH__))
 #   define DARWIN
-#   ifdef __cplusplus
-      } /* extern "C" */
-#   endif
+    EXTERN_C_END
 #   include <TargetConditionals.h>
-#   ifdef __cplusplus
-      extern "C" {
-#   endif
+    EXTERN_C_BEGIN
 # endif
 
 /* Determine the machine type: */
@@ -147,7 +149,7 @@
 # if defined(__aarch64__)
 #    define AARCH64
 #    if !defined(LINUX) && !defined(DARWIN) && !defined(FREEBSD) \
-        && !defined(NN_BUILD_TARGET_PLATFORM_NX)
+        && !defined(NETBSD) && !defined(NN_BUILD_TARGET_PLATFORM_NX)
 #      define NOSYS
 #      define mach_type_known
 #    endif
@@ -159,7 +161,6 @@
 #    elif !defined(LINUX) && !defined(NETBSD) && !defined(FREEBSD) \
           && !defined(OPENBSD) && !defined(DARWIN) && !defined(_WIN32) \
           && !defined(__CEGCC__) && !defined(NN_PLATFORM_CTR) \
-          && !defined(NN_BUILD_TARGET_PLATFORM_NX) \
           && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2) \
           && !defined(SYMBIAN)
 #      define NOSYS
@@ -201,6 +202,10 @@
 #    define ARM32
 #    define mach_type_known
 # endif
+# if defined(NETBSD) && defined(__aarch64__)
+#    define AARCH64
+#    define mach_type_known
+# endif
 # if defined(NETBSD) && defined(__sh__)
 #    define SH
 #    define mach_type_known
@@ -276,13 +281,9 @@
 # endif
 # if (defined(sun) || defined(__sun)) && (defined(sparc) || defined(__sparc))
             /* Test for SunOS 5.x */
-#   ifdef __cplusplus
-      } /* extern "C" */
-#   endif
+    EXTERN_C_END
 #   include <errno.h>
-#   ifdef __cplusplus
-      extern "C" {
-#   endif
+    EXTERN_C_BEGIN
 #   define SPARC
 #   define SOLARIS
 #   define mach_type_known
@@ -335,7 +336,7 @@
 #    define HAIKU
 #    define mach_type_known
 # endif
-# if defined(__HAIKU__) && defined(__amd64__)
+# if defined(__HAIKU__) && (defined(__amd64__) || defined(__x86_64__))
 #    define X86_64
 #    define HAIKU
 #    define mach_type_known
@@ -549,7 +550,9 @@
 #     ifdef _XBOX_ONE
 #       define MSWIN_XBOX1
 #     else
-#       define MSWIN32  /* or Win64 */
+#       ifndef MSWIN32
+#         define MSWIN32 /* or Win64 */
+#       endif
 #       if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP)
 #         define MSWINRT_FLAVOR
 #       endif
@@ -650,6 +653,10 @@
 #   endif
 #   define mach_type_known
 # endif
+# if defined(__riscv) && defined(LINUX)
+#   define RISCV
+#   define mach_type_known
+# endif
 
 # if defined(SN_TARGET_PSP2)
 #   define mach_type_known
@@ -922,13 +929,9 @@
 #       define MPROTECT_VDB
 #       ifdef __ELF__
 #         define DYNAMIC_LOADING
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <features.h>
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #         if defined(__GLIBC__) && __GLIBC__ >= 2
 #           define SEARCH_FOR_DATA_START
 #         else /* !GLIBC2 */
@@ -959,13 +962,9 @@
 #   endif
 #   ifdef MACOS
 #     ifndef __LOWMEM__
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <LowMem.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #     endif
 #     define OS_TYPE "MACOS"
                 /* see os_dep.c for details of global data segments. */
@@ -987,13 +986,9 @@
 #   ifdef MACOS
 #     define ALIGNMENT 2  /* Still necessary?  Could it be 4?   */
 #     ifndef __LOWMEM__
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <LowMem.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #     endif
 #     define OS_TYPE "MACOS"
                         /* see os_dep.c for details of global data segments. */
@@ -1047,13 +1042,9 @@
 #     define DATAEND   ((ptr_t)get_end())
 #     define USE_MMAP_ANON
 #     define MPROTECT_VDB
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)getpagesize()
 #     if defined(USE_PPC_PREFETCH) && defined(__GNUC__)
         /* The performance impact of prefetches is untested */
@@ -1070,14 +1061,10 @@
 #     define OS_TYPE "OPENBSD"
 #     define ALIGNMENT 4
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
         /* USRSTACK is defined in <machine/vmparam.h> but that is       */
         /* protected by _KERNEL in <uvm/uvm_param.h> file.              */
 #       ifdef USRSTACK
@@ -1253,14 +1240,10 @@
         /* Apparently USRSTACK is defined to be USERLIMIT, but in some  */
         /* installations that's undefined.  We work around this with a  */
         /* gross hack:                                                  */
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/vmparam.h>
 #       include <unistd.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USERLIMIT
           /* This should work everywhere, but doesn't.  */
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
@@ -1305,14 +1288,10 @@
 #   ifdef OPENBSD
 #     define OS_TYPE "OPENBSD"
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USRSTACK
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
 #       else
@@ -1378,13 +1357,9 @@
 #   endif
 #   ifdef HAIKU
 #     define OS_TYPE "HAIKU"
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <OS.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)B_PAGE_SIZE
       extern int etext[];
 #     define DATASTART ((ptr_t)((((word)(etext)) + 0xfff) & ~0xfff))
@@ -1406,13 +1381,9 @@
         /* Apparently USRSTACK is defined to be USERLIMIT, but in some  */
         /* installations that's undefined.  We work around this with a  */
         /* gross hack:                                                  */
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/vmparam.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USERLIMIT
           /* This should work everywhere, but doesn't.  */
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
@@ -1465,13 +1436,9 @@
 #       define DATAEND ((ptr_t)(&_end))
 #       define STACK_GROWS_DOWN
 #       define HEURISTIC2
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <unistd.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       define GETPAGESIZE() (unsigned)sysconf(_SC_PAGESIZE)
 #       define DYNAMIC_LOADING
 #       ifndef USE_MMAP
@@ -1495,13 +1462,9 @@
                 /* thus allowing the heap to grow to ~3GB               */
 #       ifdef __ELF__
 #           define DYNAMIC_LOADING
-#           ifdef __cplusplus
-              } /* extern "C" */
-#           endif
+            EXTERN_C_END
 #           include <features.h>
-#           ifdef __cplusplus
-              extern "C" {
-#           endif
+            EXTERN_C_BEGIN
 #            if defined(__GLIBC__) && __GLIBC__ >= 2 \
                 || defined(HOST_ANDROID) || defined(HOST_TIZEN)
 #                define SEARCH_FOR_DATA_START
@@ -1557,13 +1520,9 @@
 #       if defined(__GLIBC__) && !defined(__UCLIBC__)
           /* Workaround lock elision implementation for some glibc.     */
 #         define GLIBC_2_19_TSX_BUG
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <gnu/libc-version.h> /* for gnu_get_libc_version() */
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #       endif
 #   endif
 #   ifdef CYGWIN32
@@ -1598,13 +1557,9 @@
 #   endif
 #   ifdef DJGPP
 #       define OS_TYPE "DJGPP"
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include "stubinfo.h"
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
         extern int etext[];
         extern int _stklen;
         extern int __djgpp_stack_limit;
@@ -1616,14 +1571,10 @@
 #   ifdef OPENBSD
 #       define OS_TYPE "OPENBSD"
 #       ifndef GC_OPENBSD_THREADS
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <sys/param.h>
 #         include <uvm/uvm_extern.h>
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #         ifdef USRSTACK
 #           define STACKBOTTOM ((ptr_t)USRSTACK)
 #         else
@@ -1685,13 +1636,9 @@
 #   endif
 #   ifdef RTEMS
 #       define OS_TYPE "RTEMS"
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/unistd.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
         extern int etext[];
         extern int end[];
         void *rtems_get_stack_bottom(void);
@@ -1743,13 +1690,9 @@
 #     define STACKBOTTOM ((ptr_t)0xc0000000)
 #     define USE_MMAP_ANON
 #     define MPROTECT_VDB
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)getpagesize()
       /* There seem to be some issues with trylock hanging on darwin.   */
       /* This should be looked into some more.                          */
@@ -1877,14 +1820,10 @@
 #     define OS_TYPE "OPENBSD"
 #     define ALIGNMENT 4
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USRSTACK
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
 #       else
@@ -1995,13 +1934,9 @@
 #       define STACKBOTTOM ((ptr_t)environ)
 #     endif
 #     define DYNAMIC_LOADING
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)sysconf(_SC_PAGE_SIZE)
 #     ifndef __GNUC__
 #       define PREFETCH(x)  do { \
@@ -2021,14 +1956,10 @@
 #  ifdef OPENBSD
 #     define OS_TYPE "OPENBSD"
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USRSTACK
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
 #       else
@@ -2061,14 +1992,10 @@
 #       define OS_TYPE "OPENBSD"
 #       define ELF_CLASS ELFCLASS64
 #       ifndef GC_OPENBSD_THREADS
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <sys/param.h>
 #         include <uvm/uvm_extern.h>
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #         ifdef USRSTACK
 #           define STACKBOTTOM ((ptr_t)USRSTACK)
 #         else
@@ -2169,13 +2096,9 @@
 #       define STACKBOTTOM ((ptr_t)environ)
 #       define HPUX_STACKBOTTOM
 #       define DYNAMIC_LOADING
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <unistd.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       define GETPAGESIZE() (unsigned)sysconf(_SC_PAGE_SIZE)
         /* The following was empirically determined, and is probably    */
         /* not very robust.                                             */
@@ -2220,13 +2143,9 @@
 #           define CLEAR_DOUBLE(x) \
               __asm__ ("        stf.spill       [%0]=f0": : "r"((void *)(x)))
 #         else
-#           ifdef __cplusplus
-              } /* extern "C" */
-#           endif
+            EXTERN_C_END
 #           include <ia64intrin.h>
-#           ifdef __cplusplus
-              extern "C" {
-#           endif
+            EXTERN_C_BEGIN
 #           define PREFETCH(x) __lfetch(__lfhint_none, (x))
 #           define GC_PREFETCH_FOR_WRITE(x) __lfetch(__lfhint_nta, (x))
 #           define CLEAR_DOUBLE(x) __stf_spill((void *)(x), 0)
@@ -2353,13 +2272,9 @@
 #     define STACKBOTTOM ((ptr_t)0x16fdfffff)
 #     define USE_MMAP_ANON
 #     define MPROTECT_VDB
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)getpagesize()
       /* FIXME: There seem to be some issues with trylock hanging on    */
       /* darwin. This should be looked into some more.                  */
@@ -2381,6 +2296,22 @@
 #     define DATASTART GC_FreeBSDGetDataStart(0x1000, (ptr_t)etext)
 #     define DATASTART_USES_BSDGETDATASTART
 #   endif
+#   ifdef NETBSD
+#     define OS_TYPE "NETBSD"
+#     define HEURISTIC2
+      extern ptr_t GC_data_start;
+#     define DATASTART GC_data_start
+#     define ELF_CLASS ELFCLASS64
+#     define DYNAMIC_LOADING
+#   endif
+#   ifdef NINTENDO_SWITCH
+      extern int __bss_end[];
+#     define NO_HANDLE_FORK
+#     define DATASTART (ptr_t)ALIGNMENT /* cannot be null */
+#     define DATAEND (ptr_t)(&__bss_end)
+      void *switch_get_stack_bottom(void);
+#     define STACKBOTTOM ((ptr_t)switch_get_stack_bottom())
+#   endif
 #   ifdef NOSYS
       /* __data_start is usually defined in the target linker script.   */
       extern int __data_start[];
@@ -2417,17 +2348,13 @@
 #       define STACK_GRAN 0x10000000
 #       ifdef __ELF__
 #           define DYNAMIC_LOADING
-#           ifdef __cplusplus
-              } /* extern "C" */
-#           endif
+            EXTERN_C_END
 #           include <features.h>
-#           ifdef __cplusplus
-              extern "C" {
-#           endif
-#            if defined(__GLIBC__) && __GLIBC__ >= 2 \
+            EXTERN_C_BEGIN
+#           if defined(__GLIBC__) && __GLIBC__ >= 2 \
                 || defined(HOST_ANDROID) || defined(HOST_TIZEN)
 #                define SEARCH_FOR_DATA_START
-#            else
+#           else
                  extern char **__environ;
 #                define DATASTART ((ptr_t)(&__environ))
                               /* hideous kludge: __environ is the first */
@@ -2438,12 +2365,12 @@
                               /* would include .rodata, which may       */
                               /* contain large read-only data tables    */
                               /* that we'd rather not scan.             */
-#            endif
-             extern int _end[];
-#            define DATAEND ((ptr_t)(_end))
+#           endif
+            extern int _end[];
+#           define DATAEND ((ptr_t)(_end))
 #       else
-             extern int etext[];
-#            define DATASTART ((ptr_t)((((word)(etext)) + 0xfff) & ~0xfff))
+            extern int etext[];
+#           define DATASTART ((ptr_t)((((word)(etext)) + 0xfff) & ~0xfff))
 #       endif
 #   endif
 #   ifdef MSWINCE
@@ -2478,13 +2405,9 @@
 #     define STACKBOTTOM ((ptr_t)0x30000000)
 #     define USE_MMAP_ANON
 #     define MPROTECT_VDB
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)getpagesize()
       /* FIXME: There seem to be some issues with trylock hanging on    */
       /* darwin. This should be looked into some more.                  */
@@ -2496,14 +2419,10 @@
 #   ifdef OPENBSD
 #     define OS_TYPE "OPENBSD"
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USRSTACK
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
 #       else
@@ -2531,14 +2450,6 @@
       void *n3ds_get_stack_bottom(void);
 #     define STACKBOTTOM ((ptr_t)n3ds_get_stack_bottom())
 #   endif
-#   ifdef NINTENDO_SWITCH
-      extern int __bss_end[];
-#     define NO_HANDLE_FORK
-#     define DATASTART (ptr_t)ALIGNMENT /* cannot be null */
-#     define DATAEND (ptr_t)(&__bss_end)
-      void *switch_get_stack_bottom(void);
-#     define STACKBOTTOM ((ptr_t)switch_get_stack_bottom())
-#   endif
 #   ifdef NOSYS
       /* __data_start is usually defined in the target linker script.  */
       extern int __data_start[];
@@ -2586,14 +2497,10 @@
 #   ifdef OPENBSD
 #     define OS_TYPE "OPENBSD"
 #     ifndef GC_OPENBSD_THREADS
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/param.h>
 #       include <uvm/uvm_extern.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USRSTACK
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
 #       else
@@ -2661,13 +2568,9 @@
 #   ifdef SN_TARGET_ORBIS
 #     define DATASTART (ptr_t)ALIGNMENT
 #     define DATAEND (ptr_t)ALIGNMENT
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <pthread.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
       void *ps4_get_stack_bottom(void);
 #     define STACKBOTTOM ((ptr_t)ps4_get_stack_bottom())
 #   endif
@@ -2675,14 +2578,10 @@
 #       define OS_TYPE "OPENBSD"
 #       define ELF_CLASS ELFCLASS64
 #       ifndef GC_OPENBSD_THREADS
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <sys/param.h>
 #         include <uvm/uvm_extern.h>
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #         ifdef USRSTACK
 #           define STACKBOTTOM ((ptr_t)USRSTACK)
 #         else
@@ -2707,13 +2606,9 @@
 #       endif
 #       ifdef __ELF__
 #           define DYNAMIC_LOADING
-#           ifdef __cplusplus
-              } /* extern "C" */
-#           endif
+            EXTERN_C_END
 #           include <features.h>
-#           ifdef __cplusplus
-              extern "C" {
-#           endif
+            EXTERN_C_BEGIN
 #           define SEARCH_FOR_DATA_START
             extern int _end[];
 #           define DATAEND ((ptr_t)(_end))
@@ -2731,13 +2626,9 @@
 #       if defined(__GLIBC__) && !defined(__UCLIBC__)
           /* Workaround lock elision implementation for some glibc.     */
 #         define GLIBC_2_19_TSX_BUG
-#         ifdef __cplusplus
-            } /* extern "C" */
-#         endif
+          EXTERN_C_END
 #         include <gnu/libc-version.h> /* for gnu_get_libc_version() */
-#         ifdef __cplusplus
-            extern "C" {
-#         endif
+          EXTERN_C_BEGIN
 #       endif
 #   endif
 #   ifdef DARWIN
@@ -2753,13 +2644,9 @@
 #     define STACKBOTTOM ((ptr_t)0x7fff5fc00000)
 #     define USE_MMAP_ANON
 #     define MPROTECT_VDB
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <unistd.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)getpagesize()
       /* There seem to be some issues with trylock hanging on darwin.   */
       /* This should be looked into some more.                          */
@@ -2805,16 +2692,12 @@
 #   endif
 #   ifdef HAIKU
 #     define OS_TYPE "HAIKU"
-#     ifdef __cplusplus
-        } /* extern "C" */
-#     endif
+      EXTERN_C_END
 #     include <OS.h>
-#     ifdef __cplusplus
-        extern "C" {
-#     endif
+      EXTERN_C_BEGIN
 #     define GETPAGESIZE() (unsigned)B_PAGE_SIZE
-      extern int etext[];
-#     define DATASTART ((ptr_t)((((word)etext) + 0xfff) & ~0xfff))
+#     define HEURISTIC2
+#     define SEARCH_FOR_DATA_START
 #     define DYNAMIC_LOADING
 #     define MPROTECT_VDB
 #   endif
@@ -2834,13 +2717,9 @@
         /* Apparently USRSTACK is defined to be USERLIMIT, but in some  */
         /* installations that's undefined.  We work around this with a  */
         /* gross hack:                                                  */
-#       ifdef __cplusplus
-          } /* extern "C" */
-#       endif
+        EXTERN_C_END
 #       include <sys/vmparam.h>
-#       ifdef __cplusplus
-          extern "C" {
-#       endif
+        EXTERN_C_BEGIN
 #       ifdef USERLIMIT
           /* This should work everywhere, but doesn't.  */
 #         define STACKBOTTOM ((ptr_t)USRSTACK)
@@ -2915,22 +2794,18 @@
 #       define MPROTECT_VDB
 #       ifdef __ELF__
 #           define DYNAMIC_LOADING
-#           ifdef __cplusplus
-              } /* extern "C" */
-#           endif
+            EXTERN_C_END
 #           include <features.h>
-#           ifdef __cplusplus
-              extern "C" {
+            EXTERN_C_BEGIN
+#           if defined(__GLIBC__) && __GLIBC__ >= 2
+#               define SEARCH_FOR_DATA_START
+#           else
+#               error --> unknown Hexagon libc configuration
 #           endif
-#            if defined(__GLIBC__) && __GLIBC__ >= 2
-#                define SEARCH_FOR_DATA_START
-#            else
-#                error --> unknown Hexagon libc configuration
-#            endif
-             extern int _end[];
-#            define DATAEND ((ptr_t)(_end))
+            extern int _end[];
+#           define DATAEND ((ptr_t)(_end))
 #       elif !defined(CPPCHECK)
-#            error --> bad Hexagon Linux configuration
+#           error --> bad Hexagon Linux configuration
 #       endif
 #   else
 #       error --> unknown Hexagon OS configuration
@@ -2970,6 +2845,19 @@
 #   endif
 # endif
 
+# ifdef RISCV
+#   define MACH_TYPE "RISC-V"
+#   define CPP_WORDSZ __riscv_xlen /* 32 or 64 */
+#   define ALIGNMENT (CPP_WORDSZ/8)
+#   ifdef LINUX
+#     define OS_TYPE "LINUX"
+      extern int __data_start[];
+#     define DATASTART ((ptr_t)__data_start)
+#     define LINUX_STACKBOTTOM
+#     define DYNAMIC_LOADING
+#   endif
+# endif /* RISCV */
+
 #if defined(__GLIBC__) && !defined(DONT_USE_LIBC_PRIVATES)
   /* Use glibc's stack-end marker. */
 # define USE_LIBC_PRIVATES
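
Note on the new RISC-V block above: the collector's word size is taken from the compiler-provided __riscv_xlen and ALIGNMENT is derived from it. A small stand-alone probe of those values (illustrative only):

    #include <stdio.h>

    int main(void)
    {
    #if defined(__riscv)
        /* CPP_WORDSZ and ALIGNMENT as derived in the block above. */
        printf("CPP_WORDSZ=%d ALIGNMENT=%d\n",
               (int)__riscv_xlen, (int)(__riscv_xlen / 8));
    #else
        printf("not compiling for RISC-V\n");
    #endif
        return 0;
    }
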
@@ -3048,26 +2936,18 @@
 
 #if (defined(SVR4) || defined(HOST_ANDROID) || defined(HOST_TIZEN)) \
     && !defined(GETPAGESIZE)
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
+  EXTERN_C_END
 # include <unistd.h>
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
 # define GETPAGESIZE() (unsigned)sysconf(_SC_PAGESIZE)
 #endif
 
 #ifndef GETPAGESIZE
 # if defined(SOLARIS) || defined(IRIX5) || defined(LINUX) \
      || defined(NETBSD) || defined(FREEBSD) || defined(HPUX)
-#   ifdef __cplusplus
-      } /* extern "C" */
-#   endif
+    EXTERN_C_END
 #   include <unistd.h>
-#   ifdef __cplusplus
-      extern "C" {
-#   endif
+    EXTERN_C_BEGIN
 # endif
 # define GETPAGESIZE() (unsigned)getpagesize()
 #endif
@@ -3108,6 +2988,10 @@
 # define GC_EXPLICIT_SIGNALS_UNBLOCK
 #endif
 
+#if !defined(NO_SIGNALS_UNBLOCK_IN_MAIN) && defined(GC_NO_PTHREAD_SIGMASK)
+# define NO_SIGNALS_UNBLOCK_IN_MAIN
+#endif
+
 #if !defined(NO_MARKER_SPECIAL_SIGMASK) \
     && (defined(NACL) || defined(GC_WIN32_PTHREADS))
   /* Either there is no pthread_sigmask(), or GC marker thread cannot   */
@@ -3124,13 +3008,9 @@
 #endif
 
 #ifdef GC_OPENBSD_THREADS
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
+  EXTERN_C_END
 # include <sys/param.h>
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
   /* Prior to 5.2 release, OpenBSD had user threads and required        */
   /* special handling.                                                  */
 # if OpenBSD < 201211
@@ -3206,7 +3086,8 @@
 # define MUNMAP_THRESHOLD 2
 #endif
 
-#if defined(GC_DISABLE_INCREMENTAL) || defined(MANUAL_VDB)
+#if defined(GC_DISABLE_INCREMENTAL) || defined(DEFAULT_VDB) \
+    || defined(MANUAL_VDB)
 # undef GWW_VDB
 # undef MPROTECT_VDB
 # undef PCR_VDB
@@ -3257,15 +3138,16 @@
 #endif
 
 #if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB) \
-    && !defined(GWW_VDB) && !defined(MANUAL_VDB) \
+    && !defined(GWW_VDB) && !defined(DEFAULT_VDB) && !defined(MANUAL_VDB) \
     && !defined(GC_DISABLE_INCREMENTAL)
 # define DEFAULT_VDB
 #endif
 
-#if ((defined(UNIX_LIKE) && (defined(DARWIN) || defined(HURD) \
-                             || defined(OPENBSD) || defined(ARM32) \
-                             || defined(MIPS) || defined(AVR32) \
-                             || defined(OR1K) || defined(NIOS2))) \
+#if ((defined(UNIX_LIKE) && (defined(DARWIN) || defined(HAIKU) \
+                             || defined(HURD) || defined(OPENBSD) \
+                             || defined(ARM32) \
+                             || defined(AVR32) || defined(MIPS) \
+                             || defined(NIOS2) || defined(OR1K))) \
      || (defined(LINUX) && !defined(__gnu_linux__)) \
      || (defined(RTEMS) && defined(I386)) || defined(HOST_ANDROID)) \
     && !defined(NO_GETCONTEXT)
@@ -3705,15 +3587,13 @@
 #   define GET_MEM(bytes) (struct hblk*)switch_get_mem(bytes)
 # elif defined(HAIKU)
     ptr_t GC_haiku_get_mem(size_t bytes);
-#   define GET_MEM(bytes) (struct  hblk*)GC_haiku_get_mem(bytes)
+#   define GET_MEM(bytes) (struct hblk*)GC_haiku_get_mem(bytes)
 # else
     ptr_t GC_unix_get_mem(size_t bytes);
 #   define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
 # endif
 #endif /* GC_PRIVATE_H */
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* GCCONFIG_H */

+ 2 - 7
blitz.mod/bdwgc/include/private/pthread_stop_world.h

@@ -18,10 +18,7 @@
 #ifndef GC_PTHREAD_STOP_WORLD_H
 #define GC_PTHREAD_STOP_WORLD_H
 
-/* Note: never put extern "C" around an #include.                       */
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 struct thread_stop_info {
 #   if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
@@ -51,8 +48,6 @@ struct thread_stop_info {
 
 GC_INNER void GC_stop_init(void);
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif

+ 13 - 13
blitz.mod/bdwgc/include/private/pthread_support.h

@@ -36,10 +36,7 @@
 # include "dbg_mlc.h" /* for oh type */
 #endif
 
-/* Note: never put extern "C" around an #include.                       */
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 /* We use the allocation lock to protect thread-related data structures. */
 
@@ -143,10 +140,17 @@ typedef struct GC_Thread_Rep {
 # define THREAD_TABLE_SZ 256    /* Power of 2 (for speed). */
 #endif
 
-#define THREAD_TABLE_INDEX(id) \
+#if CPP_WORDSZ == 64
+# define THREAD_TABLE_INDEX(id) \
+    (int)(((((NUMERIC_THREAD_ID(id) >> 8) ^ NUMERIC_THREAD_ID(id)) >> 16) \
+          ^ ((NUMERIC_THREAD_ID(id) >> 8) ^ NUMERIC_THREAD_ID(id))) \
+         % THREAD_TABLE_SZ)
+#else
+# define THREAD_TABLE_INDEX(id) \
                 (int)(((NUMERIC_THREAD_ID(id) >> 16) \
                        ^ (NUMERIC_THREAD_ID(id) >> 8) \
                        ^ NUMERIC_THREAD_ID(id)) % THREAD_TABLE_SZ)
+#endif
 
 GC_EXTERN volatile GC_thread GC_threads[THREAD_TABLE_SZ];
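
Note on the 64-bit THREAD_TABLE_INDEX above: it folds the upper bits of the numeric thread id into the low bits twice (first by 8, then by 16) before taking the modulus, so more of a 64-bit id contributes to the table slot. A stand-alone sketch of the same folding, with NUMERIC_THREAD_ID approximated by a cast (the real definition is platform-dependent):

    #include <pthread.h>
    #include <stdio.h>

    #define THREAD_TABLE_SZ 256
    /* Approximation for the sketch; the real NUMERIC_THREAD_ID() differs  */
    /* per platform, but on most pthread targets it is a cast of pthread_t. */
    #define NUMERIC_THREAD_ID(id) ((unsigned long)(id))

    static int thread_table_index(pthread_t id)
    {
        unsigned long n = NUMERIC_THREAD_ID(id);
        unsigned long h = (n >> 8) ^ n;     /* mix bits 8.. into the low bits */

        return (int)(((h >> 16) ^ h) % THREAD_TABLE_SZ);  /* 64-bit variant  */
    }

    int main(void)
    {
        printf("index of this thread: %d\n", thread_table_index(pthread_self()));
        return 0;
    }
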
 
@@ -154,11 +158,6 @@ GC_EXTERN GC_bool GC_thr_initialized;
 
 GC_INNER GC_thread GC_lookup_thread(pthread_t id);
 
-GC_EXTERN GC_bool GC_in_thread_creation;
-        /* We may currently be in thread creation or destruction.       */
-        /* Only set to TRUE while allocation lock is held.              */
-        /* When set, it is OK to run GC from unknown thread.            */
-
 #ifdef NACL
   GC_EXTERN __thread GC_thread GC_nacl_gc_thread_self;
   GC_INNER void GC_nacl_initialize_gc_thread(void);
@@ -175,15 +174,16 @@ GC_EXTERN GC_bool GC_in_thread_creation;
 # define GC_INNER_PTHRSTART GC_INNER
 #endif
 
+GC_INNER_PTHRSTART void * GC_CALLBACK GC_inner_start_routine(
+                                        struct GC_stack_base *sb, void *arg);
+
 GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
                                         void *(**pstart)(void *),
                                         void **pstart_arg,
                                         struct GC_stack_base *sb, void *arg);
 GC_INNER_PTHRSTART void GC_thread_exit_proc(void *);
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* GC_PTHREADS && !GC_WIN32_THREADS */
 

+ 16 - 11
blitz.mod/bdwgc/include/private/specific.h

@@ -14,12 +14,7 @@
 
 #include <errno.h>
 
-#include "gc_atomic_ops.h"
-
-/* Note: never put extern "C" around an #include.       */
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 /* Called during key creation or setspecific.           */
 /* For the GC we already hold lock.                     */
@@ -35,6 +30,18 @@
 #define HASH(p) \
           ((unsigned)((((word)(p)) >> 8) ^ (word)(p)) & (TS_HASH_SIZE - 1))
 
+#ifdef GC_ASSERTIONS
+  /* Thread-local storage is not guaranteed to be scanned by GC.        */
+  /* We hide values stored in "specific" entries for a test purpose.    */
+  typedef GC_hidden_pointer ts_entry_value_t;
+# define TS_HIDE_VALUE(p) GC_HIDE_POINTER(p)
+# define TS_REVEAL_PTR(p) GC_REVEAL_POINTER(p)
+#else
+  typedef void * ts_entry_value_t;
+# define TS_HIDE_VALUE(p) (p)
+# define TS_REVEAL_PTR(p) (p)
+#endif
+
 /* An entry describing a thread-specific value for a given thread.      */
 /* All such accessible structures preserve the invariant that if either */
 /* thread is a valid pthread id or qtid is a valid "quick thread id"    */
@@ -43,7 +50,7 @@
 /* asynchronous reads are allowed.                                      */
 typedef struct thread_specific_entry {
         volatile AO_t qtid;     /* quick thread id, only for cache */
-        void * value;
+        ts_entry_value_t value;
         struct thread_specific_entry *next;
         pthread_t thread;
 } tse;
@@ -100,11 +107,9 @@ GC_INLINE void * GC_getspecific(tsd * key)
     GC_ASSERT(qtid != INVALID_QTID);
     if (EXPECT(entry -> qtid == qtid, TRUE)) {
       GC_ASSERT(entry -> thread == pthread_self());
-      return entry -> value;
+      return TS_REVEAL_PTR(entry -> value);
     }
     return GC_slow_getspecific(key, qtid, entry_ptr);
 }
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
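
Note on TS_HIDE_VALUE/TS_REVEAL_PTR above: under GC_ASSERTIONS the stored value is kept as a GC_hidden_pointer, essentially the bitwise complement of the address, so a conservative scan of the thread-specific entries cannot see it. A stand-alone sketch of that hide/reveal round trip (macro names invented here to avoid clashing with gc.h):

    #include <assert.h>
    #include <stdint.h>

    /* Mimics the hide/reveal idea behind GC_HIDE_POINTER/GC_REVEAL_POINTER: */
    /* storing the complement keeps the value invisible to a pointer scan    */
    /* while remaining cheaply recoverable.                                  */
    #define HIDE_POINTER(p)   (~(uintptr_t)(p))
    #define REVEAL_POINTER(h) ((void *)~(uintptr_t)(h))

    int main(void)
    {
        static int obj;
        uintptr_t hidden = HIDE_POINTER(&obj);  /* invisible to pointer scans */

        assert(REVEAL_POINTER(hidden) == (void *)&obj);
        return 0;
    }
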

+ 18 - 20
blitz.mod/bdwgc/include/private/thread_local_alloc.h

@@ -34,10 +34,7 @@
 
 #include <stdlib.h>
 
-/* Note: never put extern "C" around an #include.               */
-#ifdef __cplusplus
-  extern "C" {
-#endif
+EXTERN_C_BEGIN
 
 #if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC) \
     && !defined(USE_WIN32_COMPILER_TLS) && !defined(USE_COMPILER_TLS) \
@@ -58,13 +55,16 @@
 # elif (defined(LINUX) && !defined(ARM32) && !defined(AVR32) \
          && GC_GNUC_PREREQ(3, 3) \
          && !(defined(__clang__) && defined(HOST_ANDROID))) \
+       || (defined(FREEBSD) && defined(__GLIBC__) /* kFreeBSD */ \
+            && GC_GNUC_PREREQ(4, 4)) \
        || (defined(HOST_ANDROID) && defined(ARM32) \
             && (GC_GNUC_PREREQ(4, 6) || GC_CLANG_PREREQ_FULL(3, 8, 256229)))
 #   define USE_COMPILER_TLS
 # elif defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) \
        || defined(GC_AIX_THREADS) || defined(GC_DARWIN_THREADS) \
        || defined(GC_FREEBSD_THREADS) || defined(GC_NETBSD_THREADS) \
-       || defined(GC_LINUX_THREADS) || defined(GC_RTEMS_PTHREADS)
+       || defined(GC_LINUX_THREADS) || defined(GC_HAIKU_THREADS) \
+       || defined(GC_RTEMS_PTHREADS)
 #   define USE_PTHREAD_SPECIFIC
 # elif defined(GC_HPUX_THREADS)
 #   ifdef __GNUC__
@@ -133,17 +133,13 @@ typedef struct thread_local_freelists {
 # define GC_remove_specific_after_fork(key, t) (void)0
   typedef void * GC_key_t;
 #elif defined(USE_WIN32_SPECIFIC)
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
 # ifndef WIN32_LEAN_AND_MEAN
 #   define WIN32_LEAN_AND_MEAN 1
 # endif
 # define NOSERVICE
+  EXTERN_C_END
 # include <windows.h>
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
 # define GC_getspecific TlsGetValue
 # define GC_setspecific(key, v) !TlsSetValue(key, v)
         /* We assume 0 == success, msft does the opposite.      */
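
Note on the GC_setspecific mapping above: TlsSetValue() reports success with a nonzero result, while the GC-internal convention (as with pthread_setspecific) is that 0 means success, hence the negation. A Windows-only sketch of the same inversion (function name invented):

    #ifdef _WIN32
    # define WIN32_LEAN_AND_MEAN 1
    # include <windows.h>

    /* Mirrors the GC_setspecific() mapping shown above.                  */
    static int sketch_setspecific(DWORD key, void *value)
    {
        return !TlsSetValue(key, value);  /* 0 == success, as the GC expects */
    }

    int main(void)
    {
        DWORD key = TlsAlloc();

        if (key == TLS_OUT_OF_INDEXES) return 1;
        return sketch_setspecific(key, (void *)&key);  /* expect 0 (success) */
    }
    #else
    int main(void) { return 0; }  /* the mapping only applies on Win32 */
    #endif
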
@@ -158,13 +154,9 @@ typedef struct thread_local_freelists {
 # define GC_remove_specific_after_fork(key, t) (void)0
   typedef DWORD GC_key_t;
 #elif defined(USE_CUSTOM_SPECIFIC)
-# ifdef __cplusplus
-    } /* extern "C" */
-# endif
+  EXTERN_C_END
 # include "private/specific.h"
-# ifdef __cplusplus
-    extern "C" {
-# endif
+  EXTERN_C_BEGIN
 #else
 # error implement me
 #endif
@@ -184,6 +176,14 @@ GC_INNER void GC_destroy_thread_local(GC_tlfs p);
 /* we take care of an individual thread freelist structure.     */
 GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p);
 
+#ifdef GC_ASSERTIONS
+  GC_bool GC_is_thread_tsd_valid(void *tsd);
+  void GC_check_tls_for(GC_tlfs p);
+# if defined(USE_CUSTOM_SPECIFIC)
+    void GC_check_tsd_marks(tsd *key);
+# endif
+#endif /* GC_ASSERTIONS */
+
 #ifndef GC_ATTR_TLS_FAST
 # define GC_ATTR_TLS_FAST /* empty */
 #endif
@@ -199,9 +199,7 @@ extern
 /* for cleanup on thread exit.  But the thread support layer makes sure */
 /* that GC_thread_key is traced, if necessary.                          */
 
-#ifdef __cplusplus
-  } /* extern "C" */
-#endif
+EXTERN_C_END
 
 #endif /* THREAD_LOCAL_ALLOC */
 

+ 0 - 217
blitz.mod/bdwgc/include/weakpointer.h

@@ -1,217 +0,0 @@
-#ifndef _weakpointer_h_
-#define _weakpointer_h_
-
-/****************************************************************************
-
-WeakPointer and CleanUp
-
-    Copyright (c) 1991 by Xerox Corporation.  All rights reserved.
-
-    THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
-    OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
-
-    Permission is hereby granted to copy this code for any purpose,
-    provided the above notices are retained on all copies.
-
-****************************************************************************/
-
-/****************************************************************************
-
-WeakPointer
-
-A weak pointer is a pointer to a heap-allocated object that doesn't
-prevent the object from being garbage collected. Weak pointers can be
-used to track which objects haven't yet been reclaimed by the
-collector. A weak pointer is deactivated when the collector discovers
-its referent object is unreachable by normal pointers (reachability
-and deactivation are defined more precisely below). A deactivated weak
-pointer remains deactivated forever.
-
-****************************************************************************/
-
-
-template< class T > class WeakPointer {
-public:
-
-WeakPointer( T* t = 0 )
-    /* Constructs a weak pointer for *t. t may be null. It is an error
-       if t is non-null and *t is not a collected object. */
-    {impl = _WeakPointer_New( t );}
-
-T* Pointer()
-    /* wp.Pointer() returns a pointer to the referent object of wp or
-       null if wp has been deactivated (because its referent object
-       has been discovered unreachable by the collector). */
-    {return (T*) _WeakPointer_Pointer( this->impl );}
-
-int operator==( WeakPointer< T > wp2 )
-    /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
-       wp2 refer to the same object. If wp1 != wp2, then either wp1
-       and wp2 don't refer to the same object, or if they do, one or
-       both of them has been deactivated. (Note: If objects t1 and t2
-       are never made reachable by their clean-up functions, then
-       WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only t1 == t2.) */
-    {return _WeakPointer_Equal( this->impl, wp2.impl );}
-
-int Hash()
-    /* Returns a hash code suitable for use by multiplicative- and
-       division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
-       wp2.Hash(). */
-    {return _WeakPointer_Hash( this->impl );}
-
-private:
-void* impl;
-};
-
-/*****************************************************************************
-
-CleanUp
-
-A garbage-collected object can have an associated clean-up function
-that will be invoked some time after the collector discovers the
-object is unreachable via normal pointers. Clean-up functions can be
-used to release resources such as open-file handles or window handles
-when their containing objects become unreachable.  If a C++ object has
-a non-empty explicit destructor (i.e. it contains programmer-written
-code), the destructor will be automatically registered as the object's
-initial clean-up function.
-
-There is no guarantee that the collector will detect every unreachable
-object (though it will find almost all of them). Clients should not
-rely on clean-up to cause some action to occur immediately -- clean-up
-is only a mechanism for improving resource usage.
-
-Every object with a clean-up function also has a clean-up queue. When
-the collector finds the object is unreachable, it enqueues it on its
-queue. The clean-up function is applied when the object is removed
-from the queue. By default, objects are enqueued on the garbage
-collector's queue, and the collector removes all objects from its
-queue after each collection. If a client supplies another queue for
-objects, it is his responsibility to remove objects (and cause their
-functions to be called) by polling it periodically.
-
-Clean-up queues allow clean-up functions accessing global data to
-synchronize with the main program. Garbage collection can occur at any
-time, and clean-ups invoked by the collector might access data in an
-inconsistent state. A client can control this by defining an explicit
-queue for objects and polling it at safe points.
-
-The following definitions are used by the specification below:
-
-Given a pointer t to a collected object, the base object BO(t) is the
-value returned by new when it created the object. (Because of multiple
-inheritance, t and BO(t) may not be the same address.)
-
-A weak pointer wp references an object *t if BO(wp.Pointer()) ==
-BO(t).
-
-***************************************************************************/
-
-template< class T, class Data > class CleanUp {
-public:
-
-static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
-    /* Sets the clean-up function of object BO(t) to be <c, d>,
-       replacing any previously defined clean-up function for BO(t); c
-       and d can be null, but t cannot. Sets the clean-up queue for
-       BO(t) to be the collector's queue. When t is removed from its
-       clean-up queue, its clean-up will be applied by calling c(d,
-       t). It is an error if *t is not a collected object. */
-       {_CleanUp_Set( t, c, d );}
-
-static void Call( T* t )
-    /* Sets the new clean-up function for BO(t) to be null and, if the
-       old one is non-null, calls it immediately, even if BO(t) is
-       still reachable. Deactivates any weak pointers to BO(t). */
-       {_CleanUp_Call( t );}
-
-class Queue {public:
-    Queue()
-        /* Constructs a new queue. */
-            {this->head = _CleanUp_Queue_NewHead();}
-
-    void Set( T* t )
-        /* q.Set(t) sets the clean-up queue of BO(t) to be q. */
-            {_CleanUp_Queue_Set( this->head, t );}
-
-    int Call()
-        /* If q is non-empty, q.Call() removes the first object and
-           calls its clean-up function; does nothing if q is
-           empty. Returns true if there are more objects in the
-           queue. */
-           {return _CleanUp_Queue_Call( this->head );}
-
-    private:
-    void* head;
-    };
-};
-
-/**********************************************************************
-
-Reachability and Clean-up
-
-An object O is reachable if it can be reached via a non-empty path of
-normal pointers from the registers, stacks, global variables, or an
-object with a non-null clean-up function (including O itself),
-ignoring pointers from an object to itself.
-
-This definition of reachability ensures that if object B is accessible
-from object A (and not vice versa) and if both A and B have clean-up
-functions, then A will always be cleaned up before B. Note that as
-long as an object with a clean-up function is contained in a cycle of
-pointers, it will always be reachable and will never be cleaned up or
-collected.
-
-When the collector finds an unreachable object with a null clean-up
-function, it atomically deactivates all weak pointers referencing the
-object and recycles its storage. If object B is accessible from object
-A via a path of normal pointers, A will be discovered unreachable no
-later than B, and a weak pointer to A will be deactivated no later
-than a weak pointer to B.
-
-When the collector finds an unreachable object with a non-null
-clean-up function, the collector atomically deactivates all weak
-pointers referencing the object, redefines its clean-up function to be
-null, and enqueues it on its clean-up queue. The object then becomes
-reachable again and remains reachable at least until its clean-up
-function executes.
-
-The clean-up function is assured that its argument is the only
-accessible pointer to the object. Nothing prevents the function from
-redefining the object's clean-up function or making the object
-reachable again (for example, by storing the pointer in a global
-variable).
-
-If the clean-up function does not make its object reachable again and
-does not redefine its clean-up function, then the object will be
-collected by a subsequent collection (because the object remains
-unreachable and now has a null clean-up function). If the clean-up
-function does make its object reachable again and a clean-up function
-is subsequently redefined for the object, then the new clean-up
-function will be invoked the next time the collector finds the object
-unreachable.
-
-Note that a destructor for a collected object cannot safely redefine a
-clean-up function for its object, since after the destructor executes,
-the object has been destroyed into "raw memory". (In most
-implementations, destroying an object mutates its vtbl.)
-
-Finally, note that calling delete t on a collected object first
-deactivates any weak pointers to t and then invokes its clean-up
-function (destructor).
-
-**********************************************************************/
-
-extern "C" {
-    void* _WeakPointer_New( void* t );
-    void* _WeakPointer_Pointer( void* wp );
-    int _WeakPointer_Equal( void* wp1, void* wp2 );
-    int _WeakPointer_Hash( void* wp );
-    void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
-    void _CleanUp_Call( void* t );
-    void* _CleanUp_Queue_NewHead ();
-    void _CleanUp_Queue_Set( void* h, void* t );
-    int _CleanUp_Queue_Call( void* h );
-}
-
-#endif /* _weakpointer_h_ */

+ 36 - 26
blitz.mod/bdwgc/libatomic_ops/.travis.yml

@@ -109,53 +109,63 @@ matrix:
     addons:
       apt:
         packages:
-        - clang-4.0
+        - clang-5.0
         sources:
-        - llvm-toolchain-trusty-4.0
-    compiler: clang-4.0
+        - llvm-toolchain-trusty-5.0
+    compiler: clang-5.0
     env:
     - CFLAGS_EXTRA="-O3 -march=native -std=c11 -D AO_BL_SIZE=4 -D DEFAULT_NTHREADS=32"
   - os: linux
     addons:
       apt:
         packages:
-        - clang-4.0
+        - clang-5.0
         - gcc-multilib
         sources:
-        - llvm-toolchain-trusty-4.0
-    compiler: clang-4.0
+        - llvm-toolchain-trusty-5.0
+    compiler: clang-5.0
     env:
     - CFLAGS_EXTRA="-m32 -O3 -march=native"
   - os: linux
     addons:
       apt:
         packages:
-        - gcc-5
+        - gcc-8
         sources:
         - ubuntu-toolchain-r-test
-    compiler: gcc-5
+    compiler: gcc-8
     env:
     - CFLAGS_EXTRA="-O3 -march=native"
   - os: linux
     addons:
       apt:
         packages:
-        - gcc-5
-        - gcc-5-multilib
+        - gcc-8
         sources:
         - ubuntu-toolchain-r-test
-    compiler: gcc-5
+    compiler: gcc-8
+    env:
+    - CFLAGS_EXTRA="-O3 -march=native"
+  - os: linux
+    addons:
+      apt:
+        packages:
+        - gcc-8
+        - gcc-8-multilib
+        sources:
+        - ubuntu-toolchain-r-test
+    compiler: gcc-8
     env:
     - CFLAGS_EXTRA="-m32 -O3 -march=native"
   - os: linux
     addons:
       apt:
         packages:
-        - gcc-5
-        - gcc-5-multilib
+        - gcc-8
+        - gcc-8-multilib
         sources:
         - ubuntu-toolchain-r-test
-    compiler: gcc-5
+    compiler: gcc-8
     env:
     - CFLAGS_EXTRA="-mx32 -march=native -D _FORTIFY_SOURCE=2"
     - CONF_OPTIONS="--enable-assertions --enable-shared"
@@ -163,11 +173,11 @@ matrix:
     addons:
       apt:
         packages:
-        - gcc-5
-        - gcc-5-multilib
+        - gcc-8
+        - gcc-8-multilib
         sources:
         - ubuntu-toolchain-r-test
-    compiler: gcc-5
+    compiler: gcc-8
     env:
     - CFLAGS_EXTRA="-mx32 -march=native"
     - CONF_OPTIONS="--disable-atomic-intrinsics --disable-docs"
@@ -181,10 +191,10 @@ matrix:
     addons:
       apt:
         packages:
-        - clang-4.0
+        - clang-5.0
         sources:
-        - llvm-toolchain-trusty-4.0
-    compiler: clang-4.0
+        - llvm-toolchain-trusty-5.0
+    compiler: clang-5.0
     env:
     - CFLAGS_EXTRA="-fsanitize=address -march=native -fno-common -fno-omit-frame-pointer"
     - CONF_OPTIONS="--enable-assertions"
@@ -198,11 +208,11 @@ matrix:
     addons:
       apt:
         packages:
-        - gcc-5
-        - gcc-5-multilib
+        - gcc-8
+        - gcc-8-multilib
         sources:
         - ubuntu-toolchain-r-test
-    compiler: gcc-5
+    compiler: gcc-8
     env:
     - CFLAGS_EXTRA="-fsanitize=address -m32 -march=native -fno-omit-frame-pointer"
     - LDFLAGS="-fuse-ld=gold"
@@ -254,10 +264,10 @@ matrix:
     addons:
       apt:
         packages:
-        - clang-4.0
+        - clang-5.0
         sources:
-        - llvm-toolchain-trusty-4.0
-    compiler: clang-4.0
+        - llvm-toolchain-trusty-5.0
+    compiler: clang-5.0
     env:
     - CSA_CHECK=true
     - MAKEFILE_TARGET=all

+ 2 - 0
blitz.mod/bdwgc/libatomic_ops/AUTHORS

@@ -43,10 +43,12 @@ Philipp Zambelli <[email protected]>
 Ranko Zivojnovic <[email protected]>
 Roger Hoover <[email protected]>
 Sebastian Siewior <[email protected]>
+Shea Levy <[email protected]>
 Steve Capper <[email protected]>
 Takashi Yoshii <[email protected]>
 Thiemo Seufer <[email protected]>
 Thorsten Glaser <[email protected]>
 Tobias Leich <[email protected]>
 Tony Mantler <[email protected]>
+YunQiang Su <[email protected]>
 Yvan Roux <[email protected]>

+ 20 - 21
blitz.mod/bdwgc/libatomic_ops/COPYING

@@ -1,12 +1,12 @@
-		    GNU GENERAL PUBLIC LICENSE
-		       Version 2, June 1991
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
 
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
-			    Preamble
+                            Preamble
 
   The licenses for most software are designed to take away your
 freedom to share and change it.  By contrast, the GNU General Public
@@ -15,7 +15,7 @@ software--to make sure the software is free for all its users.  This
 General Public License applies to most of the Free Software
 Foundation's software and to any other program whose authors commit to
 using it.  (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.)  You can apply it to
+the GNU Lesser General Public License instead.)  You can apply it to
 your programs, too.
 
   When we speak of free software, we are referring to freedom, not
@@ -55,8 +55,8 @@ patent must be licensed for everyone's free use or not licensed at all.
 
   The precise terms and conditions for copying, distribution and
 modification follow.
-
-		    GNU GENERAL PUBLIC LICENSE
+
+                    GNU GENERAL PUBLIC LICENSE
    TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
 
   0. This License applies to any program or other work which contains
@@ -110,7 +110,7 @@ above, provided that you also meet all of these conditions:
     License.  (Exception: if the Program itself is interactive but
     does not normally print such an announcement, your work based on
     the Program is not required to print an announcement.)
-
+
 These requirements apply to the modified work as a whole.  If
 identifiable sections of that work are not derived from the Program,
 and can be reasonably considered independent and separate works in
@@ -168,7 +168,7 @@ access to copy from a designated place, then offering equivalent
 access to copy the source code from the same place counts as
 distribution of the source code, even though third parties are not
 compelled to copy the source along with the object code.
-
+
   4. You may not copy, modify, sublicense, or distribute the Program
 except as expressly provided under this License.  Any attempt
 otherwise to copy, modify, sublicense or distribute the Program is
@@ -225,7 +225,7 @@ impose that choice.
 
 This section is intended to make thoroughly clear what is believed to
 be a consequence of the rest of this License.
-
+
   8. If the distribution and/or use of the Program is restricted in
 certain countries either by patents or by copyrighted interfaces, the
 original copyright holder who places the Program under this License
@@ -255,7 +255,7 @@ make exceptions for this.  Our decision will be guided by the two goals
 of preserving the free status of all derivatives of our free software and
 of promoting the sharing and reuse of software generally.
 
-			    NO WARRANTY
+                            NO WARRANTY
 
   11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
 FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
@@ -277,9 +277,9 @@ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
 PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGES.
 
-		     END OF TERMS AND CONDITIONS
-
-	    How to Apply These Terms to Your New Programs
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
 
   If you develop a new program, and you want it to be of the greatest
 possible use to the public, the best way to achieve this is to make it
@@ -303,17 +303,16 @@ the "copyright" line and a pointer to where the full notice is found.
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 Also add information on how to contact you by electronic and paper mail.
 
 If the program is interactive, make it output a short notice like this
 when it starts in an interactive mode:
 
-    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision version 69, Copyright (C) year name of author
     Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
     This is free software, and you are welcome to redistribute it
     under certain conditions; type `show c' for details.
@@ -336,5 +335,5 @@ necessary.  Here is a sample; alter the names:
 This General Public License does not permit incorporating your program into
 proprietary programs.  If your program is a subroutine library, you may
 consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Library General
+library.  If this is what you want to do, use the GNU Lesser General
 Public License instead of this License.

+ 12 - 0
blitz.mod/bdwgc/libatomic_ops/ChangeLog

@@ -1,4 +1,16 @@
 
+== [7.6.4] 2018-03-27 ==
+
+* Add RISC-V support
+* Convert atomic_ops_malloc.c and tests to valid C++ code
+* Eliminate 'function is never used' cppcheck warning for load_before_cas
+* Eliminate 'using argument that points at uninitialized var' cppcheck error
+* Fix 'AO_pt_lock undefined' error if cross-compiling manually (MinGW)
+* Fix public headers inclusion from clients C++ code
+* Remove gcc/nios2.h file (include gcc/generic.h directly for nios2)
+* Support MIPS rel6
+
+
 == [7.6.2] 2017-12-24 ==
 
 * Allow to alter DEFAULT/MAX_NTHREADS values in test_malloc/stack

+ 3 - 3
blitz.mod/bdwgc/libatomic_ops/Makefile.am

@@ -12,9 +12,9 @@ endif
 
 EXTRA_DIST = autogen.sh
 
-# TODO: After migration to autoconf-1.13+, remove check-nolink definition
-# from this Makefile.am and add AM_EXTRA_RECURSIVE_TARGETS([check-nolink])
-# back to configure.ac file.
+## TODO: After migration to autoconf-1.13+, remove check-nolink definition
+## from this Makefile.am and add AM_EXTRA_RECURSIVE_TARGETS([check-nolink])
+## back to configure.ac file.
 .PHONY: check-nolink check-nolink-local
 check-nolink: check-nolink-local
 	$(MAKE) --directory tests $(AM_MAKEFLAGS) check-nolink-local

+ 1 - 1
blitz.mod/bdwgc/libatomic_ops/README.md

@@ -23,7 +23,7 @@ Also, the latest bug fixes and new features are available in the
 ## Overview
 
 This package provides semi-portable access to hardware-provided
-atomic memory update operations on a number architectures.  These might
+atomic memory update operations on a number of architectures.  These might
 allow you to write code:
 
 * That does more interesting things in signal handlers.
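
A minimal client-side sketch of what such code can look like, assuming the target supplies these primitives directly or through the library's generalization layer (the flag and counter names are illustrative only):

    #include <assert.h>
    #include "atomic_ops.h"

    static volatile AO_t ready;     /* publication flag */
    static volatile AO_t counter;   /* shared counter   */

    void producer(void)
    {
      AO_fetch_and_add1(&counter);      /* atomic increment */
      AO_store_release(&ready, 1);      /* make prior writes visible first */
    }

    int consumer(void)
    {
      if (AO_load_acquire(&ready)) {    /* pairs with the release store */
        assert(AO_load(&counter) > 0);
        return 1;
      }
      return 0;
    }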

+ 18 - 5
blitz.mod/bdwgc/libatomic_ops/configure.ac

@@ -1,4 +1,17 @@
-# Process this file with autoconf to produce a configure script.
+# Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2009-2018 Ivan Maidanski
+#
+# THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+# OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+#
+# Permission is hereby granted to use or copy this program
+# for any purpose,  provided the above notices are retained on all copies.
+# Permission to modify the code and to distribute modified code is granted,
+# provided the above notices are retained, and a notice that the code was
+# modified is included with the above copyright notice.
+
+dnl Process this file with autoconf to produce configure.
+
 AC_INIT([libatomic_ops],[7.7.0],https://github.com/ivmai/libatomic_ops/issues)
 
 AC_PREREQ(2.61)
@@ -10,12 +23,12 @@ AM_MAINTAINER_MODE
 
 AC_CONFIG_HEADERS([src/config.h])
 
-# Checks for programs.
+dnl Checks for programs.
 AM_PROG_CC_C_O
 AM_PROG_AS
 LT_INIT([disable-shared])
 
-# Checks for functions.
+dnl Checks for functions.
 AC_FUNC_MMAP
 
 # Determine PIC flag.
@@ -133,7 +146,7 @@ AM_CONDITIONAL(ENABLE_DOCS, test x$enable_docs != xno)
 AC_SUBST(PICFLAG)
 AC_SUBST(DEFS)
 
-# Extra user-defined C flags.
+dnl Extra user-defined C flags.
 AC_SUBST([CFLAGS_EXTRA])
 
 AH_TEMPLATE([_PTHREADS], [Indicates the use of pthreads (NetBSD).])
@@ -149,7 +162,7 @@ AH_TEMPLATE([AO_USE_WIN32_PTHREADS],
          are emulated)])
 AH_TEMPLATE([AO_TRACE_MALLOC], [Trace AO_malloc/free calls (for debug only)])
 
-# These macros are tested in public headers
+dnl These macros are tested in public headers.
 AH_TEMPLATE([AO_GENERALIZE_ASM_BOOL_CAS],
         [Force compare_and_swap definition via fetch_compare_and_swap])
 AH_TEMPLATE([AO_PREFER_GENERALIZED],

+ 1 - 1
blitz.mod/bdwgc/libatomic_ops/src/Makefile.am

@@ -90,8 +90,8 @@ nobase_private_HEADERS = atomic_ops/ao_version.h \
           atomic_ops/sysdeps/gcc/ia64.h \
           atomic_ops/sysdeps/gcc/m68k.h \
           atomic_ops/sysdeps/gcc/mips.h \
-          atomic_ops/sysdeps/gcc/nios2.h \
           atomic_ops/sysdeps/gcc/powerpc.h \
+          atomic_ops/sysdeps/gcc/riscv.h \
           atomic_ops/sysdeps/gcc/s390.h \
           atomic_ops/sysdeps/gcc/sh.h \
           atomic_ops/sysdeps/gcc/sparc.h \

+ 21 - 37
blitz.mod/bdwgc/libatomic_ops/src/atomic_ops.h

@@ -285,72 +285,56 @@
     /* it might require specifying additional options (like -march)     */
     /* or additional link libraries (if -march is not specified).       */
 #   include "atomic_ops/sysdeps/gcc/x86.h"
-# endif /* __i386__ */
-# if defined(__x86_64__)
+# elif defined(__x86_64__)
 #   if AO_GNUC_PREREQ(4, 2) && !defined(AO_USE_SYNC_CAS_BUILTIN)
       /* It is safe to use __sync CAS built-in on this architecture.    */
 #     define AO_USE_SYNC_CAS_BUILTIN
 #   endif
 #   include "atomic_ops/sysdeps/gcc/x86.h"
-# endif /* __x86_64__ */
-# if defined(__ia64__)
+# elif defined(__ia64__)
 #   include "atomic_ops/sysdeps/gcc/ia64.h"
 #   define AO_GENERALIZE_TWICE
-# endif /* __ia64__ */
-# if defined(__hppa__)
+# elif defined(__hppa__)
 #   include "atomic_ops/sysdeps/gcc/hppa.h"
 #   define AO_CAN_EMUL_CAS
-# endif /* __hppa__ */
-# if defined(__alpha__)
+# elif defined(__alpha__)
 #   include "atomic_ops/sysdeps/gcc/alpha.h"
 #   define AO_GENERALIZE_TWICE
-# endif /* __alpha__ */
-# if defined(__s390__)
+# elif defined(__s390__)
 #   include "atomic_ops/sysdeps/gcc/s390.h"
-# endif /* __s390__ */
-# if defined(__sparc__)
+# elif defined(__sparc__)
 #   include "atomic_ops/sysdeps/gcc/sparc.h"
 #   define AO_CAN_EMUL_CAS
-# endif /* __sparc__ */
-# if defined(__m68k__)
+# elif defined(__m68k__)
 #   include "atomic_ops/sysdeps/gcc/m68k.h"
-# endif /* __m68k__ */
-# if defined(__nios2__)
-#   include "atomic_ops/sysdeps/gcc/nios2.h"
-# endif /* __nios2__ */
-# if defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
-     || defined(__powerpc64__) || defined(__ppc64__) \
-     || defined(_ARCH_PPC)
+# elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
+       || defined(__powerpc64__) || defined(__ppc64__) || defined(_ARCH_PPC)
 #   include "atomic_ops/sysdeps/gcc/powerpc.h"
-# endif /* __powerpc__ */
-# if defined(__aarch64__)
+# elif defined(__aarch64__)
 #   include "atomic_ops/sysdeps/gcc/aarch64.h"
 #   define AO_CAN_EMUL_CAS
-# endif /* __aarch64__ */
-# if defined(__arm__)
+# elif defined(__arm__)
 #   include "atomic_ops/sysdeps/gcc/arm.h"
 #   define AO_CAN_EMUL_CAS
-# endif /* __arm__ */
-# if defined(__cris__) || defined(CRIS)
+# elif defined(__cris__) || defined(CRIS)
 #   include "atomic_ops/sysdeps/gcc/cris.h"
 #   define AO_CAN_EMUL_CAS
 #   define AO_GENERALIZE_TWICE
-# endif
-# if defined(__mips__)
+# elif defined(__mips__)
 #   include "atomic_ops/sysdeps/gcc/mips.h"
-# endif /* __mips__ */
-# if defined(__sh__) || defined(SH4)
+# elif defined(__sh__) || defined(SH4)
 #   include "atomic_ops/sysdeps/gcc/sh.h"
 #   define AO_CAN_EMUL_CAS
-# endif /* __sh__ */
-# if defined(__avr32__)
+# elif defined(__avr32__)
 #   include "atomic_ops/sysdeps/gcc/avr32.h"
-# endif
-# if defined(__hexagon__)
+# elif defined(__hexagon__)
 #   include "atomic_ops/sysdeps/gcc/hexagon.h"
-# endif
-# if defined(__tile__)
+# elif defined(__riscv)
+#   include "atomic_ops/sysdeps/gcc/riscv.h"
+# elif defined(__tile__)
 #   include "atomic_ops/sysdeps/gcc/tile.h"
+# else /* __nios2__, etc. */
+#   include "atomic_ops/sysdeps/gcc/generic.h"
 # endif
 #endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
 

+ 12 - 3
blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/mips.h

@@ -41,14 +41,23 @@
 
 # if !defined(_ABI64) || _MIPS_SIM != _ABI64
 #   define AO_T_IS_INT
-#   define AO_MIPS_SET_ISA    "       .set mips2\n"
+#   if __mips_isa_rev >= 6
+      /* Encoding of ll/sc in mips rel6 differs from that of mips2/3. */
+#     define AO_MIPS_SET_ISA  ""
+#   else
+#     define AO_MIPS_SET_ISA  "       .set mips2\n"
+#   endif
 #   define AO_MIPS_LL_1(args) "       ll " args "\n"
 #   define AO_MIPS_SC(args)   "       sc " args "\n"
 # else
-#   define AO_MIPS_SET_ISA    "       .set mips3\n"
+#   if __mips_isa_rev >= 6
+#     define AO_MIPS_SET_ISA  ""
+#   else
+#     define AO_MIPS_SET_ISA  "       .set mips3\n"
+#   endif
 #   define AO_MIPS_LL_1(args) "       lld " args "\n"
 #   define AO_MIPS_SC(args)   "       scd " args "\n"
-# endif
+# endif /* _MIPS_SIM == _ABI64 */
 
 #ifdef AO_ICE9A1_LLSC_WAR
   /* ICE9 rev A1 chip (used in very few systems) is reported to */

+ 0 - 14
blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/nios2.h

@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2016 Marek Vasut <[email protected]>
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-#include "generic.h"

+ 22 - 0
blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/riscv.h

@@ -0,0 +1,22 @@
+/*
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* As of gcc-7.2.0, some __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n are missing. */
+/* The operations are lock-free (even for the types smaller than word).  */
+#define AO_GCC_FORCE_HAVE_CAS
+
+/* While double-word atomic operations are provided by the compiler     */
+/* (which requires -latomic currently), they are not lock-free as       */
+/* riscv itself does not have the double-word atomic operations.        */
+
+#include "generic.h"
+
+#undef AO_GCC_FORCE_HAVE_CAS

+ 9 - 1
blitz.mod/bdwgc/libatomic_ops/src/atomic_ops/sysdeps/gcc/x86.h

@@ -67,7 +67,15 @@
 #       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
 #     endif
 #   endif /* __x86_64__ */
-# endif /* __clang__ */
+
+# elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \
+       && !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__)
+    /* gcc-7.x/x64 (gcc-7.2, at least) requires -latomic flag in case   */
+    /* of double-word atomic operations use (but not in case of TSan).  */
+    /* TODO: Revise it for the future gcc-7 releases. */
+#   define AO_SKIPATOMIC_double_compare_and_swap_ANY
+#   define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
+# endif /* __GNUC__ && !__clang__ */
 
 # ifdef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
 #   define AO_SKIPATOMIC_double_load
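
For context, a hedged illustration (not part of the library) of the double-word operation class these guards are about; as the comment above notes, gcc-7.x on x86_64 tends to lower it to a libatomic call, so linking may need -latomic:

    /* Illustrative only: the kind of double-word operation covered by the  */
    /* AO_SKIPATOMIC_double_* guards above; with gcc-7.x on x86_64 this     */
    /* typically becomes a libatomic call, hence the -latomic requirement.  */
    #include <stdbool.h>

    typedef unsigned __int128 dword_t;          /* double word on x86_64 */

    static dword_t cell;

    bool dword_cas(dword_t expected, dword_t desired)
    {
      return __atomic_compare_exchange_n(&cell, &expected, desired,
                                         false /* strong */,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }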

+ 14 - 8
blitz.mod/bdwgc/mach_dep.c

@@ -237,6 +237,7 @@ GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
       /* Older versions of Darwin seem to lack getcontext().    */
       /* ARM and MIPS Linux often doesn't support a real        */
       /* getcontext().                                          */
+      static signed char getcontext_works = 0; /* (-1) - broken, 1 - works */
       ucontext_t ctxt;
 #     ifdef GETCONTEXT_FPU_EXCMASK_BUG
         /* Workaround a bug (clearing the FPU exception mask) in        */
@@ -255,12 +256,17 @@ GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
 #       endif
 #     endif
 
-      if (getcontext(&ctxt) < 0) {
-        WARN("getcontext failed:"
-             " using another register retrieval method...\n", 0);
-        /* E.g., to workaround a bug in Docker ubuntu_32bit.    */
-      } else {
-        context = &ctxt;
+      if (getcontext_works >= 0) {
+        if (getcontext(&ctxt) < 0) {
+          WARN("getcontext failed:"
+               " using another register retrieval method...\n", 0);
+          /* getcontext() is broken, do not try again.          */
+          /* E.g., to workaround a bug in Docker ubuntu_32bit.  */
+        } else {
+          context = &ctxt;
+        }
+        if (EXPECT(0 == getcontext_works, FALSE))
+          getcontext_works = context != NULL ? 1 : -1;
       }
 #     ifdef GETCONTEXT_FPU_EXCMASK_BUG
 #       ifdef X86_64
@@ -298,8 +304,8 @@ GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
         /* We're not sure whether he would like  */
         /* to be acknowledged for it or not.     */
         jmp_buf regs;
-        register word * i = (word *) &regs;
-        register ptr_t lim = (ptr_t)(&regs) + (sizeof regs);
+        word * i = (word *)&regs;
+        ptr_t lim = (ptr_t)(&regs) + sizeof(regs);
 
         /* Setjmp doesn't always clear all of the buffer.               */
         /* That tends to preserve garbage.  Clear it.                   */
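
The getcontext_works logic above is a probe-once pattern: attempt the call the first time, record whether it worked, and skip it once it is known to be broken. A standalone sketch of that pattern, where probe_feature() is a hypothetical stand-in rather than a GC function:

    /* probe_feature() is a hypothetical stand-in for the real call. */
    static int probe_feature(void) { return 1; }

    /* 0 - unknown, 1 - works, (-1) - broken; mirrors getcontext_works above. */
    static signed char feature_works = 0;

    int use_feature_if_possible(void)
    {
      if (feature_works >= 0) {
        int ok = probe_feature();
        if (feature_works == 0)
          feature_works = ok ? 1 : -1;  /* remember the first outcome */
        return ok;
      }
      return 0;                         /* known broken: do not probe again */
    }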

+ 8 - 23
blitz.mod/bdwgc/malloc.c

@@ -31,10 +31,6 @@ STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
     return(TRUE);
 }
 
-GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
-                                      GC_bool ignore_off_page,
-                                      GC_bool retry); /* from alloc.c */
-
 /* Allocate a large block of size lb bytes.     */
 /* The block is not cleared.                    */
 /* Flags is 0 or IGNORE_OFF_PAGE.               */
@@ -102,18 +98,6 @@ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
     return result;
 }
 
-/* This function should be called with the allocation lock held.        */
-/* At the same time, it is safe to get a value from GC_size_map not     */
-/* acquiring the allocation lock provided the obtained value is used    */
-/* according to the pattern given in alloc.c file (see the comment      */
-/* about GC_allocobj usage and, e.g., GC_malloc_kind_global code).      */
-GC_ATTR_NO_SANITIZE_THREAD
-static void fill_size_map(size_t low_limit, size_t byte_sz, size_t granule_sz)
-{
-  for (; low_limit <= byte_sz; low_limit++)
-    GC_size_map[low_limit] = granule_sz;
-}
-
 /* Fill in additional entries in GC_size_map, including the i-th one.   */
 /* Note that a filled in section of the array ending at n always        */
 /* has the length of at least n/4.                                      */
@@ -163,13 +147,13 @@ STATIC void GC_extend_size_map(size_t i)
                         /* We may need one extra byte; do not always    */
                         /* fill in GC_size_map[byte_sz].                */
 
-  fill_size_map(low_limit, byte_sz, granule_sz);
+  for (; low_limit <= byte_sz; low_limit++)
+    GC_size_map[low_limit] = granule_sz;
 }
 
-/* Allocate lb bytes for an object of kind k.   */
-/* Should not be used to directly to allocate   */
-/* objects such as STUBBORN objects that        */
-/* require special handling on allocation.      */
+/* Allocate lb bytes for an object of kind k.           */
+/* Should not be used directly to allocate objects      */
+/* that require special handling on allocation.         */
 GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
 {
     void *op;
@@ -311,11 +295,12 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
     if (SMALL_OBJ(lb)) {
         void *op;
         void **opp;
-        size_t lg = GC_size_map[lb];
+        size_t lg;
         DCL_LOCK_STATE;
 
         GC_DBG_COLLECT_AT_MALLOC(lb);
         LOCK();
+        lg = GC_size_map[lb];
         opp = &GC_obj_kinds[k].ok_freelist[lg];
         op = *opp;
         if (EXPECT(op != NULL, TRUE)) {
@@ -376,8 +361,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
         if (EXTRA_BYTES != 0 && lb != 0) lb--;
                   /* We don't need the extra byte, since this won't be  */
                   /* collected anyway.                                  */
-        lg = GC_size_map[lb];
         LOCK();
+        lg = GC_size_map[lb];
         opp = &GC_obj_kinds[k].ok_freelist[lg];
         op = *opp;
         if (EXPECT(op != NULL, TRUE)) {
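
Both hunks above move the GC_size_map[lb] lookup after LOCK(), so the shared table is only read while the allocation lock is held. A generic sketch of that ordering using a plain pthread mutex (the names are illustrative, not the collector's internals):

    #include <pthread.h>

    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char size_map[512];   /* stand-in for a shared size table */
    static void *free_lists[256];         /* stand-in for per-size free lists */

    void *alloc_small(unsigned lb)
    {
      unsigned lg;
      void *op;

      if (lb >= sizeof(size_map)) return 0;   /* sketch: small objects only */
      pthread_mutex_lock(&alloc_lock);
      lg = size_map[lb];                  /* consult shared state under the lock */
      op = free_lists[lg];
      if (op != 0)
        free_lists[lg] = *(void **)op;    /* pop the free-list head */
      pthread_mutex_unlock(&alloc_lock);
      return op;
    }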

+ 39 - 10
blitz.mod/bdwgc/mallocx.c

@@ -40,6 +40,7 @@
 /* Some externally visible but unadvertised variables to allow access to */
 /* free lists from inlined allocators without including gc_priv.h        */
 /* or introducing dependencies on internal data structure layouts.       */
+#include "gc_alloc_ptrs.h"
 void ** const GC_objfreelist_ptr = GC_objfreelist;
 void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
 void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
@@ -61,10 +62,6 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                   int knd)
 {
     switch(knd) {
-#     ifdef STUBBORN_ALLOC
-        case STUBBORN:
-            return GC_malloc_stubborn(lb);
-#     endif
         case PTRFREE:
         case NORMAL:
             return GC_malloc_kind(lb, knd);
@@ -151,13 +148,9 @@ GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
     }
     if (ADD_SLOP(lb) <= sz) {
         if (lb >= (sz >> 1)) {
-#           ifdef STUBBORN_ALLOC
-                if (obj_kind == STUBBORN) GC_change_stubborn(p);
-#           endif
             if (orig_sz > lb) {
               /* Clear unneeded part of object to avoid bogus pointer */
               /* tracing.                                             */
-              /* Safe for stubborn objects.                           */
                 BZERO(((ptr_t)p) + lb, orig_sz - lb);
             }
             return(p);
@@ -318,11 +311,24 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
     DCL_LOCK_STATE;
 
     GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
-    if (!SMALL_OBJ(lb)) {
+    if (!SMALL_OBJ(lb)
+#     ifdef MANUAL_VDB
+        /* Currently a single object is allocated.                      */
+        /* TODO: GC_dirty should be called for each linked object (but  */
+        /* the last one) to support multiple objects allocation.        */
+        || GC_incremental
+#     endif
+       ) {
         op = GC_generic_malloc(lb, k);
         if (EXPECT(0 != op, TRUE))
             obj_link(op) = 0;
         *result = op;
+#       ifdef MANUAL_VDB
+          if (GC_is_heap_ptr(result)) {
+            GC_dirty(result);
+            REACHABLE_AFTER_DIRTY(op);
+          }
+#       endif
         return;
     }
     GC_ASSERT(k < MAXOBJKINDS);
@@ -532,7 +538,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
 GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
 {
   /* Check alignment properly.  */
-  if (((align - 1) & align) != 0 || align < sizeof(void *)) {
+  size_t align_minus_one = align - 1; /* to workaround a cppcheck warning */
+  if (align < sizeof(void *) || (align_minus_one & align) != 0) {
 #   ifdef MSWINCE
       return ERROR_INVALID_PARAMETER;
 #   else
@@ -606,3 +613,25 @@ GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
     return copy;
   }
 #endif /* GC_REQUIRE_WCSDUP */
+
+GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
+{
+  return GC_malloc(lb);
+}
+
+GC_API void GC_CALL GC_change_stubborn(const void *p GC_ATTR_UNUSED)
+{
+  /* Empty. */
+}
+
+GC_API void GC_CALL GC_end_stubborn_change(const void *p)
+{
+  GC_dirty(p); /* entire object */
+}
+
+GC_API void GC_CALL GC_ptr_store_and_dirty(void *p, const void *q)
+{
+  *(const void **)p = q;
+  GC_dirty(p);
+  REACHABLE_AFTER_DIRTY(q);
+}
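
With stubborn allocation reduced to stubs, GC_end_stubborn_change and the new GC_ptr_store_and_dirty become thin wrappers around GC_dirty. A hedged usage sketch (struct node is illustrative; the notifications matter only when incremental or manual-VDB collection is enabled and are harmless otherwise):

    #include "gc.h"

    struct node { struct node *next; void *payload; };

    void link_nodes(struct node *a, struct node *b, void *data)
    {
      /* Store a single pointer field and notify the collector in one call. */
      GC_ptr_store_and_dirty(&a->next, b);

      /* Or: perform several plain stores, then mark the whole object dirty. */
      a->payload = data;
      GC_end_stubborn_change(a);
    }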

+ 43 - 72
blitz.mod/bdwgc/mark.c

@@ -40,8 +40,7 @@ void GC_noop6(word arg1 GC_ATTR_UNUSED, word arg2 GC_ATTR_UNUSED,
               word arg5 GC_ATTR_UNUSED, word arg6 GC_ATTR_UNUSED)
 {
   /* Avoid GC_noop6 calls to be optimized away. */
-# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) \
-     || defined(PARALLEL_MARK)
+# ifdef AO_CLEAR
     AO_compiler_barrier(); /* to serve as a special side-effect */
 # else
     GC_noop1(0);
@@ -83,19 +82,8 @@ GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
                 /* 0 | */ GC_DS_LENGTH, FALSE /* add length to descr */, FALSE
                 /*, */ OK_DISCLAIM_INITZ },
 # endif
-# ifdef STUBBORN_ALLOC
-              { (void **)&GC_sobjfreelist[0], 0,
-                /* 0 | */ GC_DS_LENGTH, TRUE /* add length to descr */, TRUE
-                /*, */ OK_DISCLAIM_INITZ },
-# endif
 };
 
-# ifdef STUBBORN_ALLOC
-#   define GC_N_KINDS_INITIAL_VALUE (STUBBORN+1)
-# else
-#   define GC_N_KINDS_INITIAL_VALUE STUBBORN
-# endif
-
 GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
 
 # ifndef INITIAL_MARK_STACK_SIZE
@@ -108,7 +96,7 @@ GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
                 /* let it grow dynamically.                             */
 # endif
 
-#if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+#if !defined(GC_DISABLE_INCREMENTAL)
   STATIC word GC_n_rescuing_pages = 0;
                                 /* Number of dirty pages we marked from */
                                 /* excludes ptrfree pages, etc.         */
@@ -188,7 +176,7 @@ GC_INNER void GC_set_hdr_marks(hdr *hhdr)
  */
 static void clear_marks_for_block(struct hblk *h, word dummy GC_ATTR_UNUSED)
 {
-    register hdr * hhdr = HDR(h);
+    hdr * hhdr = HDR(h);
 
     if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
         /* Mark bit for these is cleared only once the object is        */
@@ -256,14 +244,11 @@ GC_INNER void GC_clear_marks(void)
     scan_ptr = 0;
 }
 
-#ifdef CHECKSUMS
-  void GC_check_dirty(void);
-#endif
-
 /* Initiate a garbage collection.  Initiates a full collection if the   */
 /* mark state is invalid.                                               */
 GC_INNER void GC_initiate_gc(void)
 {
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifndef GC_DISABLE_INCREMENTAL
         if (GC_incremental) {
 #         ifdef CHECKSUMS
@@ -273,13 +258,10 @@ GC_INNER void GC_initiate_gc(void)
 #         endif
         }
 #   endif
-#   ifdef STUBBORN_ALLOC
-        GC_read_changed();
-#   endif
 #   ifdef CHECKSUMS
         if (GC_incremental) GC_check_dirty();
 #   endif
-#   if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+#   if !defined(GC_DISABLE_INCREMENTAL)
         GC_n_rescuing_pages = 0;
 #   endif
     if (GC_mark_state == MS_NONE) {
@@ -344,8 +326,7 @@ static void alloc_mark_stack(size_t);
             } else {
                 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                 if (scan_ptr == 0) {
-#                 if !defined(GC_DISABLE_INCREMENTAL) \
-                     || defined(STUBBORN_ALLOC)
+#                 if !defined(GC_DISABLE_INCREMENTAL)
                     GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n",
                                        (unsigned long)GC_n_rescuing_pages);
 #                 endif
@@ -487,12 +468,6 @@ static void alloc_mark_stack(size_t);
     }
 # endif /* __GNUC__ && MSWIN32 */
 
-#if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
-  GC_INNER GC_bool GC_started_thread_while_stopped(void);
-  /* In win32_threads.c.  Did we invalidate mark phase with an  */
-  /* unexpected thread start?                                   */
-#endif
-
   GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
   {
       GC_bool ret_val;
@@ -1336,11 +1311,10 @@ static void alloc_mark_stack(size_t n)
         } else {
           WARN("Failed to grow mark stack to %" WARN_PRIdPTR " frames\n", n);
         }
+    } else if (NULL == new_stack) {
+        GC_err_printf("No space for mark stack\n");
+        EXIT();
     } else {
-        if (new_stack == 0) {
-            GC_err_printf("No space for mark stack\n");
-            EXIT();
-        }
         GC_mark_stack = new_stack;
         GC_mark_stack_size = n;
         GC_mark_stack_limit = new_stack + n;
@@ -1408,6 +1382,11 @@ GC_API void GC_CALL GC_push_all(void *bottom, void *top)
         return;
     }
     if ((*dirty_fn)(h-1)) {
+        if ((word)(GC_mark_stack_top - GC_mark_stack)
+            > 3 * GC_mark_stack_size / 4) {
+            GC_push_all(bottom, top);
+            return;
+        }
         GC_push_all(bottom, h);
     }
 
@@ -1428,9 +1407,6 @@ GC_API void GC_CALL GC_push_all(void *bottom, void *top)
     if ((ptr_t)h != top && (*dirty_fn)(h)) {
        GC_push_all(h, top);
     }
-    if ((word)GC_mark_stack_top >= (word)GC_mark_stack_limit) {
-        ABORT("Unexpected mark stack overflow");
-    }
   }
 
   GC_API void GC_CALL GC_push_conditional(void *bottom, void *top, int all)
@@ -1488,10 +1464,6 @@ GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
     return mark_stack_ptr;
 }
 
-#if defined(MANUAL_VDB) && defined(THREADS)
-  void GC_dirty(ptr_t p);
-#endif
-
 /* Mark and push (i.e. gray) a single object p onto the main    */
 /* mark stack.  Consider p to be valid if it is an interior     */
 /* pointer.                                                     */
@@ -1522,10 +1494,10 @@ GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
         GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
         return;
     }
-#   if defined(MANUAL_VDB) && defined(THREADS)
+#   ifdef THREADS
       /* Pointer is on the stack.  We may have dirtied the object       */
-      /* it points to, but not yet have called GC_dirty();              */
-      GC_dirty(p);      /* Implicitly affects entire object.            */
+      /* it points to, but have not called GC_dirty yet.                */
+      GC_dirty(p); /* entire object */
 #   endif
     PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
                       source, hhdr, FALSE);
@@ -1604,10 +1576,10 @@ GC_API void GC_CALL GC_push_all_eager(void *bottom, void *top)
 {
     word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
     word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
-    register word *p;
-    register word *lim;
-    register ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
-    register ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
+    REGISTER word *p;
+    REGISTER word *lim;
+    REGISTER ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
+    REGISTER ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
 #   define GC_greatest_plausible_heap_addr greatest_ha
 #   define GC_least_plausible_heap_addr least_ha
 
@@ -1617,7 +1589,8 @@ GC_API void GC_CALL GC_push_all_eager(void *bottom, void *top)
       lim = t - 1 /* longword */;
       for (p = b; (word)p <= (word)lim;
            p = (word *)(((ptr_t)p) + ALIGNMENT)) {
-        register word q = *p;
+        REGISTER word q = *p;
+
         GC_PUSH_ONE_STACK(q, p);
       }
 #   undef GC_greatest_plausible_heap_addr
@@ -1649,10 +1622,10 @@ GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
   {
     word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
     word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
-    register word *p;
-    register word *lim;
-    register ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
-    register ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
+    REGISTER word *p;
+    REGISTER word *lim;
+    REGISTER ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
+    REGISTER ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
 #   define GC_greatest_plausible_heap_addr greatest_ha
 #   define GC_least_plausible_heap_addr least_ha
 
@@ -1662,7 +1635,8 @@ GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
 
     lim = t - 1;
     for (p = b; (word)p <= (word)lim; p = (word *)((ptr_t)p + ALIGNMENT)) {
-      register word q = *p;
+      REGISTER word q = *p;
+
       GC_PUSH_ONE_HEAP(q, p, GC_mark_stack_top);
     }
 #   undef GC_greatest_plausible_heap_addr
@@ -1878,7 +1852,7 @@ STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
     /* Some quick shortcuts: */
         if ((/* 0 | */ GC_DS_LENGTH) == descr) return;
         if (GC_block_empty(hhdr)/* nothing marked */) return;
-#   if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+#   if !defined(GC_DISABLE_INCREMENTAL)
       GC_n_rescuing_pages++;
 #   endif
     GC_objects_are_marked = TRUE;
@@ -1903,15 +1877,18 @@ STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
          break;
 #     endif
 #    endif
+#   else
+     case 1: /* to suppress "switch statement contains no case" warning */
 #   endif
      default:
       GC_mark_stack_top_reg = GC_mark_stack_top;
       for (p = h -> hb_body, bit_no = 0; (word)p <= (word)lim;
            p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
-         if (mark_bit_from_hdr(hhdr, bit_no)) {
-           /* Mark from fields inside the object */
-             PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
-         }
+        if (mark_bit_from_hdr(hhdr, bit_no)) {
+          /* Mark from fields inside the object. */
+          GC_mark_stack_top_reg = GC_push_obj(p, hhdr, GC_mark_stack_top_reg,
+                                              mark_stack_limit);
+        }
       }
       GC_mark_stack_top = GC_mark_stack_top_reg;
     }
@@ -1939,7 +1916,7 @@ STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
     if ((/* 0 | */ GC_DS_LENGTH) == descr)
         return;
 
-#   if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+#   if !defined(GC_DISABLE_INCREMENTAL)
       GC_n_rescuing_pages++;
 #   endif
     GC_objects_are_marked = TRUE;
@@ -1950,8 +1927,9 @@ STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
 
     GC_mark_stack_top_reg = GC_mark_stack_top;
     for (p = h -> hb_body; (word)p <= (word)lim; p += sz)
-        if ((*(word *)p & 0x3) != 0)
-            PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
+      if ((*(word *)p & 0x3) != 0)
+        GC_mark_stack_top_reg = GC_push_obj(p, hhdr, GC_mark_stack_top_reg,
+                                            mark_stack_limit);
     GC_mark_stack_top = GC_mark_stack_top_reg;
   }
 #endif /* ENABLE_DISCLAIM */
@@ -2012,15 +1990,8 @@ STATIC struct hblk * GC_push_next_marked(struct hblk *h)
             if (NULL == h) ABORT("Bad HDR() definition");
 #         endif
         }
-#       ifdef STUBBORN_ALLOC
-          if (hhdr -> hb_obj_kind == STUBBORN) {
-            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr))
-                break;
-          } else
-#       endif
-        /* else */ {
-          if (GC_block_was_dirty(h, hhdr)) break;
-        }
+        if (GC_block_was_dirty(h, hhdr))
+          break;
         h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
         hhdr = HDR(h);
     }

+ 102 - 18
blitz.mod/bdwgc/mark_rts.c

@@ -183,7 +183,7 @@ void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
       /* virtually guaranteed to be dominated by the time it    */
       /* takes to scan the roots.                               */
       {
-        register int i;
+        int i;
         struct roots * old = NULL; /* initialized to prevent warning. */
 
         for (i = 0; i < n_root_sets; i++) {
@@ -236,12 +236,18 @@ void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
         struct roots * old = (struct roots *)GC_roots_present(b);
 
         if (old != 0) {
-          if ((word)e <= (word)old->r_end)
+          if ((word)e <= (word)old->r_end) {
+            old -> r_tmp &= tmp;
             return; /* already there */
-          /* else extend */
-          GC_root_size += e - old -> r_end;
-          old -> r_end = e;
-          return;
+          }
+          if (old -> r_tmp == tmp || !tmp) {
+            /* Extend the existing root. */
+            GC_root_size += e - old -> r_end;
+            old -> r_end = e;
+            old -> r_tmp = tmp;
+            return;
+          }
+          b = old -> r_end;
         }
       }
 #   endif
@@ -317,6 +323,9 @@ STATIC void GC_remove_root_at_pos(int i)
 STATIC void GC_remove_tmp_roots(void)
 {
     int i;
+#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
+      int old_n_roots = n_root_sets;
+#   endif
 
     for (i = 0; i < n_root_sets; ) {
         if (GC_static_roots[i].r_tmp) {
@@ -326,7 +335,8 @@ STATIC void GC_remove_tmp_roots(void)
         }
     }
 #   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
-      GC_rebuild_root_index();
+      if (n_root_sets < old_n_roots)
+        GC_rebuild_root_index();
 #   endif
 }
 #endif
@@ -352,18 +362,101 @@ STATIC void GC_remove_tmp_roots(void)
   STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
   {
     int i;
+    GC_bool rebuild = FALSE;
+
     for (i = 0; i < n_root_sets; ) {
         if ((word)GC_static_roots[i].r_start >= (word)b
             && (word)GC_static_roots[i].r_end <= (word)e) {
             GC_remove_root_at_pos(i);
+            rebuild = TRUE;
         } else {
             i++;
         }
     }
-    GC_rebuild_root_index();
+    if (rebuild)
+        GC_rebuild_root_index();
   }
 #endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */
 
+#ifdef USE_PROC_FOR_LIBRARIES
+  /* Remove given range from every static root which intersects with    */
+  /* the range.  It is assumed GC_remove_tmp_roots is called before     */
+  /* this function is called repeatedly by GC_register_map_entries.     */
+  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
+  {
+    int i;
+    GC_bool rebuild = FALSE;
+
+    GC_ASSERT(I_HOLD_LOCK());
+    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
+    for (i = 0; i < n_root_sets; i++) {
+      ptr_t r_start, r_end;
+
+      if (GC_static_roots[i].r_tmp) {
+        /* The remaining roots are skipped as they are all temporary. */
+#       ifdef GC_ASSERTIONS
+          int j;
+          for (j = i + 1; j < n_root_sets; j++) {
+            GC_ASSERT(GC_static_roots[j].r_tmp);
+          }
+#       endif
+        break;
+      }
+      r_start = GC_static_roots[i].r_start;
+      r_end = GC_static_roots[i].r_end;
+      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
+#       ifdef DEBUG_ADD_DEL_ROOTS
+          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
+                        (void *)b, (void *)e,
+                        i, (void *)r_start, (void *)r_end);
+#       endif
+        if ((word)r_start < (word)b) {
+          GC_root_size -= r_end - b;
+          GC_static_roots[i].r_end = b;
+          /* No need to rebuild as hash does not use r_end value. */
+          if ((word)e < (word)r_end) {
+            int j;
+
+            if (rebuild) {
+              GC_rebuild_root_index();
+              rebuild = FALSE;
+            }
+            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
+            for (j = i + 1; j < n_root_sets; j++)
+              if (GC_static_roots[j].r_tmp)
+                break;
+            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
+              /* Exchange the roots to have all temporary ones at the end. */
+              ptr_t tmp_r_start = GC_static_roots[j].r_start;
+              ptr_t tmp_r_end = GC_static_roots[j].r_end;
+
+              GC_static_roots[j].r_start =
+                                GC_static_roots[n_root_sets-1].r_start;
+              GC_static_roots[j].r_end = GC_static_roots[n_root_sets-1].r_end;
+              GC_static_roots[j].r_tmp = FALSE;
+              GC_static_roots[n_root_sets-1].r_start = tmp_r_start;
+              GC_static_roots[n_root_sets-1].r_end = tmp_r_end;
+              GC_static_roots[n_root_sets-1].r_tmp = TRUE;
+              rebuild = TRUE;
+            }
+          }
+        } else {
+          if ((word)e < (word)r_end) {
+            GC_root_size -= e - r_start;
+            GC_static_roots[i].r_start = e;
+          } else {
+            GC_remove_root_at_pos(i);
+            i--;
+          }
+          rebuild = TRUE;
+        }
+      }
+    }
+    if (rebuild)
+      GC_rebuild_root_index();
+  }
+#endif /* USE_PROC_FOR_LIBRARIES */
+
 #if !defined(NO_DEBUGGING)
   /* For the debugging purpose only.                                    */
   /* Workaround for the OS mapping and unmapping behind our back:       */
@@ -371,7 +464,7 @@ STATIC void GC_remove_tmp_roots(void)
   GC_API int GC_CALL GC_is_tmp_root(void *p)
   {
     static int last_root_set = MAX_ROOT_SETS;
-    register int i;
+    int i;
 
     if (last_root_set < n_root_sets
         && (word)p >= (word)GC_static_roots[last_root_set].r_start
@@ -499,10 +592,6 @@ GC_API void GC_CALL GC_exclude_static_roots(void *b, void *e)
 }
 
 #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
-  /* GC_mark_local does not handle memory protection faults yet.  So,   */
-  /* the static data regions are scanned immediately by GC_push_roots.  */
-  GC_INNER void GC_push_conditional_eager(void *bottom, void *top,
-                                          GC_bool all);
 # define GC_PUSH_CONDITIONAL(b, t, all) \
                 (GC_parallel \
                     ? GC_push_conditional_eager(b, t, all) \
@@ -586,11 +675,6 @@ GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
 
 #else /* !THREADS */
 
-# ifdef TRACE_BUF
-    /* Defined in mark.c.       */
-    void GC_add_trace_entry(char *kind, word arg1, word arg2);
-# endif
-
                         /* Similar to GC_push_all_eager, but only the   */
                         /* part hotter than cold_gc_frame is scanned    */
                         /* immediately.  Needed to ensure that callee-  */
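
The bookkeeping above (merging overlapping regions, splitting a root when a subregion is removed, keeping temporary roots grouped at the end) backs the public root-registration calls. A small usage sketch, with my_statics as a hypothetical client data region:

    #include "gc.h"

    static struct { void *slots[64]; } my_statics;   /* hypothetical root area */

    void register_extra_roots(void)
    {
      /* Ask the collector to scan this region for pointers. */
      GC_add_roots(&my_statics, (char *)&my_statics + sizeof(my_statics));
    }

    void unregister_extra_roots(void)
    {
      GC_remove_roots(&my_statics, (char *)&my_statics + sizeof(my_statics));
    }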

+ 57 - 23
blitz.mod/bdwgc/misc.c

@@ -79,7 +79,7 @@
 GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
 
 GC_INNER GC_bool GC_debugging_started = FALSE;
-        /* defined here so we don't have to load debug_malloc.o */
+                /* defined here so we don't have to load dbg_mlc.o */
 
 ptr_t GC_stackbottom = 0;
 
@@ -295,7 +295,11 @@ STATIC void GC_init_size_map(void)
     /* Clear the stack up to about limit.  Return arg.  This function   */
     /* is not static because it could also be erroneously defined in .S */
     /* file, so this error would be caught by the linker.               */
-    void *GC_clear_stack_inner(void *arg, ptr_t limit)
+    void *GC_clear_stack_inner(void *arg,
+#                           if defined(__APPLE_CC__) && !GC_CLANG_PREREQ(6, 0)
+                               volatile /* to workaround some bug */
+#                           endif
+                               ptr_t limit)
     {
 #     define CLEAR_SIZE 213 /* granularity */
       volatile word dummy[CLEAR_SIZE];
@@ -756,7 +760,13 @@ GC_API int GC_CALL GC_is_init_called(void)
   STATIC void GC_exit_check(void)
   {
     if (GC_find_leak && !skip_gc_atexit) {
-      GC_gcollect();
+#     if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
+        GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
+        GC_gcollect();
+        GC_in_thread_creation = FALSE;
+#     else
+        GC_gcollect();
+#     endif
     }
   }
 #endif
@@ -825,6 +835,11 @@ GC_API int GC_CALL GC_is_init_called(void)
   }
 #endif /* MSWIN32 */
 
+#if defined(THREADS) && defined(UNIX_LIKE) && !defined(NO_GETCONTEXT)
+  static void callee_saves_pushed_dummy_fn(ptr_t data GC_ATTR_UNUSED,
+                                           void * context GC_ATTR_UNUSED) {}
+#endif
+
 STATIC word GC_parse_mem_size_arg(const char *str)
 {
   word result = 0; /* bad value */
@@ -867,6 +882,9 @@ GC_API void GC_CALL GC_init(void)
     /* LOCK(); -- no longer does anything this early. */
     word initial_heap_sz;
     IF_CANCEL(int cancel_state;)
+#   if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
+      DCL_LOCK_STATE;
+#   endif
 
     if (EXPECT(GC_is_initialized, TRUE)) return;
 #   ifdef REDIRECT_MALLOC
@@ -1277,33 +1295,34 @@ GC_API void GC_CALL GC_init(void)
       GC_pcr_install();
 #   endif
     GC_is_initialized = TRUE;
+#   if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
+        LOCK(); /* just to set GC_lock_holder */
+#   endif
 #   if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
-#       if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
-          DCL_LOCK_STATE;
-          LOCK(); /* just to set GC_lock_holder */
-          GC_thr_init();
-          UNLOCK();
-#       else
-          GC_thr_init();
-#       endif
+        GC_thr_init();
 #       ifdef PARALLEL_MARK
           /* Actually start helper threads.     */
+#         if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
+            UNLOCK();
+#         endif
           GC_start_mark_threads_inner();
+#         if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
+            LOCK();
+#         endif
 #       endif
 #   endif
     COND_DUMP;
     /* Get black list set up and/or incremental GC started */
-      if (!GC_dont_precollect || GC_incremental) {
-#       if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
-          LOCK();
-          GC_gcollect_inner();
-          UNLOCK();
-#       else
-          GC_gcollect_inner();
-#       endif
-      }
-#   ifdef STUBBORN_ALLOC
-        GC_stubborn_init();
+    if (!GC_dont_precollect || GC_incremental) {
+        GC_gcollect_inner();
+    }
+#   if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
+        UNLOCK();
+#   endif
+#   if defined(THREADS) && defined(UNIX_LIKE) && !defined(NO_GETCONTEXT)
+      /* Ensure getcontext_works is set to avoid potential data race.   */
+      if (GC_dont_gc || GC_dont_precollect)
+        GC_with_callee_saves_pushed(callee_saves_pushed_dummy_fn, NULL);
 #   endif
 #   ifndef DONT_USE_ATEXIT
       if (GC_find_leak) {
@@ -1984,7 +2003,12 @@ GC_API unsigned GC_CALL GC_new_kind_inner(void **fl, GC_word descr,
     unsigned result = GC_n_kinds;
 
     GC_ASSERT(adjust == FALSE || adjust == TRUE);
-    GC_ASSERT(clear == FALSE || clear == TRUE);
+    /* If an object is not needed to be cleared (when moved to the      */
+    /* free list) then its descriptor should be zero to denote          */
+    /* a pointer-free object (and, as a consequence, the size of the    */
+    /* object should not be added to the descriptor template).          */
+    GC_ASSERT(clear == TRUE
+              || (descr == 0 && adjust == FALSE && clear == FALSE));
     if (result < MAXOBJKINDS) {
       GC_n_kinds++;
       GC_obj_kinds[result].ok_freelist = fl;
@@ -2181,7 +2205,11 @@ GC_API void * GC_CALL GC_do_blocking(GC_fn_type fn, void * client_data)
 #if !defined(NO_DEBUGGING)
   GC_API void GC_CALL GC_dump(void)
   {
+    DCL_LOCK_STATE;
+
+    LOCK();
     GC_dump_named(NULL);
+    UNLOCK();
   }
 
   GC_API void GC_CALL GC_dump_named(const char *name)
@@ -2494,3 +2522,9 @@ GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void)
 {
     return (int)GC_force_unmap_on_gcollect;
 }
+
+GC_API void GC_CALL GC_abort_on_oom(void)
+{
+    GC_err_printf("Insufficient memory for the allocation\n");
+    EXIT();
+}
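
The strengthened assertion in GC_new_kind_inner encodes the rule that a kind whose objects are not cleared on allocation must be pointer-free: zero descriptor and no length adjustment. A hedged sketch of the corresponding public calls:

    #include "gc.h"
    #include "gc_mark.h"    /* GC_DS_LENGTH, GC_new_free_list, GC_new_kind */

    void create_custom_kinds(void)
    {
      /* Pointer-free kind: clear == 0 requires descr == 0 and adjust == 0. */
      void **fl_atomic = GC_new_free_list();
      unsigned k_atomic = GC_new_kind(fl_atomic, /* descr */ 0,
                                      /* adjust */ 0, /* clear */ 0);

      /* Ordinary pointer-containing kind: length-based descriptor, cleared. */
      void **fl_normal = GC_new_free_list();
      unsigned k_normal = GC_new_kind(fl_normal, /* 0 | */ GC_DS_LENGTH,
                                      /* adjust */ 1, /* clear */ 1);

      (void)k_atomic;
      (void)k_normal;
    }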

File diff suppressed because it is too large
+ 257 - 276
blitz.mod/bdwgc/os_dep.c


+ 1 - 0
blitz.mod/bdwgc/pthread_start.c

@@ -59,6 +59,7 @@ GC_INNER_PTHRSTART void * GC_CALLBACK GC_inner_start_routine(
     GC_log_printf("Finishing thread %p\n", (void *)pthread_self());
 # endif
   me -> status = result;
+  GC_dirty(me);
 # ifndef NACL
     pthread_cleanup_pop(1);
     /* Cleanup acquires lock, ensuring that we can't exit while         */

+ 207 - 174
blitz.mod/bdwgc/pthread_stop_world.c

@@ -48,8 +48,6 @@
 #include <time.h> /* for nanosleep() */
 #include <unistd.h>
 
-#include "private/gc_atomic_ops.h"
-
 #if (!defined(AO_HAVE_load_acquire) || !defined(AO_HAVE_store_release)) \
     && !defined(CPPCHECK)
 # error AO_load_acquire and/or AO_store_release are missing;
@@ -117,7 +115,8 @@ STATIC void GC_remove_allowed_signals(sigset_t *set)
 static sigset_t suspend_handler_mask;
 
 STATIC volatile AO_t GC_stop_count = 0;
-                        /* Incremented at the beginning of GC_stop_world. */
+                        /* Incremented by two at the beginning of       */
+                        /* GC_stop_world (the lowest bit is always 0).  */
 
 STATIC volatile AO_t GC_world_is_stopped = FALSE;
                         /* FALSE ==> it is safe for threads to restart, */
@@ -125,7 +124,8 @@ STATIC volatile AO_t GC_world_is_stopped = FALSE;
                         /* before they are expected to stop (unless     */
                         /* they have stopped voluntarily).              */
 
-#if defined(GC_OSF1_THREADS) || defined(THREAD_SANITIZER)
+#if defined(GC_OSF1_THREADS) || defined(THREAD_SANITIZER) \
+    || defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)
   STATIC GC_bool GC_retry_signals = TRUE;
 #else
   STATIC GC_bool GC_retry_signals = FALSE;
@@ -188,7 +188,8 @@ GC_API int GC_CALL GC_get_thr_restart_signal(void)
             ? GC_sig_thr_restart : SIG_THR_RESTART;
 }
 
-#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
+#if defined(GC_EXPLICIT_SIGNALS_UNBLOCK) \
+    || !defined(NO_SIGNALS_UNBLOCK_IN_MAIN)
   /* Some targets (e.g., Solaris) might require this to be called when  */
   /* doing thread registering from the thread destructor.               */
   GC_INNER void GC_unblock_gc_signals(void)
@@ -204,12 +205,7 @@ GC_API int GC_CALL GC_get_thr_restart_signal(void)
   }
 #endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
 
-STATIC sem_t GC_suspend_ack_sem;
-
-#ifdef GC_NETBSD_THREADS_WORKAROUND
-  /* In case of it is necessary to wait until threads have restarted.   */
-  STATIC sem_t GC_restart_ack_sem;
-#endif
+STATIC sem_t GC_suspend_ack_sem; /* also used to acknowledge restart */
 
 STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context);
 
@@ -308,6 +304,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
 # ifdef DEBUG_THREADS
     GC_log_printf("Suspending %p\n", (void *)self);
 # endif
+  GC_ASSERT(((word)my_stop_count & 1) == 0);
 
   me = GC_lookup_thread_async(self);
 
@@ -324,7 +321,8 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
     }
 # endif
 
-  if (me -> stop_info.last_stop_count == my_stop_count) {
+  if (((word)me->stop_info.last_stop_count & ~(word)0x1)
+        == (word)my_stop_count) {
       /* Duplicate signal.  OK if we are retrying.      */
       if (!GC_retry_signals) {
           WARN("Duplicate suspend signal in thread %p\n", self);
@@ -334,22 +332,25 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
   }
   GC_store_stack_ptr(me);
 
-  /* Tell the thread that wants to stop the world that this     */
-  /* thread has been stopped.  Note that sem_post() is          */
-  /* the only async-signal-safe primitive in LinuxThreads.      */
-  sem_post(&GC_suspend_ack_sem);
-  AO_store_release(&me->stop_info.last_stop_count, my_stop_count);
-
 # ifdef THREAD_SANITIZER
     /* TSan disables signals around signal handlers.  Without   */
     /* a pthread_sigmask call, sigsuspend may block forever.    */
     {
       sigset_t set;
       sigemptyset(&set);
-      if (pthread_sigmask(SIG_SETMASK, &set, NULL) != 0)
-        ABORT("pthread_sigmask(SIG_SETMASK) failed");
+      GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
+      GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
+      sigaddset(&set, GC_sig_suspend);
+      sigaddset(&set, GC_sig_thr_restart);
+      if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
+        ABORT("pthread_sigmask failed in suspend handler");
     }
 # endif
+  /* Tell the thread that wants to stop the world that this     */
+  /* thread has been stopped.  Note that sem_post() is          */
+  /* the only async-signal-safe primitive in LinuxThreads.      */
+  sem_post(&GC_suspend_ack_sem);
+  AO_store_release(&me->stop_info.last_stop_count, my_stop_count);
 
   /* Wait until that thread tells us to restart by sending      */
   /* this thread a GC_sig_thr_restart signal (should be masked  */
@@ -365,32 +366,108 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
       sigsuspend (&suspend_handler_mask);
   } while (AO_load_acquire(&GC_world_is_stopped)
            && AO_load(&GC_stop_count) == my_stop_count);
-  /* If the RESTART signal gets lost, we can still lose.  That should   */
-  /* be less likely than losing the SUSPEND signal, since we don't do   */
-  /* much between the sem_post and sigsuspend.                          */
-  /* We'd need more handshaking to work around that.                    */
-  /* Simply dropping the sigsuspend call should be safe, but is         */
-  /* unlikely to be efficient.                                          */
 
 # ifdef DEBUG_THREADS
     GC_log_printf("Continuing %p\n", (void *)self);
 # endif
+# ifndef GC_NETBSD_THREADS_WORKAROUND
+    if (GC_retry_signals)
+# endif
+  {
+    /* If the RESTART signal loss is possible (though it should be      */
+    /* less likely than losing the SUSPEND signal as we do not do       */
+    /* much between the first sem_post and sigsuspend calls), more      */
+    /* handshaking is provided to work around it.                       */
+    sem_post(&GC_suspend_ack_sem);
+#   ifdef GC_NETBSD_THREADS_WORKAROUND
+      if (GC_retry_signals)
+#   endif
+    {
+      /* Set the flag (the lowest bit of last_stop_count) that the      */
+      /* thread has been restarted.                                     */
+      AO_store_release(&me->stop_info.last_stop_count,
+                       (AO_t)((word)my_stop_count | 1));
+    }
+  }
   RESTORE_CANCEL(cancel_state);
 }
 
+static void suspend_restart_barrier(int n_live_threads)
+{
+    int i;
+
+    for (i = 0; i < n_live_threads; i++) {
+      while (0 != sem_wait(&GC_suspend_ack_sem)) {
+        /* On Linux, sem_wait is documented to always return zero.      */
+        /* But the documentation appears to be incorrect.               */
+        /* EINTR seems to happen with some versions of gdb.             */
+        if (errno != EINTR)
+          ABORT("sem_wait failed");
+      }
+    }
+#   ifdef GC_ASSERTIONS
+      sem_getvalue(&GC_suspend_ack_sem, &i);
+      GC_ASSERT(0 == i);
+#   endif
+}
+
+static int resend_lost_signals(int n_live_threads,
+                               int (*suspend_restart_all)(void))
+{
+#   define WAIT_UNIT 3000
+#   define RETRY_INTERVAL 100000
+
+    if (n_live_threads > 0) {
+      unsigned long wait_usecs = 0;  /* Total wait since retry. */
+      for (;;) {
+        int ack_count;
+
+        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
+        if (ack_count == n_live_threads)
+          break;
+        if (wait_usecs > RETRY_INTERVAL) {
+          int newly_sent = suspend_restart_all();
+
+          GC_COND_LOG_PRINTF("Resent %d signals after timeout\n", newly_sent);
+          sem_getvalue(&GC_suspend_ack_sem, &ack_count);
+          if (newly_sent < n_live_threads - ack_count) {
+            WARN("Lost some threads while stopping or starting world?!\n", 0);
+            n_live_threads = ack_count + newly_sent;
+          }
+          wait_usecs = 0;
+        }
+
+#       ifdef LINT2
+          /* Workaround "waiting while holding a lock" warning. */
+#         undef WAIT_UNIT
+#         define WAIT_UNIT 1
+          sched_yield();
+#       elif defined(CPPCHECK) /* || _POSIX_C_SOURCE >= 199309L */
+          {
+            struct timespec ts;
+
+            ts.tv_sec = 0;
+            ts.tv_nsec = WAIT_UNIT * 1000;
+            (void)nanosleep(&ts, NULL);
+          }
+#       else
+          usleep(WAIT_UNIT);
+#       endif
+        wait_usecs += WAIT_UNIT;
+      }
+    }
+    return n_live_threads;
+}
+
 STATIC void GC_restart_handler(int sig)
 {
-# if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
+# if defined(DEBUG_THREADS)
     int old_errno = errno;      /* Preserve errno value.        */
 # endif
 
   if (sig != GC_sig_thr_restart)
     ABORT("Bad signal in restart handler");
 
-# ifdef GC_NETBSD_THREADS_WORKAROUND
-    sem_post(&GC_restart_ack_sem);
-# endif
-
   /*
   ** Note: even if we don't do anything useful here,
   ** it would still be necessary to have a signal handler,
@@ -398,17 +475,16 @@ STATIC void GC_restart_handler(int sig)
   ** the signals will not be delivered at all, and
   ** will thus not interrupt the sigsuspend() above.
   */
-
 # ifdef DEBUG_THREADS
     GC_log_printf("In GC_restart_handler for %p\n", (void *)pthread_self());
-# endif
-# if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
     errno = old_errno;
 # endif
 }
 
 # ifdef USE_TKILL_ON_ANDROID
+    EXTERN_C_BEGIN
     extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */
+    EXTERN_C_END
 
     static int android_thread_kill(pid_t tid, int sig)
     {
@@ -432,6 +508,7 @@ STATIC void GC_restart_handler(int sig)
 
 # ifdef GC_ENABLE_SUSPEND_THREAD
 #   include <sys/time.h>
+#   include "javaxfc.h" /* to get the prototypes as extern "C" */
 
     STATIC void GC_brief_async_signal_safe_sleep(void)
     {
@@ -470,7 +547,7 @@ STATIC void GC_restart_handler(int sig)
       /* Set the flag making the change visible to the signal handler.  */
       AO_store_release(&t->suspended_ext, TRUE);
 
-      if ((pthread_t)thread == pthread_self()) {
+      if (THREAD_EQUAL((pthread_t)thread, pthread_self())) {
         UNLOCK();
         /* It is safe as "t" cannot become invalid here (no race with   */
         /* GC_unregister_my_thread).                                    */
@@ -666,10 +743,6 @@ STATIC int GC_suspend_all(void)
 #   endif
     pthread_t self = pthread_self();
 
-#   ifdef DEBUG_THREADS
-      GC_stopping_thread = self;
-      GC_stopping_pid = getpid();
-#   endif
     for (i = 0; i < THREAD_TABLE_SZ; i++) {
       for (p = GC_threads[i]; p != 0; p = p -> next) {
         if (!THREAD_EQUAL(p -> id, self)) {
@@ -680,7 +753,7 @@ STATIC int GC_suspend_all(void)
                 if (p -> suspended_ext) continue;
 #             endif
               if (AO_load(&p->stop_info.last_stop_count) == GC_stop_count)
-                continue;
+                continue; /* matters only if GC_retry_signals */
               n_live_threads++;
 #           endif
 #           ifdef DEBUG_THREADS
@@ -729,15 +802,11 @@ STATIC int GC_suspend_all(void)
     unsigned long num_sleeps = 0;
 
 #   ifdef DEBUG_THREADS
-      GC_log_printf("pthread_stop_world: num_threads %d\n",
+      GC_log_printf("pthread_stop_world: num_threads=%d\n",
                     GC_nacl_num_gc_threads - 1);
 #   endif
     GC_nacl_thread_parker = pthread_self();
     GC_nacl_park_threads_now = 1;
-#   ifdef DEBUG_THREADS
-      GC_stopping_thread = GC_nacl_thread_parker;
-      GC_stopping_pid = getpid();
-#   endif
 
     while (1) {
       int num_threads_parked = 0;
@@ -781,13 +850,13 @@ STATIC int GC_suspend_all(void)
 GC_INNER void GC_stop_world(void)
 {
 # if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
-    int i;
     int n_live_threads;
-    int code;
 # endif
   GC_ASSERT(I_HOLD_LOCK());
 # ifdef DEBUG_THREADS
-    GC_log_printf("Stopping the world from %p\n", (void *)pthread_self());
+    GC_stopping_thread = pthread_self();
+    GC_stopping_pid = getpid();
+    GC_log_printf("Stopping the world from %p\n", (void *)GC_stopping_thread);
 # endif
 
   /* Make sure all free list construction has stopped before we start.  */
@@ -805,65 +874,13 @@ GC_INNER void GC_stop_world(void)
 # if defined(GC_OPENBSD_UTHREADS) || defined(NACL)
     (void)GC_suspend_all();
 # else
-    AO_store(&GC_stop_count, GC_stop_count+1);
+    AO_store(&GC_stop_count, (AO_t)((word)GC_stop_count + 2));
         /* Only concurrent reads are possible. */
     AO_store_release(&GC_world_is_stopped, TRUE);
     n_live_threads = GC_suspend_all();
-
-    if (GC_retry_signals && n_live_threads > 0) {
-      unsigned long wait_usecs = 0;  /* Total wait since retry. */
-#     define WAIT_UNIT 3000
-#     define RETRY_INTERVAL 100000
-      for (;;) {
-        int ack_count;
-
-        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
-        if (ack_count == n_live_threads) break;
-        if (wait_usecs > RETRY_INTERVAL) {
-          int newly_sent = GC_suspend_all();
-
-          GC_COND_LOG_PRINTF("Resent %d signals after timeout\n", newly_sent);
-          sem_getvalue(&GC_suspend_ack_sem, &ack_count);
-          if (newly_sent < n_live_threads - ack_count) {
-            WARN("Lost some threads during GC_stop_world?!\n",0);
-            n_live_threads = ack_count + newly_sent;
-          }
-          wait_usecs = 0;
-        }
-
-#       ifdef LINT2
-          /* Workaround "waiting while holding a lock" warning. */
-#         undef WAIT_UNIT
-#         define WAIT_UNIT 1
-          sched_yield();
-#       elif defined(CPPCHECK) /* || _POSIX_C_SOURCE >= 199309L */
-          {
-            struct timespec ts;
-
-            ts.tv_sec = 0;
-            ts.tv_nsec = WAIT_UNIT * 1000;
-            (void)nanosleep(&ts, NULL);
-          }
-#       else
-          usleep(WAIT_UNIT);
-#       endif
-        wait_usecs += WAIT_UNIT;
-      }
-    }
-
-    for (i = 0; i < n_live_threads; i++) {
-      retry:
-        code = sem_wait(&GC_suspend_ack_sem);
-        if (0 != code) {
-          /* On Linux, sem_wait is documented to always return zero.    */
-          /* But the documentation appears to be incorrect.             */
-          if (errno == EINTR) {
-            /* Seems to happen with some versions of gdb.       */
-            goto retry;
-          }
-          ABORT("sem_wait for handler failed");
-        }
-    }
+    if (GC_retry_signals)
+      n_live_threads = resend_lost_signals(n_live_threads, GC_suspend_all);
+    suspend_restart_barrier(n_live_threads);
 # endif
 
 # ifdef PARALLEL_MARK
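
The retry loop and the acknowledgement wait deleted above are factored into two helpers, resend_lost_signals() and suspend_restart_barrier(), whose definitions fall outside the hunks shown here. A rough reconstruction of what they cover, pieced together from the deleted lines (WAIT_UNIT, RETRY_INTERVAL and the semaphore usage come from the old code; the exact new bodies may differ):

    /* Illustrative reconstruction -- not the verbatim bdwgc sources.        */
    #include <errno.h>
    #include <semaphore.h>
    #include <unistd.h>

    /* GC_suspend_ack_sem, WARN() and ABORT() come from pthread_stop_world.c. */

    static int resend_lost_signals(int n_live_threads,
                                   int (*suspend_restart_all)(void))
    {
    #   define WAIT_UNIT 3000          /* usecs slept between polls          */
    #   define RETRY_INTERVAL 100000   /* usecs before signals are resent    */
        unsigned long wait_usecs = 0;  /* total wait since last retry        */

        if (n_live_threads > 0) {
          for (;;) {
            int ack_count;

            sem_getvalue(&GC_suspend_ack_sem, &ack_count);
            if (ack_count == n_live_threads) break;
            if (wait_usecs > RETRY_INTERVAL) {
              int newly_sent = suspend_restart_all();

              sem_getvalue(&GC_suspend_ack_sem, &ack_count);
              if (newly_sent < n_live_threads - ack_count) {
                WARN("Lost some threads while stopping or starting world?!\n", 0);
                n_live_threads = ack_count + newly_sent;
              }
              wait_usecs = 0;
            }
            usleep(WAIT_UNIT);
            wait_usecs += WAIT_UNIT;
          }
        }
        return n_live_threads;
    }

    static void suspend_restart_barrier(int n_live_threads)
    {
        int i;

        /* The handlers post to GC_suspend_ack_sem; GC_restart_ack_sem is    */
        /* removed elsewhere in this diff, so the same barrier can serve     */
        /* both GC_stop_world() and GC_start_world().                        */
        for (i = 0; i < n_live_threads; i++) {
          while (0 != sem_wait(&GC_suspend_ack_sem)) {
            if (errno != EINTR)   /* EINTR is seen e.g. under some gdb versions */
              ABORT("sem_wait failed");
          }
        }
    }
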
@@ -983,8 +1000,10 @@ GC_INNER void GC_stop_world(void)
     int (*register_block_hooks)(void (*pre)(void), void (*post)(void));
   };
 
+  EXTERN_C_BEGIN
   extern size_t nacl_interface_query(const char *interface_ident,
                                      void *table, size_t tablesize);
+  EXTERN_C_END
 
   GC_INNER void GC_nacl_initialize_gc_thread(void)
   {
@@ -1026,46 +1045,38 @@ GC_INNER void GC_stop_world(void)
     GC_nacl_num_gc_threads--;
     pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
   }
-#endif /* NACL */
 
-/* Caller holds allocation lock, and has held it continuously since     */
-/* the world stopped.                                                   */
-GC_INNER void GC_start_world(void)
-{
-# ifndef NACL
+#else /* !NACL */
+
+  /* Restart all threads that were suspended by the collector.  */
+  /* Return the number of restart signals that were sent.       */
+  STATIC int GC_restart_all(void)
+  {
+    int n_live_threads = 0;
+    int i;
     pthread_t self = pthread_self();
-    register int i;
-    register GC_thread p;
+    GC_thread p;
 #   ifndef GC_OPENBSD_UTHREADS
-      register int n_live_threads = 0;
-      register int result;
-#   endif
-
-#   ifdef DEBUG_THREADS
-      GC_log_printf("World starting\n");
+      int result;
 #   endif
 
-#   ifndef GC_OPENBSD_UTHREADS
-      AO_store_release(&GC_world_is_stopped, FALSE);
-                    /* The updated value should now be visible to the   */
-                    /* signal handler (note that pthread_kill is not on */
-                    /* the list of functions which synchronize memory). */
-#   endif
     for (i = 0; i < THREAD_TABLE_SZ; i++) {
-      for (p = GC_threads[i]; p != 0; p = p -> next) {
+      for (p = GC_threads[i]; p != NULL; p = p -> next) {
         if (!THREAD_EQUAL(p -> id, self)) {
-            if ((p -> flags & FINISHED) != 0) continue;
-            if (p -> thread_blocked) continue;
-#           ifndef GC_OPENBSD_UTHREADS
-#             ifdef GC_ENABLE_SUSPEND_THREAD
-                if (p -> suspended_ext) continue;
-#             endif
-              n_live_threads++;
-#           endif
-#           ifdef DEBUG_THREADS
-              GC_log_printf("Sending restart signal to %p\n", (void *)p->id);
+          if ((p -> flags & FINISHED) != 0) continue;
+          if (p -> thread_blocked) continue;
+#         ifndef GC_OPENBSD_UTHREADS
+#           ifdef GC_ENABLE_SUSPEND_THREAD
+              if (p -> suspended_ext) continue;
 #           endif
-
+            if (GC_retry_signals && AO_load(&p->stop_info.last_stop_count)
+                                    == (AO_t)((word)GC_stop_count | 1))
+              continue; /* The thread has been restarted. */
+            n_live_threads++;
+#         endif
+#         ifdef DEBUG_THREADS
+            GC_log_printf("Sending restart signal to %p\n", (void *)p->id);
+#         endif
 #         ifdef GC_OPENBSD_UTHREADS
             if (pthread_resume_np(p -> id) != 0)
               ABORT("pthread_resume_np failed");
@@ -1074,39 +1085,56 @@ GC_INNER void GC_start_world(void)
 #         else
             result = RAISE_SIGNAL(p, GC_sig_thr_restart);
             switch(result) {
-                case ESRCH:
-                    /* Not really there anymore.  Possible? */
-                    n_live_threads--;
-                    break;
-                case 0:
-                    if (GC_on_thread_event)
-                      GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,
-                                         (void *)(word)THREAD_SYSTEM_ID(p));
-                    break;
-                default:
-                    ABORT_ARG1("pthread_kill failed at resume",
-                               ": errcode= %d", result);
+            case ESRCH:
+              /* Not really there anymore.  Possible?   */
+              n_live_threads--;
+              break;
+            case 0:
+              if (GC_on_thread_event)
+                GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,
+                                   (void *)(word)THREAD_SYSTEM_ID(p));
+              break;
+            default:
+              ABORT_ARG1("pthread_kill failed at resume",
+                         ": errcode= %d", result);
             }
 #         endif
         }
       }
     }
-#   ifdef GC_NETBSD_THREADS_WORKAROUND
-      for (i = 0; i < n_live_threads; i++) {
-        while (0 != sem_wait(&GC_restart_ack_sem)) {
-          if (errno != EINTR) {
-            ABORT_ARG1("sem_wait() for restart handler failed",
-                       ": errcode= %d", errno);
-          }
-        }
-      }
+    return n_live_threads;
+  }
+#endif /* !NACL */
+
+/* Caller holds allocation lock, and has held it continuously since     */
+/* the world stopped.                                                   */
+GC_INNER void GC_start_world(void)
+{
+# ifndef NACL
+    int n_live_threads;
+
+    GC_ASSERT(I_HOLD_LOCK());
+#   ifdef DEBUG_THREADS
+      GC_log_printf("World starting\n");
 #   endif
-#   if defined(GC_ASSERTIONS) && !defined(GC_OPENBSD_UTHREADS)
-      {
-        int ack_count;
-        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
-        GC_ASSERT(0 == ack_count);
-      }
+#   ifndef GC_OPENBSD_UTHREADS
+      AO_store_release(&GC_world_is_stopped, FALSE);
+                    /* The updated value should now be visible to the   */
+                    /* signal handler (note that pthread_kill is not on */
+                    /* the list of functions which synchronize memory). */
+#   endif
+    n_live_threads = GC_restart_all();
+#   ifndef GC_OPENBSD_UTHREADS
+      if (GC_retry_signals)
+        n_live_threads = resend_lost_signals(n_live_threads, GC_restart_all);
+#     ifdef GC_NETBSD_THREADS_WORKAROUND
+        suspend_restart_barrier(n_live_threads);
+#     else
+        if (GC_retry_signals)
+          suspend_restart_barrier(n_live_threads);
+#     endif
+#   else
+      (void)n_live_threads;
 #   endif
 #   ifdef DEBUG_THREADS
       GC_log_printf("World started\n");
@@ -1126,6 +1154,7 @@ GC_INNER void GC_stop_init(void)
 {
 # if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
     struct sigaction act;
+    char *str;
 
     if (SIGNAL_UNSET == GC_sig_suspend)
         GC_sig_suspend = SIG_SUSPEND;
@@ -1136,10 +1165,6 @@ GC_INNER void GC_stop_init(void)
 
     if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
         ABORT("sem_init failed");
-#   ifdef GC_NETBSD_THREADS_WORKAROUND
-      if (sem_init(&GC_restart_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
-        ABORT("sem_init failed");
-#   endif
 
 #   ifdef SA_RESTART
       act.sa_flags = SA_RESTART
@@ -1185,16 +1210,24 @@ GC_INNER void GC_stop_init(void)
     if (sigdelset(&suspend_handler_mask, GC_sig_thr_restart) != 0)
         ABORT("sigdelset failed");
 
-    /* Check for GC_RETRY_SIGNALS.      */
-    if (0 != GETENV("GC_RETRY_SIGNALS")) {
-        GC_retry_signals = TRUE;
-    }
-    if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
-        GC_retry_signals = FALSE;
+    /* Override the default value of GC_retry_signals.  */
+    str = GETENV("GC_RETRY_SIGNALS");
+    if (str != NULL) {
+        if (*str == '0' && *(str + 1) == '\0') {
+            /* Do not retry if the environment variable is set to "0". */
+            GC_retry_signals = FALSE;
+        } else {
+            GC_retry_signals = TRUE;
+        }
     }
     if (GC_retry_signals) {
-      GC_COND_LOG_PRINTF("Will retry suspend signal if necessary\n");
+      GC_COND_LOG_PRINTF(
+                "Will retry suspend and restart signals if necessary\n");
     }
+#   ifndef NO_SIGNALS_UNBLOCK_IN_MAIN
+      /* Explicitly unblock the signals once before new threads creation. */
+      GC_unblock_gc_signals();
+#   endif
 # endif /* !GC_OPENBSD_UTHREADS && !NACL */
 }
 

+ 49 - 54
blitz.mod/bdwgc/pthread_support.c

@@ -287,11 +287,6 @@ STATIC int GC_nprocs = 1;
   }
 
 # if defined(GC_ASSERTIONS)
-    void GC_check_tls_for(GC_tlfs p);
-#   if defined(USE_CUSTOM_SPECIFIC)
-      void GC_check_tsd_marks(tsd *key);
-#   endif
-
     /* Check that all thread-local free-lists are completely marked.    */
     /* Also check that thread-specific-data structures are marked.      */
     void GC_check_tls(void)
@@ -527,6 +522,7 @@ void GC_push_thread_structures(void)
 #endif /* DEBUG_THREADS */
 
 /* It may not be safe to allocate when we register the first thread.    */
+/* As "next" and "status" fields are unused, no need to push this.      */
 static struct GC_Thread_Rep first_thread;
 
 /* Add a thread to GC_threads.  We assume it wasn't already there.      */
@@ -549,6 +545,7 @@ STATIC GC_thread GC_new_thread(pthread_t id)
     if (!EXPECT(first_thread_used, TRUE)) {
         result = &first_thread;
         first_thread_used = TRUE;
+        GC_ASSERT(NULL == GC_threads[hv]);
 #       if defined(THREAD_SANITIZER) && defined(CPPCHECK)
           GC_noop1(result->dummy[0]);
 #       endif
@@ -568,6 +565,8 @@ STATIC GC_thread GC_new_thread(pthread_t id)
       GC_nacl_initialize_gc_thread();
 #   endif
     GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
+    if (EXPECT(result != &first_thread, TRUE))
+      GC_dirty(result);
     return(result);
 }
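
GC_dirty() calls like the one added above (and the similar ones throughout this diff) notify the manual-VDB incremental mode that a heap object just had pointers stored into it. The public counterpart for client code is GC_end_stubborn_change(), which, as the stubborn.c removal later in this diff shows, forwards to GC_dirty() under MANUAL_VDB and is a no-op otherwise. A minimal sketch of the client-side pattern (illustrative type and names):

    #include "gc.h"

    struct node {
      struct node *next;
      int payload;
    };

    static struct node *push(struct node *head, int payload)
    {
      struct node *n = (struct node *)GC_MALLOC(sizeof(struct node));

      n->next = head;
      n->payload = payload;
      GC_end_stubborn_change(n);  /* make the new link visible to the        */
                                  /* incremental collector's dirty tracking  */
      return n;
    }
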
 
@@ -577,8 +576,8 @@ STATIC GC_thread GC_new_thread(pthread_t id)
 STATIC void GC_delete_thread(pthread_t id)
 {
     int hv = THREAD_TABLE_INDEX(id);
-    register GC_thread p = GC_threads[hv];
-    register GC_thread prev = 0;
+    GC_thread p = GC_threads[hv];
+    GC_thread prev = NULL;
 
 #   ifdef DEBUG_THREADS
       GC_log_printf("Deleting thread %p, n_threads = %d\n",
@@ -598,7 +597,9 @@ STATIC void GC_delete_thread(pthread_t id)
     if (prev == 0) {
         GC_threads[hv] = p -> next;
     } else {
+        GC_ASSERT(prev != &first_thread);
         prev -> next = p -> next;
+        GC_dirty(prev);
     }
     if (p != &first_thread) {
 #     ifdef GC_DARWIN_THREADS
@@ -616,8 +617,8 @@ STATIC void GC_delete_gc_thread(GC_thread t)
 {
     pthread_t id = t -> id;
     int hv = THREAD_TABLE_INDEX(id);
-    register GC_thread p = GC_threads[hv];
-    register GC_thread prev = 0;
+    GC_thread p = GC_threads[hv];
+    GC_thread prev = NULL;
 
     GC_ASSERT(I_HOLD_LOCK());
     while (p != t) {
@@ -627,7 +628,9 @@ STATIC void GC_delete_gc_thread(GC_thread t)
     if (prev == 0) {
         GC_threads[hv] = p -> next;
     } else {
+        GC_ASSERT(prev != &first_thread);
         prev -> next = p -> next;
+        GC_dirty(prev);
     }
 #   ifdef GC_DARWIN_THREADS
         mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
@@ -1170,8 +1173,6 @@ static void fork_child_proc(void)
 
 #ifdef INCLUDE_LINUX_THREAD_DESCR
   __thread int GC_dummy_thread_local;
-  GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
-                                        ptr_t *startp, ptr_t *endp);
 #endif
 
 #ifdef PARALLEL_MARK
@@ -1510,7 +1511,7 @@ GC_API int GC_CALL GC_unregister_my_thread(void)
                 "Called GC_unregister_my_thread on %p, gc_thread = %p\n",
                 (void *)self, (void *)me);
 #   endif
-    GC_ASSERT(me->id == self);
+    GC_ASSERT(THREAD_EQUAL(me->id, self));
     GC_unregister_my_thread_inner(me);
     RESTORE_CANCEL(cancel_state);
     UNLOCK();
@@ -1726,6 +1727,11 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
     } else if ((me -> flags & FINISHED) != 0) {
         /* This code is executed when a thread is registered from the   */
         /* client thread key destructor.                                */
+#       ifdef GC_DARWIN_THREADS
+          /* Reinitialize mach_thread to avoid thread_suspend fail      */
+          /* with MACH_SEND_INVALID_DEST error.                         */
+          me -> stop_info.mach_thread = mach_thread_self();
+#       endif
         GC_record_stack_base(me, sb);
         me -> flags &= ~FINISHED; /* but not DETACHED */
 #       ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
@@ -1786,12 +1792,9 @@ GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
     return me;
 }
 
-GC_INNER_PTHRSTART void * GC_CALLBACK GC_inner_start_routine(
-                                        struct GC_stack_base *sb, void *arg);
-                                        /* defined in pthread_start.c   */
-
-STATIC void * GC_start_routine(void * arg)
-{
+#if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
+  STATIC void * GC_start_routine(void * arg)
+  {
 #   ifdef INCLUDE_LINUX_THREAD_DESCR
       struct GC_stack_base sb;
 
@@ -1811,9 +1814,8 @@ STATIC void * GC_start_routine(void * arg)
 #   else
       return GC_call_with_stack_base(GC_inner_start_routine, arg);
 #   endif
-}
+  }
 
-#if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
   GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                        GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
                        void *(*start_routine)(void *), void *arg)
@@ -1908,6 +1910,10 @@ STATIC void * GC_start_routine(void * arg)
         DISABLE_CANCEL(cancel_state);
                 /* pthread_create is not a cancellation point. */
         while (0 != sem_wait(&(si -> registered))) {
+#           if defined(GC_HAIKU_THREADS)
+              /* To workaround some bug in Haiku semaphores. */
+              if (EACCES == errno) continue;
+#           endif
             if (EINTR != errno) ABORT("sem_wait failed");
         }
         RESTORE_CANCEL(cancel_state);
@@ -2009,16 +2015,12 @@ STATIC void GC_generic_lock(pthread_mutex_t * lock)
 
 #endif /* !USE_SPIN_LOCK || ... */
 
-#if defined(THREAD_SANITIZER) \
-    && (defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK))
+#ifdef AO_HAVE_char_load
+# define is_collecting() \
+                ((GC_bool)AO_char_load((unsigned char *)&GC_collecting))
+#else
   /* GC_collecting is a hint, a potential data race between     */
   /* GC_lock() and ENTER/EXIT_GC() is OK to ignore.             */
-  GC_ATTR_NO_SANITIZE_THREAD
-  static GC_bool is_collecting(void)
-  {
-    return GC_collecting;
-  }
-#else
 # define is_collecting() GC_collecting
 #endif
 
@@ -2032,25 +2034,14 @@ GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
 
 # define low_spin_max 30 /* spin cycles if we suspect uniprocessor  */
 # define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */
-  static unsigned spin_max = low_spin_max;
-  static unsigned last_spins = 0;
-
-  /* A potential data race between threads invoking GC_lock which reads */
-  /* and updates spin_max and last_spins could be ignored because these */
-  /* variables are hints only.  (Atomic getters and setters are avoided */
-  /* here for performance reasons.)                                     */
-  GC_ATTR_NO_SANITIZE_THREAD
-  static void set_last_spins_and_high_spin_max(unsigned new_last_spins)
-  {
-    last_spins = new_last_spins;
-    spin_max = high_spin_max;
-  }
 
-  GC_ATTR_NO_SANITIZE_THREAD
-  static void reset_spin_max(void)
-  {
-    spin_max = low_spin_max;
-  }
+  static volatile AO_t spin_max = low_spin_max;
+  static volatile AO_t last_spins = 0;
+                                /* A potential data race between        */
+                                /* threads invoking GC_lock which reads */
+                                /* and updates spin_max and last_spins  */
+                                /* could be ignored because these       */
+                                /* variables are hints only.            */
 
 GC_INNER void GC_lock(void)
 {
@@ -2061,8 +2052,8 @@ GC_INNER void GC_lock(void)
     if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
         return;
     }
-    my_spin_max = spin_max;
-    my_last_spins = last_spins;
+    my_spin_max = (unsigned)AO_load(&spin_max);
+    my_last_spins = (unsigned)AO_load(&last_spins);
     for (i = 0; i < my_spin_max; i++) {
         if (is_collecting() || GC_nprocs == 1)
           goto yield;
@@ -2077,12 +2068,13 @@ GC_INNER void GC_lock(void)
              * against the other process with which we were contending.
              * Thus it makes sense to spin longer the next time.
              */
-            set_last_spins_and_high_spin_max(i);
+            AO_store(&last_spins, (AO_t)i);
+            AO_store(&spin_max, (AO_t)high_spin_max);
             return;
         }
     }
     /* We are probably being scheduled against the other process.  Sleep. */
-    reset_spin_max();
+    AO_store(&spin_max, (AO_t)low_spin_max);
 yield:
     for (i = 0;; ++i) {
         if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
@@ -2283,10 +2275,13 @@ GC_INNER void GC_notify_all_marker(void)
 
 #ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS
   /* Workaround "undefined reference" linkage errors on some targets. */
-  void __pthread_register_cancel() __attribute__((__weak__));
-  void __pthread_unregister_cancel() __attribute__((__weak__));
-  void __pthread_register_cancel() {}
-  void __pthread_unregister_cancel() {}
+  EXTERN_C_BEGIN
+  extern void __pthread_register_cancel(void) __attribute__((__weak__));
+  extern void __pthread_unregister_cancel(void) __attribute__((__weak__));
+  EXTERN_C_END
+
+  void __pthread_register_cancel(void) {}
+  void __pthread_unregister_cancel(void) {}
 #endif
 
 #endif /* GC_PTHREADS */
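
The GC_lock() changes above replace the thread-sanitizer-annotated helper functions with volatile AO_t hints read and written via AO_load()/AO_store(). A minimal sketch of the same adaptive-spin pattern using only libatomic_ops primitives (the real GC_lock() additionally consults GC_collecting and GC_nprocs, and the constants here are illustrative):

    #include <sched.h>
    #include "atomic_ops.h"

    static volatile AO_TS_t lock_ts = AO_TS_INITIALIZER;
    static volatile AO_t spin_max = 30;   /* adaptive hints; data races on   */
    static volatile AO_t last_spins = 0;  /* them are deliberately tolerated */

    static void spin_lock(void)
    {
      unsigned i;
      unsigned limit = (unsigned)AO_load(&spin_max);

      for (i = 0; i < limit; i++) {
        if (AO_test_and_set_acquire(&lock_ts) == AO_TS_CLEAR) {
          /* Spinning won the lock: remember that and spin longer next time. */
          AO_store(&last_spins, (AO_t)i);
          AO_store(&spin_max, (AO_t)1000);
          return;
        }
      }
      /* Probably scheduled against the lock holder: shorten future spins    */
      /* and fall back to yielding.                                          */
      AO_store(&spin_max, (AO_t)30);
      while (AO_test_and_set_acquire(&lock_ts) != AO_TS_CLEAR)
        sched_yield();
    }

    static void spin_unlock(void)
    {
      AO_CLEAR(&lock_ts);   /* clear with release semantics */
    }
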

+ 8 - 6
blitz.mod/bdwgc/ptr_chck.c

@@ -131,8 +131,7 @@ GC_API void * GC_CALL GC_is_valid_displacement(void *p)
            h = FORWARDED_ADDR(h, hhdr);
            hhdr = HDR(h);
         }
-    }
-    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+    } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
         goto fail;
     }
     sz = hhdr -> hb_sz;
@@ -140,7 +139,8 @@ GC_API void * GC_CALL GC_is_valid_displacement(void *p)
     offset = pdispl % sz;
     if ((sz > MAXOBJBYTES && (word)p >= (word)h + sz)
         || !GC_valid_offsets[offset]
-        || (word)p - offset + sz > (word)(h + 1)) {
+        || ((word)p + (sz - offset) > (word)(h + 1)
+            && !IS_FORWARDING_ADDR_OR_NIL(HDR(h + 1)))) {
         goto fail;
     }
     return(p);
@@ -215,10 +215,12 @@ GC_API void * GC_CALL GC_is_visible(void *p)
         } else {
             /* p points to the heap. */
             word descr;
-            ptr_t base = (ptr_t)GC_base(p); /* Should be manually inlined? */
+            ptr_t base = (ptr_t)GC_base(p);
+                        /* TODO: should GC_base be manually inlined? */
 
-            if (base == 0) goto fail;
-            if (HBLKPTR(base) != HBLKPTR(p)) hhdr = HDR((word)p);
+            if (NULL == base) goto fail;
+            if (HBLKPTR(base) != HBLKPTR(p))
+                hhdr = HDR(base);
             descr = hhdr -> hb_descr;
     retry:
             switch(descr & GC_DS_TAGS) {

+ 15 - 15
blitz.mod/bdwgc/reclaim.c

@@ -399,7 +399,6 @@ STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
 
 #             ifdef ENABLE_DISCLAIM
                 if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
-                  struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
                   if ((*ok->ok_disclaim_proc)(hbp)) {
                     /* Not disclaimed => resurrect the object. */
                     set_mark_bit_from_hdr(hhdr, 0);
@@ -486,13 +485,13 @@ struct Print_stats
 
 /* Return the number of set mark bits in the given header.      */
 /* Remains externally visible as used by GNU GCJ currently.     */
-int GC_n_set_marks(hdr *hhdr)
+unsigned GC_n_set_marks(hdr *hhdr)
 {
-    int result = 0;
-    int i;
+    unsigned result = 0;
+    word i;
     word sz = hhdr -> hb_sz;
-    int offset = (int)MARK_BIT_OFFSET(sz);
-    int limit = (int)FINAL_MARK_BIT(sz);
+    word offset = MARK_BIT_OFFSET(sz);
+    word limit = FINAL_MARK_BIT(sz);
 
     for (i = 0; i < limit; i += offset) {
         result += hhdr -> hb_marks[i];
@@ -504,10 +503,10 @@ int GC_n_set_marks(hdr *hhdr)
 #else
 
 /* Number of set bits in a word.  Not performance critical.     */
-static int set_bits(word n)
+static unsigned set_bits(word n)
 {
     word m = n;
-    int result = 0;
+    unsigned result = 0;
 
     while (m > 0) {
         if (m & 1) result++;
@@ -516,13 +515,13 @@ static int set_bits(word n)
     return(result);
 }
 
-int GC_n_set_marks(hdr *hhdr)
+unsigned GC_n_set_marks(hdr *hhdr)
 {
-    int result = 0;
-    int i;
-    int n_mark_words;
+    unsigned result = 0;
+    word i;
+    word n_mark_words;
 #   ifdef MARK_BIT_PER_OBJ
-      int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);
+      word n_objs = HBLK_OBJS(hhdr -> hb_sz);
 
       if (0 == n_objs) n_objs = 1;
       n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
@@ -538,7 +537,7 @@ int GC_n_set_marks(hdr *hhdr)
 #   else
       result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
 #   endif
-    return(result - 1);
+    return result; /* the number of set bits excluding the one past the end */
 }
 
 #endif /* !USE_MARK_BYTES  */
@@ -738,7 +737,8 @@ GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
                 }
                 hhdr = HDR(hbp);
                 *rlh = hhdr -> hb_next;
-                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
+                if (!ignore_old
+                    || (word)hhdr->hb_last_reclaimed == GC_gc_no - 1) {
                     /* It's likely we'll need it this time, too */
                     /* It's been touched recently, so this      */
                     /* shouldn't trigger paging.                */

+ 61 - 0
blitz.mod/bdwgc/sparc_mach_dep.S

@@ -0,0 +1,61 @@
+!	SPARCompiler 3.0 and later apparently no longer handles
+!	asm outside functions.  So we need a separate .s file
+!	This is only set up for SunOS 5, not SunOS 4.
+!	Assumes this is called before the stack contents are
+!	examined.
+
+	.seg 	"text"
+	.globl	GC_save_regs_in_stack
+GC_save_regs_in_stack:
+#if defined(__arch64__) || defined(__sparcv9)
+	save	%sp,-128,%sp
+	flushw
+	ret
+	  restore %sp,2047+128,%o0
+#else /* 32 bit SPARC */
+	ta	0x3   ! ST_FLUSH_WINDOWS
+	mov	%sp,%o0
+	retl
+	nop
+#endif /* 32 bit SPARC */
+.GC_save_regs_in_stack_end:
+	.size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack
+
+! GC_clear_stack_inner(arg, limit) clears stack area up to limit and
+! returns arg.  Stack clearing is crucial on SPARC, so we supply
+! an assembly version that's more careful.  Assumes limit is hotter
+! than sp, and limit is 8 byte aligned.
+	.globl	GC_clear_stack_inner
+GC_clear_stack_inner:
+#if defined(__arch64__) || defined(__sparcv9)
+	mov %sp,%o2		! Save sp
+	add %sp,2047-8,%o3	! p = sp+bias-8
+	add %o1,-2047-192,%sp	! Move sp out of the way,
+  				! so that traps still work.
+  				! Includes some extra words
+  				! so we can be sloppy below.
+loop:
+	stx %g0,[%o3]		! *(long *)p = 0
+	cmp %o3,%o1
+	bgu,pt %xcc, loop	! if (p > limit) goto loop
+          add %o3,-8,%o3	! p -= 8 (delay slot)
+	retl
+    	  mov %o2,%sp		! Restore sp., delay slot
+#else  /* 32 bit SPARC */
+	mov	%sp,%o2		! Save sp
+	add	%sp,-8,%o3	! p = sp-8
+	clr	%g1		! [g0,g1] = 0
+	add	%o1,-0x60,%sp	! Move sp out of the way,
+				! so that traps still work.
+				! Includes some extra words
+				! so we can be sloppy below.
+loop:
+	std	%g0,[%o3]	! *(long long *)p = 0
+	cmp	%o3,%o1
+	bgu	loop		! if (p > limit) goto loop
+	  add	%o3,-8,%o3	! p -= 8 (delay slot)
+	retl
+	  mov	%o2,%sp		! Restore sp., delay slot
+#endif  /* 32 bit SPARC */
+.GC_clear_stack_inner_end:
+      	.size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner

+ 34 - 0
blitz.mod/bdwgc/sparc_netbsd_mach_dep.s

@@ -0,0 +1,34 @@
+!	SPARCompiler 3.0 and later apparently no longer handles
+!	asm outside functions.  So we need a separate .s file
+!	This is only set up for SunOS 4.
+!	Assumes this is called before the stack contents are
+!	examined.
+
+#include "machine/asm.h"
+
+	.seg 	"text"
+	.globl	_C_LABEL(GC_save_regs_in_stack)
+	.globl 	_C_LABEL(GC_push_regs)
+_C_LABEL(GC_save_regs_in_stack):
+_C_LABEL(GC_push_regs):
+	ta	0x3   ! ST_FLUSH_WINDOWS
+	mov	%sp,%o0
+	retl
+	nop
+
+	.globl	_C_LABEL(GC_clear_stack_inner)
+_C_LABEL(GC_clear_stack_inner):
+	mov	%sp,%o2		! Save sp
+	add	%sp,-8,%o3	! p = sp-8
+	clr	%g1		! [g0,g1] = 0
+	add	%o1,-0x60,%sp	! Move sp out of the way,
+				! so that traps still work.
+				! Includes some extra words
+				! so we can be sloppy below.
+loop:
+	std	%g0,[%o3]	! *(long long *)p = 0
+	cmp	%o3,%o1
+	bgu	loop		! if (p > limit) goto loop
+	add	%o3,-8,%o3	! p -= 8 (delay slot)
+	retl
+	mov	%o2,%sp		! Restore sp., delay slot

+ 32 - 0
blitz.mod/bdwgc/sparc_sunos4_mach_dep.s

@@ -0,0 +1,32 @@
+!	SPARCompiler 3.0 and later apparently no longer handles
+!	asm outside functions.  So we need a separate .s file
+!	This is only set up for SunOS 4.
+!	Assumes this is called before the stack contents are
+!	examined.
+
+	.seg 	"text"
+	.globl	_GC_save_regs_in_stack
+	.globl 	_GC_push_regs
+_GC_save_regs_in_stack:
+_GC_push_regs:
+	ta	0x3   ! ST_FLUSH_WINDOWS
+	mov	%sp,%o0
+	retl
+	nop
+
+	.globl	_GC_clear_stack_inner
+_GC_clear_stack_inner:
+	mov	%sp,%o2		! Save sp
+	add	%sp,-8,%o3	! p = sp-8
+	clr	%g1		! [g0,g1] = 0
+	add	%o1,-0x60,%sp	! Move sp out of the way,
+				! so that traps still work.
+				! Includes some extra words
+				! so we can be sloppy below.
+loop:
+	std	%g0,[%o3]	! *(long long *)p = 0
+	cmp	%o3,%o1
+	bgu	loop		! if (p > limit) goto loop
+	  add	%o3,-8,%o3	! p -= 8 (delay slot)
+	retl
+	  mov	%o2,%sp		! Restore sp., delay slot

+ 21 - 11
blitz.mod/bdwgc/specific.c

@@ -27,11 +27,13 @@ GC_INNER int GC_key_create_inner(tsd ** key_ptr)
 {
     int i;
     int ret;
-    tsd * result = (tsd *)MALLOC_CLEAR(sizeof(tsd));
+    tsd * result;
 
+    GC_ASSERT(I_HOLD_LOCK());
     /* A quick alignment check, since we need atomic stores */
     GC_ASSERT((word)(&invalid_tse.next) % sizeof(tse *) == 0);
-    if (0 == result) return ENOMEM;
+    result = (tsd *)MALLOC_CLEAR(sizeof(tsd));
+    if (NULL == result) return ENOMEM;
     ret = pthread_mutex_init(&result->lock, NULL);
     if (ret != 0) return ret;
     for (i = 0; i < TS_CACHE_SIZE; ++i) {
@@ -63,11 +65,13 @@ GC_INNER int GC_setspecific(tsd * key, void * value)
     /* Could easily check for an existing entry here.   */
     entry -> next = key->hash[hash_val].p;
     entry -> thread = self;
-    entry -> value = value;
+    entry -> value = TS_HIDE_VALUE(value);
     GC_ASSERT(entry -> qtid == INVALID_QTID);
     /* There can only be one writer at a time, but this needs to be     */
     /* atomic with respect to concurrent readers.                       */
     AO_store_release(&key->hash[hash_val].ao, (AO_t)entry);
+    GC_dirty((/* no volatile */ void *)entry);
+    GC_dirty(key->hash + hash_val);
     pthread_mutex_unlock(&(key -> lock));
     return 0;
 }
@@ -79,7 +83,7 @@ GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t)
 {
     unsigned hash_val = HASH(t);
     tse *entry;
-    tse **link = &key->hash[hash_val].p;
+    tse *prev = NULL;
 
 #   ifdef CAN_HANDLE_FORK
       /* Both GC_setspecific and GC_remove_specific should be called    */
@@ -88,16 +92,22 @@ GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t)
       GC_ASSERT(I_HOLD_LOCK());
 #   endif
     pthread_mutex_lock(&(key -> lock));
-    entry = *link;
-    while (entry != NULL && entry -> thread != t) {
-      link = &(entry -> next);
-      entry = *link;
+    entry = key->hash[hash_val].p;
+    while (entry != NULL && !THREAD_EQUAL(entry->thread, t)) {
+      prev = entry;
+      entry = entry->next;
     }
     /* Invalidate qtid field, since qtids may be reused, and a later    */
     /* cache lookup could otherwise find this entry.                    */
     if (entry != NULL) {
       entry -> qtid = INVALID_QTID;
-      *link = entry -> next;
+      if (NULL == prev) {
+        key->hash[hash_val].p = entry->next;
+        GC_dirty(key->hash + hash_val);
+      } else {
+        prev->next = entry->next;
+        GC_dirty(prev);
+      }
       /* Atomic! concurrent accesses still work.        */
       /* They must, since readers don't lock.           */
       /* We shouldn't need a volatile access here,      */
@@ -130,7 +140,7 @@ GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
     tse *entry = key->hash[hash_val].p;
 
     GC_ASSERT(qtid != INVALID_QTID);
-    while (entry != NULL && entry -> thread != self) {
+    while (entry != NULL && !THREAD_EQUAL(entry->thread, self)) {
       entry = entry -> next;
     }
     if (entry == NULL) return NULL;
@@ -143,7 +153,7 @@ GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
     *cache_ptr = entry;
         /* Again this is safe since pointer assignments are     */
         /* presumed atomic, and either pointer is valid.        */
-    return entry -> value;
+    return TS_REVEAL_PTR(entry -> value);
 }
 
 #ifdef GC_ASSERTIONS
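
The TS_HIDE_VALUE()/TS_REVEAL_PTR() pair used above is defined in private/specific.h, which is not part of this excerpt; judging by the names, it applies the same disguise-by-bitwise-complement idiom as the public GC_HIDE_POINTER()/GC_REVEAL_POINTER() macros in gc.h, so a conservative scan presumably no longer treats the stored value as a reference. A sketch of that public idiom:

    #include "gc.h"

    /* A hidden pointer is stored as its bitwise complement: the collector   */
    /* does not recognize it as a reference, yet the owner can recover it.   */
    struct weak_slot {
      GC_hidden_pointer hidden;   /* does not keep the target object alive   */
    };

    static void slot_set(struct weak_slot *s, void *p)
    {
      s->hidden = GC_HIDE_POINTER(p);
    }

    static void *slot_get(const struct weak_slot *s)
    {
      return GC_REVEAL_POINTER(s->hidden);
    }
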

+ 0 - 56
blitz.mod/bdwgc/stubborn.c

@@ -1,56 +0,0 @@
-/*
- * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-#include "private/gc_priv.h"
-
-#if defined(MANUAL_VDB)
-
-  /* Stubborn object (hard to change, nearly immutable) allocation.     */
-  /* This interface is deprecated.  We mostly emulate it using          */
-  /* MANUAL_VDB.  But that imposes the additional constraint that       */
-  /* written, but not yet GC_dirty()ed objects must be referenced       */
-  /* by a stack.                                                        */
-
-  void GC_dirty(ptr_t p);
-
-  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_stubborn(size_t lb)
-  {
-    return(GC_malloc(lb));
-  }
-
-  GC_API void GC_CALL GC_end_stubborn_change(const void *p)
-  {
-    GC_dirty((ptr_t)p);
-  }
-
-  GC_API void GC_CALL GC_change_stubborn(const void *p GC_ATTR_UNUSED)
-  {
-  }
-
-#else /* !MANUAL_VDB */
-
-  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_stubborn(size_t lb)
-  {
-    return(GC_malloc(lb));
-  }
-
-  GC_API void GC_CALL GC_end_stubborn_change(const void *p GC_ATTR_UNUSED)
-  {
-  }
-
-  GC_API void GC_CALL GC_change_stubborn(const void *p GC_ATTR_UNUSED)
-  {
-  }
-
-#endif /* !MANUAL_VDB */

+ 15 - 20
blitz.mod/bdwgc/thread_local_alloc.c

@@ -141,20 +141,15 @@ GC_INNER void GC_destroy_thread_local(GC_tlfs p)
 #   endif
 }
 
-#ifdef GC_ASSERTIONS
-  /* Defined in pthread_support.c or win32_threads.c. */
-  GC_bool GC_is_thread_tsd_valid(void *tsd);
-#endif
-
-GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
+GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int kind)
 {
     size_t granules;
     void *tsd;
     void *result;
 
 #   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
-      if (EXPECT(knd >= THREAD_FREELISTS_KINDS, FALSE)) {
-        return GC_malloc_kind_global(bytes, knd);
+      if (EXPECT(kind >= THREAD_FREELISTS_KINDS, FALSE)) {
+        return GC_malloc_kind_global(bytes, kind);
       }
 #   endif
 #   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
@@ -164,31 +159,31 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
       if (EXPECT(0 == k, FALSE)) {
         /* We haven't yet run GC_init_parallel.  That means     */
         /* we also aren't locking, so this is fairly cheap.     */
-        return GC_malloc_kind_global(bytes, knd);
+        return GC_malloc_kind_global(bytes, kind);
       }
       tsd = GC_getspecific(k);
     }
 #   else
       if (!EXPECT(keys_initialized, TRUE))
-        return GC_malloc_kind_global(bytes, knd);
+        return GC_malloc_kind_global(bytes, kind);
       tsd = GC_getspecific(GC_thread_key);
 #   endif
 #   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
       if (EXPECT(0 == tsd, FALSE)) {
-        return GC_malloc_kind_global(bytes, knd);
+        return GC_malloc_kind_global(bytes, kind);
       }
 #   endif
     GC_ASSERT(GC_is_initialized);
     GC_ASSERT(GC_is_thread_tsd_valid(tsd));
     granules = ROUNDED_UP_GRANULES(bytes);
     GC_FAST_MALLOC_GRANS(result, granules,
-                         ((GC_tlfs)tsd) -> _freelists[knd], DIRECT_GRANULES,
-                         knd, GC_malloc_kind_global(bytes, knd),
-                         (void)(knd == PTRFREE ? NULL
+                         ((GC_tlfs)tsd) -> _freelists[kind], DIRECT_GRANULES,
+                         kind, GC_malloc_kind_global(bytes, kind),
+                         (void)(kind == PTRFREE ? NULL
                                                : (obj_link(result) = 0)));
 #   ifdef LOG_ALLOCS
       GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
-                    (unsigned long)bytes, knd, result,
+                    (unsigned long)bytes, kind, result,
                     (unsigned long)GC_gc_no);
 #   endif
     return result;
@@ -196,9 +191,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
 
 #ifdef GC_GCJ_SUPPORT
 
-# include "private/gc_atomic_ops.h" /* for AO_compiler_barrier() */
-
-# include "include/gc_gcj.h"
+# include "gc_gcj.h"
 
 /* Gcj-style allocation without locks is extremely tricky.  The         */
 /* fundamental issue is that we may end up marking a free list, which   */
@@ -273,13 +266,15 @@ GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p)
 
     for (j = 0; j < TINY_FREELISTS; ++j) {
       for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
-        q = (ptr_t)p->_freelists[i][j];
+        /* Load the pointer atomically as it might be updated   */
+        /* concurrently by GC_FAST_MALLOC_GRANS.                */
+        q = (ptr_t)AO_load((volatile AO_t *)&p->_freelists[i][j]);
         if ((word)q > HBLKSIZE)
           GC_set_fl_marks(q);
       }
 #     ifdef GC_GCJ_SUPPORT
         if (EXPECT(j > 0, TRUE)) {
-          q = (ptr_t)p->gcj_freelists[j];
+          q = (ptr_t)AO_load((volatile AO_t *)&p->gcj_freelists[j]);
           if ((word)q > HBLKSIZE)
             GC_set_fl_marks(q);
         }

+ 25 - 21
blitz.mod/bdwgc/typd_mlc.c

@@ -106,10 +106,6 @@ STATIC size_t GC_avail_descr = 0;       /* Next available slot.         */
 STATIC int GC_typed_mark_proc_index = 0; /* Indices of my mark          */
 STATIC int GC_array_mark_proc_index = 0; /* procedures.                 */
 
-#if defined(GC_FORCE_INCLUDE_ATOMIC_OPS) || defined(GC_BUILTIN_ATOMIC)
-# include "private/gc_atomic_ops.h"
-#endif
-
 #ifdef AO_HAVE_load_acquire
   STATIC volatile AO_t GC_explicit_typing_initialized = FALSE;
 #else
@@ -141,7 +137,7 @@ STATIC signed_word GC_add_ext_descriptor(const word * bm, word nbits)
         word ed_size = GC_ed_size;
 
         if (ed_size == 0) {
-            GC_ASSERT((word)&GC_ext_descriptors % sizeof(word) == 0);
+            GC_ASSERT((word)(&GC_ext_descriptors) % sizeof(word) == 0);
             GC_push_typed_structures = GC_push_typed_structures_proc;
             UNLOCK();
             new_size = ED_INITIAL_SIZE;
@@ -329,6 +325,9 @@ GC_make_sequence_descriptor(complex_descriptor *first,
         result -> sd_tag = SEQUENCE_TAG;
         result -> sd_first = first;
         result -> sd_second = second;
+        GC_dirty(result);
+        REACHABLE_AFTER_DIRTY(first);
+        REACHABLE_AFTER_DIRTY(second);
     }
     return((complex_descriptor *)result);
 }
@@ -427,15 +426,15 @@ STATIC word GC_descr_obj_size(complex_descriptor *d)
 STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
                                         mse *msp, mse *msl)
 {
-    register ptr_t current = (ptr_t) addr;
-    register word nelements;
-    register word sz;
-    register word i;
+    ptr_t current = (ptr_t)addr;
+    word nelements;
+    word sz;
+    word i;
 
     switch(d -> TAG) {
       case LEAF_TAG:
         {
-          register GC_descr descr = d -> ld.ld_descriptor;
+          GC_descr descr = d -> ld.ld_descriptor;
 
           nelements = d -> ld.ld_nelements;
           if (msl - msp <= (ptrdiff_t)nelements) return(0);
@@ -450,7 +449,7 @@ STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
         }
       case ARRAY_TAG:
         {
-          register complex_descriptor *descr = d -> ad.ad_element_descr;
+          complex_descriptor *descr = d -> ad.ad_element_descr;
 
           nelements = d -> ad.ad_nelements;
           sz = GC_descr_obj_size(descr);
@@ -606,6 +605,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_explicitly_typed(size_t lb,
     /* the former might be updated asynchronously.                      */
     lg = BYTES_TO_GRANULES(GC_size(op));
     op[GRANULES_TO_WORDS(lg) - 1] = d;
+    GC_dirty(op + GRANULES_TO_WORDS(lg) - 1);
+    REACHABLE_AFTER_DIRTY(d);
     return op;
 }
 
@@ -625,8 +626,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
     lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
     if (SMALL_OBJ(lb)) {
         GC_DBG_COLLECT_AT_MALLOC(lb);
-        lg = GC_size_map[lb];
         LOCK();
+        lg = GC_size_map[lb];
         op = GC_eobjfreelist[lg];
         if (EXPECT(0 == op, FALSE)) {
             UNLOCK();
@@ -640,15 +641,15 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
             GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
             UNLOCK();
         }
-        ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
-   } else {
-       op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
-       if (op != NULL) {
-         lg = BYTES_TO_GRANULES(GC_size(op));
-         ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
-       }
-   }
-   return op;
+    } else {
+        op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
+        if (NULL == op) return NULL;
+        lg = BYTES_TO_GRANULES(GC_size(op));
+    }
+    ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+    GC_dirty(op + GRANULES_TO_WORDS(lg) - 1);
+    REACHABLE_AFTER_DIRTY(d);
+    return op;
 }
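
These hunks adjust the internals of the typed-allocation kind: the descriptor is kept in the object's last word and its store is now followed by GC_dirty()/REACHABLE_AFTER_DIRTY(). For reference, the client-visible interface lives in gc_typed.h; a typical use looks like this (illustrative type and names):

    #include "gc_typed.h"

    struct pair {
      struct pair *car;   /* pointer: must be traced        */
      GC_word tag;        /* not a pointer: may be skipped  */
    };

    static struct pair *make_pair(struct pair *car, GC_word tag)
    {
      static GC_descr pair_descr = 0;   /* lazy init; not thread-safe in     */
                                        /* this sketch                       */
      struct pair *p;

      if (0 == pair_descr) {
        GC_word bitmap[GC_BITMAP_SIZE(struct pair)] = {0};

        GC_set_bit(bitmap, GC_WORD_OFFSET(struct pair, car));
        pair_descr = GC_make_descriptor(bitmap, GC_WORD_LEN(struct pair));
      }
      p = (struct pair *)GC_malloc_explicitly_typed(sizeof(struct pair),
                                                    pair_descr);
      p->car = car;
      p->tag = tag;
      return p;
    }
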
 
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n,
@@ -701,6 +702,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n,
         size_t lw = GRANULES_TO_WORDS(lg);
 
         op[lw - 1] = (word)complex_descr;
+        GC_dirty(op + lw - 1);
+        REACHABLE_AFTER_DIRTY(complex_descr);
+
         /* Make sure the descriptor is cleared once there is any danger */
         /* it may have been collected.                                  */
         if (EXPECT(GC_general_register_disappearing_link(

+ 27 - 17
blitz.mod/bdwgc/win32_threads.c

@@ -79,7 +79,6 @@
 #if (defined(GC_DLL) || defined(GC_INSIDE_DLL)) \
         && !defined(GC_NO_THREADS_DISCOVERY) && !defined(MSWINCE) \
         && !defined(THREAD_LOCAL_ALLOC) && !defined(GC_PTHREADS)
-# include "private/gc_atomic_ops.h"
 
   /* This code operates in two distinct modes, depending on     */
   /* the setting of GC_win32_dll_threads.                       */
@@ -347,6 +346,7 @@ STATIC GC_thread GC_new_thread(DWORD id)
   if (!EXPECT(first_thread_used, TRUE)) {
     result = &first_thread;
     first_thread_used = TRUE;
+    GC_ASSERT(NULL == GC_threads[hv]);
   } else {
     GC_ASSERT(!GC_win32_dll_threads);
     result = (struct GC_Thread_Rep *)
@@ -360,6 +360,8 @@ STATIC GC_thread GC_new_thread(DWORD id)
     GC_ASSERT(result -> flags == 0);
 # endif
   GC_ASSERT(result -> thread_blocked_sp == NULL);
+  if (EXPECT(result != &first_thread, TRUE))
+    GC_dirty(result);
   return(result);
 }
 
@@ -390,7 +392,7 @@ STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
   /* The following should be a no-op according to the win32     */
   /* documentation.  There is empirical evidence that it        */
   /* isn't.             - HB                                    */
-# if defined(MPROTECT_VDB)
+# if defined(MPROTECT_VDB) && !defined(CYGWIN32)
     if (GC_incremental
 #       ifdef GWW_VDB
           && !GC_gww_dirty_init()
@@ -415,7 +417,7 @@ STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
       /* variants.                                                      */
                   /* cast away volatile qualifier */
       for (i = 0;
-           InterlockedExchange((void*)&dll_thread_table[i].tm.in_use, 1) != 0;
+           InterlockedExchange((word*)&dll_thread_table[i].tm.in_use, 1) != 0;
            i++) {
         /* Compare-and-swap would make this cleaner, but that's not     */
         /* supported before Windows 98 and NT 4.0.  In Windows 2000,    */
@@ -670,8 +672,8 @@ STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
     DWORD id = ((GC_thread)t) -> id;
                 /* Cast away volatile qualifier, since we have lock.    */
     int hv = THREAD_TABLE_INDEX(id);
-    register GC_thread p = GC_threads[hv];
-    register GC_thread prev = 0;
+    GC_thread p = GC_threads[hv];
+    GC_thread prev = NULL;
 
     GC_ASSERT(I_HOLD_LOCK());
     while (p != (GC_thread)t) {
@@ -681,7 +683,9 @@ STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
     if (prev == 0) {
       GC_threads[hv] = p -> tm.next;
     } else {
+      GC_ASSERT(prev != &first_thread);
       prev -> tm.next = p -> tm.next;
+      GC_dirty(prev);
     }
   }
 }
@@ -704,8 +708,8 @@ STATIC void GC_delete_thread(DWORD id)
     }
   } else {
     int hv = THREAD_TABLE_INDEX(id);
-    register GC_thread p = GC_threads[hv];
-    register GC_thread prev = 0;
+    GC_thread p = GC_threads[hv];
+    GC_thread prev = NULL;
 
     GC_ASSERT(I_HOLD_LOCK());
     while (p -> id != id) {
@@ -718,9 +722,11 @@ STATIC void GC_delete_thread(DWORD id)
     if (prev == 0) {
       GC_threads[hv] = p -> tm.next;
     } else {
+      GC_ASSERT(prev != &first_thread);
       prev -> tm.next = p -> tm.next;
+      GC_dirty(prev);
     }
-    if (p != &first_thread) {
+    if (EXPECT(p != &first_thread, TRUE)) {
       GC_INTERNAL_FREE(p);
     }
   }
@@ -2202,6 +2208,7 @@ GC_INNER void GC_get_next_stack(char *start, char *limit,
     /* This is probably pointless, since an uncaught exception is       */
     /* supposed to result in the process being killed.                  */
 #   ifndef __GNUC__
+      ret = NULL; /* to suppress "might be uninitialized" compiler warning */
       __try
 #   endif
     {
@@ -2259,6 +2266,8 @@ GC_INNER void GC_get_next_stack(char *start, char *limit,
       /* set up thread arguments */
       args -> start = lpStartAddress;
       args -> param = lpParameter;
+      GC_dirty(args);
+      REACHABLE_AFTER_DIRTY(lpParameter);
 
       set_need_to_lock();
       thread_h = CreateThread(lpThreadAttributes, dwStackSize, GC_win32_start,
@@ -2311,6 +2320,8 @@ GC_INNER void GC_get_next_stack(char *start, char *limit,
         /* set up thread arguments */
         args -> start = (LPTHREAD_START_ROUTINE)start_address;
         args -> param = arglist;
+        GC_dirty(args);
+        REACHABLE_AFTER_DIRTY(arglist);
 
         set_need_to_lock();
         thread_h = _beginthreadex(security, stack_size,
@@ -2535,7 +2546,9 @@ GC_INNER void GC_thr_init(void)
   GC_API int GC_pthread_join(pthread_t pthread_id, void **retval)
   {
     int result;
-    GC_thread t;
+#   ifndef GC_WIN32_PTHREADS
+      GC_thread t;
+#   endif
     DCL_LOCK_STATE;
 
     GC_ASSERT(!GC_win32_dll_threads);
@@ -2558,7 +2571,7 @@ GC_INNER void GC_thr_init(void)
     if (0 == result) {
 #     ifdef GC_WIN32_PTHREADS
         /* pthreads-win32 and winpthreads id are unique (not recycled). */
-        t = GC_lookup_pthread(pthread_id);
+        GC_thread t = GC_lookup_pthread(pthread_id);
         if (NULL == t) ABORT("Thread not registered");
 #     endif
 
@@ -2603,6 +2616,8 @@ GC_INNER void GC_thr_init(void)
 
       si -> start_routine = start_routine;
       si -> arg = arg;
+      GC_dirty(si);
+      REACHABLE_AFTER_DIRTY(arg);
       if (attr != 0 &&
           pthread_attr_getdetachstate(attr, &si->detached)
           == PTHREAD_CREATE_DETACHED) {
@@ -2650,6 +2665,7 @@ GC_INNER void GC_thr_init(void)
     /* we don't need to hold the allocation lock during pthread_create. */
     me = GC_register_my_thread_inner(sb, thread_id);
     SET_PTHREAD_MAP_CACHE(pthread_id, thread_id);
+    GC_ASSERT(me != &first_thread);
     me -> pthread_id = pthread_id;
     if (si->detached) me -> flags |= DETACHED;
     UNLOCK();
@@ -2662,6 +2678,7 @@ GC_INNER void GC_thr_init(void)
     pthread_cleanup_push(GC_thread_exit_proc, (void *)me);
     result = (*start)(start_arg);
     me -> status = result;
+    GC_dirty(me);
     pthread_cleanup_pop(1);
 
 #   ifdef DEBUG_THREADS
@@ -2760,7 +2777,6 @@ GC_INNER void GC_thr_init(void)
                          LPVOID reserved GC_ATTR_UNUSED)
   {
       DWORD thread_id;
-      static int entry_count = 0;
 
       /* Note that GC_use_threads_discovery should be called by the     */
       /* client application at start-up to activate automatic thread    */
@@ -2778,8 +2794,6 @@ GC_INNER void GC_thr_init(void)
             break;
           }
 #       endif
-        GC_ASSERT(entry_count == 0 || parallel_initialized);
-        ++entry_count;
         /* FALLTHRU */
        case DLL_PROCESS_ATTACH:
         /* This may run with the collector uninitialized. */
@@ -2898,10 +2912,6 @@ GC_INNER void GC_init_parallel(void)
   }
 
 # if defined(GC_ASSERTIONS)
-    void GC_check_tls_for(GC_tlfs p);
-#   if defined(USE_CUSTOM_SPECIFIC)
-      void GC_check_tsd_marks(tsd *key);
-#   endif
     /* Check that all thread-local free-lists are completely marked.    */
     /* also check that thread-specific-data structures are marked.      */
     void GC_check_tls(void)

+ 0 - 1
blitz.mod/blitz.bmx

@@ -132,7 +132,6 @@ Import "bdwgc/new_hblk.c"
 Import "bdwgc/dyn_load.c"
 Import "bdwgc/dbg_mlc.c"
 Import "bdwgc/malloc.c"
-Import "bdwgc/stubborn.c"
 Import "bdwgc/checksums.c"
 Import "bdwgc/pthread_start.c"
 Import "bdwgc/pthread_support.c"

+ 2 - 0
blitz.mod/blitz_gc.c

@@ -61,7 +61,9 @@ void bbGCStartup( void *spTop ){
 #endif
 */
 	GC_INIT();
+#ifndef __EMSCRIPTEN__
 	GC_allow_register_threads();
+#endif
 	GC_set_warn_proc( gc_warn_proc );
 }
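
GC_allow_register_threads() (skipped under Emscripten above) is what lets threads that were not created through the collector's pthread_create wrapper attach themselves later. A sketch of how such a foreign thread would register, using the public API from gc.h:

    #include "gc.h"

    static void *foreign_thread_body(void *arg)
    {
      struct GC_stack_base sb;

      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        return NULL;                 /* cannot determine the stack bottom    */
      GC_register_my_thread(&sb);    /* requires GC_allow_register_threads() */

      /* ... safe to call GC_MALLOC() and friends here ... */

      GC_unregister_my_thread();     /* detach before the thread exits       */
      return arg;
    }
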
 

Some files were not shown because too many files have changed in this diff