blitz_gc.c

#include "blitz.h"

#ifdef __APPLE__
#include <mach-o/getsect.h>
#endif

#ifdef _WIN32
#ifdef __x86_64__
extern void *__bss_end__;
extern void *__data_start__;
#else
extern void *_bss_end__;
extern void *_data_start__;
#endif
#endif

#ifdef __linux__
#ifdef __ANDROID__
extern int __data_start[];
extern int _end[];
#else
extern void *__data_start;
extern void *_end;
#endif
#endif

#ifdef BBCC_ALLOCCOUNT
BBUInt64 bbGCAllocCount = 0;
#endif

static bb_mutex_t *bbReleaseRetainGuard = 0;

static void gc_finalizer( void *mem, void *pool ){
	((BBGCPool*)pool)->free( (BBGCMem*)mem );
}

static void gc_warn_proc( char *msg, GC_word arg ){
	/*printf(msg,arg);fflush(stdout);*/
}

static bb_mutex_t *bb_create_mutex(){
	bb_mutex_t *mutex = malloc( sizeof(bb_mutex_t) );
	if( bb_mutex_init( mutex ) ) return mutex;
	free( mutex );
	return 0;
}
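
/*
 * bbGCStartup: one-time startup of the Boehm (bdwgc) collector.
 * Note: the spTop argument and the commented-out root-registration block
 * below appear to be kept for reference only; presumably GC_INIT() locates
 * the static data roots itself on the supported platforms.
 */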
void bbGCStartup( void *spTop ){
/*
	GC_set_no_dls(1);
	GC_clear_roots();
#ifdef _WIN32
#ifdef __x86_64__
	GC_add_roots(&__data_start__, &__bss_end__);
#else
	GC_add_roots(&_data_start__, &_bss_end__);
#endif
#endif
#ifdef __APPLE__
#ifndef __LP64__
	struct segment_command * seg;
#else
	struct segment_command_64 * seg;
#endif
	seg = getsegbyname( "__DATA" );
	// GC_add_roots((void*)seg->vmaddr, (void*)(seg->vmaddr + seg->vmsize));
#endif
#ifdef __linux__
	GC_add_roots(&__data_start, &_end);
#endif
*/
	GC_INIT();
#if !defined(__EMSCRIPTEN__) && !defined(__SWITCH__)
#ifdef GC_THREADS
	GC_allow_register_threads();
#endif
#endif
	GC_set_warn_proc( gc_warn_proc );
	bbReleaseRetainGuard = bb_create_mutex();
}
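
/*
 * bbGCAlloc: allocate a raw collectable block of sz bytes belonging to the
 * given pool. A no-order finalizer is registered so the pool's free callback
 * runs when the collector reclaims the block.
 */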
BBGCMem *bbGCAlloc( unsigned int sz, BBGCPool *pool ){
	GC_finalization_proc ofn;
	void *ocd;
	BBGCMem *q = (BBGCMem*)GC_MALLOC( sz );
#ifdef BBCC_ALLOCCOUNT
	++bbGCAllocCount;
#endif
	q->pool = pool;
	//q->refs=-1;
	GC_REGISTER_FINALIZER_NO_ORDER( q, gc_finalizer, pool, &ofn, &ocd );
	return q;
}
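
/*
 * bbGCAllocObject: allocate a BBObject of sz bytes for class clas.
 * BBGC_ATOMIC requests pointer-free (atomic) storage that the collector
 * will not scan; a finalizer is only registered when the object requires
 * finalizing or when instance counting is enabled.
 */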
BBObject *bbGCAllocObject( unsigned int sz, BBClass *clas, int flags ){
	BBObject *q;
	if( flags & BBGC_ATOMIC ){
		q = (BBObject*)GC_MALLOC_ATOMIC( sz );
	}else{
		q = (BBObject*)GC_MALLOC( sz );
	}
#ifdef BBCC_ALLOCCOUNT
	++bbGCAllocCount;
#endif
	q->clas = clas;
	if( bbCountInstances ){
		bbAtomicAdd( &clas->instance_count, 1 );
	}
	if( (flags & BBGC_FINALIZE) || bbCountInstances ){
		GC_finalization_proc ofn;
		void *ocd;
		GC_REGISTER_FINALIZER_NO_ORDER( q, gc_finalizer, clas, &ofn, &ocd );
	}
	return q;
}

void bbGCFree( BBGCMem *q ){
}
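
/*
 * bbGCValidate: best-effort check that q looks like a live BlitzMax object.
 * The pointer must lie in the GC heap and its class pointer must match a
 * registered type (or the array class). Returns 1 if it does, 0 otherwise.
 */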
int bbGCValidate( void *q ){
	if( GC_is_heap_ptr( q ) ){
		BBClass *clas = ((BBObject*)q)->clas;
		int count;
		BBClass **classes = bbObjectRegisteredTypes( &count );
		while( count-- ){
			if( classes[count] == clas ){
				return 1;
			}
		}
		// maybe an array?
		if( clas == &bbArrayClass ){
			return 1;
		}
	}
	return 0;
}
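
/*
 * Explicit collection helpers. bbGCCollect forces a full collection and
 * reports GC_get_expl_freed_bytes_since_gc(); bbGCCollectALittle performs a
 * small amount of incremental work and returns non-zero while more remains.
 */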
size_t bbGCCollect(){
	GC_gcollect();
	return GC_get_expl_freed_bytes_since_gc();
}

int bbGCCollectALittle(){
	return GC_collect_a_little();
}
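
/*
 * GC mode switching: mode 1 enables automatic collection, mode 2 disables it
 * (collections then only happen through the explicit calls above).
 * bbGCGetMode reports the current setting using the same 1/2 convention.
 */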
void bbGCSetMode( int mode ){
	if( mode == 1 ){
		GC_set_disable_automatic_collection(0);
	}else if( mode == 2 ){
		GC_set_disable_automatic_collection(1);
	}
}

int bbGCGetMode(){
	if( GC_get_disable_automatic_collection() ){
		return 2;
	}else{
		return 1;
	}
}

void bbGCSetDebug( int debug ){
}

void bbGCSuspend(){
	GC_disable();
}

void bbGCResume(){
	GC_enable();
}

size_t bbGCMemAlloced(){
	return GC_get_heap_size();
}
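
/*
 * Manual retain/release support. Retained objects are tracked in an AVL tree
 * of uncollectable retain_node entries keyed by object pointer, so the
 * pointers held in those nodes keep the objects alive until their retain
 * count drops to zero. Access to the tree is serialised with
 * bbReleaseRetainGuard. A typical (hypothetical) usage from native glue code
 * might look like:
 *
 *     BBObject *obj = bbGCAllocObject( size, clas, 0 );
 *     bbGCRetain( obj );    // keep alive while referenced outside GC-visible memory
 *     ...
 *     bbGCRelease( obj );   // balanced release; object becomes collectable again
 */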
static struct avl_root *retain_root = 0;

#define generic_compare(x, y) (((x) > (y)) - ((x) < (y)))

int node_compare( const void *x, const void *y ){
	struct retain_node *node_x = (struct retain_node *)x;
	struct retain_node *node_y = (struct retain_node *)y;
	return generic_compare(node_x->obj, node_y->obj);
}

void bbGCRetain( BBObject *p ){
	struct retain_node *node = (struct retain_node *)GC_malloc_uncollectable( sizeof(struct retain_node) );
	node->count = 1;
	node->obj = p;
#ifdef BBCC_ALLOCCOUNT
	++bbGCAllocCount;
#endif
	bb_mutex_lock( bbReleaseRetainGuard );
	struct retain_node *old_node = (struct retain_node *)avl_map( &node->link, node_compare, &retain_root );
	if( &node->link != &old_node->link ){
		// this object already exists here... increment our reference count
		old_node->count++;
		// unlock before free, to prevent deadlocks from finalizers.
		bb_mutex_unlock( bbReleaseRetainGuard );
		// delete the new node, since we don't need it
		GC_FREE( node );
		return;
	}
	bb_mutex_unlock( bbReleaseRetainGuard );
}
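
/*
 * bbGCRelease: decrement the retain count recorded for p, if any. When the
 * count reaches zero the node is removed from the tree and freed, making the
 * object eligible for collection again. Releasing an object that was never
 * retained is a no-op.
 */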
void bbGCRelease( BBObject *p ){
	// create something to look up
	struct retain_node node;
	node.obj = p;
	bb_mutex_lock( bbReleaseRetainGuard );
	struct retain_node *found = (struct retain_node *)tree_search( (struct tree_root_np *)&node, node_compare, (struct tree_root_np *)retain_root );
	if( found ){
		// found a retained object!
		found->count--;
		if( found->count <= 0 ){
			// remove from the tree
			avl_del( &found->link, &retain_root );
			// free the node
			found->obj = 0;
			// unlock before free, to prevent deadlocks from finalizers.
			bb_mutex_unlock( bbReleaseRetainGuard );
			GC_FREE( found );
			return;
		}
	}
	bb_mutex_unlock( bbReleaseRetainGuard );
}
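
/*
 * Thread registration wrappers. Threads not created via the GC must be
 * registered before they touch collectable memory. A typical (hypothetical)
 * pattern for a foreign thread entry point might be:
 *
 *     if( !bbGCThreadIsRegistered() ) bbGCRegisterMyThread();
 *     ... use GC-allocated objects ...
 *     bbGCUnregisterMyThread();
 *
 * Without GC_THREADS these wrappers are stubs returning 0 / -1.
 */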
int bbGCThreadIsRegistered(){
#ifdef GC_THREADS
	return GC_thread_is_registered();
#else
	return 0;
#endif
}

int bbGCRegisterMyThread(){
#ifdef GC_THREADS
	struct GC_stack_base stackBase;
	GC_get_stack_base( &stackBase );
	return GC_register_my_thread( &stackBase );
#else
	return -1;
#endif
}

int bbGCUnregisterMyThread(){
#ifdef GC_THREADS
	return GC_unregister_my_thread();
#else
	return -1;
#endif
}
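
/*
 * bbGCGetStats: fill a caller-supplied GC_prof_stats_s with the collector's
 * profiling counters (heap size, bytes allocated since the last collection,
 * and so on) via GC_get_prof_stats.
 */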
void bbGCGetStats( struct GC_prof_stats_s *stats ){
	GC_get_prof_stats( stats, sizeof(struct GC_prof_stats_s) );
}