
/* $Id$
 *
 * shared memory, multi-process safe, pool based version of f_malloc
 *
 * Copyright (C) 2007 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * History:
 * --------
 *              created by andrei
 *  2003-07-06  added fm_realloc (andrei)
 *  2004-07-19  fragments bookkeeping code and support for 64 bit
 *              memory blocks (64 bit machines & size >= 2^32)
 *              GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
 *  2005-03-02  added fm_info() (andrei)
 *  2005-12-12  fixed realloc shrink real_used accounting (andrei)
 *              fixed initial size (andrei)
 *  2006-02-03  fixed realloc out of mem. free bug (andrei)
 *  2006-04-07  s/DBG/MDBG (andrei)
 *  2007-02-23  added fm_available() (andrei)
 *  2007-06-09  forked from the fm_malloc code (andrei)
 */
#ifdef SF_MALLOC

#include <string.h>
#include <stdlib.h>

#include "sf_malloc.h"
#include "../dprint.h"
#include "../globals.h"
#include "memdbg.h"
#include "../cfg/cfg.h" /* memlog */

#define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
#define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */
/* useful macros */

#define FRAG_NEXT(f) \
    ((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size))

/* SF_ROUNDTO = 2^k, so the following works */
#define ROUNDTO_MASK (~((unsigned long)SF_ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

#define FRAG_OVERHEAD (sizeof(struct sfm_frag))
#define INIT_OVERHEAD \
    (ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))

/* finds hash if s <= SF_MALLOC_OPTIMIZE */
#define GET_SMALL_HASH(s) ((unsigned long)(s)/SF_ROUNDTO)
/* finds hash if s > SF_MALLOC_OPTIMIZE */
#define GET_BIG_HASH(s) \
    (SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)
/* finds the hash value for s, s = SF_ROUNDTO multiple */
#define GET_HASH(s) (((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
                        GET_SMALL_HASH(s): GET_BIG_HASH(s))

#define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
#define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
                        SF_MALLOC_OPTIMIZE_FACTOR-1))
#define UN_HASH(h) (((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
                        UN_HASH_SMALL(h): UN_HASH_BIG(h))

#define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
#define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/BITMAP_BITS)
/* only for "small" hashes (up to HASH(SF_MALLOC_OPTIMIZE)) */
#define HASH_BIT_POS(h)   (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
#define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
#define BIT_TO_HASH(b)    ((b)*BITMAP_BLOCK_SIZE)
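/* Worked example (illustrative only; the real values come from sf_malloc.h):
 * assuming SF_ROUNDTO==8 and SF_MALLOC_OPTIMIZE==2048, a 24 byte request
 * lands in small hash 24/8==3 (an exact-size bucket), while 4096 bytes land
 * in a "big" bucket that holds all sizes in [4096, 8192). UN_HASH() maps a
 * bucket index back to the smallest size stored in that bucket. */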
/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f) (1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE

#define SFM_REALLOC_REMALLOC
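/* With SFM_REALLOC_REMALLOC defined, a growing sfm_realloc() never tries to
 * join the fragment with the free fragment that follows it; it always falls
 * back to malloc + memcpy + free (simpler locking, at the cost of a copy). */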
/* computes hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
{
    unsigned long idx;

    /* s is rounded => s = k*2^n (SF_ROUNDTO=2^n)
     * index = i such that 2^i <= s < 2^(i+1)
     *
     * => index = position of the highest set bit in s (0-based) */
    idx=sizeof(long)*8-1;
    for (; !(s&(1UL<<(sizeof(long)*8-1))); s<<=1, idx--);
    return idx;
}
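/* e.g. big_hash_idx(1024) == 10 and big_hash_idx(1536) == 10, since
 * 2^10 <= 1024, 1536 < 2^11 (bit positions counted from 0) */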
#ifdef DBG_F_MALLOC
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed
#endif

#ifdef SFM_ONE_LOCK

#define SFM_MAIN_HASH_LOCK(qm, hash)   lock_get(&(qm)->lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
#define SFM_POOL_LOCK(p, hash)         lock_get(&(p)->lock)
#define SFM_POOL_UNLOCK(p, hash)       lock_release(&(p)->lock)

#warning "degraded performance, only one lock"

#elif defined SFM_LOCK_PER_BUCKET

#define SFM_MAIN_HASH_LOCK(qm, hash) \
    lock_get(&(qm)->free_hash[(hash)].lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) \
    lock_release(&(qm)->free_hash[(hash)].lock)
#define SFM_POOL_LOCK(p, hash)   lock_get(&(p)->pool_hash[(hash)].lock)
#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)

#else
#error no locks defined
#endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */
#define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
#define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)

static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
                                        searching further) */
static unsigned long pool_id=(unsigned long)-1;

/* call for each child */
int sfm_pool_reset()
{
    pool_id=(unsigned long)-1;
    return 0;
}

#define sfm_fix_pool_id(qm) \
    do{ \
        if (unlikely(pool_id>=SFM_POOLS_NO)) \
            pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
    }while(0)
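/* Note: pool_id is per-process; sfm_fix_pool_id() lazily assigns each
 * process a pool, round-robin via the shared atomic counter crt_id.
 * The frag_push()/frag_pop() helpers below treat a bucket's free list as a
 * LIFO stack; frag_pop() assumes the list is non-empty (callers check
 * `first` under the bucket lock before calling it). */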
static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
{
    frag->u.nxt_free=*head;
    *head=frag;
}

static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
{
    struct sfm_frag* frag;

    frag=*head;
    *head=frag->u.nxt_free;
    return frag;
}
static inline void sfm_pool_insert(struct sfm_pool* pool, int hash,
                                    struct sfm_frag* frag)
{
    unsigned long hash_bit;

    SFM_POOL_LOCK(pool, hash);
    frag_push(&pool->pool_hash[hash].first, frag);
    pool->pool_hash[hash].no++;
    /* set it only if not already set (avoids an expensive
     * cache-thrashing atomic write op) */
    hash_bit=HASH_TO_BITMAP(hash);
    if (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
        atomic_or_long((long*)&pool->bitmap, hash_bit);
    SFM_POOL_UNLOCK(pool, hash);
}
/* returns 1 if it's ok to add a fragment to pool p_id @ hash, 0 otherwise */
static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
                                    int hash, int split)
{
    /* TODO: come up with something better
     * if the fragment is a split/rest from an allocation that was
     * >= the requested size, accept it; else
     * look at misses and current fragments and decide based on them */
    return (p_id<SFM_POOLS_NO) && (split ||
            ((qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
            ((qm->pool[p_id].pool_hash[hash].misses >
                qm->pool[p_id].pool_hash[hash].no) &&
                (qm->pool[p_id].pool_hash[hash].no < MAX_POOL_FRAGS))));
}
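/* In short: a pool accepts a fragment if it is the leftover of a split
 * (already a useful size), if the bucket is under-stocked
 * (< MIN_POOL_FRAGS), or if the bucket misses more often than it holds
 * fragments and is still below MAX_POOL_FRAGS. */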
/* chooses the pool to which a freed fragment should be added
 * returns the pool idx or -1 if it should go back to the main hash */
static inline unsigned long sfm_choose_pool(struct sfm_block* qm,
                                            struct sfm_frag* frag,
                                            int hash, int split)
{
    /* check the original pool first */
    if (sfm_check_pool(qm, frag->id, hash, split))
        return frag->id;
    else{
        /* check if our pool is properly set */
        sfm_fix_pool_id(qm);
        /* check if my pool needs some frags */
        if ((pool_id!=frag->id) && (sfm_check_pool(qm, pool_id, hash, 0))){
            frag->id=pool_id;
            return pool_id;
        }
    }
    /* else add it back to the main hash */
    frag->id=(unsigned long)(-1);
    return frag->id;
}
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
                                    int split)
{
    struct sfm_frag** f;
    unsigned long p_id;
    int hash;
    unsigned long hash_bit;

    if (likely(frag->size<=SF_POOL_MAX_SIZE)){
        hash=GET_SMALL_HASH(frag->size);
        if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
                        (unsigned long)-1)){
            /* add it back to the "main" hash */
            SFM_MAIN_HASH_LOCK(qm, hash);
            frag->id=(unsigned long)(-1); /* main hash marker */
            /* insert it here */
            frag_push(&(qm->free_hash[hash].first), frag);
            qm->free_hash[hash].no++;
            /* set it only if not already set (avoids an expensive
             * cache-thrashing atomic write op) */
            hash_bit=HASH_TO_BITMAP(hash);
            if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
                atomic_or_long((long*)&qm->bitmap, hash_bit);
            SFM_MAIN_HASH_UNLOCK(qm, hash);
        }else{
            /* add it to one of the pools */
            sfm_pool_insert(&qm->pool[p_id], hash, frag);
        }
    }else{
        hash=GET_BIG_HASH(frag->size);
        SFM_MAIN_HASH_LOCK(qm, hash);
        f=&(qm->free_hash[hash].first);
        for(; *f; f=&((*f)->u.nxt_free))
            if (frag->size <= (*f)->size) break;
        frag->id=(unsigned long)(-1); /* main hash marker */
        /* insert it here, keeping the big bucket sorted by size */
        frag->u.nxt_free=*f;
        *f=frag;
        qm->free_hash[hash].no++;
        /* inc. big hash free size? */
        SFM_MAIN_HASH_UNLOCK(qm, hash);
    }
}
/* size should be already rounded-up */
static inline
#ifdef DBG_F_MALLOC
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
                    unsigned long size,
                    const char* file, const char* func, unsigned int line)
#else
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
                    unsigned long size)
#endif
{
    unsigned long rest;
    struct sfm_frag* n;
    int bigger_rest;

    rest=frag->size-size;
#ifdef MEM_FRAG_AVOIDANCE
    if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
        (rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough */
        bigger_rest=1;
#else
    if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
        bigger_rest=rest>=(size+FRAG_OVERHEAD);
#endif
        frag->size=size;
        /* split the fragment */
        n=FRAG_NEXT(frag);
        n->size=rest-FRAG_OVERHEAD;
        n->id=pool_id;
        FRAG_CLEAR_USED(n); /* never used */
#ifdef DBG_F_MALLOC
        /* frag created by malloc, mark it */
        n->file=file;
        n->func="frag. from sfm_malloc";
        n->line=line;
        n->check=ST_CHECK_PATTERN;
#endif
        /* reinsert n in the free list */
        sfm_insert_free(qm, n, bigger_rest);
    }else{
        /* we cannot split this fragment any more => alloc all of it */
    }
}
/* init malloc and return a sfm_block */
struct sfm_block* sfm_malloc_init(char* address, unsigned long size)
{
    char* start;
    char* end;
    struct sfm_block* qm;
    unsigned long init_overhead;
    int r;
#ifdef SFM_LOCK_PER_BUCKET
    int i;
#endif

    /* make address and size multiples of SF_ROUNDTO */
    start=(char*)ROUNDUP((unsigned long) address);
    DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
            SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
    DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
            SF_HASH_SIZE, (long)sizeof(struct sfm_block));
    DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);

    if (size<start-address) return 0;
    size-=(start-address);
    if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
    size=ROUNDDOWN(size);

    init_overhead=INIT_OVERHEAD;
    if (size < init_overhead)
    {
        /* not enough mem to create our control structures !!! */
        return 0;
    }
    end=start+size;
    qm=(struct sfm_block*)start;
    memset(qm, 0, sizeof(struct sfm_block));
    qm->size=size;
    size-=init_overhead;

    qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
    qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
    /* init the initial fragment */
    qm->first_frag->size=size;
    qm->first_frag->id=(unsigned long)-1; /* not in a pool */
    qm->last_frag->size=0;

#ifdef DBG_F_MALLOC
    qm->first_frag->check=ST_CHECK_PATTERN;
    qm->last_frag->check=END_CHECK_PATTERN1;
#endif

    /* link the initial fragment into the free list */
    sfm_insert_free(qm, qm->first_frag, 0);
    sfm_max_hash=GET_HASH(size);

    /* init locks */
    if (lock_init(&qm->get_and_split)==0)
        goto error;
#ifdef SFM_ONE_LOCK
    if (lock_init(&qm->lock)==0){
        lock_destroy(&qm->get_and_split);
        goto error;
    }
    for (r=0; r<SFM_POOLS_NO; r++){
        if (lock_init(&qm->pool[r].lock)==0){
            for (; r>0; r--) lock_destroy(&qm->pool[r-1].lock);
            lock_destroy(&qm->lock);
            lock_destroy(&qm->get_and_split);
            goto error;
        }
    }
#elif defined(SFM_LOCK_PER_BUCKET)
    for (r=0; r<SF_HASH_SIZE; r++)
        if (lock_init(&qm->free_hash[r].lock)==0){
            for(; r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
            lock_destroy(&qm->get_and_split);
            goto error;
        }
    for (i=0; i<SFM_POOLS_NO; i++){
        for (r=0; r<SF_HASH_POOL_SIZE; r++)
            if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
                for(; r>0; r--)
                    lock_destroy(&qm->pool[i].pool_hash[r-1].lock);
                for(; i>0; i--){
                    for (r=0; r<SF_HASH_POOL_SIZE; r++)
                        lock_destroy(&qm->pool[i-1].pool_hash[r].lock);
                }
                for (r=0; r<SF_HASH_SIZE; r++)
                    lock_destroy(&qm->free_hash[r].lock);
                lock_destroy(&qm->get_and_split);
                goto error;
            }
    }
#endif
    qm->is_init=1;
    return qm;
error:
    return 0;
}
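#if 0 /* usage sketch (not compiled): assumes `shm_base`/`shm_size` describe
       * a shared memory mapping set up elsewhere (e.g. mmap/shm_open) */
static struct sfm_block* shm_block;

static int init_example(char* shm_base, unsigned long shm_size)
{
    shm_block=sfm_malloc_init(shm_base, shm_size);
    if (shm_block==0)
        return -1; /* area too small, or lock init failed */
    return 0;
}
#endif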
/* cleanup */
void sfm_malloc_destroy(struct sfm_block* qm)
{
    int r, i;

    /* destroy all the locks */
    if (!qm || !qm->is_init)
        return; /* nothing to do */
    lock_destroy(&qm->get_and_split);
#ifdef SFM_ONE_LOCK
    lock_destroy(&qm->lock);
    for (r=0; r<SFM_POOLS_NO; r++){
        lock_destroy(&qm->pool[r].lock);
    }
#elif defined(SFM_LOCK_PER_BUCKET)
    for (r=0; r<SF_HASH_SIZE; r++)
        lock_destroy(&qm->free_hash[r].lock);
    for (i=0; i<SFM_POOLS_NO; i++){
        for (r=0; r<SF_HASH_POOL_SIZE; r++)
            lock_destroy(&qm->pool[i].pool_hash[r].lock);
    }
#endif
    qm->is_init=0;
}
/* returns the next set bit in the bitmap, starting at b
 * if bit b itself is set, returns b
 * if no set bit is found, returns BITMAP_BITS */
static inline unsigned long _next_set_bit(unsigned long b,
                                            unsigned long* bitmap)
{
    /* check b<BITMAP_BITS first: shifting by >= the type width is
     * undefined behaviour */
    for (; b<BITMAP_BITS && !((1UL<<b)& *bitmap); b++);
    return b;
}

/* returns the first hash of bitmap block b and sets *end to one past its
 * last hash (handles also the "rest" block at the end) */
static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
{
    unsigned long s;

    if ((unlikely(b>=BITMAP_BITS))){
        s=BIT_TO_HASH(BITMAP_BITS);
        *end=SF_HASH_POOL_SIZE; /* last, possible rest block */
    }else{
        s=BIT_TO_HASH(b);
        *end=s+BITMAP_BLOCK_SIZE;
    }
    return s;
}
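/* Search note: a set bit in the bitmap covers BITMAP_BLOCK_SIZE consecutive
 * hash buckets, so the scans below can skip whole empty blocks instead of
 * probing every bucket; buckets past BIT_TO_HASH(BITMAP_BITS) (the "rest"
 * block) are not covered by the bitmap and are always scanned. */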
#ifdef DBG_F_MALLOC
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
                        struct sfm_pool* pool, int hash, unsigned long size,
                        const char* file, const char* func, unsigned int line)
#else
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
                                                struct sfm_pool* pool,
                                                int hash, unsigned long size)
#endif
{
    int r;
    int next_block;
    struct sfm_frag* volatile* f;
    struct sfm_frag* frag;
    unsigned long b;
    unsigned long eob;

    /* special case for r=hash */
    r=hash;
    f=&pool->pool_hash[r].first;
    if (*f==0)
        goto not_found;
    SFM_POOL_LOCK(pool, r);
    if (unlikely(*f==0)){
        /* gone between the unlocked check and the lock */
        SFM_POOL_UNLOCK(pool, r);
        goto not_found;
    }
found:
    /* detach it from the free list */
    frag=frag_pop((struct sfm_frag**)f);
    frag->u.nxt_free=0; /* mark it as 'taken' */
    frag->id=pool_id;
    pool->pool_hash[r].no--;
    SFM_POOL_UNLOCK(pool, r);
#ifdef DBG_F_MALLOC
    sfm_split_frag(qm, frag, size, file, func, line);
#else
    sfm_split_frag(qm, frag, size);
#endif
    if (&qm->pool[pool_id]==pool)
        atomic_inc_long((long*)&pool->hits);
    return frag;

not_found:
    atomic_inc_long((long*)&pool->pool_hash[r].misses);
    r++;
    b=HASH_BIT_POS(r);
    while(r<SF_HASH_POOL_SIZE){
        b=_next_set_bit(b, &pool->bitmap);
        next_block=_hash_range(b, &eob);
        r=(r<next_block)?next_block:r;
        for (; r<eob; r++){
            f=&pool->pool_hash[r].first;
            if (*f){
                SFM_POOL_LOCK(pool, r);
                if (unlikely(*f==0)){
                    /* not found */
                    SFM_POOL_UNLOCK(pool, r);
                }else
                    goto found;
            }
            atomic_inc_long((long*)&pool->pool_hash[r].misses);
        }
        b++;
    }
#if 0 /* EXPENSIVE BUG CHECK */
    for (r=hash; r<SF_HASH_POOL_SIZE; r++){
        f=&pool->pool_hash[r].first;
        if (*f){
            SFM_POOL_LOCK(pool, r);
            if (unlikely(*f==0)){
                /* not found */
                SFM_POOL_UNLOCK(pool, r);
            }else{
                b=_next_set_bit(HASH_BIT_POS(r), &pool->bitmap);
                next_block=_hash_range(b, &eob);
                BUG("pool_get_frag: found fragm. %d at %d (bit %ld range"
                    " %ld-%ld), next set bit=%ld bitmap %ld (%p)\n",
                    hash, r, HASH_BIT_POS(r), next_block, eob, b,
                    pool->bitmap, &pool->bitmap);
                goto found;
            }
        }
    }
#endif
    atomic_inc_long((long*)&pool->missed);
    return 0;
}
#ifdef DBG_F_MALLOC
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
                        unsigned long size,
                        const char* file, const char* func, unsigned int line)
#else
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
                                                unsigned long size)
#endif
{
    int r;
    int next_block;
    struct sfm_frag* volatile* f;
    struct sfm_frag* frag;
    unsigned long b;
    unsigned long eob;

    r=hash;
    b=HASH_BIT_POS(r);
    while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
        b=_next_set_bit(b, &qm->bitmap);
        next_block=_hash_range(b, &eob);
        r=(r<next_block)?next_block:r;
        for (; r<eob; r++){
            f=&qm->free_hash[r].first;
            if (*f){
                SFM_MAIN_HASH_LOCK(qm, r);
                if (unlikely(*f==0)){
                    /* not found, somebody stole it */
                    SFM_MAIN_HASH_UNLOCK(qm, r);
                    continue;
                }
                /* detach it from the free list */
                frag=frag_pop((struct sfm_frag**)f);
                frag->u.nxt_free=0; /* mark it as 'taken' */
                qm->free_hash[r].no--;
                SFM_MAIN_HASH_UNLOCK(qm, r);
                frag->id=pool_id;
#ifdef DBG_F_MALLOC
                sfm_split_frag(qm, frag, size, file, func, line);
#else
                sfm_split_frag(qm, frag, size);
#endif
                return frag;
            }
        }
        b++;
    }
    /* big fragments */
    SFM_BIG_GET_AND_SPLIT_LOCK(qm);
    for (; r<=sfm_max_hash; r++){
        f=&qm->free_hash[r].first;
        if (*f){
            SFM_MAIN_HASH_LOCK(qm, r);
            if (unlikely((*f)==0)){
                /* not found */
                SFM_MAIN_HASH_UNLOCK(qm, r);
                continue;
            }
            for(; (*f); f=&((*f)->u.nxt_free))
                if ((*f)->size>=size){
                    /* found, detach it from the free list */
                    frag=*f;
                    *f=frag->u.nxt_free;
                    frag->u.nxt_free=0; /* mark it as 'taken' */
                    qm->free_hash[r].no--;
                    SFM_MAIN_HASH_UNLOCK(qm, r);
                    frag->id=pool_id;
#ifdef DBG_F_MALLOC
                    sfm_split_frag(qm, frag, size, file, func, line);
#else
                    sfm_split_frag(qm, frag, size);
#endif
                    SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
                    return frag;
                }
            SFM_MAIN_HASH_UNLOCK(qm, r);
            /* try in a bigger bucket */
        }
    }
    SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
    return 0;
}
#ifdef DBG_F_MALLOC
void* sfm_malloc(struct sfm_block* qm, unsigned long size,
                    const char* file, const char* func, unsigned int line)
#else
void* sfm_malloc(struct sfm_block* qm, unsigned long size)
#endif
{
    struct sfm_frag* frag;
    int hash;
    unsigned int i;

#ifdef DBG_F_MALLOC
    MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
            line);
#endif
    /* size must be a multiple of SF_ROUNDTO */
    size=ROUNDUP(size);
    /* if (size>(qm->size-qm->real_used)) return 0; */

    /* check if our pool id is set */
    sfm_fix_pool_id(qm);

    /* search for a suitable free frag */
    if (likely(size<=SF_POOL_MAX_SIZE)){
        hash=GET_SMALL_HASH(size);
        /* try first in our own pool */
#ifdef DBG_F_MALLOC
        if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
                                        file, func, line))!=0))
            goto found;
        /* try in the "main" free hash, going through all of it */
        if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
            goto found;
        /* really low mem, try in the other pools */
        for (i=(pool_id+1); i<(pool_id+SFM_POOLS_NO); i++){
            if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
                                    file, func, line))!=0)
                goto found;
        }
#else
        if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
                        !=0))
            goto found;
        /* try in the "main" free hash, going through all of it */
        if (likely((frag=main_get_frag(qm, hash, size))!=0))
            goto found;
        /* really low mem, try in the other pools */
        for (i=(pool_id+1); i<(pool_id+SFM_POOLS_NO); i++){
            if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
                    !=0)
                goto found;
        }
#endif
        /* not found, bad! */
        return 0;
    }else{
        hash=GET_BIG_HASH(size);
#ifdef DBG_F_MALLOC
        if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
            return 0; /* not found, bad! */
#else
        if ((frag=main_get_frag(qm, hash, size))==0)
            return 0; /* not found, bad! */
#endif
    }

found:
    /* we found it! */
#ifdef DBG_F_MALLOC
    frag->file=file;
    frag->func=func;
    frag->line=line;
    frag->check=ST_CHECK_PATTERN;
    MDBG("sfm_malloc(%p, %lu) returns address %p\n", qm, size,
            (char*)frag+sizeof(struct sfm_frag));
#endif
    FRAG_MARK_USED(frag); /* mark it as used */
    return (char*)frag+sizeof(struct sfm_frag);
}
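#if 0 /* usage sketch (not compiled): `shm_block` is assumed to be the
       * sfm_block returned by sfm_malloc_init(); plain (non DBG_F_MALLOC)
       * signatures are used */
static void alloc_example(void)
{
    char* buf;

    buf=sfm_malloc(shm_block, 256);
    if (buf==0)
        return; /* out of shared memory */
    memset(buf, 0, 256);
    sfm_free(shm_block, buf);
}
#endif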
#ifdef DBG_F_MALLOC
void sfm_free(struct sfm_block* qm, void* p, const char* file,
                const char* func, unsigned int line)
#else
void sfm_free(struct sfm_block* qm, void* p)
#endif
{
    struct sfm_frag* f;

    /* this check must come before the out-of-block test below, otherwise
     * free(0) would abort in debug mode */
    if (unlikely(p==0)) {
        LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
        return;
    }
#ifdef DBG_F_MALLOC
    MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
            line);
    if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
        LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
                "aborting\n", p);
        abort();
    }
#endif
    f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
    MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
            f->file, f->func, f->line);
    f->file=file;
    f->func=func;
    f->line=line;
#endif
    sfm_insert_free(qm, f, 0);
}
#ifdef DBG_F_MALLOC
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
                    const char* file, const char* func, unsigned int line)
#else
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
#endif
{
    struct sfm_frag *f;
    unsigned long orig_size;
    void *ptr;
#ifndef SFM_REALLOC_REMALLOC
    struct sfm_frag *n;
    struct sfm_frag **pf;
    unsigned long diff;
    unsigned long p_id;
    int hash;
    unsigned long n_size;
    struct sfm_pool * pool;
#endif

#ifdef DBG_F_MALLOC
    MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
            file, func, line);
    if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
        LOG(L_CRIT, "BUG: sfm_realloc: bad pointer %p (out of memory block!)"
                " - aborting\n", p);
        abort();
    }
#endif
    if (size==0) {
        if (p)
#ifdef DBG_F_MALLOC
            sfm_free(qm, p, file, func, line);
#else
            sfm_free(qm, p);
#endif
        return 0;
    }
    if (p==0)
#ifdef DBG_F_MALLOC
        return sfm_malloc(qm, size, file, func, line);
#else
        return sfm_malloc(qm, size);
#endif
    f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
    MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
            f, f->file, f->func, f->line);
#endif
    size=ROUNDUP(size);
    orig_size=f->size;
    if (f->size > size){
        /* shrink */
#ifdef DBG_F_MALLOC
        MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
        sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
#else
        sfm_split_frag(qm, f, size);
#endif
    }else if (f->size<size){
        /* grow */
#ifdef DBG_F_MALLOC
        MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
#ifndef SFM_REALLOC_REMALLOC
        diff=size-f->size;
        n=FRAG_NEXT(f);
        if (((char*)n < (char*)qm->last_frag) &&
                (n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
            /* join with the next fragment */
            /* detach n from the free list */
try_again:
            p_id=n->id;
            n_size=n->size;
            if ((unlikely(p_id>=SFM_POOLS_NO))){ /* n is in the main hash */
                hash=GET_HASH(n_size);
                SFM_MAIN_HASH_LOCK(qm, hash);
                if (unlikely((n->u.nxt_free==0) ||
                            ((n->size+FRAG_OVERHEAD)<diff))){
                    SFM_MAIN_HASH_UNLOCK(qm, hash);
                    goto not_found;
                }
                if (unlikely((n->id!=p_id) || (n->size!=n_size))){
                    /* fragment still free, but changed: either moved
                     * to another pool or has a diff. size */
                    SFM_MAIN_HASH_UNLOCK(qm, hash);
                    goto try_again;
                }
                pf=&(qm->free_hash[hash].first);
                /* find it */
                for(; (*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow*/
                if (*pf==0){
                    SFM_MAIN_HASH_UNLOCK(qm, hash);
                    /* not found, bad! */
                    LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
                            "free list (hash=%d)\n", n, hash);
                    /* somebody is in the process of changing it? */
                    goto not_found;
                }
                /* detach */
                *pf=n->u.nxt_free;
                n->u.nxt_free=0; /* mark it immediately as detached */
                qm->free_hash[hash].no--;
                SFM_MAIN_HASH_UNLOCK(qm, hash);
                /* join */
                f->size+=n->size+FRAG_OVERHEAD;
                /* split it if necessary */
                if (f->size > size){
#ifdef DBG_F_MALLOC
                    sfm_split_frag(qm, f, size, file, "fragm. from "
                            "sfm_realloc", line);
#else
                    sfm_split_frag(qm, f, size);
#endif
                }
            }else{ /* p_id < SFM_POOLS_NO (=> n is in a pool) */
                hash=GET_SMALL_HASH(n_size);
                pool=&qm->pool[p_id];
                SFM_POOL_LOCK(pool, hash);
                if (unlikely((n->u.nxt_free==0) ||
                            ((n->size+FRAG_OVERHEAD)<diff))){
                    SFM_POOL_UNLOCK(pool, hash);
                    goto not_found;
                }
                if (unlikely((n->id!=p_id) || (n->size!=n_size))){
                    /* fragment still free, but changed: either moved
                     * to another pool or has a diff. size */
                    SFM_POOL_UNLOCK(pool, hash);
                    goto try_again;
                }
                pf=&(pool->pool_hash[hash].first);
                /* find it */
                for(; (*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow*/
                if (*pf==0){
                    SFM_POOL_UNLOCK(pool, hash);
                    /* not found, bad! */
                    LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
                            "free list (hash=%d)\n", n, hash);
                    /* somebody is in the process of changing it? */
                    goto not_found;
                }
                /* detach */
                *pf=n->u.nxt_free;
                n->u.nxt_free=0; /* mark it immediately as detached */
                pool->pool_hash[hash].no--;
                SFM_POOL_UNLOCK(pool, hash);
                /* join */
                f->size+=n->size+FRAG_OVERHEAD;
                /* split it if necessary */
                if (f->size > size){
#ifdef DBG_F_MALLOC
                    sfm_split_frag(qm, f, size, file, "fragm. from "
                            "sfm_realloc", line);
#else
                    sfm_split_frag(qm, f, size);
#endif
                }
            }
        }else{
not_found:
            /* could not join => realloc */
#else /* SFM_REALLOC_REMALLOC */
        {
#endif /* SFM_REALLOC_REMALLOC */
#ifdef DBG_F_MALLOC
            ptr=sfm_malloc(qm, size, file, func, line);
#else
            ptr=sfm_malloc(qm, size);
#endif
            if (ptr){
                /* copy the old contents (needed e.g. by libssl) */
                memcpy(ptr, p, orig_size);
#ifdef DBG_F_MALLOC
                sfm_free(qm, p, file, func, line);
#else
                sfm_free(qm, p);
#endif
            }
            p=ptr;
        }
    }else{
        /* same size, do nothing */
#ifdef DBG_F_MALLOC
        MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n",
                f->size, size);
#endif
    }
#ifdef DBG_F_MALLOC
    MDBG("sfm_realloc: returning %p\n", p);
#endif
    return p;
}
void sfm_status(struct sfm_block* qm)
{
    struct sfm_frag* f;
    int i,j;
    int h;
    int unused;
    unsigned long size;
    int k;
    int memlog;

    memlog=cfg_get(core, core_cfg, memlog);
    LOG(memlog, "sfm_status (%p):\n", qm);
    if (!qm) return;
    LOG(memlog, " heap size= %ld\n", qm->size);
    LOG(memlog, "dumping free list:\n");
    for(h=0,i=0,size=0; h<=sfm_max_hash; h++){
        SFM_MAIN_HASH_LOCK(qm, h);
        unused=0;
        for (f=qm->free_hash[h].first, j=0; f;
                size+=f->size, f=f->u.nxt_free, i++, j++){
            if (!FRAG_WAS_USED(f)){
                unused++;
#ifdef DBG_F_MALLOC
                LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
                        " address %p size %lu, created from %s: %s(%ld)\n",
                        h, f, (char*)f+sizeof(struct sfm_frag), f->size,
                        f->file, f->func, f->line);
#endif
            }
        }
        if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
                    " bucket size: %9lu - %9lu (first %9lu)\n",
                    h, j, unused, UN_HASH(h),
                    ((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)*UN_HASH(h),
                    qm->free_hash[h].first->size);
        if (j!=qm->free_hash[h].no){
            LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
                    " for hash %3d\n", j, qm->free_hash[h].no, h);
        }
        SFM_MAIN_HASH_UNLOCK(qm, h);
    }
    for (k=0; k<SFM_POOLS_NO; k++){
        for(h=0; h<SF_HASH_POOL_SIZE; h++){
            SFM_POOL_LOCK(&qm->pool[k], h);
            unused=0;
            for (f=qm->pool[k].pool_hash[h].first, j=0; f;
                    size+=f->size, f=f->u.nxt_free, i++, j++){
                if (!FRAG_WAS_USED(f)){
                    unused++;
#ifdef DBG_F_MALLOC
                    LOG(memlog, "[%2d] unused fragm.: hash = %3d,"
                            " fragment %p, address %p size %lu, created"
                            " from %s: %s(%ld)\n", k,
                            h, f, (char*)f+sizeof(struct sfm_frag),
                            f->size, f->file, f->func, f->line);
#endif
                }
            }
            if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
                        "%5d\n\t\t bucket size: %9lu - %9lu "
                        "(first %9lu)\n",
                        k, h, j, unused, UN_HASH(h),
                        ((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)*UN_HASH(h),
                        qm->pool[k].pool_hash[h].first->size);
            if (j!=qm->pool[k].pool_hash[h].no){
                LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
                        " count: %d!=%ld for hash %3d\n",
                        k, j, qm->pool[k].pool_hash[h].no, h);
            }
            SFM_POOL_UNLOCK(&qm->pool[k], h);
        }
    }
    LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
    LOG(memlog, "-----------------------------\n");
}
/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void sfm_info(struct sfm_block* qm, struct mem_info* info)
{
    int r, k;
    unsigned long total_frags;
    struct sfm_frag* f;

    memset(info, 0, sizeof(*info));
    total_frags=0;
    info->total_size=qm->size;
    info->min_frag=SF_MIN_FRAG_SIZE;
    /* we'll have to compute it all */
    for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
        info->free+=qm->free_hash[r].no*UN_HASH(r);
        total_frags+=qm->free_hash[r].no;
    }
    for(; r<=sfm_max_hash; r++){
        total_frags+=qm->free_hash[r].no;
        SFM_MAIN_HASH_LOCK(qm, r);
        for(f=qm->free_hash[r].first; f; f=f->u.nxt_free){
            info->free+=f->size;
        }
        SFM_MAIN_HASH_UNLOCK(qm, r);
    }
    for (k=0; k<SFM_POOLS_NO; k++){
        for (r=0; r<SF_HASH_POOL_SIZE; r++){
            info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
            total_frags+=qm->pool[k].pool_hash[r].no;
        }
    }
    info->real_used=info->total_size-info->free;
    info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
                -FRAG_OVERHEAD;
    info->max_used=0; /* we don't really know */
    info->total_frags=total_frags;
}
/* returns how much free memory is available
 * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
unsigned long sfm_available(struct sfm_block* qm)
{
    /* we don't know how much free memory we have and it's too expensive
     * to compute it */
    return ((unsigned long)-1);
}

#endif