/* $Id$
 *
 * shared memory, multi-process safe, pool based, mostly lockless version of
 * f_malloc
 *
 * Copyright (C) 2007 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * History:
 * --------
 *  created by andrei
 *  2003-07-06  added fm_realloc (andrei)
 *  2004-07-19  fragment bookkeeping code and support for 64 bit
 *              memory blocks (64 bit machines & size >= 2^32) (andrei)
 *              GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
 *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
 *  2005-03-02  added fm_info() (andrei)
 *  2005-12-12  fixed realloc shrink real_used accounting (andrei)
 *              fixed initial size (andrei)
 *  2006-02-03  fixed realloc out of mem. free bug (andrei)
 *  2006-04-07  s/DBG/MDBG (andrei)
 *  2007-02-23  added fm_available() (andrei)
 *  2007-06-09  forked from the fm_malloc code (andrei)
 *  2007-06-11  forked from the sfm_malloc code (andrei)
 */

#ifdef LL_MALLOC

#include <string.h>
#include <stdlib.h>

#include "ll_malloc.h"
#include "../dprint.h"
#include "../globals.h"
#include "memdbg.h"
#include "../cfg/cfg.h" /* memlog */

#define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
#define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */

/* useful macros */
#define FRAG_NEXT(f) \
	((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size))

/* SF_ROUNDTO = 2^k, so the following works */
#define ROUNDTO_MASK (~((unsigned long)SF_ROUNDTO-1))
#define ROUNDUP(s)   (((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s) ((s)&ROUNDTO_MASK)

#define FRAG_OVERHEAD (sizeof(struct sfm_frag))
#define INIT_OVERHEAD \
	(ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))

/* finds the hash if s <= SF_MALLOC_OPTIMIZE */
#define GET_SMALL_HASH(s) (unsigned long)(s)/SF_ROUNDTO
/* finds the hash if s > SF_MALLOC_OPTIMIZE */
#define GET_BIG_HASH(s) \
	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)
/* finds the hash value for s; s must be a SF_ROUNDTO multiple */
#define GET_HASH(s) ( ((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
						GET_SMALL_HASH(s): GET_BIG_HASH(s) )

#define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
#define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
						SF_MALLOC_OPTIMIZE_FACTOR-1))
#define UN_HASH(h) ( ((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
						UN_HASH_SMALL(h): UN_HASH_BIG(h) )
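
/* Worked example (illustrative only; assumes the usual defaults
 * SF_ROUNDTO==8, SF_MALLOC_OPTIMIZE==2048 and SF_MALLOC_OPTIMIZE_FACTOR==11,
 * check the SF_* values in the headers):
 *  - a rounded size of 24 is "small": GET_HASH(24)==24/8==3, i.e. one
 *    exact-size bucket per 8-byte step, and UN_HASH(3)==24;
 *  - a size of 5000 is "big": big_hash_idx(5000)==12, so
 *    GET_HASH(5000)==2048/8+12-11+1==258, i.e. one bucket per power of
 *    two, and UN_HASH(258)==4096 (the bucket covers [4096, 8192)). */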

#define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
#define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/BITMAP_BITS)
/* only for "small" hashes (up to HASH(SF_MALLOC_OPTIMIZE)) */
#define HASH_BIT_POS(h)   (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
#define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
#define BIT_TO_HASH(b)    ((b)*BITMAP_BLOCK_SIZE)

/* mark/test used/unused frags */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f) (1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE
#define SFM_REALLOC_REMALLOC

/* computes the hash number for big buckets */
inline static unsigned long big_hash_idx(unsigned long s)
{
	unsigned long idx;

	/* s is rounded => s = k*2^n (SF_ROUNDTO=2^n)
	 * index = i such that 2^(i+1) > s >= 2^i
	 *
	 * => index = position of the highest set bit in s */
	idx=sizeof(long)*8-1;
	for (; !(s&(1UL<<(sizeof(long)*8-1))); s<<=1, idx--);
	return idx;
}
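
/* Compiled-out sanity check for the bucket math above (illustrative
 * sketch only; assumes 64-bit longs): big_hash_idx(s) is the position of
 * the highest set bit, i.e. floor(log2(s)). */
#if 0
static void big_hash_idx_selftest(void)
{
	/* 2^12 <= 5000 < 2^13 => idx == 12 */
	if (big_hash_idx(4096)!=12 || big_hash_idx(5000)!=12 ||
			big_hash_idx(8192)!=13)
		abort();
}
#endif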

#ifdef DBG_F_MALLOC
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed
#endif

#ifdef SFM_ONE_LOCK
#define SFM_MAIN_HASH_LOCK(qm, hash)   lock_get(&(qm)->lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
#define SFM_POOL_LOCK(p, hash)   lock_get(&(p)->lock)
#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->lock)
#warning "degraded performance, only one lock"
#elif defined SFM_LOCK_PER_BUCKET
#define SFM_MAIN_HASH_LOCK(qm, hash) \
	lock_get(&(qm)->free_hash[(hash)].lock)
#define SFM_MAIN_HASH_UNLOCK(qm, hash) \
	lock_release(&(qm)->free_hash[(hash)].lock)
#define SFM_POOL_LOCK(p, hash)   lock_get(&(p)->pool_hash[(hash)].lock)
#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)
#else
#error no locks defined
#endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */

#define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
#define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)

static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
										searching further) */
static unsigned long pool_id=(unsigned long)-1;

/* call for each child */
int sfm_pool_reset()
{
	pool_id=(unsigned long)-1;
	return 0;
}

#define sfm_fix_pool_id(qm) \
	do{ \
		if (unlikely(pool_id>=SFM_POOLS_NO)) \
			pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
	}while(0)

static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
{
	register struct sfm_frag* old;
	register struct sfm_frag* crt;

	crt=(void*)atomic_get_long(head);
	do{
		frag->u.nxt_free=crt;
		old=crt;
		membar_write_atomic_op();
		crt=(void*)atomic_cmpxchg_long((void*)head, (long)old, (long)frag);
	}while(crt!=old);
}

static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
{
	register struct sfm_frag* old;
	register struct sfm_frag* crt;
	register struct sfm_frag* nxt;

	crt=(void*)atomic_get_long(head);
	do{
		/* if circular list, test not needed */
		nxt=crt?crt->u.nxt_free:0;
		old=crt;
		membar_read_atomic_op();
		crt=(void*)atomic_cmpxchg_long((void*)head, (long)old, (long)nxt);
	}while(crt!=old);
	return crt;
}
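
/* Usage sketch for the lockless LIFO primitives above (illustrative only,
 * not called by the allocator itself): both frag_push() and frag_pop()
 * retry the compare-and-swap until the head pointer is unchanged between
 * the read and the exchange, so concurrent processes never need a lock. */
#if 0
static void frag_lifo_example(struct sfm_frag** head, struct sfm_frag* f)
{
	struct sfm_frag* g;

	frag_push(head, f); /* atomically links f as the new list head */
	g=frag_pop(head);   /* g==f unless another process raced us */
	(void)g;
}
#endif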

static inline void sfm_pool_insert(struct sfm_pool* pool, int hash,
									struct sfm_frag* frag)
{
	unsigned long hash_bit;

	frag_push(&pool->pool_hash[hash].first, frag);
	atomic_inc_long((long*)&pool->pool_hash[hash].no);
	/* set it only if not already set (avoids an expensive
	 * cache-thrashing atomic write op) */
	hash_bit=HASH_TO_BITMAP(hash);
	if (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
		atomic_or_long((long*)&pool->bitmap, hash_bit);
}

/* returns 1 if it's ok to add a fragm. to pool p_id @ hash, 0 otherwise */
static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
									int hash, int split)
{
	/* TODO: come up with something better
	 * if the fragment is some split/rest from an allocation that is
	 * >= the requested size, accept it; else look at the misses and the
	 * current fragment count and decide based on them */
	return (p_id<SFM_POOLS_NO) && (split ||
			( (qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
				((qm->pool[p_id].pool_hash[hash].misses >
					qm->pool[p_id].pool_hash[hash].no) &&
					(qm->pool[p_id].pool_hash[hash].no<MAX_POOL_FRAGS) ) ) );
}

/* chooses the pool to which a freed fragment should be added;
 * returns the pool idx or -1 if it should be added to the main hash */
static inline unsigned long sfm_choose_pool(struct sfm_block* qm,
											struct sfm_frag* frag,
											int hash, int split)
{
	/* check the original pool first */
	if (sfm_check_pool(qm, frag->id, hash, split))
		return frag->id;
	else{
		/* check if our pool is properly set */
		sfm_fix_pool_id(qm);
		/* check if my pool needs some frags */
		if ((pool_id!=frag->id) && (sfm_check_pool(qm, pool_id, hash, 0))){
			frag->id=pool_id;
			return pool_id;
		}
	}
	/* else add it back to the main hash */
	frag->id=(unsigned long)(-1);
	return frag->id;
}

static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
									int split)
{
	struct sfm_frag** f;
	unsigned long p_id;
	int hash;
	unsigned long hash_bit;

	if (likely(frag->size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(frag->size);
		if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
					(unsigned long)-1)){
			/* add it back to the "main" hash */
			frag->id=(unsigned long)(-1); /* main hash marker */
			/* insert it here */
			frag_push(&(qm->free_hash[hash].first), frag);
			atomic_inc_long((long*)&qm->free_hash[hash].no);
			/* set it only if not already set (avoids an expensive
			 * cache-thrashing atomic write op) */
			hash_bit=HASH_TO_BITMAP(hash);
			if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
				atomic_or_long((long*)&qm->bitmap, hash_bit);
		}else{
			/* add it to one of the pools */
			sfm_pool_insert(&qm->pool[p_id], hash, frag);
		}
	}else{
		hash=GET_BIG_HASH(frag->size);
		SFM_MAIN_HASH_LOCK(qm, hash);
		f=&(qm->free_hash[hash].first);
		for(; *f; f=&((*f)->u.nxt_free))
			if (frag->size <= (*f)->size) break;
		frag->id=(unsigned long)(-1); /* main hash marker */
		/* insert it here */
		frag->u.nxt_free=*f;
		*f=frag;
		qm->free_hash[hash].no++;
		/* inc. big hash free size ? */
		SFM_MAIN_HASH_UNLOCK(qm, hash);
	}
}

/* size should already be rounded up */
static inline
#ifdef DBG_F_MALLOC
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size)
#endif
{
	unsigned long rest;
	struct sfm_frag* n;
	int bigger_rest;

	rest=frag->size-size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough */
		bigger_rest=1;
#else
	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
		bigger_rest=rest>=(size+FRAG_OVERHEAD);
#endif
		frag->size=size;
		/* split the fragment */
		n=FRAG_NEXT(frag);
		n->size=rest-FRAG_OVERHEAD;
		n->id=pool_id;
		FRAG_CLEAR_USED(n); /* never used */
#ifdef DBG_F_MALLOC
		/* frag created by malloc, mark it */
		n->file=file;
		n->func="frag. from sfm_malloc";
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in the free list */
		sfm_insert_free(qm, n, bigger_rest);
	}else{
		/* we cannot split this fragment any more => alloc all of it */
	}
}
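
/* Split example (illustrative only; assumes FRAG_OVERHEAD is well under
 * 512 bytes): serving a 256-byte request from a 1024-byte free fragment
 * leaves rest==768 >= FRAG_OVERHEAD+256, so the fragment is cut in two:
 * the first part keeps 256 bytes for the caller, the second becomes a
 * new free fragment of 768-FRAG_OVERHEAD bytes and is reinserted via
 * sfm_insert_free() with the "bigger rest" split flag set. */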

/* init malloc and return a sfm_block */
struct sfm_block* sfm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct sfm_block* qm;
	unsigned long init_overhead;
	int r;
#ifdef SFM_LOCK_PER_BUCKET
	int i;
#endif

	/* make address and size multiple of 8 */
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
			SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
	DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
			SF_HASH_SIZE, (long)sizeof(struct sfm_block));
	DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);
	init_overhead=INIT_OVERHEAD;
	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!! */
		return 0;
	}
	end=start+size;
	qm=(struct sfm_block*)start;
	memset(qm, 0, sizeof(struct sfm_block));
	qm->size=size;
	size-=init_overhead;
	qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
	qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
	/* init the initial fragment */
	qm->first_frag->size=size;
	qm->first_frag->id=(unsigned long)-1; /* not in a pool */
	qm->last_frag->size=0;
#ifdef DBG_F_MALLOC
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag->check=END_CHECK_PATTERN1;
#endif
	/* link the initial fragment into the free list */
	sfm_insert_free(qm, qm->first_frag, 0);
	sfm_max_hash=GET_HASH(size);
	/* init locks */
	if (lock_init(&qm->get_and_split)==0)
		goto error;
#ifdef SFM_ONE_LOCK
	if (lock_init(&qm->lock)==0){
		lock_destroy(&qm->get_and_split);
		goto error;
	}
	for (r=0; r<SFM_POOLS_NO; r++){
		if (lock_init(&qm->pool[r].lock)==0){
			for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
			lock_destroy(&qm->lock);
			lock_destroy(&qm->get_and_split);
			goto error;
		}
	}
#elif defined(SFM_LOCK_PER_BUCKET)
	for (r=0; r<SF_HASH_SIZE; r++)
		if (lock_init(&qm->free_hash[r].lock)==0){
			for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
			lock_destroy(&qm->get_and_split);
			goto error;
		}
	for (i=0; i<SFM_POOLS_NO; i++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++)
			if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
				for(;r>0; r--)
					lock_destroy(&qm->pool[i].pool_hash[r-1].lock);
				for(; i>0; i--){
					for (r=0; r<SF_HASH_POOL_SIZE; r++)
						lock_destroy(&qm->pool[i-1].pool_hash[r].lock);
				}
				for (r=0; r<SF_HASH_SIZE; r++)
					lock_destroy(&qm->free_hash[r].lock);
				lock_destroy(&qm->get_and_split);
				goto error;
			}
	}
#endif
	qm->is_init=1;
	return qm;
error:
	return 0;
}
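
/* Initialization sketch (illustrative only; assumes the shared region is
 * obtained elsewhere, e.g. with an anonymous shared mmap() before fork(),
 * so that all children see the same block; sfm_example_init is a
 * hypothetical helper, not part of this file's API): */
#if 0
#include <sys/mman.h>

static struct sfm_block* sfm_example_init(unsigned long size)
{
	void* region;

	region=mmap(0, size, PROT_READ|PROT_WRITE,
				MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	if (region==MAP_FAILED)
		return 0;
	return sfm_malloc_init((char*)region, size); /* returns 0 on failure */
}
#endif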

/* cleanup */
void sfm_malloc_destroy(struct sfm_block* qm)
{
	int r, i;

	/* destroy all the locks */
	if (!qm || !qm->is_init)
		return; /* nothing to do */
	lock_destroy(&qm->get_and_split);
#ifdef SFM_ONE_LOCK
	lock_destroy(&qm->lock);
	for (r=0; r<SFM_POOLS_NO; r++){
		lock_destroy(&qm->pool[r].lock);
	}
#elif defined(SFM_LOCK_PER_BUCKET)
	for (r=0; r<SF_HASH_SIZE; r++)
		lock_destroy(&qm->free_hash[r].lock);
	for (i=0; i<SFM_POOLS_NO; i++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++)
			lock_destroy(&qm->pool[i].pool_hash[r].lock);
	}
#endif
	qm->is_init=0;
}

/* returns the next set bit in the bitmap, starting at b;
 * if b is set, returns b; if no set bit is found, returns BITMAP_BITS */
static inline unsigned long _next_set_bit(unsigned long b,
											unsigned long* bitmap)
{
	/* test the bound before shifting (shifting by >= the word width is
	 * undefined behaviour) */
	for (; b<BITMAP_BITS && !((1UL<<b)& *bitmap); b++);
	return b;
}

/* returns the start of block b and sets *end
 * (handles also the "rest" block at the end) */
static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
{
	unsigned long s;

	if ((unlikely(b>=BITMAP_BITS))){
		s=BIT_TO_HASH(BITMAP_BITS);
		*end=SF_HASH_POOL_SIZE; /* last, possible rest block */
	}else{
		s=BIT_TO_HASH(b);
		*end=s+BITMAP_BLOCK_SIZE;
	}
	return s;
}
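
/* Worked example (illustrative only; with the usual defaults
 * SF_MALLOC_OPTIMIZE/SF_ROUNDTO==256 and a 64-bit bitmap,
 * BITMAP_BLOCK_SIZE==4): bit b covers hashes [4*b, 4*b+4). If only bit 3
 * is set, _next_set_bit(0, &bm) returns 3 and _hash_range(3, &eob)
 * returns 12 with eob==16, so the lookup functions below scan just
 * hashes 12..15 instead of the whole table. */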

#ifdef DBG_F_MALLOC
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
						struct sfm_pool* pool, int hash, unsigned long size,
						const char* file, const char* func, unsigned int line)
#else
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
						struct sfm_pool* pool,
						int hash, unsigned long size)
#endif
{
	int r;
	int next_block;
	struct sfm_frag* volatile* f;
	struct sfm_frag* frag;
	unsigned long b;
	unsigned long eob;

	/* special case for r=hash */
	r=hash;
	f=&pool->pool_hash[r].first;
	/* detach it from the free list */
	if ((frag=frag_pop((struct sfm_frag**)f))==0)
		goto not_found;
found:
	atomic_dec_long((long*)&pool->pool_hash[r].no);
	frag->u.nxt_free=0; /* mark it as 'taken' */
	frag->id=pool_id;
#ifdef DBG_F_MALLOC
	sfm_split_frag(qm, frag, size, file, func, line);
#else
	sfm_split_frag(qm, frag, size);
#endif
	if (&qm->pool[pool_id]==pool)
		atomic_inc_long((long*)&pool->hits);
	return frag;
not_found:
	atomic_inc_long((long*)&pool->pool_hash[r].misses);
	r++;
	b=HASH_BIT_POS(r);
	while(r<SF_HASH_POOL_SIZE){
		b=_next_set_bit(b, &pool->bitmap);
		next_block=_hash_range(b, &eob);
		r=(r<next_block)?next_block:r;
		for (; r<eob; r++){
			f=&pool->pool_hash[r].first;
			if ((frag=frag_pop((struct sfm_frag**)f))!=0)
				goto found;
			atomic_inc_long((long*)&pool->pool_hash[r].misses);
		}
		b++;
	}
	atomic_inc_long((long*)&pool->missed);
	return 0;
}

#ifdef DBG_F_MALLOC
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
						unsigned long size,
						const char* file, const char* func, unsigned int line)
#else
static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
						unsigned long size)
#endif
{
	int r;
	int next_block;
	struct sfm_frag* volatile* f;
	struct sfm_frag* frag;
	unsigned long b;
	unsigned long eob;

	r=hash;
	b=HASH_BIT_POS(r);
	while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
		b=_next_set_bit(b, &qm->bitmap);
		next_block=_hash_range(b, &eob);
		r=(r<next_block)?next_block:r;
		for (; r<eob; r++){
			f=&qm->free_hash[r].first;
			if ((frag=frag_pop((struct sfm_frag**)f))!=0){
				atomic_dec_long((long*)&qm->free_hash[r].no);
				frag->u.nxt_free=0; /* mark it as 'taken' */
				frag->id=pool_id;
#ifdef DBG_F_MALLOC
				sfm_split_frag(qm, frag, size, file, func, line);
#else
				sfm_split_frag(qm, frag, size);
#endif
				return frag;
			}
		}
		b++;
	}
	/* big fragments */
	SFM_BIG_GET_AND_SPLIT_LOCK(qm);
	for (; r<=sfm_max_hash; r++){
		f=&qm->free_hash[r].first;
		if (*f){
			SFM_MAIN_HASH_LOCK(qm, r);
			if (unlikely((*f)==0)){
				/* not found */
				SFM_MAIN_HASH_UNLOCK(qm, r);
				continue;
			}
			for(;(*f); f=&((*f)->u.nxt_free))
				if ((*f)->size>=size){
					/* found, detach it from the free list */
					frag=*f;
					*f=frag->u.nxt_free;
					frag->u.nxt_free=0; /* mark it as 'taken' */
					qm->free_hash[r].no--;
					SFM_MAIN_HASH_UNLOCK(qm, r);
					frag->id=pool_id;
#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, frag, size, file, func, line);
#else
					sfm_split_frag(qm, frag, size);
#endif
					SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
					return frag;
				}
			SFM_MAIN_HASH_UNLOCK(qm, r);
			/* try in a bigger bucket */
		}
	}
	SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
	return 0;
}

#ifdef DBG_F_MALLOC
void* sfm_malloc(struct sfm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* sfm_malloc(struct sfm_block* qm, unsigned long size)
#endif
{
	struct sfm_frag* frag;
	int hash;
	unsigned int i;

#ifdef DBG_F_MALLOC
	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/* size must be a multiple of 8 */
	size=ROUNDUP(size);
/*	if (size>(qm->size-qm->real_used)) return 0; */
	/* check if our pool id is set */
	sfm_fix_pool_id(qm);
	/* search for a suitable free frag */
	if (likely(size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(size);
		/* try first in our pool */
#ifdef DBG_F_MALLOC
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
										file, func, line))!=0))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
			goto found;
		/* really low mem, try in the other pools */
		for (i=(pool_id+1); i<(pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
									file, func, line))!=0)
				goto found;
		}
#else
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
					!=0))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size))!=0))
			goto found;
		/* really low mem, try in the other pools */
		for (i=(pool_id+1); i<(pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
					!=0)
				goto found;
		}
#endif
		/* not found, bad! */
		return 0;
	}else{
		hash=GET_BIG_HASH(size);
#ifdef DBG_F_MALLOC
		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
			return 0; /* not found, bad! */
#else
		if ((frag=main_get_frag(qm, hash, size))==0)
			return 0; /* not found, bad! */
#endif
	}

found:
	/* we found it! */
#ifdef DBG_F_MALLOC
	frag->file=file;
	frag->func=func;
	frag->line=line;
	frag->check=ST_CHECK_PATTERN;
	MDBG("sfm_malloc(%p, %lu) returns address %p\n", qm, size,
			(char*)frag+sizeof(struct sfm_frag));
#endif
	FRAG_MARK_USED(frag); /* mark it as used */
	return (char*)frag+sizeof(struct sfm_frag);
}

#ifdef DBG_F_MALLOC
void sfm_free(struct sfm_block* qm, void* p, const char* file,
				const char* func, unsigned int line)
#else
void sfm_free(struct sfm_block* qm, void* p)
#endif
{
	struct sfm_frag* f;

#ifdef DBG_F_MALLOC
	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
			line);
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (unlikely(p==0)) {
		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
		return;
	}
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
			f->file, f->func, f->line);
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	sfm_insert_free(qm, f, 0);
}
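
/* Round-trip sketch (illustrative only; with DBG_F_MALLOC defined, both
 * calls take extra file/func/line arguments): */
#if 0
static void sfm_example_use(struct sfm_block* qm)
{
	char* p;

	p=sfm_malloc(qm, 100); /* size is rounded up to a SF_ROUNDTO multiple */
	if (p){
		memset(p, 0, 100);
		sfm_free(qm, p); /* goes back to a pool or to the main hash */
	}
}
#endif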

#ifdef DBG_F_MALLOC
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
#endif
{
	struct sfm_frag *f;
	unsigned long orig_size;
	void *ptr;
#ifndef SFM_REALLOC_REMALLOC
	struct sfm_frag *n;
	struct sfm_frag **pf;
	unsigned long diff;
	unsigned long p_id;
	int hash;
	unsigned long n_size;
	struct sfm_pool * pool;
#endif

#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: sfm_realloc: bad pointer %p (out of memory block!)"
				" - aborting\n", p);
		abort();
	}
#endif
	if (size==0) {
		if (p)
#ifdef DBG_F_MALLOC
			sfm_free(qm, p, file, func, line);
#else
			sfm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_F_MALLOC
		return sfm_malloc(qm, size, file, func, line);
#else
		return sfm_malloc(qm, size);
#endif
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	size=ROUNDUP(size);
	orig_size=f->size;
	if (f->size > size){
		/* shrink */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
#else
		sfm_split_frag(qm, f, size);
#endif
	}else if (f->size<size){
		/* grow */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
#ifndef SFM_REALLOC_REMALLOC
/* should set a magic value in the list head and in push/pop:
 * if magic value => lock and wait */
#error LL_MALLOC realloc not finished yet
		diff=size-f->size;
		n=FRAG_NEXT(f);
		if (((char*)n < (char*)qm->last_frag) &&
				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			/* join */
			/* detach n from the free list */
try_again:
			p_id=n->id;
			n_size=n->size;
			if ((unlikely(p_id >=SFM_POOLS_NO))){
				hash=GET_HASH(n_size);
				SFM_MAIN_HASH_LOCK(qm, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either
					 * moved to another pool or has a diff. size */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto try_again;
				}
				pf=&(qm->free_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free)); /* FIXME: slow */
				if (*pf==0){
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							"free list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				qm->free_hash[hash].no--;
				SFM_MAIN_HASH_UNLOCK(qm, hash);
				/* join */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
							"sfm_realloc", line);
#else
					sfm_split_frag(qm, f, size);
#endif
				}
			}else{ /* p_id < SFM_POOLS_NO (=> in a pool) */
				hash=GET_SMALL_HASH(n_size);
				pool=&qm->pool[p_id];
				SFM_POOL_LOCK(pool, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_POOL_UNLOCK(pool, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either
					 * moved to another pool or has a diff. size */
					SFM_POOL_UNLOCK(pool, hash);
					goto try_again;
				}
				pf=&(pool->pool_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free)); /* FIXME: slow */
				if (*pf==0){
					SFM_POOL_UNLOCK(pool, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							"free list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				pool->pool_hash[hash].no--;
				SFM_POOL_UNLOCK(pool, hash);
				/* join */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
							"sfm_realloc", line);
#else
					sfm_split_frag(qm, f, size);
#endif
				}
			}
		}else{
not_found:
			/* could not join => realloc */
#else /* SFM_REALLOC_REMALLOC */
		{
#endif /* SFM_REALLOC_REMALLOC */
#ifdef DBG_F_MALLOC
			ptr=sfm_malloc(qm, size, file, func, line);
#else
			ptr=sfm_malloc(qm, size);
#endif
			if (ptr){
				/* copy, needed by libssl */
				memcpy(ptr, p, orig_size);
#ifdef DBG_F_MALLOC
				sfm_free(qm, p, file, func, line);
#else
				sfm_free(qm, p);
#endif
			}
			p=ptr;
		}
	}else{
		/* do nothing */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n",
				f->size, size);
#endif
	}
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc: returning %p\n", p);
#endif
	return p;
}
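
/* Note (illustrative): with SFM_REALLOC_REMALLOC defined (the default
 * above), growing is sfm_malloc + memcpy + sfm_free, so the returned
 * pointer may differ from the original, while shrinking splits the
 * fragment in place. On failure 0 is returned and the original block is
 * left allocated, so callers should keep the old pointer:
 *     q=sfm_realloc(qm, p, new_size);
 *     if (q) p=q;
 * (q, p and new_size are hypothetical caller variables) */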

void sfm_status(struct sfm_block* qm)
{
	struct sfm_frag* f;
	int i,j;
	int h;
	int unused;
	unsigned long size;
	int k;
	int memlog;
	int mem_summary;

#warning "ll_status doesn't work (might crash if used)"
	memlog=cfg_get(core, core_cfg, memlog);
	mem_summary=cfg_get(core, core_cfg, mem_summary);
	LOG(memlog, "sfm_status (%p):\n", qm);
	if (!qm) return;
	LOG(memlog, " heap size= %ld\n", qm->size);
	if (mem_summary & 16) return;
	LOG(memlog, "dumping free list:\n");
	for(h=0,i=0,size=0;h<=sfm_max_hash;h++){
		SFM_MAIN_HASH_LOCK(qm, h);
		unused=0;
		for (f=qm->free_hash[h].first,j=0; f;
				size+=f->size,f=f->u.nxt_free,i++,j++){
			if (!FRAG_WAS_USED(f)){
				unused++;
#ifdef DBG_F_MALLOC
				LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%ld)\n",
						h, f, (char*)f+sizeof(struct sfm_frag), f->size,
						f->file, f->func, f->line);
#endif
			}
		}
		if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
					" bucket size: %9lu - %9lu (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].first->size
				);
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
					" for hash %3d\n", j, qm->free_hash[h].no, h);
		}
		SFM_MAIN_HASH_UNLOCK(qm, h);
	}
	for (k=0; k<SFM_POOLS_NO; k++){
		for(h=0;h<SF_HASH_POOL_SIZE;h++){
			SFM_POOL_LOCK(&qm->pool[k], h);
			unused=0;
			for (f=qm->pool[k].pool_hash[h].first,j=0; f;
					size+=f->size,f=f->u.nxt_free,i++,j++){
				if (!FRAG_WAS_USED(f)){
					unused++;
#ifdef DBG_F_MALLOC
					LOG(memlog, "[%2d] unused fragm.: hash = %3d, fragment"
							" %p, address %p size %lu, created from %s: "
							"%s(%ld)\n", k,
							h, f, (char*)f+sizeof(struct sfm_frag),
							f->size, f->file, f->func, f->line);
#endif
				}
			}
			if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
						"%5d\n\t\t bucket size: %9lu - %9lu "
						"(first %9lu)\n",
						k, h, j, unused, UN_HASH(h),
						((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)*
							UN_HASH(h),
						qm->pool[k].pool_hash[h].first->size
					);
			if (j!=qm->pool[k].pool_hash[h].no){
				LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
						" count: %d!=%ld for hash %3d\n",
						k, j, qm->pool[k].pool_hash[h].no, h);
			}
			SFM_POOL_UNLOCK(&qm->pool[k], h);
		}
	}
	LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
	LOG(memlog, "-----------------------------\n");
}

/* fills a malloc info structure with info about the block;
 * if a parameter is not supported, it will be filled with 0 */
void sfm_info(struct sfm_block* qm, struct mem_info* info)
{
	int r, k;
	unsigned long total_frags;
	struct sfm_frag* f;

	memset(info, 0, sizeof(*info));
	total_frags=0;
	info->total_size=qm->size;
	info->min_frag=SF_MIN_FRAG_SIZE;
	/* we'll have to compute it all */
	for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
		info->free+=qm->free_hash[r].no*UN_HASH(r);
		total_frags+=qm->free_hash[r].no;
	}
	for(;r<=sfm_max_hash; r++){
		total_frags+=qm->free_hash[r].no;
		SFM_MAIN_HASH_LOCK(qm, r);
		for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
			info->free+=f->size;
		}
		SFM_MAIN_HASH_UNLOCK(qm, r);
	}
	for (k=0; k<SFM_POOLS_NO; k++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++){
			info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
			total_frags+=qm->pool[k].pool_hash[r].no;
		}
	}
	info->real_used=info->total_size-info->free;
	info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
				-FRAG_OVERHEAD;
	info->max_used=0; /* we don't really know */
	info->total_frags=total_frags;
}
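
/* Snapshot sketch (illustrative only; assumes the usual struct mem_info
 * field names from the memory info header): */
#if 0
static void sfm_example_info(struct sfm_block* qm)
{
	struct mem_info mi;

	sfm_info(qm, &mi); /* fills total_size, free, real_used, ... */
	LOG(L_INFO, "used %lu of %lu bytes in %lu free frags\n",
			mi.real_used, mi.total_size, mi.total_frags);
}
#endif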

/* returns how much free memory is available;
 * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
unsigned long sfm_available(struct sfm_block* qm)
{
	/* we don't know how much free memory we have and it's too expensive
	 * to compute it */
	return ((unsigned long)-1);
}

#endif