q_malloc.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910
  1. /*
  2. * Copyright (C) 2001-2003 FhG Fokus
  3. *
  4. * This file is part of sip-router, a free SIP server.
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /*
  19. * History:
  20. * --------
  21. * ????-??-?? created by andrei
  22. * 2003-04-14 more debugging added in DBG_QM_MALLOC mode (andrei)
  23. * 2003-06-29 added qm_realloc (andrei)
  24. * 2004-07-19 fragments book keeping code and support for 64 bits
  25. * memory blocks (64 bits machine & size>=2^32) (andrei)
  26. * GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
  27. * 2004-11-10 support for > 4Gb mem., switched to long (andrei)
  28. * 2005-03-02 added qm_info() (andrei)
  29. * 2005-12-12 fixed realloc shrink real_used & used accounting;
  30. * fixed initial size (andrei)
  31. * 2006-02-03 fixed realloc out of mem. free bug (andrei)
  32. * 2006-04-07 s/DBG/MDBG (andrei)
  33. * 2007-02-23 added fm_available() (andrei)
  34. * 2009-09-28 added fm_sums() (patch from Dragos Vingarzan)
  35. */
  36. /**
  37. * \file
  38. * \brief Simple & fast malloc library
  39. * \ingroup mem
  40. */
  41. #if !defined(q_malloc) && !(defined F_MALLOC)
  42. #define q_malloc
  43. #include <stdlib.h>
  44. #include <string.h>
  45. #include "q_malloc.h"
  46. #include "../dprint.h"
  47. #include "../globals.h"
  48. #include "memdbg.h"
  49. #include "../cfg/cfg.h" /* memlog */
  50. #ifdef MALLOC_STATS
  51. #include "../events.h"
  52. #endif
/*useful macros*/

/* trailer of fragment f: the qm_frag_end that sits right after f's
 * payload ((f)->size bytes past the qm_frag header) */
#define FRAG_END(f)  \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	   (f)->size))

/* header of the fragment physically following f in the pool */
#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	   sizeof(struct qm_frag_end)))

/* header of the fragment physically preceding f: steps back over the
 * previous fragment's trailer, then its payload (trailer->size), then
 * its header */
#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )

/* trailer of the fragment physically preceding f */
#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

/* per-fragment book-keeping cost: one header plus one trailer */
#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

/* rounding helpers; ROUNDTO must be a power of two for the mask to work */
#define ROUNDTO_MASK	(~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)		(((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)

/* finds the hash value for s, s=ROUNDTO multiple;
 * sizes <= QM_MALLOC_OPTIMIZE get one exact bucket per ROUNDTO step,
 * bigger sizes share power-of-two buckets indexed via big_hash_idx() */
#define GET_HASH(s)   ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
							(unsigned long)(s)/ROUNDTO: \
							QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
								QM_MALLOC_OPTIMIZE_FACTOR+1 )

/* inverse of GET_HASH: the smallest size that maps to bucket h */
#define UN_HASH(h)	( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
						(unsigned long)(h)*ROUNDTO: \
						1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
							QM_MALLOC_OPTIMIZE_FACTOR-1)\
					)

/* mark/test used/unused frags: compiled out in this build (empty
 * placeholders keep the call sites in place; FRAG_WAS_USED is always 1) */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */
#define MEM_FRAG_AVOIDANCE
  89. /* computes hash number for big buckets*/
  90. inline static unsigned long big_hash_idx(unsigned long s)
  91. {
  92. int idx;
  93. /* s is rounded => s = k*2^n (ROUNDTO=2^n)
  94. * index= i such that 2^i > s >= 2^(i-1)
  95. *
  96. * => index = number of the first non null bit in s*/
  97. idx=sizeof(long)*8-1;
  98. for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
  99. return idx;
  100. }
#ifdef DBG_QM_MALLOC
/* canary patterns written into each fragment's header and trailer in
 * debug mode; a mismatch later means neighbouring memory was overwritten */
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed

/* Verifies the canaries of fragment f: its start pattern, its end
 * patterns and the trailing patterns of the physically previous fragment
 * (to catch overruns crossing into f). On any mismatch the pool status is
 * dumped and the process aborts -- heap corruption is not survivable. */
static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
{
	/* start-of-fragment canary */
	if (f->check!=ST_CHECK_PATTERN){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"beginning overwritten(%lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				f->check);
		qm_status(qm);
		abort();
	};
	/* end-of-fragment canaries (in the qm_frag_end trailer) */
	if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
		(FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
					" end overwritten(%lx, %lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				FRAG_END(f)->check1, FRAG_END(f)->check2);
		qm_status(qm);
		abort();
	}
	/* trailer canaries of the preceding fragment (skipped for the very
	 * first fragment, which has no predecessor) */
	if ((f>qm->first_frag)&&
			((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
				(PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
		LOG(L_CRIT, "BUG: qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p]!"
					"\n",
				PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
				(char*)f+sizeof(struct qm_frag));
		qm_status(qm);
		abort();
	}
}
#endif
/* Inserts frag into the free list of its size bucket, keeping each bucket
 * sorted by ascending size (so a later first-fit scan is also a best fit
 * within the bucket). The bucket head is a sentinel node, which is why the
 * search loop terminates on the head itself. Increments the bucket's
 * fragment counter; the caller handles the used/real_used accounting. */
static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag* f;
	struct qm_frag* prev;
	int hash;

	hash=GET_HASH(frag->size);
	/* find the first fragment at least as big as frag (insertion point) */
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
			f=f->u.nxt_free){
		if (frag->size <= f->size) break;
	}
	/*insert it here*/
	/* link frag between f's predecessor and f; the back-pointer of a
	 * fragment lives in its trailer (FRAG_END(...)->prev_free) */
	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	frag->u.nxt_free=f;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;
}
/* init malloc and return a qm_block*/
/* Initializes a qm pool inside the caller-supplied buffer
 * [address, address+size). The qm_block control structure lives at the
 * (aligned) start of the buffer itself and is returned as the pool handle.
 * Returns 0 when the buffer is too small once aligned and rounded. */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct qm_block* qm;
	unsigned long init_overhead;
	int h;

	/* make address and size multiple of 8*/
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("qm_malloc_init: QM_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
	DBG("qm_malloc_init: QM_HASH_SIZE=%lu, qm_block size=%lu\n",
			QM_HASH_SIZE, (long)sizeof(struct qm_block));
	DBG("qm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	/* account for the bytes lost to alignment */
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	/* fixed cost: control structure + header/trailer of the one big
	 * initial fragment */
	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	DBG("qm_malloc_init: size= %lu, init_overhead=%lu\n", size, init_overhead);

	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!!*/
		return 0;
	}
	end=start+size;
	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->size=size;
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;
	size-=init_overhead;

	/* pool layout: [qm_block][first_frag ... payload ... last_frag_end] */
	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment*/
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

#ifdef DBG_QM_MALLOC
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag_end->check1=END_CHECK_PATTERN1;
	qm->last_frag_end->check2=END_CHECK_PATTERN2;
#endif
	/* init free_hash* : every bucket starts as an empty circular list
	 * whose head is its own sentinel */
	for (h=0; h<QM_HASH_SIZE;h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;
	}

	/* link initial fragment into the free list*/
	qm_insert_free(qm, qm->first_frag);

	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	  qm->last_frag_end->prev_free=&(qm->free_lst);
	*/

	return qm;
}
/* Unlinks frag from its doubly-linked free list (forward pointers live in
 * the headers, back pointers in the trailers). Does NOT touch the bucket
 * counter -- callers decrement free_hash[...].no themselves. */
static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
{
	struct qm_frag *prev;
	struct qm_frag *next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;
}
/* Searches the free lists for the first fragment of at least `size` bytes,
 * starting at size's own bucket and escalating to bigger buckets. On
 * success stores the bucket index in *h and returns the fragment (still
 * linked into its free list); returns 0 when nothing fits.
 * In DBG mode *count is incremented once per list node visited (search
 * effort statistics reported by qm_malloc). */
#ifdef DBG_QM_MALLOC
static inline struct qm_frag* qm_find_free(struct qm_block* qm,
											unsigned long size,
											int *h,
											unsigned int *count)
#else
static inline struct qm_frag* qm_find_free(struct qm_block* qm,
											unsigned long size,
											int* h)
#endif
{
	int hash;
	struct qm_frag* f;

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
					f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
#ifdef DBG_QM_MALLOC
			*count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
#endif
			/* buckets above QM_MALLOC_OPTIMIZE hold a size range, so the
			 * size still has to be checked per fragment */
			if (f->size>=size){ *h=hash; return f; }
		}
		/*try in a bigger bucket*/
	}
	/* not found */
	return 0;
}
/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already!*/
/* Splits fragment f into a used part of new_size bytes plus a new free
 * fragment built from the remainder, which is inserted back into the free
 * lists. Returns -1 (leaving f untouched) when the remainder would be too
 * small to be worth its own fragment. */
static inline
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	/* anti-fragmentation policy: split only when the leftover is bigger
	 * than the "optimized" bucket range or at least as big as the
	 * requested size itself */
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		/* the new fragment starts right after f's (new) trailer */
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		/* the new fragment's own header+trailer now count as overhead */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
		/* we cannot split this fragment any more */
		return -1;
	}
}
/* Allocates `size` bytes (rounded up to a ROUNDTO multiple) from the pool.
 * Returns a pointer to the payload (right after the fragment header) or 0
 * when no free fragment is big enough. */
#ifdef DBG_QM_MALLOC
void* qm_malloc(struct qm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(struct qm_block* qm, unsigned long size)
#endif
{
	struct qm_frag* f;
	int hash;

#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;

	list_cntr = 0;
	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	/* quick reject: request bigger than the total free space */
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
		/* hand any excess space back as a new free fragment; on failure f
		 * simply stays a bit larger than requested */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		/* account with f->size (which split_frag may have shrunk), not
		 * with the requested size */
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef MALLOC_STATS
		sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
		sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
#endif
#ifdef DBG_QM_MALLOC
		/* record the allocation site for leak reports */
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
	/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
		FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				" -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	/* no fragment big enough */
	return 0;
}
/* Releases allocation p back to the pool. p==0 is tolerated (warning
 * only). In DBG mode out-of-pool pointers and double frees abort unless
 * the mem_safety cfg option is set. When the mem_join cfg option is on,
 * the freed fragment is coalesced with free physical neighbours before
 * being reinserted into the free lists. */
#ifdef DBG_QM_MALLOC
void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
				unsigned int line)
#else
void qm_free(struct qm_block* qm, void* p)
#endif
{
	struct qm_frag* f;
	unsigned long size;
#ifdef MEM_JOIN_FREE
	struct qm_frag* next;
	struct qm_frag* prev;
#endif /* MEM_JOIN_FREE*/

#ifdef DBG_QM_MALLOC
	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
#endif

	if (p==0) {
#ifdef DBG_QM_MALLOC
		LOG(L_WARN, "WARNING:qm_free: free(0) called from %s: %s(%d)\n", file, func, line);
#else
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
#endif
		return;
	}

#ifdef DBG_QM_MALLOC
	/* reject pointers outside the managed pool */
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!)"
				" called from %s: %s(%d) - aborting\n", p, file, func, line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
#endif

	/* recover the fragment header from the payload pointer */
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));

#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	/* double-free detection: a free fragment has a non-zero u.nxt_free */
	if (f->u.is_free){
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer (%p),"
				" called from %s: %s(%d), first free %s: %s(%ld) - aborting\n",
				p, file, func, line, f->file, f->func, f->line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif

	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

#ifdef MALLOC_STATS
	sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
	sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
#endif

#ifdef MEM_JOIN_FREE
	if(unlikely(cfg_get(core, core_cfg, mem_join)!=0)) {
		next=prev=0;
		/* mark this fragment as used (might fall into the middle of joined frags)
		  to give us an extra chance of detecting a double free call (if the joined
		  fragment has not yet been reused) */
		f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
		/* join packets if possible*/
		next=FRAG_NEXT(f);
		/* coalesce with the physically-next fragment if it is free */
		if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
			/* join next packet */
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, next);
#endif
			qm_detach_free(qm, next);
			size+=next->size+FRAG_OVERHEAD;
			/* one header/trailer pair disappears when two frags merge */
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
		}

		/* coalesce with the physically-previous fragment if it is free
		 * (only when f is not the very first fragment) */
		if (f > qm->first_frag){
			prev=FRAG_PREV(f);
			/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
								sizeof(struct qm_frag_end))->size);*/
			if (prev->u.is_free){
				/* join prev packet */
#ifdef DBG_QM_MALLOC
				qm_debug_frag(qm, prev);
#endif
				qm_detach_free(qm, prev);
				size+=prev->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
				/* the merged fragment now starts at prev */
				f=prev;
			}
		}
		f->size=size;
		FRAG_END(f)->size=f->size;
	} /* if cfg_core->mem_join */
#endif /* MEM_JOIN_FREE*/

#ifdef DBG_QM_MALLOC
	/* record the free site (shown on a later double-free report) */
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
}
  455. #ifdef DBG_QM_MALLOC
  456. void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
  457. const char* file, const char* func, unsigned int line)
  458. #else
  459. void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
  460. #endif
  461. {
  462. struct qm_frag* f;
  463. unsigned long diff;
  464. unsigned long orig_size;
  465. struct qm_frag* n;
  466. void* ptr;
  467. #ifdef DBG_QM_MALLOC
  468. MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
  469. file, func, line);
  470. if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
  471. LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
  472. "aborting\n", p);
  473. abort();
  474. }
  475. #endif
  476. if (size==0) {
  477. if (p)
  478. #ifdef DBG_QM_MALLOC
  479. qm_free(qm, p, file, func, line);
  480. #else
  481. qm_free(qm, p);
  482. #endif
  483. return 0;
  484. }
  485. if (p==0)
  486. #ifdef DBG_QM_MALLOC
  487. return qm_malloc(qm, size, file, func, line);
  488. #else
  489. return qm_malloc(qm, size);
  490. #endif
  491. f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
  492. #ifdef DBG_QM_MALLOC
  493. qm_debug_frag(qm, f);
  494. MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
  495. f, f->file, f->func, f->line);
  496. if (f->u.is_free){
  497. LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
  498. "pointer %p , fragment %p -- aborting\n", p, f);
  499. abort();
  500. }
  501. #endif
  502. /* find first acceptable size */
  503. size=ROUNDUP(size);
  504. if (f->size > size){
  505. orig_size=f->size;
  506. /* shrink */
  507. #ifdef DBG_QM_MALLOC
  508. MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
  509. if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
  510. MDBG("qm_realloc : shrinked successful\n");
  511. #else
  512. if(split_frag(qm, f, size)!=0){
  513. #endif
  514. /* update used sizes: freed the splited frag */
  515. /* split frag already adds FRAG_OVERHEAD for the newly created
  516. free frag, so here we only need orig_size-f->size for real used
  517. */
  518. qm->real_used-=(orig_size-f->size);
  519. qm->used-=(orig_size-f->size);
  520. #ifdef MALLOC_STATS
  521. sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
  522. sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
  523. #endif
  524. }
  525. }else if (f->size < size){
  526. /* grow */
  527. #ifdef DBG_QM_MALLOC
  528. MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
  529. #endif
  530. orig_size=f->size;
  531. diff=size-f->size;
  532. n=FRAG_NEXT(f);
  533. if (((char*)n < (char*)qm->last_frag_end) &&
  534. (n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
  535. /* join */
  536. qm_detach_free(qm, n);
  537. qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
  538. f->size+=n->size+FRAG_OVERHEAD;
  539. qm->real_used-=FRAG_OVERHEAD;
  540. FRAG_END(f)->size=f->size;
  541. /* end checks should be ok */
  542. /* split it if necessary */
  543. if (f->size > size ){
  544. #ifdef DBG_QM_MALLOC
  545. split_frag(qm, f, size, file, "fragm. from qm_realloc",
  546. line);
  547. #else
  548. split_frag(qm, f, size);
  549. #endif
  550. }
  551. qm->real_used+=(f->size-orig_size);
  552. qm->used+=(f->size-orig_size);
  553. #ifdef MALLOC_STATS
  554. sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
  555. sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
  556. #endif
  557. }else{
  558. /* could not join => realloc */
  559. #ifdef DBG_QM_MALLOC
  560. ptr=qm_malloc(qm, size, file, func, line);
  561. #else
  562. ptr=qm_malloc(qm, size);
  563. #endif
  564. if (ptr){
  565. /* copy, need by libssl */
  566. memcpy(ptr, p, orig_size);
  567. #ifdef DBG_QM_MALLOC
  568. qm_free(qm, p, file, func, line);
  569. #else
  570. qm_free(qm, p);
  571. #endif
  572. }
  573. p=ptr;
  574. }
  575. }else{
  576. /* do nothing */
  577. #ifdef DBG_QM_MALLOC
  578. MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
  579. f->size, size);
  580. #endif
  581. }
  582. #ifdef DBG_QM_MALLOC
  583. MDBG("qm_realloc: returning %p\n", p);
  584. #endif
  585. return p;
  586. }
  587. void qm_check(struct qm_block* qm)
  588. {
  589. struct qm_frag* f;
  590. long fcount = 0;
  591. int memlog;
  592. memlog=cfg_get(core, core_cfg, memlog);
  593. LOG(memlog, "DEBUG: qm_check()\n");
  594. f = qm->first_frag;
  595. while ((char*)f < (char*)qm->last_frag_end) {
  596. fcount++;
  597. /* check struct qm_frag */
  598. #ifdef DBG_QM_MALLOC
  599. if (f->check!=ST_CHECK_PATTERN){
  600. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  601. "beginning overwritten(%lx)!\n",
  602. f, (char*)f + sizeof(struct qm_frag),
  603. f->check);
  604. qm_status(qm);
  605. abort();
  606. };
  607. #endif
  608. if (f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > qm->first_frag + qm->size) {
  609. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  610. "bad size: %lu (frag end: %p > end of block: %p)\n",
  611. f, (char*)f + sizeof(struct qm_frag) + sizeof(struct qm_frag_end), f->size,
  612. f + sizeof(struct qm_frag) + f->size, qm->first_frag + qm->size);
  613. qm_status(qm);
  614. abort();
  615. }
  616. /* check struct qm_frag_end */
  617. if (FRAG_END(f)->size != f->size) {
  618. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  619. "size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
  620. f, (char*)f + sizeof(struct qm_frag),
  621. f->size, FRAG_END(f)->size);
  622. qm_status(qm);
  623. abort();
  624. }
  625. #ifdef DBG_QM_MALLOC
  626. if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
  627. (FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
  628. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
  629. " end overwritten(%lx, %lx)!\n",
  630. f, (char*)f + sizeof(struct qm_frag),
  631. FRAG_END(f)->check1, FRAG_END(f)->check2);
  632. qm_status(qm);
  633. abort();
  634. }
  635. #endif
  636. f = FRAG_NEXT(f);
  637. }
  638. LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
  639. }
/* Logs a human-readable status report of the pool: the global counters,
 * every allocated fragment (skipped when bit 16 of the mem_summary cfg
 * option is set) and per-bucket free-list statistics; also cross-checks
 * the cached bucket counters against the actual list lengths. */
void qm_status(struct qm_block* qm)
{
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;
	int memlog;
	int mem_summary;

	memlog=cfg_get(core, core_cfg, memlog);
	mem_summary=cfg_get(core, core_cfg, mem_summary);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
	if (!qm) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
			qm->size);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"max used (+overhead)= %lu\n", qm->max_real_used);
	/* bit 16: summary only, skip the detailed per-fragment dumps */
	if (mem_summary & 16) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping all alloc'ed. fragments:\n");
	/* walk all fragments in address order, reporting the busy ones */
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					" %3d. %c address=%p frag=%p size=%lu used=%d\n",
				i,
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					" %s from %s: %s(%ld)\n",
					(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					" start check=%lx, end check= %lx, %lx\n",
					f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		/* j counts this bucket's fragments, i counts them globally */
		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
			if (!FRAG_WAS_USED(f)){
				unused++;
#ifdef DBG_QM_MALLOC
				LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
						"unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
						h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);
#endif
			}
		}
		/* per-bucket summary: count, unused count, bucket size range */
		if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				"hash= %3d. fragments no.: %5d, unused: %5d\n"
				"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
				h, j, unused, UN_HASH(h),
				((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
				qm->free_hash[h].head.u.nxt_free->size
				);
		/* cached counter must match the walked list length */
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
					" for hash %3d\n", j, qm->free_hash[h].no, h);
		}
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"-----------------------------\n");
}
/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
{
	int r;
	long total_frags;

	total_frags=0;
	/* zero everything first so unsupported fields stay 0 */
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	/* total_frags counts only FREE fragments: the sum of all the cached
	 * free-list bucket counters */
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	}
	info->total_frags=total_frags;
}
/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(struct qm_block* qm)
{
	/* total pool size minus payload in use and fragment overhead */
	return qm->size-qm->real_used;
}
  737. #ifdef DBG_QM_MALLOC
  738. typedef struct _mem_counter{
  739. const char *file;
  740. const char *func;
  741. unsigned long line;
  742. unsigned long size;
  743. int count;
  744. struct _mem_counter *next;
  745. } mem_counter;
  746. static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
  747. {
  748. mem_counter *x;
  749. if (!*root) goto make_new;
  750. for(x=*root;x;x=x->next)
  751. if (x->file == f->file && x->func == f->func && x->line == f->line)
  752. return x;
  753. make_new:
  754. x = malloc(sizeof(mem_counter));
  755. x->file = f->file;
  756. x->func = f->func;
  757. x->line = f->line;
  758. x->count = 0;
  759. x->size = 0;
  760. x->next = *root;
  761. *root = x;
  762. return x;
  763. }
  764. void qm_sums(struct qm_block* qm)
  765. {
  766. struct qm_frag* f;
  767. int i;
  768. mem_counter *root, *x;
  769. int memlog;
  770. root=0;
  771. if (!qm) return;
  772. memlog=cfg_get(core, core_cfg, memlog);
  773. LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
  774. "summarizing all alloc'ed. fragments:\n");
  775. for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
  776. f=FRAG_NEXT(f),i++){
  777. if (! f->u.is_free){
  778. x = get_mem_counter(&root,f);
  779. x->count++;
  780. x->size+=f->size;
  781. }
  782. }
  783. x = root;
  784. while(x){
  785. LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
  786. " count=%6d size=%10lu bytes from %s: %s(%ld)\n",
  787. x->count,x->size,
  788. x->file, x->func, x->line
  789. );
  790. root = x->next;
  791. free(x);
  792. x = root;
  793. }
  794. LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
  795. "-----------------------------\n");
  796. }
  797. #endif /* DBG_QM_MALLOC */
  798. #endif