q_malloc.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782
  1. /* $Id$
  2. *
  3. *
  4. * Copyright (C) 2001-2003 FhG Fokus
  5. *
  6. * This file is part of ser, a free SIP server.
  7. *
  8. * ser is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version
  12. *
  13. * For a license to use the ser software under conditions
  14. * other than those described here, or to purchase support for this
  15. * software, please contact iptel.org by e-mail at the following addresses:
  16. * [email protected]
  17. *
  18. * ser is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  26. */
  27. /*
  28. * History:
  29. * --------
  30. * ????-??-?? created by andrei
  31. * 2003-04-14 more debugging added in DBG_QM_MALLOC mode (andrei)
  32. * 2003-06-29 added qm_realloc (andrei)
  33. * 2004-07-19 fragments book keeping code and support for 64 bits
  34. * memory blocks (64 bits machine & size>=2^32) (andrei)
  35. * GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
  36. * 2004-11-10 support for > 4Gb mem., switched to long (andrei)
  37. * 2005-03-02 added qm_info() (andrei)
  38. * 2005-12-12 fixed realloc shrink real_used & used accounting;
  39. * fixed initial size (andrei)
  40. * 2006-02-03 fixed realloc out of mem. free bug (andrei)
  41. * 2006-04-07 s/DBG/MDBG (andrei)
  42. * 2007-02-23 added fm_available() (andrei)
  43. */
  44. #if !defined(q_malloc) && !(defined VQ_MALLOC) && !(defined F_MALLOC)
  45. #define q_malloc
  46. #include <stdlib.h>
  47. #include <string.h>
  48. #include "q_malloc.h"
  49. #include "../dprint.h"
  50. #include "../globals.h"
  51. #include "memdbg.h"
/*useful macros*/

/* footer (struct qm_frag_end) that immediately follows fragment f's payload */
#define FRAG_END(f)  \
	((struct qm_frag_end*)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))

/* next fragment in memory order: skip f's header, payload and footer */
#define FRAG_NEXT(f) \
	((struct qm_frag*)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))

/* previous fragment in memory order, located via the footer that ends
 * right before f (the footer stores the previous fragment's size) */
#define FRAG_PREV(f) \
	( (struct qm_frag*) ( ((char*)(f)-sizeof(struct qm_frag_end))- \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))->size- \
	sizeof(struct qm_frag) ) )

/* footer of the fragment that precedes f */
#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))

/* per-fragment bookkeeping cost: header + footer */
#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

/* NOTE: these masks require ROUNDTO to be a power of 2
 * (see big_hash_idx: "s is rounded => s = k*2^n (ROUNDTO=2^n)") */
#define ROUNDTO_MASK	(~((unsigned long)ROUNDTO-1))
#define ROUNDUP(s)		(((s)+(ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)

/*
#define ROUNDUP(s)		(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
*/

/* finds the hash value for s, s=ROUNDTO multiple*/
/* sizes <= QM_MALLOC_OPTIMIZE map to a bucket per distinct (rounded) size;
 * larger sizes share logarithmic, power-of-2 buckets via big_hash_idx() */
#define GET_HASH(s)   ( ((unsigned long)(s)<=QM_MALLOC_OPTIMIZE)?\
						(unsigned long)(s)/ROUNDTO: \
						QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
							QM_MALLOC_OPTIMIZE_FACTOR+1 )

/* inverse of GET_HASH: the smallest size that maps into bucket h */
#define UN_HASH(h)	( ((unsigned long)(h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?\
						(unsigned long)(h)*ROUNDTO: \
						1UL<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
							QM_MALLOC_OPTIMIZE_FACTOR-1)\
					)

/* mark/test used/unused frags */
/* compiled-out placeholders: per-fragment "was used" tracking is disabled,
 * FRAG_WAS_USED always reports 1 */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)

/* other frag related defines:
 * MEM_COALESCE_FRAGS
 * MEM_FRAG_AVOIDANCE
 */

/* when defined, split_frag() only splits when the residue fragment is big
 * enough to be worth keeping (see split_frag) */
#define MEM_FRAG_AVOIDANCE
  92. /* computes hash number for big buckets*/
  93. inline static unsigned long big_hash_idx(unsigned long s)
  94. {
  95. int idx;
  96. /* s is rounded => s = k*2^n (ROUNDTO=2^n)
  97. * index= i such that 2^i > s >= 2^(i-1)
  98. *
  99. * => index = number of the first non null bit in s*/
  100. idx=sizeof(long)*8-1;
  101. for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
  102. return idx;
  103. }
#ifdef DBG_QM_MALLOC
/* canary patterns written into each fragment's header/footer so that
 * buffer over- and under-runs can be detected at malloc/free time */
#define ST_CHECK_PATTERN   0xf0f0f0f0
#define END_CHECK_PATTERN1 0xc0c0c0c0
#define END_CHECK_PATTERN2 0xabcdefed

/* Verifies the canaries of fragment f (header check, own footer, and the
 * footer of the preceding fragment). On any mismatch it dumps the block
 * status and aborts the process — corruption is considered fatal. */
static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
{
	/* header canary: catches underruns of f's payload / overruns of the
	 * previous payload's bookkeeping */
	if (f->check!=ST_CHECK_PATTERN){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"beginning overwritten(%lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				f->check);
		qm_status(qm);
		abort();
	};
	/* footer canaries: catch overruns of f's payload */
	if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
		(FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
		LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
				" end overwritten(%lx, %lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				FRAG_END(f)->check1, FRAG_END(f)->check2);
		qm_status(qm);
		abort();
	}
	/* previous fragment's footer (only if f is not the first fragment):
	 * catches underruns that clobbered the preceding footer */
	if ((f>qm->first_frag)&&
		((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
			(PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
		LOG(L_CRIT, "BUG: qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p]!"
				"\n",
				PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
				(char*)f+sizeof(struct qm_frag));
		qm_status(qm);
		abort();
	}
}
#endif
  139. static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
  140. {
  141. struct qm_frag* f;
  142. struct qm_frag* prev;
  143. int hash;
  144. hash=GET_HASH(frag->size);
  145. for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
  146. f=f->u.nxt_free){
  147. if (frag->size <= f->size) break;
  148. }
  149. /*insert it here*/
  150. prev=FRAG_END(f)->prev_free;
  151. prev->u.nxt_free=frag;
  152. FRAG_END(frag)->prev_free=prev;
  153. frag->u.nxt_free=f;
  154. FRAG_END(f)->prev_free=frag;
  155. qm->free_hash[hash].no++;
  156. }
/* init malloc and return a qm_block*/
/* Lays a qm_block bookkeeping structure over the caller-supplied memory
 * region [address, address+size) and creates one big free fragment
 * covering the rest. Returns the initialized block, or 0 when the region
 * is too small (or shrinks below the minimum after alignment). */
struct qm_block* qm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct qm_block* qm;
	unsigned long init_overhead;
	int h;

	/* make address and size multiple of 8*/
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("qm_malloc_init: QM_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
	DBG("qm_malloc_init: QM_HASH_SIZE=%lu, qm_block size=%lu\n",
			QM_HASH_SIZE, (long)sizeof(struct qm_block));
	DBG("qm_malloc_init(%p, %lu), start=%p\n", address, size, start);
	/* account for the bytes lost to aligning the start address */
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);
	/* overhead = block header + header/footer of the initial fragment */
	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	DBG("qm_malloc_init: size= %lu, init_overhead=%lu\n", size, init_overhead);
	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!!*/
		return 0;
	}
	end=start+size;
	qm=(struct qm_block*)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->size=size;
	/* real_used tracks used payload + all bookkeeping overhead */
	qm->real_used=init_overhead;
	qm->max_real_used=qm->real_used;
	size-=init_overhead;
	/* first fragment starts right after the (aligned) block header;
	 * last_frag_end is the footer sitting at the very end of the region */
	qm->first_frag=(struct qm_frag*)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
	/* init initial fragment*/
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;
#ifdef DBG_QM_MALLOC
	/* seed the debug canaries of the initial fragment */
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag_end->check1=END_CHECK_PATTERN1;
	qm->last_frag_end->check2=END_CHECK_PATTERN2;
#endif
	/* init free_hash* : every bucket's head/tail point at the head
	 * sentinel, i.e. all lists start empty */
	for (h=0; h<QM_HASH_SIZE;h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;
	}
	/* link initial fragment into the free list*/
	qm_insert_free(qm, qm->first_frag);
	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	  qm->last_frag_end->prev_free=&(qm->free_lst);
	*/
	return qm;
}
  215. static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
  216. {
  217. struct qm_frag *prev;
  218. struct qm_frag *next;
  219. prev=FRAG_END(frag)->prev_free;
  220. next=frag->u.nxt_free;
  221. prev->u.nxt_free=next;
  222. FRAG_END(next)->prev_free=prev;
  223. }
  224. #ifdef DBG_QM_MALLOC
  225. static inline struct qm_frag* qm_find_free(struct qm_block* qm,
  226. unsigned long size,
  227. int *h,
  228. unsigned int *count)
  229. #else
  230. static inline struct qm_frag* qm_find_free(struct qm_block* qm,
  231. unsigned long size,
  232. int* h)
  233. #endif
  234. {
  235. int hash;
  236. struct qm_frag* f;
  237. for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
  238. for (f=qm->free_hash[hash].head.u.nxt_free;
  239. f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
  240. #ifdef DBG_QM_MALLOC
  241. *count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
  242. #endif
  243. if (f->size>=size){ *h=hash; return f; }
  244. }
  245. /*try in a bigger bucket*/
  246. }
  247. /* not found */
  248. return 0;
  249. }
/* returns 0 on success, -1 on error;
 * new_size < size & rounded-up already!*/
/* Shrinks fragment f to new_size and turns the tail into a new free
 * fragment, which is inserted into the free lists. On success it also
 * adds the new fragment's FRAG_OVERHEAD to qm->real_used (callers rely
 * on this when adjusting their own accounting). Returns -1 without
 * touching anything when the residue would be too small to track. */
static inline
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	/* anti-fragmentation policy: only split when the residue is either
	 * bigger than the "optimize" threshold or at least as big as the
	 * requested size itself */
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		/* the new fragment starts right after f's (new) footer */
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		/* one more fragment header/footer now exists in the block */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
		/* we cannot split this fragment any more */
		return -1;
	}
}
/* Allocates 'size' bytes from the block. Returns a pointer to the payload
 * (just past the fragment header), or 0 when no suitable free fragment
 * exists. The returned size is rounded up to a ROUNDTO multiple; an
 * oversized fragment is split and the remainder returned to the free
 * lists. */
#ifdef DBG_QM_MALLOC
void* qm_malloc(struct qm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(struct qm_block* qm, unsigned long size)
#endif
{
	struct qm_frag* f;
	int hash;

#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;

	list_cntr = 0;
	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	/* quick reject: request cannot possibly fit in the remaining space */
	if (size>(qm->size-qm->real_used)) return 0;
	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return: if the split fails the caller simply
		 * gets a fragment bigger than requested */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		/* account for the (possibly split-adjusted) fragment size */
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		/* record the allocation site for leak/double-free diagnostics */
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				" -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}
  354. #ifdef DBG_QM_MALLOC
  355. void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
  356. unsigned int line)
  357. #else
  358. void qm_free(struct qm_block* qm, void* p)
  359. #endif
  360. {
  361. struct qm_frag* f;
  362. struct qm_frag* prev;
  363. struct qm_frag* next;
  364. unsigned long size;
  365. #ifdef DBG_QM_MALLOC
  366. MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
  367. if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
  368. LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
  369. "aborting\n", p);
  370. abort();
  371. }
  372. #endif
  373. if (p==0) {
  374. LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
  375. return;
  376. }
  377. prev=next=0;
  378. f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
  379. #ifdef DBG_QM_MALLOC
  380. qm_debug_frag(qm, f);
  381. if (f->u.is_free){
  382. LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer,"
  383. " first free: %s: %s(%ld) - aborting\n",
  384. f->file, f->func, f->line);
  385. abort();
  386. }
  387. MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
  388. f, f->file, f->func, f->line);
  389. #endif
  390. size=f->size;
  391. qm->used-=size;
  392. qm->real_used-=size;
  393. #ifdef QM_JOIN_FREE
  394. /* mark this fragment as used (might fall into the middle of joined frags)
  395. to give us an extra change of detecting a double free call (if the joined
  396. fragment has not yet been reused) */
  397. f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
  398. /* join packets if possible*/
  399. next=FRAG_NEXT(f);
  400. if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
  401. /* join */
  402. #ifdef DBG_QM_MALLOC
  403. qm_debug_frag(qm, next);
  404. #endif
  405. qm_detach_free(qm, next);
  406. size+=next->size+FRAG_OVERHEAD;
  407. qm->real_used-=FRAG_OVERHEAD;
  408. qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
  409. }
  410. if (f > qm->first_frag){
  411. prev=FRAG_PREV(f);
  412. /* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
  413. sizeof(struct qm_frag_end))->size);*/
  414. #ifdef DBG_QM_MALLOC
  415. qm_debug_frag(qm, prev);
  416. #endif
  417. if (prev->u.is_free){
  418. /*join*/
  419. qm_detach_free(qm, prev);
  420. size+=prev->size+FRAG_OVERHEAD;
  421. qm->real_used-=FRAG_OVERHEAD;
  422. qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
  423. f=prev;
  424. }
  425. }
  426. f->size=size;
  427. FRAG_END(f)->size=f->size;
  428. #endif /* QM_JOIN_FREE*/
  429. #ifdef DBG_QM_MALLOC
  430. f->file=file;
  431. f->func=func;
  432. f->line=line;
  433. #endif
  434. qm_insert_free(qm, f);
  435. }
  436. #ifdef DBG_QM_MALLOC
  437. void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
  438. const char* file, const char* func, unsigned int line)
  439. #else
  440. void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
  441. #endif
  442. {
  443. struct qm_frag* f;
  444. unsigned long diff;
  445. unsigned long orig_size;
  446. struct qm_frag* n;
  447. void* ptr;
  448. #ifdef DBG_QM_MALLOC
  449. MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
  450. file, func, line);
  451. if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
  452. LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
  453. "aborting\n", p);
  454. abort();
  455. }
  456. #endif
  457. if (size==0) {
  458. if (p)
  459. #ifdef DBG_QM_MALLOC
  460. qm_free(qm, p, file, func, line);
  461. #else
  462. qm_free(qm, p);
  463. #endif
  464. return 0;
  465. }
  466. if (p==0)
  467. #ifdef DBG_QM_MALLOC
  468. return qm_malloc(qm, size, file, func, line);
  469. #else
  470. return qm_malloc(qm, size);
  471. #endif
  472. f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
  473. #ifdef DBG_QM_MALLOC
  474. qm_debug_frag(qm, f);
  475. MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
  476. f, f->file, f->func, f->line);
  477. if (f->u.is_free){
  478. LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
  479. "pointer %p , fragment %p -- aborting\n", p, f);
  480. abort();
  481. }
  482. #endif
  483. /* find first acceptable size */
  484. size=ROUNDUP(size);
  485. if (f->size > size){
  486. orig_size=f->size;
  487. /* shrink */
  488. #ifdef DBG_QM_MALLOC
  489. MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
  490. if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
  491. MDBG("qm_realloc : shrinked successful\n");
  492. #else
  493. if(split_frag(qm, f, size)!=0){
  494. #endif
  495. /* update used sizes: freed the spitted frag */
  496. qm->real_used-=(orig_size-f->size-FRAG_OVERHEAD);
  497. qm->used-=(orig_size-f->size);
  498. }
  499. }else if (f->size < size){
  500. /* grow */
  501. #ifdef DBG_QM_MALLOC
  502. MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
  503. #endif
  504. orig_size=f->size;
  505. diff=size-f->size;
  506. n=FRAG_NEXT(f);
  507. if (((char*)n < (char*)qm->last_frag_end) &&
  508. (n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
  509. /* join */
  510. qm_detach_free(qm, n);
  511. qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
  512. f->size+=n->size+FRAG_OVERHEAD;
  513. qm->real_used-=FRAG_OVERHEAD;
  514. FRAG_END(f)->size=f->size;
  515. /* end checks should be ok */
  516. /* split it if necessary */
  517. if (f->size > size ){
  518. #ifdef DBG_QM_MALLOC
  519. split_frag(qm, f, size, file, "fragm. from qm_realloc",
  520. line);
  521. #else
  522. split_frag(qm, f, size);
  523. #endif
  524. }
  525. qm->real_used+=(f->size-orig_size);
  526. qm->used+=(f->size-orig_size);
  527. }else{
  528. /* could not join => realloc */
  529. #ifdef DBG_QM_MALLOC
  530. ptr=qm_malloc(qm, size, file, func, line);
  531. #else
  532. ptr=qm_malloc(qm, size);
  533. #endif
  534. if (ptr){
  535. /* copy, need by libssl */
  536. memcpy(ptr, p, orig_size);
  537. #ifdef DBG_QM_MALLOC
  538. qm_free(qm, p, file, func, line);
  539. #else
  540. qm_free(qm, p);
  541. #endif
  542. }
  543. p=ptr;
  544. }
  545. }else{
  546. /* do nothing */
  547. #ifdef DBG_QM_MALLOC
  548. MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
  549. f->size, size);
  550. #endif
  551. }
  552. #ifdef DBG_QM_MALLOC
  553. MDBG("qm_realloc: returning %p\n", p);
  554. #endif
  555. return p;
  556. }
  557. void qm_check(struct qm_block* qm)
  558. {
  559. struct qm_frag* f;
  560. long fcount = 0;
  561. LOG(memlog, "DEBUG: qm_check()\n");
  562. f = qm->first_frag;
  563. while ((char*)f < (char*)qm->last_frag_end) {
  564. fcount++;
  565. /* check struct qm_frag */
  566. #ifdef DBG_QM_MALLOC
  567. if (f->check!=ST_CHECK_PATTERN){
  568. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  569. "beginning overwritten(%lx)!\n",
  570. f, (char*)f + sizeof(struct qm_frag),
  571. f->check);
  572. qm_status(qm);
  573. abort();
  574. };
  575. #endif
  576. if (f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > qm->first_frag + qm->size) {
  577. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  578. "bad size: %lu (frag end: %p > end of block: %p)\n",
  579. f, (char*)f + sizeof(struct qm_frag) + sizeof(struct qm_frag_end), f->size,
  580. f + sizeof(struct qm_frag) + f->size, qm->first_frag + qm->size);
  581. qm_status(qm);
  582. abort();
  583. }
  584. /* check struct qm_frag_end */
  585. if (FRAG_END(f)->size != f->size) {
  586. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
  587. "size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
  588. f, (char*)f + sizeof(struct qm_frag),
  589. f->size, FRAG_END(f)->size);
  590. qm_status(qm);
  591. abort();
  592. }
  593. #ifdef DBG_QM_MALLOC
  594. if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
  595. (FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
  596. LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
  597. " end overwritten(%lx, %lx)!\n",
  598. f, (char*)f + sizeof(struct qm_frag),
  599. FRAG_END(f)->check1, FRAG_END(f)->check2);
  600. qm_status(qm);
  601. abort();
  602. }
  603. #endif
  604. f = FRAG_NEXT(f);
  605. }
  606. LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
  607. }
/* Dumps the block's usage statistics, every allocated fragment and the
 * free-list bucket populations to the log; also cross-checks each
 * bucket's counter against the actual list length. Diagnostic only —
 * does not modify the block. */
void qm_status(struct qm_block* qm)
{
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;

	LOG(memlog, "qm_status (%p):\n", qm);
	if (!qm) return;
	LOG(memlog, " heap size= %lu\n", qm->size);
	LOG(memlog, " used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG(memlog, " max used (+overhead)= %lu\n", qm->max_real_used);
	LOG(memlog, "dumping all alloc'ed. fragments:\n");
	/* walk the fragments in memory order; only busy ones are printed
	 * (note: inside this branch is_free is always 0, so the ternary
	 * always yields 'N') */
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LOG(memlog, " %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i,
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LOG(memlog, " %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LOG(memlog, " start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LOG(memlog, "dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		/* j counts this bucket's members; i accumulates across buckets */
		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
			if (!FRAG_WAS_USED(f)){
				unused++;
#ifdef DBG_QM_MALLOC
				LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
					" address %p size %lu, created from %s: %s(%lu)\n",
					h, f, (char*)f+sizeof(struct qm_frag), f->size,
					f->file, f->func, f->line);
#endif
			}
		}
		/* bucket size range: exact-fit buckets span one size, big buckets
		 * span [UN_HASH(h), 2*UN_HASH(h)) */
		if (j) LOG(memlog, "hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		/* consistency check: counter vs. actual list length */
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}
	}
	LOG(memlog, "-----------------------------\n");
}
  664. /* fills a malloc info structure with info about the block
  665. * if a parameter is not supported, it will be filled with 0 */
  666. void qm_info(struct qm_block* qm, struct mem_info* info)
  667. {
  668. int r;
  669. long total_frags;
  670. total_frags=0;
  671. memset(info,0, sizeof(*info));
  672. info->total_size=qm->size;
  673. info->min_frag=MIN_FRAG_SIZE;
  674. info->free=qm->size-qm->real_used;
  675. info->used=qm->used;
  676. info->real_used=qm->real_used;
  677. info->max_used=qm->max_real_used;
  678. for(r=0;r<QM_HASH_SIZE; r++){
  679. total_frags+=qm->free_hash[r].no;
  680. }
  681. info->total_frags=total_frags;
  682. }
/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(struct qm_block* qm)
{
	/* free space = pool size minus (used payload + bookkeeping overhead) */
	return qm->size-qm->real_used;
}
  689. #endif