/*$Id$
 *
 * Memory allocators debugging/test sip-router module.
 *
 * Copyright (C) 2010 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * History:
 * --------
 * 2010-03-10 initial version (andrei)
 */
#include "../../sr_module.h"
#include "../../mem/mem.h"
#include "../../str.h"
#include "../../dprint.h"
#include "../../locking.h"
#include "../../atomic_ops.h"
#include "../../cfg/cfg.h"
#include "../../rpc.h"
#include "../../rand/fastrand.h"
#include "../../timer.h"
#include "../../mod_fix.h"

MODULE_VERSION

static int mt_mem_alloc_f(struct sip_msg*, char*, char*);
static int mt_mem_free_f(struct sip_msg*, char*, char*);
static int mod_init(void);
static void mod_destroy(void);

static cmd_export_t cmds[] = {
    {"mt_mem_alloc", mt_mem_alloc_f, 1, fixup_var_int_1,
        REQUEST_ROUTE|ONREPLY_ROUTE|FAILURE_ROUTE|BRANCH_ROUTE|ONSEND_ROUTE},
    {"mt_mem_free", mt_mem_free_f, 1, fixup_var_int_1,
        REQUEST_ROUTE|ONREPLY_ROUTE|FAILURE_ROUTE|BRANCH_ROUTE|ONSEND_ROUTE},
    {0, 0, 0, 0, 0}
};
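
/* Illustrative script usage (a sketch, not part of this file): with the module
 * loaded, each exported function takes a single integer (or integer variable)
 * parameter, e.g. in a request route:
 *     mt_mem_alloc("4096");   # leak 4096 bytes of shared memory
 *     mt_mem_free("4096");    # free at least 4096 previously leaked bytes
 */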

struct cfg_group_malloc_test {
    int check_content;
    int realloc_p; /* realloc probability */
};

static struct cfg_group_malloc_test default_mt_cfg = {
    0, /* check_content, off by default */
    0  /* realloc probability, 0 by default */
};

static void* mt_cfg = &default_mt_cfg;

static cfg_def_t malloc_test_cfg_def[] = {
    {"check_content", CFG_VAR_INT | CFG_ATOMIC, 0, 1, 0, 0,
        "check if allocated memory was overwritten by filling it with "
        "a special pattern and checking it on free."},
    {"realloc_p", CFG_VAR_INT | CFG_ATOMIC, 0, 90, 0, 0,
        "realloc probability in percent. During tests and mem_rnd_alloc,"
        " realloc_p percent of the allocations will be made by realloc'ing"
        " an existing chunk. The maximum value is limited to 90, to avoid"
        " very long mem_rnd_alloc runs (a realloc might also free memory)."},
    {0, 0, 0, 0, 0, 0}
};
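
/* These values can be changed at runtime through the core configuration
 * framework. A sketch, assuming the cfg RPC interface is available (e.g. via
 * the cfg_rpc module and an RPC client such as sercmd):
 *     sercmd cfg.set_now_int malloc_test check_content 1
 *     sercmd cfg.set_now_int malloc_test realloc_p 50
 */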

static rpc_export_t mt_rpc[];

static param_export_t params[] = {
    {"check_content", PARAM_INT, &default_mt_cfg.check_content},
    {0, 0, 0}
};

struct module_exports exports = {
    "malloc_test",
    cmds,
    mt_rpc,      /* RPC methods */
    params,
    mod_init,    /* module initialization function */
    0,           /* response function */
    mod_destroy, /* destroy function */
    0,           /* oncancel function */
    0            /* per-child init function */
};
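
/* Bookkeeping for the leaked/tracked memory: every allocation made by this
 * module is recorded as a struct mem_chunk in the shared alloc_lst list, and
 * every timer driven test is recorded as a struct rnd_time_test in rndt_lst. */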

#define MC_F_CHECK_CONTENTS 1

struct mem_chunk {
    struct mem_chunk* next;
    void* addr;
    unsigned long size;
    unsigned long flags;
};

struct allocated_list {
    struct mem_chunk* chunks;
    gen_lock_t lock;
    volatile long size;
    volatile int no;
};

struct allocated_list* alloc_lst;

struct rnd_time_test {
    unsigned long min;
    unsigned long max;
    unsigned long total;
    unsigned long crt;
    ticks_t min_intvrl;
    ticks_t max_intvrl;
    ticks_t stop_time;
    ticks_t start_time;
    unsigned long calls;
    unsigned long reallocs;
    unsigned int errs;
    unsigned int overfl;
    struct rnd_time_test* next;
    struct timer_ln timer;
    int id;
};

struct rnd_time_test_lst {
    struct rnd_time_test* tests;
    gen_lock_t lock;
    volatile int last_id;
};

struct rnd_time_test_lst* rndt_lst;

static unsigned long mem_unleak(unsigned long size);
static void mem_destroy_all_tests();
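
/** module init.
 * Declares the malloc_test config group and allocates the two shared memory
 * lists (alloc_lst and rndt_lst) together with their locks.
 * @return 0 on success, -1 on error.
 */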
static int mod_init(void)
{
    WARN("This is a test/debugging module, don't use it in production\n");
    /* declare configuration */
    if (cfg_declare("malloc_test", malloc_test_cfg_def, &default_mt_cfg,
                    cfg_sizeof(malloc_test), &mt_cfg)) {
        ERR("failed to register the configuration\n");
        goto error;
    }
    alloc_lst = shm_malloc(sizeof(*alloc_lst));
    if (alloc_lst == 0)
        goto error;
    alloc_lst->chunks = 0;
    atomic_set_long(&alloc_lst->size, 0);
    atomic_set_int(&alloc_lst->no, 0);
    if (lock_init(&alloc_lst->lock) == 0)
        goto error;
    rndt_lst = shm_malloc(sizeof(*rndt_lst));
    if (rndt_lst == 0)
        goto error;
    rndt_lst->tests = 0;
    atomic_set_int(&rndt_lst->last_id, 0);
    if (lock_init(&rndt_lst->lock) == 0)
        goto error;
    return 0;
error:
    return -1;
}

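/** module destroy.
 * Stops and frees all running tests and releases all the tracked memory,
 * together with the shared bookkeeping lists.
 */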
static void mod_destroy()
{
    if (rndt_lst) {
        mem_destroy_all_tests();
        lock_destroy(&rndt_lst->lock);
        shm_free(rndt_lst);
        rndt_lst = 0;
    }
    if (alloc_lst) {
        mem_unleak(-1);
        lock_destroy(&alloc_lst->lock);
        shm_free(alloc_lst);
        alloc_lst = 0;
    }
}

/** record a memory chunk list entry.
 * @param addr - address of the newly allocated memory
 * @param size - size
 * @return 0 on success, -1 on error (no more mem).
 */
static int mem_track(void* addr, unsigned long size)
{
    struct mem_chunk* mc;
    unsigned long* d;
    unsigned long r, i;

    mc = shm_malloc(sizeof(*mc));
    if (mc == 0) goto error;
    mc->addr = addr;
    mc->size = size;
    mc->flags = 0;
    if (cfg_get(malloc_test, mt_cfg, check_content)) {
        mc->flags |= MC_F_CHECK_CONTENTS;
        d = addr;
        for (r = 0; r < size/sizeof(*d); r++) {
            d[r] = ~(unsigned long)&d[r];
        }
        for (i = 0; i < size % sizeof(*d); i++) {
            ((char*)&d[r])[i] = ~((unsigned long)&d[r] >> i*8);
        }
    }
    lock_get(&alloc_lst->lock);
    mc->next = alloc_lst->chunks;
    alloc_lst->chunks = mc;
    lock_release(&alloc_lst->lock);
    atomic_add_long(&alloc_lst->size, size);
    atomic_inc_int(&alloc_lst->no);
    return 0;
error:
    return -1;
}

/** allocate memory.
 * Allocates memory, but keeps track of it, so that mem_unleak() can
 * free it.
 * @param size - how many bytes
 * @return 0 on success, -1 on error
 */
static int mem_leak(unsigned long size)
{
    void* d;

    d = shm_malloc(size);
    if (d) {
        if (mem_track(d, size) < 0) {
            shm_free(d);
        } else
            return 0;
    }
    return -1;
}

/* realloc a chunk, unsafe (requires external locking) version.
 * @return 0 on success, -1 on error
 */
static int _mem_chunk_realloc_unsafe(struct mem_chunk* c, unsigned long size)
{
    unsigned long* d;
    unsigned long r, i;

    d = shm_realloc(c->addr, size);
    if (d) {
        if (cfg_get(malloc_test, mt_cfg, check_content) &&
                c->flags & MC_F_CHECK_CONTENTS) {
            /* re-fill the test patterns (the address might have changed
               and they depend on it) */
            for (r = 0; r < size/sizeof(*d); r++) {
                d[r] = ~(unsigned long)&d[r];
            }
            for (i = 0; i < size % sizeof(*d); i++) {
                ((char*)&d[r])[i] = ~((unsigned long)&d[r] >> i*8);
            }
        }
        c->addr = d;
        c->size = size;
        return 0;
    }
    return -1;
}

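/** free a tracked chunk's memory.
 * If content checking is enabled, it first verifies that the chunk still
 * contains the fill pattern written by mem_track() (each word holds the
 * bitwise complement of its own address) and logs the number of corrupted
 * words/bytes found.
 */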
static void mem_chunk_free(struct mem_chunk* c)
{
    unsigned long* d;
    unsigned long r, i;
    int err;

    if (cfg_get(malloc_test, mt_cfg, check_content) &&
            c->flags & MC_F_CHECK_CONTENTS) {
        d = c->addr;
        err = 0;
        for (r = 0; r < c->size/sizeof(*d); r++) {
            if (d[r] != ~(unsigned long)&d[r])
                err++;
            d[r] = (unsigned long)&d[r]; /* fill it with something else */
        }
        for (i = 0; i < c->size % sizeof(*d); i++) {
            if (((unsigned char*)&d[r])[i] !=
                    (unsigned char)~((unsigned long)&d[r] >> i*8))
                err++;
            ((char*)&d[r])[i] = (unsigned char)((unsigned long)&d[r] >> i*8);
        }
        if (err)
            ERR("%d errors while checking %ld bytes at %p\n", err, c->size, d);
    }
    shm_free(c->addr);
    c->addr = 0;
    c->flags = 0;
}

/** free memory.
 * Frees previously allocated memory chunks until at least size bytes are
 * released. Use -1 to free everything.
 * @param size - free at least size bytes.
 * @return bytes_freed (>=0)
 */
static unsigned long mem_unleak(unsigned long size)
{
    struct mem_chunk** mc;
    struct mem_chunk* t;
    struct mem_chunk** min_chunk;
    unsigned long freed;
    unsigned int no;

    freed = 0;
    no = 0;
    min_chunk = 0;
    lock_get(&alloc_lst->lock);
    if (size >= atomic_get_long(&alloc_lst->size)) {
        /* free all */
        for (mc = &alloc_lst->chunks; *mc; ) {
            t = *mc;
            mem_chunk_free(t);
            freed += t->size;
            no++;
            *mc = t->next;
            shm_free(t);
        }
        alloc_lst->chunks = 0;
    } else {
        /* free at least size bytes, trying smaller chunks first */
        for (mc = &alloc_lst->chunks; *mc && (freed < size);) {
            if ((*mc)->size <= (size - freed)) {
                t = *mc;
                mem_chunk_free(t);
                freed += t->size;
                no++;
                *mc = t->next;
                shm_free(t);
                continue;
            } else if (min_chunk == 0 || (*min_chunk)->size > (*mc)->size) {
                /* find minimum remaining chunk */
                min_chunk = mc;
            }
            mc = &(*mc)->next;
        }
        if (size > freed && min_chunk) {
            mc = min_chunk;
            t = *mc;
            mem_chunk_free(t);
            freed += t->size;
            no++;
            *mc = (*mc)->next;
            shm_free(t);
        }
    }
    lock_release(&alloc_lst->lock);
    atomic_add_long(&alloc_lst->size, -freed);
    atomic_add_int(&alloc_lst->no, -no);
    return freed;
}

/** realloc a randomly chosen chunk to size bytes.
 * Randomly picks a previously allocated chunk and realloc's it to the new
 * size.
 * @param size - new size.
 * @param diff - filled with the difference: >= 0 means more bytes were
 *               allocated, < 0 means bytes were freed.
 * @return >= 0 on success, -1 on error / not found
 *         (an empty list is a valid error reason)
 */
static int mem_rnd_realloc(unsigned long size, long* diff)
{
    struct mem_chunk* t;
    int ret;
    int target, i;

    *diff = 0;
    ret = -1;
    lock_get(&alloc_lst->lock);
    target = fastrand_max(atomic_get_int(&alloc_lst->no));
    for (t = alloc_lst->chunks, i = 0; t; t = t->next, i++) {
        if (target == i) {
            *diff = (long)size - (long)t->size;
            if ((ret = _mem_chunk_realloc_unsafe(t, size)) < 0)
                *diff = 0;
            break;
        }
    }
    lock_release(&alloc_lst->lock);
    atomic_add_long(&alloc_lst->size, *diff);
    return ret;
}

#define MIN_ulong(a, b) \
    (unsigned long)((unsigned long)(a) < (unsigned long)(b) ? (a) : (b))

/*
 * Randomly allocate total_size bytes, in chunks of random size between
 * min & max. max - min should be smaller than 4G.
 * @return < 0 if there were some alloc errors, 0 on success.
 */
static int mem_rnd_leak(unsigned long min, unsigned long max,
                        unsigned long total_size)
{
    unsigned long size;
    unsigned long crt_size, crt_min;
    long diff;
    int err, p;

    size = total_size;
    err = 0;
    while (size) {
        crt_min = MIN_ulong(min, size);
        crt_size = fastrand_max(MIN_ulong(max, size) - crt_min) + crt_min;
        p = cfg_get(malloc_test, mt_cfg, realloc_p);
        if (p && ((fastrand_max(99) + 1) <= p)) {
            if (mem_rnd_realloc(crt_size, &diff) == 0) {
                size -= diff;
                continue;
            } /* else fallback to normal alloc. */
        }
        size -= crt_size;
        err += mem_leak(crt_size) < 0;
    }
    return -err;
}

/* test timer */
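/* Timer handler for a running rnd_time_test: on every firing it allocates (or,
 * with probability realloc_p, reallocs) a randomly sized chunk, frees
 * everything once tst->total is exceeded, and returns the next random interval
 * in ticks (0 stops the timer once the test time has expired). */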
static ticks_t tst_timer(ticks_t ticks, struct timer_ln* tl, void* data)
{
    struct rnd_time_test* tst;
    ticks_t next_int;
    ticks_t max_int;
    unsigned long crt_size, crt_min, remaining;
    long diff;
    int p;

    tst = data;
    next_int = 0;
    max_int = 0;
    if (tst->total <= tst->crt) {
        mem_unleak(tst->crt);
        tst->crt = 0;
        tst->overfl++;
    }
    remaining = tst->total - tst->crt;
    crt_min = MIN_ulong(tst->min, remaining);
    crt_size = fastrand_max(MIN_ulong(tst->max, remaining) - crt_min) +
                crt_min;
    p = cfg_get(malloc_test, mt_cfg, realloc_p);
    if (p && ((fastrand_max(99) + 1) <= p)) {
        if (mem_rnd_realloc(crt_size, &diff) == 0) {
            tst->crt += diff;
            tst->reallocs++;
            goto skip_alloc;
        }
    }
    if (mem_leak(crt_size) >= 0)
        tst->crt += crt_size;
    else
        tst->errs++;
skip_alloc:
    tst->calls++;
    if (TICKS_GT(tst->stop_time, ticks)) {
        next_int = fastrand_max(tst->max_intvrl - tst->min_intvrl) +
                    tst->min_intvrl;
        max_int = tst->stop_time - ticks;
    } else {
        /* stop test */
        WARN("test %d time expired, stopping"
             " (%d s runtime, %ld calls, %d overfl, %d errors,"
             " crt %ld bytes)\n",
             tst->id, TICKS_TO_S(ticks - tst->start_time),
             tst->calls, tst->overfl, tst->errs, tst->crt);
        mem_unleak(tst->crt);
        /* tst->crt = 0 */;
    }
    /* 0 means stop, so if next_int == 0 => stop */
    return MIN_unsigned(next_int, max_int);
}

/*
 * start a malloc test running for test_time:
 * - randomly, at intervals between min_intvrl and max_intvrl, allocate
 *   a random number of bytes, between min & max.
 * - if total_size is reached, free everything.
 *
 * @return test id (>=0) on success, -1 on error.
 */
static int mem_leak_time_test(unsigned long min, unsigned long max,
                              unsigned long total_size,
                              ticks_t min_intvrl, ticks_t max_intvrl,
                              ticks_t test_time)
{
    struct rnd_time_test* tst;
    struct rnd_time_test* l;
    ticks_t first_int;
    int id;

    tst = shm_malloc(sizeof(*tst));
    if (tst == 0)
        goto error;
    memset(tst, 0, sizeof(*tst));
    id = tst->id = atomic_add_int(&rndt_lst->last_id, 1);
    tst->min = min;
    tst->max = max;
    tst->total = total_size;
    tst->min_intvrl = min_intvrl;
    tst->max_intvrl = max_intvrl;
    tst->start_time = get_ticks_raw();
    tst->stop_time = get_ticks_raw() + test_time;
    first_int = fastrand_max(max_intvrl - min_intvrl) + min_intvrl;
    timer_init(&tst->timer, tst_timer, tst, 0);
    lock_get(&rndt_lst->lock);
    tst->next = rndt_lst->tests;
    rndt_lst->tests = tst;
    lock_release(&rndt_lst->lock);
    if (timer_add(&tst->timer, MIN_unsigned(first_int, test_time)) < 0)
        goto error;
    return id;
error:
    if (tst) {
        /* unlink tst if it was already inserted into the test list */
        lock_get(&rndt_lst->lock);
        if (rndt_lst->tests == tst)
            rndt_lst->tests = tst->next;
        else
            for (l = rndt_lst->tests; l; l = l->next)
                if (l->next == tst) {
                    l->next = tst->next;
                    break;
                }
        lock_release(&rndt_lst->lock);
        shm_free(tst);
    }
    return -1;
}

static int is_mem_test_stopped(struct rnd_time_test* tst)
{
    return TICKS_LE(tst->stop_time, get_ticks_raw());
}

/** stops test tst.
 * @return 0 on success, -1 on error (test already stopped)
 */
static int mem_test_stop_tst(struct rnd_time_test* tst)
{
    if (!is_mem_test_stopped(tst)) {
        if (timer_del(&tst->timer) == 0) {
            tst->stop_time = get_ticks_raw();
            return 0;
        }
    }
    return -1;
}

/** stops test id.
 * @return 0 on success, -1 on error (not found).
 */
static int mem_test_stop(int id)
{
    struct rnd_time_test* tst;

    lock_get(&rndt_lst->lock);
    for (tst = rndt_lst->tests; tst; tst = tst->next)
        if (tst->id == id) {
            mem_test_stop_tst(tst);
            break;
        }
    lock_release(&rndt_lst->lock);
    return -(tst == 0);
}

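/** stops and frees all the tests (running or already stopped). */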
static void mem_destroy_all_tests()
{
    struct rnd_time_test* tst;
    struct rnd_time_test* nxt;

    lock_get(&rndt_lst->lock);
    for (tst = rndt_lst->tests; tst;) {
        nxt = tst->next;
        mem_test_stop_tst(tst);
        shm_free(tst);
        tst = nxt;
    }
    rndt_lst->tests = 0;
    lock_release(&rndt_lst->lock);
}

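/** stops, unlinks and frees the test with the given id.
 * @return 0 on success, -1 on error (not found).
 */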
static int mem_test_destroy(int id)
{
    struct rnd_time_test* tst;
    struct rnd_time_test** crt_lnk;

    lock_get(&rndt_lst->lock);
    for (tst = 0, crt_lnk = &rndt_lst->tests; *crt_lnk;
            crt_lnk = &(*crt_lnk)->next)
        if ((*crt_lnk)->id == id) {
            tst = *crt_lnk;
            mem_test_stop_tst(tst);
            *crt_lnk = tst->next;
            shm_free(tst);
            break;
        }
    lock_release(&rndt_lst->lock);
    return -(tst == 0);
}

/* script functions: */
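/* mt_mem_alloc(size): leak (allocate and track) size bytes of shared memory;
 * returns 1 on success, -1 on error.
 * mt_mem_free(size): free at least size previously leaked bytes (a negative
 * value frees everything); returns the number of bytes freed, or 1 if nothing
 * was freed. */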
static int mt_mem_alloc_f(struct sip_msg* msg, char* sz, char* foo)
{
    int size;

    if (sz == 0 || get_int_fparam(&size, msg, (fparam_t*)sz) < 0)
        return -1;
    return mem_leak(size) >= 0 ? 1 : -1;
}

static int mt_mem_free_f(struct sip_msg* msg, char* sz, char* foo)
{
    int size;
    unsigned long freed;

    size = -1;
    if (sz != 0 && get_int_fparam(&size, msg, (fparam_t*)sz) < 0)
        return -1;
    freed = mem_unleak(size);
    return (freed == 0) ? 1 : freed;
}

/* RPC exports: */

/* helper function: parses an optional b[ytes]|k|m|g parameter into a numeric
   shift value (e.g. b -> 0, k -> 10, ...).
   Returns the bit shift value on success, -1 on error.
*/
static int rpc_get_size_mod(rpc_t* rpc, void* c)
{
    char* m;

    if (rpc->scan(c, "*s", &m) > 0) {
        switch (*m) {
            case 'b':
            case 'B':
                return 0;
            case 'k':
            case 'K':
                return 10;
            case 'm':
            case 'M':
                return 20;
            case 'g':
            case 'G':
                return 30;
            default:
                rpc->fault(c, 500, "bad param use b|k|m|g");
                return -1;
        }
    }
    return 0;
}

static const char* rpc_mt_alloc_doc[2] = {
    "Allocates the specified number of bytes (debugging/test function)."
    " Use b|k|m|g to specify the desired size unit.",
    0
};

static void rpc_mt_alloc(rpc_t* rpc, void* c)
{
    int size;
    int rs;

    if (rpc->scan(c, "d", &size) < 1) {
        return;
    }
    rs = rpc_get_size_mod(rpc, c);
    if (rs < 0)
        /* fault already generated on rpc_get_size_mod() error */
        return;
    if (mem_leak((unsigned long)size << rs) < 0) {
        rpc->fault(c, 400, "memory allocation failed");
    }
    return;
}

static const char* rpc_mt_realloc_doc[2] = {
    "Reallocates the specified number of bytes from a pre-allocated,"
    " randomly selected memory chunk. If no pre-allocated memory"
    " chunks exist, it will fail."
    " Make sure mt.mem_used is non 0 or call mt.mem_alloc prior to calling"
    " this function."
    " Returns the difference in bytes (<0 if bytes were freed, >0 if more"
    " bytes were allocated)."
    " Use b|k|m|g to specify the desired size unit.",
    0
};

static void rpc_mt_realloc(rpc_t* rpc, void* c)
{
    int size;
    int rs;
    long diff;

    if (rpc->scan(c, "d", &size) < 1) {
        return;
    }
    rs = rpc_get_size_mod(rpc, c);
    if (rs < 0)
        /* fault already generated on rpc_get_size_mod() error */
        return;
    if (mem_rnd_realloc((unsigned long)size << rs, &diff) < 0) {
        rpc->fault(c, 400, "memory allocation failed");
        return;
    }
    rpc->add(c, "d", (int)(diff >> rs));
    return;
}

static const char* rpc_mt_free_doc[2] = {
    "Frees the specified number of bytes, previously allocated by one of the"
    " other malloc_test functions (e.g. mt.mem_alloc or the script"
    " mt_mem_alloc). Use b|k|m|g to specify the desired size unit."
    " Returns the number of bytes freed (can be higher or"
    " smaller than the requested size).",
    0
};

static void rpc_mt_free(rpc_t* rpc, void* c)
{
    int size;
    int rs;

    size = -1;
    rs = 0;
    if (rpc->scan(c, "*d", &size) > 0) {
        /* found size, look if a size modifier is present */
        rs = rpc_get_size_mod(rpc, c);
        if (rs < 0)
            /* fault already generated on rpc_get_size_mod() error */
            return;
    }
    rpc->add(c, "d", (int)(mem_unleak((unsigned long)size << rs) >> rs));
    return;
}

static const char* rpc_mt_used_doc[2] = {
    "Returns how many memory chunks and how many bytes are currently"
    " allocated via the malloc_test module functions."
    " Use b|k|m|g to specify the desired size unit.",
    0
};

static void rpc_mt_used(rpc_t* rpc, void* c)
{
    int rs;

    rs = 0;
    rs = rpc_get_size_mod(rpc, c);
    if (rs < 0)
        /* fault already generated on rpc_get_size_mod() error */
        return;
    rpc->add(c, "d", atomic_get_int(&alloc_lst->no));
    rpc->add(c, "d", (int)(atomic_get_long(&alloc_lst->size) >> rs));
    return;
}

static const char* rpc_mt_rnd_alloc_doc[2] = {
    "Takes 4 parameters: min, max, total_size and an optional unit (b|k|m|g)."
    " It will allocate total_size bytes of memory, in pieces of random size"
    " between min .. max (inclusive).",
    0
};

static void rpc_mt_rnd_alloc(rpc_t* rpc, void* c)
{
    int min, max, total_size;
    int rs;
    int err;

    if (rpc->scan(c, "ddd", &min, &max, &total_size) < 3) {
        return;
    }
    rs = rpc_get_size_mod(rpc, c);
    if (rs < 0)
        /* fault already generated on rpc_get_size_mod() error */
        return;
    if (min > max || min < 0 || max > total_size) {
        rpc->fault(c, 400, "invalid parameter values");
        return;
    }
    if ((err = mem_rnd_leak((unsigned long)min << rs,
                            (unsigned long)max << rs,
                            (unsigned long)total_size << rs)) < 0) {
        rpc->fault(c, 400, "memory allocation failed (%d errors)", -err);
    }
    return;
}

static const char* rpc_mt_test_start_doc[2] = {
    "Takes 7 parameters: min, max, total_size, min_interval, max_interval,"
    " test_time and an optional size unit (b|k|m|g). All the time units are"
    " ms. It will run a memory allocation test for test_time ms. At a random"
    " interval between min_interval and max_interval ms it will allocate a"
    " memory chunk with random size, between min and max. Each time total_size"
    " is reached, it will free all the memory allocated and start again."
    " Returns the test id (integer).",
    0
};

static void rpc_mt_test_start(rpc_t* rpc, void* c)
{
    int min, max, total_size;
    int min_intvrl, max_intvrl, total_time;
    int rs;
    int id;

    if (rpc->scan(c, "dddddd", &min, &max, &total_size,
                  &min_intvrl, &max_intvrl, &total_time) < 6) {
        return;
    }
    rs = rpc_get_size_mod(rpc, c);
    if (rs < 0)
        /* fault already generated on rpc_get_size_mod() error */
        return;
    if (min > max || min < 0 || max > total_size) {
        rpc->fault(c, 400, "invalid size parameters values");
        return;
    }
    if (min_intvrl > max_intvrl || min_intvrl <= 0 || max_intvrl > total_time) {
        rpc->fault(c, 400, "invalid time intervals values");
        return;
    }
    if ((id = mem_leak_time_test((unsigned long)min << rs,
                                 (unsigned long)max << rs,
                                 (unsigned long)total_size << rs,
                                 MS_TO_TICKS(min_intvrl),
                                 MS_TO_TICKS(max_intvrl),
                                 MS_TO_TICKS(total_time))) < 0) {
        rpc->fault(c, 400, "memory allocation failed");
    } else {
        rpc->add(c, "d", id);
    }
    return;
}

static const char* rpc_mt_test_stop_doc[2] = {
    "Takes 1 parameter: the test id. It will stop the corresponding test."
    " Note: the test is stopped, but not destroyed.",
    0
};

static void rpc_mt_test_stop(rpc_t* rpc, void* c)
{
    int id;

    if (rpc->scan(c, "d", &id) < 1) {
        return;
    }
    if (mem_test_stop(id) < 0) {
        rpc->fault(c, 400, "test %d not found", id);
    }
    return;
}

static const char* rpc_mt_test_destroy_doc[2] = {
    "Takes 1 optional parameter: the test id. It will destroy the"
    " corresponding test, or all the tests if no id (or -1) is given.",
    0
};

static void rpc_mt_test_destroy(rpc_t* rpc, void* c)
{
    int id;

    if (rpc->scan(c, "*d", &id) > 0 && id != -1) {
        if (mem_test_destroy(id) < 0)
            rpc->fault(c, 400, "test %d not found", id);
    } else {
        mem_destroy_all_tests();
    }
    return;
}

static const char* rpc_mt_test_destroy_all_doc[2] = {
    "It will destroy all the tests (running or stopped).",
    0
};

static void rpc_mt_test_destroy_all(rpc_t* rpc, void* c)
{
    mem_destroy_all_tests();
    return;
}

static const char* rpc_mt_test_list_doc[2] = {
    "If a test id parameter is provided it will list the corresponding test,"
    " else it will list all of them. Use b|k|m|g as a second parameter"
    " for the size units (default bytes).",
    0
};

static void rpc_mt_test_list(rpc_t* rpc, void* c)
{
    int id, rs;
    struct rnd_time_test* tst;
    void* h;

    rs = 0;
    if (rpc->scan(c, "*d", &id) < 1) {
        id = -1;
    } else {
        rs = rpc_get_size_mod(rpc, c);
        if (rs < 0)
            return;
    }
    lock_get(&rndt_lst->lock);
    for (tst = rndt_lst->tests; tst; tst = tst->next)
        if (tst->id == id || id == -1) {
            rpc->add(c, "{", &h);
            rpc->struct_add(h, "ddddddddddd",
                "ID ", tst->id,
                "run time (s) ", (int)TICKS_TO_S(
                        (TICKS_LE(tst->stop_time, get_ticks_raw()) ?
                            tst->stop_time : get_ticks_raw()) -
                        tst->start_time),
                "remaining (s)", TICKS_LE(tst->stop_time, get_ticks_raw()) ?
                        0 : (int)TICKS_TO_S(tst->stop_time - get_ticks_raw()),
                "total calls ", (int)tst->calls,
                "reallocs ", (int)tst->reallocs,
                "errors ", (int)tst->errs,
                "overflows ", (int)tst->overfl,
                "total alloc ", (int)((tst->crt +
                                        tst->overfl * tst->total) >> rs),
                "min ", (int)(tst->min >> rs),
                "max ", (int)(tst->max >> rs),
                "total ", (int)(tst->total >> rs));
            if (id != -1) break;
        }
    lock_release(&rndt_lst->lock);
    return;
}

static rpc_export_t mt_rpc[] = {
    {"mt.mem_alloc", rpc_mt_alloc, rpc_mt_alloc_doc, 0},
    {"mt.mem_free", rpc_mt_free, rpc_mt_free_doc, 0},
    {"mt.mem_realloc", rpc_mt_realloc, rpc_mt_realloc_doc, 0},
    {"mt.mem_used", rpc_mt_used, rpc_mt_used_doc, 0},
    {"mt.mem_rnd_alloc", rpc_mt_rnd_alloc, rpc_mt_rnd_alloc_doc, 0},
    {"mt.mem_test_start", rpc_mt_test_start, rpc_mt_test_start_doc, 0},
    {"mt.mem_test_stop", rpc_mt_test_stop, rpc_mt_test_stop_doc, 0},
    {"mt.mem_test_destroy", rpc_mt_test_destroy, rpc_mt_test_destroy_doc, 0},
    {"mt.mem_test_destroy_all", rpc_mt_test_destroy_all,
        rpc_mt_test_destroy_all_doc, 0},
    {"mt.mem_test_list", rpc_mt_test_list, rpc_mt_test_list_doc, 0},
    {0, 0, 0, 0}
};
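
/* Illustrative RPC usage (a sketch, assuming an RPC client such as
 * sercmd/kamcmd; parameter order as defined by the handlers above):
 *     sercmd mt.mem_alloc 10 m             # leak 10 MB of shared memory
 *     sercmd mt.mem_used k                 # chunk count + KB currently tracked
 *     sercmd mt.mem_rnd_alloc 1 64 1024 k  # alloc 1 MB in 1..64 KB pieces
 *     sercmd mt.mem_test_start 1 64 10240 100 1000 60000 k
 *     sercmd mt.mem_free                   # free everything
 */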