/*
 * Copyright (C) 2010 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @brief Kamailio core :: counters/stats
 * @file
 * @ingroup core
 */
#include "counters.h"
#include "str_hash.h"
#include "str.h"
#include "compiler_opt.h"
#include "mem/mem.h"
#include "mem/shm_mem.h"

#define CNT_HASH_SIZE 64
/* group hash size (rpc use) */
#define GRP_HASH_SIZE 16
/* initial sorted groups array size (rpc use) */
#define GRP_SORTED_SIZE 16
/* initial counter id to record array size */
#define CNT_ID2RECORD_SIZE 64

#define CACHELINE_PAD 128

/* leave space for one flag */
#define MAX_COUNTER_ID 32767

/* size (number of entries) of the temporary array used for keeping stats
   before the prefork init. Note: if more counters are registered than this
   size, the array will be dynamically increased (doubled each time). The
   value here is meant only to optimize startup/memory fragmentation. */
#define PREINIT_CNTS_VALS_SIZE 128
struct counter_record {
	str group;
	str name;
	counter_handle_t h;
	unsigned short flags;
	void* cbk_param;
	counter_cbk_f cbk;
	struct counter_record* grp_next; /* next in group */
	str doc;
};

struct grp_record {
	str group;
	struct counter_record* first;
};

/** hash table mapping a counter name to an id */
static struct str_hash_table cnts_hash_table;
/** array mapping id to record */
struct counter_record** cnt_id2record;
static int cnt_id2record_size;
/** hash table for groups (maps a group name to a counter list) */
static struct str_hash_table grp_hash_table;
/** array of groups, sorted */
static struct grp_record** grp_sorted;
static int grp_sorted_max_size;
static int grp_sorted_crt_size;
static int grp_no; /* number of groups */

/** counters array. a[proc_no][counter_id] =>
    _cnts_vals[proc_no * _cnts_row_len + counter_id] */
counter_array_t* _cnts_vals = 0;
int _cnts_row_len; /* number of elements per row */
static int cnts_no; /* number of registered counters */
static int cnts_max_rows; /* set to 0 if not yet fully init */
int counters_initialized(void)
{
	if (unlikely(_cnts_vals == 0)) {
		/* not init yet */
		return 0;
	}
	return 1;
}
/** init the counter hash table(s).
 * @return 0 on success, -1 on error.
 */
int init_counters()
{
	if (str_hash_alloc(&cnts_hash_table, CNT_HASH_SIZE) < 0)
		goto error;
	str_hash_init(&cnts_hash_table);
	if (str_hash_alloc(&grp_hash_table, GRP_HASH_SIZE) < 0)
		goto error;
	str_hash_init(&grp_hash_table);
	cnts_no = 1; /* start at 1 (0 used only for invalid counters) */
	cnts_max_rows = 0; /* 0 initially, !=0 after full init
						  (counters_prefork_init()) */
	grp_no = 0;
	cnt_id2record_size = CNT_ID2RECORD_SIZE;
	cnt_id2record = pkg_malloc(sizeof(*cnt_id2record) * cnt_id2record_size);
	if (cnt_id2record == 0)
		goto error;
	memset(cnt_id2record, 0, sizeof(*cnt_id2record) * cnt_id2record_size);
	grp_sorted_max_size = GRP_SORTED_SIZE;
	grp_sorted_crt_size = 0;
	grp_sorted = pkg_malloc(sizeof(*grp_sorted) * grp_sorted_max_size);
	if (grp_sorted == 0)
		goto error;
	memset(grp_sorted, 0, sizeof(*grp_sorted) * grp_sorted_max_size);
	return 0;
error:
	destroy_counters();
	return -1;
}
void destroy_counters()
{
	int r;
	struct str_hash_entry* e;
	struct str_hash_entry* bak;

	if (_cnts_vals) {
		if (cnts_max_rows)
			/* fully init => it is in shm */
			shm_free(_cnts_vals);
		else
			/* partially init (before prefork) => pkg */
			pkg_free(_cnts_vals);
		_cnts_vals = 0;
	}
	if (cnts_hash_table.table) {
		for (r = 0; r < cnts_hash_table.size; r++) {
			clist_foreach_safe(&cnts_hash_table.table[r], e, bak, next) {
				pkg_free(e);
			}
		}
		pkg_free(cnts_hash_table.table);
	}
	if (grp_hash_table.table) {
		for (r = 0; r < grp_hash_table.size; r++) {
			clist_foreach_safe(&grp_hash_table.table[r], e, bak, next) {
				pkg_free(e);
			}
		}
		pkg_free(grp_hash_table.table);
	}
	if (cnt_id2record)
		pkg_free(cnt_id2record);
	if (grp_sorted)
		pkg_free(grp_sorted);
	cnts_hash_table.table = 0;
	cnts_hash_table.size = 0;
	cnt_id2record = 0;
	grp_sorted = 0;
	grp_hash_table.table = 0;
	grp_hash_table.size = 0;
	grp_sorted_crt_size = 0;
	grp_sorted_max_size = 0;
	cnts_no = 0;
	_cnts_row_len = 0;
	cnts_max_rows = 0;
	grp_no = 0;
}
/** complete counter initialization, when the number of processes is known.
 * shm must be available.
 * @return 0 on success, < 0 on error
 */
int counters_prefork_init(int max_process_no)
{
	counter_array_t* old;
	int size, row_size;
	counter_handle_t h;

	/* round cnts_no so that cnts_no * sizeof(counter) is a CACHELINE_PAD
	   multiple */
	/* round-up row_size to a CACHELINE_PAD multiple if needed */
	row_size = ((sizeof(*_cnts_vals) * cnts_no - 1) / CACHELINE_PAD + 1) *
				CACHELINE_PAD;
	/* round-up the resulting row_size to a sizeof(*_cnts_vals) multiple */
	row_size = ((row_size - 1) / sizeof(*_cnts_vals) + 1) *
				sizeof(*_cnts_vals);
	/* get updated cnts_no (row length) */
	_cnts_row_len = row_size / sizeof(*_cnts_vals);
	size = max_process_no * row_size;
	/* replace the temporary pre-fork pkg array (with only 1 row) with
	   the final shm version (with max_process_no rows) */
	old = _cnts_vals;
	_cnts_vals = shm_malloc(size);
	if (_cnts_vals == 0)
		return -1;
	memset(_cnts_vals, 0, size);
	cnts_max_rows = max_process_no;
	/* copy prefork values into the new shm array */
	if (old) {
		for (h.id = 0; h.id < cnts_no; h.id++)
			counter_pprocess_val(process_no, h) = old[h.id].v;
		pkg_free(old);
	}
	return 0;
}
/** adds new group to the group hash table (no checks, internal version).
 * @return pointer to new group record on success, 0 on error.
 */
static struct grp_record* grp_hash_add(str* group)
{
	struct str_hash_entry* g;
	struct grp_record* grp_rec;
	struct grp_record** r;

	/* grp_rec copied at &g->u.data */
	g = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(g->u.data) +
					sizeof(*grp_rec) + group->len + 1);
	if (g == 0)
		goto error;
	grp_rec = (struct grp_record*)&g->u.data[0];
	grp_rec->group.s = (char*)(grp_rec + 1);
	grp_rec->group.len = group->len;
	grp_rec->first = 0;
	memcpy(grp_rec->group.s, group->s, group->len + 1);
	g->key = grp_rec->group;
	g->flags = 0;
	/* insert group into the sorted group array */
	if (grp_sorted_max_size <= grp_sorted_crt_size) {
		/* must increase the array */
		r = pkg_realloc(grp_sorted, 2 * grp_sorted_max_size *
						sizeof(*grp_sorted));
		if (r == 0)
			goto error;
		grp_sorted = r;
		grp_sorted_max_size *= 2;
		memset(&grp_sorted[grp_sorted_crt_size], 0,
				(grp_sorted_max_size - grp_sorted_crt_size) *
				sizeof(*grp_sorted));
	}
	for (r = grp_sorted; r < (grp_sorted + grp_sorted_crt_size); r++)
		if (strcmp(grp_rec->group.s, (*r)->group.s) < 0)
			break;
	if (r != (grp_sorted + grp_sorted_crt_size))
		memmove(r + 1, r, (int)(long)((char*)(grp_sorted + grp_sorted_crt_size) -
						(char*)r));
	grp_sorted_crt_size++;
	*r = grp_rec;
	/* insert into the hash only on success */
	str_hash_add(&grp_hash_table, g);
	return grp_rec;
error:
	if (g)
		pkg_free(g);
	return 0;
}
/** lookup a group into the group hash (internal version).
 * @return pointer to grp_record on success, 0 on failure (not found).
 */
static struct grp_record* grp_hash_lookup(str* group)
{
	struct str_hash_entry* e;

	e = str_hash_get(&grp_hash_table, group->s, group->len);
	return e ? (struct grp_record*)&e->u.data[0] : 0;
}

/** lookup a group and if not found create a new group record.
 * @return pointer to grp_record on success, 0 on failure (not found and
 * failed to create a new group record).
 */
static struct grp_record* grp_hash_get_create(str* group)
{
	struct grp_record* ret;

	ret = grp_hash_lookup(group);
	if (ret)
		return ret;
	return grp_hash_add(group);
}
/** adds new counter to the hash table (no checks, internal version).
 * @return pointer to new record on success, 0 on error.
 */
static struct counter_record* cnt_hash_add(
							str* group, str* name,
							int flags, counter_cbk_f cbk,
							void* param, const char* doc)
{
	struct str_hash_entry* e;
	struct counter_record* cnt_rec;
	struct grp_record* grp_rec;
	struct counter_record** p;
	counter_array_t* v;
	int doc_len;
	int n;

	e = 0;
	if (cnts_no >= MAX_COUNTER_ID)
		/* too many counters */
		goto error;
	grp_rec = grp_hash_get_create(group);
	if (grp_rec == 0)
		/* non-existing group and no new one could be created */
		goto error;
	doc_len = doc ? strlen(doc) : 0;
	/* cnt_rec copied at &e->u.data[0] */
	e = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(e->u.data) +
					sizeof(*cnt_rec) + name->len + 1 + group->len + 1 +
					doc_len + 1);
	if (e == 0)
		goto error;
	cnt_rec = (struct counter_record*)&e->u.data[0];
	cnt_rec->group.s = (char*)(cnt_rec + 1);
	cnt_rec->group.len = group->len;
	cnt_rec->name.s = cnt_rec->group.s + group->len + 1;
	cnt_rec->name.len = name->len;
	cnt_rec->doc.s = cnt_rec->name.s + name->len + 1;
	cnt_rec->doc.len = doc_len;
	cnt_rec->h.id = cnts_no++;
	cnt_rec->flags = flags;
	cnt_rec->cbk_param = param;
	cnt_rec->cbk = cbk;
	cnt_rec->grp_next = 0;
	memcpy(cnt_rec->group.s, group->s, group->len + 1);
	memcpy(cnt_rec->name.s, name->s, name->len + 1);
	if (doc)
		memcpy(cnt_rec->doc.s, doc, doc_len + 1);
	else
		cnt_rec->doc.s[0] = 0;
	e->key = cnt_rec->name;
	e->flags = 0;
	/* check to see if it fits in the prefork tmp. vals array.
	   This array contains only one "row", is allocated in pkg and
	   is used only until counters_prefork_init() (after that the
	   array is replaced with a shm version with all the needed rows).
	 */
	if (cnt_rec->h.id >= _cnts_row_len || _cnts_vals == 0) {
		/* array too small or not yet allocated => reallocate/allocate it
		   (min size PREINIT_CNTS_VALS_SIZE, max MAX_COUNTER_ID)
		 */
		n = (cnt_rec->h.id < PREINIT_CNTS_VALS_SIZE) ?
				PREINIT_CNTS_VALS_SIZE :
				((2 * (cnt_rec->h.id + (cnt_rec->h.id == 0)) < MAX_COUNTER_ID) ?
					(2 * (cnt_rec->h.id + (cnt_rec->h.id == 0))) :
					MAX_COUNTER_ID + 1);
		v = pkg_realloc(_cnts_vals, n * sizeof(*_cnts_vals));
		if (v == 0)
			/* realloc/malloc error */
			goto error;
		_cnts_vals = v;
		/* zero the newly allocated memory */
		memset(&_cnts_vals[_cnts_row_len], 0,
				(n - _cnts_row_len) * sizeof(*_cnts_vals));
		_cnts_row_len = n; /* record the new length */
	}
	/* add a pointer to it in the records array */
	if (cnt_id2record_size <= cnt_rec->h.id) {
		/* must increase the array */
		p = pkg_realloc(cnt_id2record,
						2 * cnt_id2record_size * sizeof(*cnt_id2record));
		if (p == 0)
			goto error;
		cnt_id2record = p;
		cnt_id2record_size *= 2;
		memset(&cnt_id2record[cnt_rec->h.id], 0,
				(cnt_id2record_size - cnt_rec->h.id) * sizeof(*cnt_id2record));
	}
	cnt_id2record[cnt_rec->h.id] = cnt_rec;
	/* add into the hash */
	str_hash_add(&cnts_hash_table, e);
	/* insert it sorted in the per group list */
	for (p = &grp_rec->first; *p; p = &((*p)->grp_next))
		if (strcmp(cnt_rec->name.s, (*p)->name.s) < 0)
			break;
	cnt_rec->grp_next = *p;
	*p = cnt_rec;
	return cnt_rec;
error:
	if (e)
		pkg_free(e);
	return 0;
}
/** lookup a (group, name) pair into the cnts hash (internal version).
 * @param group - counter group name. If "" the first matching counter with
 *                the given name will be returned (k compat).
 * @param name
 * @return pointer to counter_record on success, 0 on failure (not found).
 */
static struct counter_record* cnt_hash_lookup(str* group, str* name)
{
	struct str_hash_entry* e;
	struct str_hash_entry* first;
	struct counter_record* cnt_rec;

	e = str_hash_get(&cnts_hash_table, name->s, name->len);
	/* fast path */
	if (likely(e)) {
		cnt_rec = (struct counter_record*)&e->u.data[0];
		if (likely(group->len == 0 ||
					(cnt_rec->group.len == group->len &&
					 memcmp(cnt_rec->group.s, group->s, group->len) == 0)))
			return cnt_rec;
	} else
		return 0;
	/* search between records with same name, but different groups */
	first = e;
	do {
		cnt_rec = (struct counter_record*)&e->u.data[0];
		if (cnt_rec->group.len == group->len &&
			cnt_rec->name.len == name->len &&
			memcmp(cnt_rec->group.s, group->s, group->len) == 0 &&
			memcmp(cnt_rec->name.s, name->s, name->len) == 0)
			/* found */
			return cnt_rec;
		e = e->next;
	} while (e != first);
	return 0;
}
/** lookup a counter and if not found create a new counter record.
 * @return pointer to counter_record on success, 0 on failure (not found and
 * failed to create a new counter record).
 */
static struct counter_record* cnt_hash_get_create(
							str* group, str* name,
							int flags,
							counter_cbk_f cbk,
							void* param, const char* doc)
{
	struct counter_record* ret;

	ret = cnt_hash_lookup(group, name);
	if (ret)
		return ret;
	return cnt_hash_add(group, name, flags, cbk, param, doc);
}
/** register a new counter.
 * Can be called only before forking (e.g. from mod_init() or
 * init_child(PROC_INIT)).
 * @param handle - result parameter, it will be filled with the counter
 *                 handle on success (can be null if not needed).
 * @param group - group name
 * @param name - counter name (group.name must be unique).
 * @param flags - counter flags: one of CNT_F_*.
 * @param cbk - read callback function (if set it will be called each time
 *              someone calls counter_get()).
 * @param cbk_param - callback param.
 * @param doc - description/documentation string.
 * @param reg_flags - register flags: 1 - don't fail if counter already
 *                    registered (act like counter_lookup(handle, group, name)).
 * @return 0 on success, < 0 on error (-1 not init or malloc error, -2 already
 * registered (and reg_flags & 1 == 0)).
 */
int counter_register(counter_handle_t* handle, const char* group,
						const char* name, int flags,
						counter_cbk_f cbk, void* cbk_param,
						const char* doc,
						int reg_flags)
{
	str grp;
	str n;
	struct counter_record* cnt_rec;

	if (unlikely(cnts_max_rows)) {
		/* too late */
		BUG("late attempt to register counter: %s.%s\n", group, name);
		goto error;
	}
	n.s = (char*)name;
	n.len = strlen(name);
	if (unlikely(group == 0 || *group == 0)) {
		BUG("attempt to register counter %s without a group\n", name);
		goto error;
	}
	grp.s = (char*)group;
	grp.len = strlen(group);
	cnt_rec = cnt_hash_lookup(&grp, &n);
	if (cnt_rec) {
		if (reg_flags & 1)
			goto found;
		else {
			if (handle) handle->id = 0;
			return -2;
		}
	} else
		cnt_rec = cnt_hash_get_create(&grp, &n, flags, cbk, cbk_param, doc);
	if (unlikely(cnt_rec == 0))
		goto error;
found:
	if (handle) *handle = cnt_rec->h;
	return 0;
error:
	if (handle) handle->id = 0;
	return -1;
}
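
/* Example (illustrative sketch, not part of the core API): how a module
 * would typically register a counter from its mod_init() and read it later.
 * The group "mymod" and name "replies" are placeholder identifiers.
 *
 *	static counter_handle_t replies_cnt;
 *
 *	// in mod_init(), i.e. before forking:
 *	if (counter_register(&replies_cnt, "mymod", "replies", 0, 0, 0,
 *				"number of replies seen by mymod", 0) < 0)
 *		return -1;
 *
 *	// at runtime, from any process:
 *	counter_val_t v = counter_get_val(replies_cnt);
 */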
/** fill in the handle of an existing counter (str parameters).
 * @param handle - filled with the corresp. handle on success.
 * @param group - counter group name. If "" the first matching
 *                counter with the given name will be returned
 *                (k compat).
 * @param name - counter name.
 * @return 0 on success, < 0 on error
 */
int counter_lookup_str(counter_handle_t* handle, str* group, str* name)
{
	struct counter_record* cnt_rec;

	cnt_rec = cnt_hash_lookup(group, name);
	if (likely(cnt_rec)) {
		*handle = cnt_rec->h;
		return 0;
	}
	handle->id = 0;
	return -1;
}
/** fill in the handle of an existing counter (asciiz parameters).
 * @param handle - filled with the corresp. handle on success.
 * @param group - counter group name. If 0 or "" the first matching
 *                counter with the given name will be returned
 *                (k compat).
 * @param name - counter name.
 * @return 0 on success, < 0 on error
 */
int counter_lookup(counter_handle_t* handle,
					const char* group, const char* name)
{
	str grp;
	str n;

	n.s = (char*)name;
	n.len = strlen(name);
	grp.s = (char*)group;
	grp.len = group ? strlen(group) : 0;
	return counter_lookup_str(handle, &grp, &n);
}
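
/* Example (illustrative sketch): looking up a counter registered elsewhere
 * and reading its current value. "tm" and "replied" are placeholder
 * group/name strings, not necessarily existing counters.
 *
 *	counter_handle_t h;
 *
 *	if (counter_lookup(&h, "tm", "replied") == 0) {
 *		counter_val_t v = counter_get_val(h);
 *		// ... use v ...
 *	}
 */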
/** register all the counters declared in a null-terminated array.
 * @param group - counters group.
 * @param defs - null-terminated array containing counter definitions.
 * @return 0 on success, < 0 on error ( - (counter_number+1))
 */
int counter_register_array(const char* group, counter_def_t* defs)
{
	int r;

	for (r = 0; defs[r].name; r++)
		if (counter_register(defs[r].handle,
					group, defs[r].name, defs[r].flags,
					defs[r].get_cbk, defs[r].get_cbk_param,
					defs[r].descr, 0) < 0)
			return -(r + 1); /* return - (idx of bad counter + 1) */
	return 0;
}
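
/* Example (illustrative sketch): declaring a counter definition array and
 * registering it in one call. The counter_def_t field order shown here
 * (handle, name, flags, get_cbk, get_cbk_param, descr) is assumed from its
 * use in counter_register_array(); all names below are placeholders.
 *
 *	static counter_handle_t rcv_cnt, err_cnt;
 *
 *	static counter_def_t mymod_cnt_defs[] = {
 *		{&rcv_cnt, "received", 0, 0, 0, "received requests"},
 *		{&err_cnt, "errors",   0, 0, 0, "processing errors"},
 *		{0, 0, 0, 0, 0, 0}
 *	};
 *
 *	// in mod_init():
 *	if (counter_register_array("mymod", mymod_cnt_defs) < 0)
 *		return -1;
 */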
/** get the value of the counter, bypassing callbacks.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value.
 */
counter_val_t counter_get_raw_val(counter_handle_t handle)
{
	int r;
	counter_val_t ret;

	if (unlikely(_cnts_vals == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		return 0;
	}
	if (unlikely(handle.id >= cnts_no || (short)handle.id < 0)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
		return 0;
	}
	ret = 0;
	for (r = 0; r < cnts_max_rows; r++)
		ret += counter_pprocess_val(r, handle);
	return ret;
}
/** get the value of the counter, using the callbacks (if defined).
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value. */
counter_val_t counter_get_val(counter_handle_t handle)
{
	struct counter_record* cnt_rec;

	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		return 0;
	}
	cnt_rec = cnt_id2record[handle.id];
	if (unlikely(cnt_rec->cbk))
		return cnt_rec->cbk(handle, cnt_rec->cbk_param);
	return counter_get_raw_val(handle);
}
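
/* Example (illustrative sketch): a read callback, so that counter_get_val()
 * reports a value computed on demand instead of the summed per-process
 * slots. The counter_cbk_f prototype is assumed from counters.h; the names
 * below are placeholders.
 *
 *	static counter_val_t uptime_cbk(counter_handle_t h, void* param)
 *	{
 *		return (counter_val_t)(time(0) - *(time_t*)param);
 *	}
 *
 *	// registered with something like:
 *	// counter_register(&h, "mymod", "uptime", 0, uptime_cbk, &start_time,
 *	//                  "seconds since module start", 0);
 */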
/** reset the counter.
 * Reset a counter, unless it has the CNT_F_NO_RESET flag set.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * Note: it's racy.
 */
void counter_reset(counter_handle_t handle)
{
	int r;

	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		return;
	}
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
		return;
	}
	if (unlikely(cnt_id2record[handle.id]->flags & CNT_F_NO_RESET))
		return;
	for (r = 0; r < cnts_max_rows; r++)
		counter_pprocess_val(r, handle) = 0;
	return;
}
/** return the name for counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_name(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		goto error;
	}
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
		goto error;
	}
	return cnt_id2record[handle.id]->name.s;
error:
	return 0;
}

/** return the group name for counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_group(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		goto error;
	}
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
		goto error;
	}
	return cnt_id2record[handle.id]->group.s;
error:
	return 0;
}

/** return the description (doc) string for a given counter.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_doc(counter_handle_t handle)
{
	if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
		/* not init yet */
		BUG("counters not fully initialized yet\n");
		goto error;
	}
	if (unlikely(handle.id >= cnts_no)) {
		BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
		goto error;
	}
	return cnt_id2record[handle.id]->doc.s;
error:
	return 0;
}
/** iterate on all the counter group names.
 * @param cbk - pointer to a callback function that will be called for each
 *              group name.
 * @param p - parameter that will be passed to the callback function
 *            (along the group name).
 */
void counter_iterate_grp_names(void (*cbk)(void* p, str* grp_name), void* p)
{
	int r;

	for (r = 0; r < grp_sorted_crt_size; r++)
		cbk(p, &grp_sorted[r]->group);
}

/** iterate on all the variable names in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              variable name.
 * @param p - parameter that will be passed to the callback function
 *            (along the variable name).
 */
void counter_iterate_grp_var_names(const char* group,
									void (*cbk)(void* p, str* var_name),
									void* p)
{
	struct counter_record* r;
	struct grp_record* g;
	str grp;

	grp.s = (char*)group;
	grp.len = strlen(group);
	g = grp_hash_lookup(&grp);
	if (g)
		for (r = g->first; r; r = r->grp_next)
			cbk(p, &r->name);
}

/** iterate on all the variable names and handles in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              [variable name, variable handle] pair.
 * @param p - parameter that will be passed to the callback function
 *            (along the group name, variable name and variable handle).
 */
void counter_iterate_grp_vars(const char* group,
								void (*cbk)(void* p, str* g, str* n,
											counter_handle_t h),
								void* p)
{
	struct counter_record* r;
	struct grp_record* g;
	str grp;

	grp.s = (char*)group;
	grp.len = strlen(group);
	g = grp_hash_lookup(&grp);
	if (g)
		for (r = g->first; r; r = r->grp_next)
			cbk(p, &r->group, &r->name, r->h);
}
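
/* Example (illustrative sketch): dumping every counter in a group through
 * counter_iterate_grp_vars(). The callback and group name below are
 * placeholders (e.g. an RPC handler would pass its reply context via p).
 *
 *	static void dump_var(void* p, str* g, str* n, counter_handle_t h)
 *	{
 *		// e.g. append "<g>.<n> = counter_get_val(h)" to the reply in p
 *	}
 *
 *	counter_iterate_grp_vars("mymod", dump_var, 0);
 */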
/* vi: set ts=4 sw=4 tw=79:ai:cindent: */