/*
 * Copyright (C) 2010 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @brief counters/stats
 * @file
 * @ingroup core
 */

/*
 * History:
 * --------
 * 2010-08-06  initial version (andrei)
 * 2010-08-24  counters can be used (inc, add) before prefork_init (andrei)
 */
#include "counters.h"
#include "str_hash.h"
#include "str.h"
#include "compiler_opt.h"
#include "mem/mem.h"
#include "mem/shm_mem.h"

#include <string.h> /* strlen(), strcmp(), memcpy(), memmove(), memset() */

#define CNT_HASH_SIZE 64
/* group hash size (rpc use) */
#define GRP_HASH_SIZE 16
/* initial sorted groups array size (rpc use) */
#define GRP_SORTED_SIZE 16
/* initial counter id to record array size */
#define CNT_ID2RECORD_SIZE 64

#define CACHELINE_PAD 128

/* leave space for one flag */
#define MAX_COUNTER_ID 32767

/* size (number of entries) of the temporary array used for keeping stats
 * pre-prefork init. Note: if more counters are registered than this size,
 * the array will be dynamically increased (doubled each time). The value
 * here is meant only to optimize startup/memory fragmentation. */
#define PREINIT_CNTS_VALS_SIZE 128

struct counter_record {
    str group;
    str name;
    counter_handle_t h;
    unsigned short flags;
    void* cbk_param;
    counter_cbk_f cbk;
    struct counter_record* grp_next; /* next in group */
    str doc;
};

struct grp_record {
    str group;
    struct counter_record* first;
};

/** hash table mapping a counter name to an id */
static struct str_hash_table cnts_hash_table;
/** array mapping counter id to record */
struct counter_record** cnt_id2record;
static int cnt_id2record_size;
/** hash table for groups (maps a group name to a counter list) */
static struct str_hash_table grp_hash_table;
/** array of groups, sorted */
static struct grp_record** grp_sorted;
static int grp_sorted_max_size;
static int grp_sorted_crt_size;
static int grp_no; /* number of groups */

/** counters array: a[proc_no][counter_id] =>
 *  _cnts_vals[proc_no * cnts_no + counter_id] */
counter_array_t* _cnts_vals;
int _cnts_row_len;        /* number of elements per row */
static int cnts_no;       /* number of registered counters */
static int cnts_max_rows; /* set to 0 if not yet fully init */

/** init the counter hash table(s).
 * @return 0 on success, -1 on error.
 */
int init_counters()
{
    if (str_hash_alloc(&cnts_hash_table, CNT_HASH_SIZE) < 0)
        goto error;
    str_hash_init(&cnts_hash_table);
    if (str_hash_alloc(&grp_hash_table, GRP_HASH_SIZE) < 0)
        goto error;
    str_hash_init(&grp_hash_table);
    cnts_no = 1; /* start at 1 (0 used only for invalid counters) */
    cnts_max_rows = 0; /* 0 initially, !=0 after full init
                          (counters_prefork_init()) */
    grp_no = 0;
    cnt_id2record_size = CNT_ID2RECORD_SIZE;
    cnt_id2record = pkg_malloc(sizeof(*cnt_id2record) * cnt_id2record_size);
    if (cnt_id2record == 0)
        goto error;
    memset(cnt_id2record, 0, sizeof(*cnt_id2record) * cnt_id2record_size);
    grp_sorted_max_size = GRP_SORTED_SIZE;
    grp_sorted_crt_size = 0;
    grp_sorted = pkg_malloc(sizeof(*grp_sorted) * grp_sorted_max_size);
    if (grp_sorted == 0)
        goto error;
    memset(grp_sorted, 0, sizeof(*grp_sorted) * grp_sorted_max_size);
    return 0;
error:
    destroy_counters();
    return -1;
}

void destroy_counters()
{
    int r;
    struct str_hash_entry* e;
    struct str_hash_entry* bak;

    if (_cnts_vals) {
        if (cnts_max_rows)
            /* fully init => it is in shm */
            shm_free(_cnts_vals);
        else
            /* partially init (before prefork) => pkg */
            pkg_free(_cnts_vals);
        _cnts_vals = 0;
    }
    if (cnts_hash_table.table) {
        for (r = 0; r < cnts_hash_table.size; r++) {
            clist_foreach_safe(&cnts_hash_table.table[r], e, bak, next) {
                pkg_free(e);
            }
        }
        pkg_free(cnts_hash_table.table);
    }
    if (grp_hash_table.table) {
        for (r = 0; r < grp_hash_table.size; r++) {
            clist_foreach_safe(&grp_hash_table.table[r], e, bak, next) {
                pkg_free(e);
            }
        }
        pkg_free(grp_hash_table.table);
    }
    if (cnt_id2record)
        pkg_free(cnt_id2record);
    if (grp_sorted)
        pkg_free(grp_sorted);
    cnts_hash_table.table = 0;
    cnts_hash_table.size = 0;
    cnt_id2record = 0;
    grp_sorted = 0;
    grp_hash_table.table = 0;
    grp_hash_table.size = 0;
    grp_sorted_crt_size = 0;
    grp_sorted_max_size = 0;
    cnts_no = 0;
    _cnts_row_len = 0;
    cnts_max_rows = 0;
    grp_no = 0;
}
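
/* Lifecycle sketch (illustrative only, not compiled): the real call sites
 * live in the core startup code and in modules; "n_procs" is a placeholder
 * for the actual process count and counter_inc() is the update helper
 * mentioned in the history note above (declared in counters.h).
 *
 *   counter_handle_t h;
 *
 *   init_counters();                              // alloc pkg hash tables
 *   counter_register(&h, "mygrp", "foo", 0, 0, 0, // pre-fork registration
 *                    "example counter", 0);
 *   counter_inc(h);                               // allowed before prefork init
 *   counters_prefork_init(n_procs);               // switch to the shm rows
 *   // ... fork + normal operation ...
 *   destroy_counters();                           // shutdown cleanup
 */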

/** complete counter initialization, when the number of processes is known.
 * shm must be available.
 * @return 0 on success, < 0 on error.
 */
int counters_prefork_init(int max_process_no)
{
    counter_array_t* old;
    int size, row_size;
    counter_handle_t h;

    /* round cnts_no so that cnts_no * sizeof(counter) is a CACHELINE_PAD
       multiple */
    /* round up row_size to a CACHELINE_PAD multiple if needed */
    row_size = ((sizeof(*_cnts_vals) * cnts_no - 1) / CACHELINE_PAD + 1) *
                CACHELINE_PAD;
    /* round up the resulting row_size to a sizeof(*_cnts_vals) multiple */
    row_size = ((row_size - 1) / sizeof(*_cnts_vals) + 1) *
                sizeof(*_cnts_vals);
    /* get the updated row length (number of counter slots per row) */
    _cnts_row_len = row_size / sizeof(*_cnts_vals);
    size = max_process_no * row_size;
    /* replace the temporary pre-fork pkg array (with only 1 row) with
       the final shm version (with max_process_no rows) */
    old = _cnts_vals;
    _cnts_vals = shm_malloc(size);
    if (_cnts_vals == 0)
        return -1;
    memset(_cnts_vals, 0, size);
    cnts_max_rows = max_process_no;
    /* copy the pre-fork values into the newly allocated shm array */
    if (old) {
        for (h.id = 0; h.id < cnts_no; h.id++)
            counter_pprocess_val(process_no, h) = old[h.id].v;
        pkg_free(old);
    }
    return 0;
}
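
/* Rounding example (illustrative; assumes sizeof(counter_array_t) == 8 bytes,
 * with CACHELINE_PAD == 128 as defined above):
 *   cnts_no = 20  =>  raw row size    = 20 * 8 = 160 bytes
 *                 =>  padded row size = ((160 - 1) / 128 + 1) * 128 = 256 bytes
 *                 =>  _cnts_row_len   = 256 / 8 = 32 counter slots per row
 * i.e. each per-process row starts on its own cache line and no two rows
 * share one, at the cost of a few unused trailing slots per row.
 */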

/** adds new group to the group hash table (no checks, internal version).
 * @return pointer to new group record on success, 0 on error.
 */
static struct grp_record* grp_hash_add(str* group)
{
    struct str_hash_entry* g;
    struct grp_record* grp_rec;
    struct grp_record** r;

    /* grp_rec copied at &g->u.data */
    g = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(g->u.data) +
                   sizeof(*grp_rec) + group->len + 1);
    if (g == 0)
        goto error;
    grp_rec = (struct grp_record*)&g->u.data[0];
    grp_rec->group.s = (char*)(grp_rec + 1);
    grp_rec->group.len = group->len;
    grp_rec->first = 0;
    memcpy(grp_rec->group.s, group->s, group->len + 1);
    g->key = grp_rec->group;
    g->flags = 0;
    /* insert group into the sorted group array */
    if (grp_sorted_max_size <= grp_sorted_crt_size) {
        /* must increase the array */
        r = pkg_realloc(grp_sorted, 2 * grp_sorted_max_size *
                        sizeof(*grp_sorted));
        if (r == 0)
            goto error;
        grp_sorted = r;
        grp_sorted_max_size *= 2;
        memset(&grp_sorted[grp_sorted_crt_size], 0,
               (grp_sorted_max_size - grp_sorted_crt_size) *
               sizeof(*grp_sorted));
    }
    for (r = grp_sorted; r < (grp_sorted + grp_sorted_crt_size); r++)
        if (strcmp(grp_rec->group.s, (*r)->group.s) < 0)
            break;
    if (r != (grp_sorted + grp_sorted_crt_size))
        memmove(r + 1, r,
                (int)(long)((char*)(grp_sorted + grp_sorted_crt_size) -
                            (char*)r));
    grp_sorted_crt_size++;
    *r = grp_rec;
    /* insert into the hash only on success */
    str_hash_add(&grp_hash_table, g);
    return grp_rec;
error:
    if (g)
        pkg_free(g);
    return 0;
}

/** look up a group in the group hash (internal version).
 * @return pointer to grp_record on success, 0 on failure (not found).
 */
static struct grp_record* grp_hash_lookup(str* group)
{
    struct str_hash_entry* e;

    e = str_hash_get(&grp_hash_table, group->s, group->len);
    return e ? (struct grp_record*)&e->u.data[0] : 0;
}

/** look up a group and, if not found, create a new group record.
 * @return pointer to grp_record on success, 0 on failure (not found and
 * failed to create a new group record).
 */
static struct grp_record* grp_hash_get_create(str* group)
{
    struct grp_record* ret;

    ret = grp_hash_lookup(group);
    if (ret)
        return ret;
    return grp_hash_add(group);
}

/** adds a new counter to the hash table (no checks, internal version).
 * @return pointer to the new record on success, 0 on error.
 */
static struct counter_record* cnt_hash_add(
                    str* group, str* name,
                    int flags, counter_cbk_f cbk,
                    void* param, const char* doc)
{
    struct str_hash_entry* e;
    struct counter_record* cnt_rec;
    struct grp_record* grp_rec;
    struct counter_record** p;
    counter_array_t* v;
    int doc_len;
    int n;

    e = 0;
    if (cnts_no >= MAX_COUNTER_ID)
        /* too many counters */
        goto error;
    grp_rec = grp_hash_get_create(group);
    if (grp_rec == 0)
        /* non-existing group and no new one could be created */
        goto error;
    doc_len = doc ? strlen(doc) : 0;
    /* cnt_rec copied at &e->u.data[0] */
    e = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(e->u.data) +
                   sizeof(*cnt_rec) + name->len + 1 + group->len + 1 +
                   doc_len + 1);
    if (e == 0)
        goto error;
    cnt_rec = (struct counter_record*)&e->u.data[0];
    cnt_rec->group.s = (char*)(cnt_rec + 1);
    cnt_rec->group.len = group->len;
    cnt_rec->name.s = cnt_rec->group.s + group->len + 1;
    cnt_rec->name.len = name->len;
    cnt_rec->doc.s = cnt_rec->name.s + name->len + 1;
    cnt_rec->doc.len = doc_len;
    cnt_rec->h.id = cnts_no++;
    cnt_rec->flags = flags;
    cnt_rec->cbk_param = param;
    cnt_rec->cbk = cbk;
    cnt_rec->grp_next = 0;
    memcpy(cnt_rec->group.s, group->s, group->len + 1);
    memcpy(cnt_rec->name.s, name->s, name->len + 1);
    if (doc)
        memcpy(cnt_rec->doc.s, doc, doc_len + 1);
    else
        cnt_rec->doc.s[0] = 0;
    e->key = cnt_rec->name;
    e->flags = 0;
    /* check to see if it fits in the prefork tmp. vals array.
       This array contains only one "row", is allocated in pkg and
       is used only until counters_prefork_init() (after that the
       array is replaced with a shm version with all the needed rows).
     */
    if (cnt_rec->h.id >= _cnts_row_len || _cnts_vals == 0) {
        /* array too small or not yet allocated => reallocate/allocate it
           (min size PREINIT_CNTS_VALS_SIZE, max MAX_COUNTER_ID)
         */
        n = (cnt_rec->h.id < PREINIT_CNTS_VALS_SIZE) ?
                PREINIT_CNTS_VALS_SIZE :
                ((2 * (cnt_rec->h.id + (cnt_rec->h.id == 0)) < MAX_COUNTER_ID) ?
                    (2 * (cnt_rec->h.id + (cnt_rec->h.id == 0))) :
                    MAX_COUNTER_ID + 1);
        v = pkg_realloc(_cnts_vals, n * sizeof(*_cnts_vals));
        if (v == 0)
            /* realloc/malloc error */
            goto error;
        _cnts_vals = v;
        /* zero the newly allocated memory */
        memset(&_cnts_vals[_cnts_row_len], 0,
               (n - _cnts_row_len) * sizeof(*_cnts_vals));
        _cnts_row_len = n; /* record the new length */
    }
    /* add a pointer to it in the records array */
    if (cnt_id2record_size <= cnt_rec->h.id) {
        /* must increase the array */
        p = pkg_realloc(cnt_id2record,
                        2 * cnt_id2record_size * sizeof(*cnt_id2record));
        if (p == 0)
            goto error;
        cnt_id2record = p;
        cnt_id2record_size *= 2;
        memset(&cnt_id2record[cnt_rec->h.id], 0,
               (cnt_id2record_size - cnt_rec->h.id) * sizeof(*cnt_id2record));
    }
    cnt_id2record[cnt_rec->h.id] = cnt_rec;
    /* add into the hash */
    str_hash_add(&cnts_hash_table, e);
    /* insert it sorted in the per-group list */
    for (p = &grp_rec->first; *p; p = &((*p)->grp_next))
        if (strcmp(cnt_rec->name.s, (*p)->name.s) < 0)
            break;
    cnt_rec->grp_next = *p;
    *p = cnt_rec;
    return cnt_rec;
error:
    if (e)
        pkg_free(e);
    return 0;
}

/** look up a (group, name) pair in the cnts hash (internal version).
 * @param group - counter group name. If "" the first matching counter with
 *                the given name will be returned (k compat).
 * @param name
 * @return pointer to counter_record on success, 0 on failure (not found).
 */
static struct counter_record* cnt_hash_lookup(str* group, str* name)
{
    struct str_hash_entry* e;
    struct str_hash_entry* first;
    struct counter_record* cnt_rec;

    e = str_hash_get(&cnts_hash_table, name->s, name->len);
    /* fast path */
    if (likely(e)) {
        cnt_rec = (struct counter_record*)&e->u.data[0];
        if (likely(group->len == 0 ||
                   (cnt_rec->group.len == group->len &&
                    memcmp(cnt_rec->group.s, group->s, group->len) == 0)))
            return cnt_rec;
    } else
        return 0;
    /* search among records with the same name, but different groups */
    first = e;
    do {
        cnt_rec = (struct counter_record*)&e->u.data[0];
        if (cnt_rec->group.len == group->len &&
            cnt_rec->name.len == name->len &&
            memcmp(cnt_rec->group.s, group->s, group->len) == 0 &&
            memcmp(cnt_rec->name.s, name->s, name->len) == 0)
            /* found */
            return cnt_rec;
        e = e->next;
    } while (e != first);
    return 0;
}

/** look up a counter and, if not found, create a new counter record.
 * @return pointer to counter_record on success, 0 on failure (not found and
 * failed to create a new counter record).
 */
static struct counter_record* cnt_hash_get_create(
                    str* group, str* name,
                    int flags,
                    counter_cbk_f cbk,
                    void* param, const char* doc)
{
    struct counter_record* ret;

    ret = cnt_hash_lookup(group, name);
    if (ret)
        return ret;
    return cnt_hash_add(group, name, flags, cbk, param, doc);
}

/** register a new counter.
 * Can be called only before forking (e.g. from mod_init() or
 * init_child(PROC_INIT)).
 * @param handle - result parameter, it will be filled with the counter
 *                 handle on success (can be null if not needed).
 * @param group - group name
 * @param name - counter name (group.name must be unique).
 * @param flags - counter flags: one of CNT_F_*.
 * @param cbk - read callback function (if set it will be called each time
 *              someone calls counter_get_val()).
 * @param cbk_param - callback param.
 * @param doc - description/documentation string.
 * @param reg_flags - register flags: 1 - don't fail if the counter is already
 *                    registered (act like counter_lookup(handle, group, name)).
 * @return 0 on success, < 0 on error (-1 not init or malloc error, -2 already
 * registered (and reg_flags & 1 == 0)).
 */
int counter_register(counter_handle_t* handle, const char* group,
                     const char* name, int flags,
                     counter_cbk_f cbk, void* cbk_param,
                     const char* doc,
                     int reg_flags)
{
    str grp;
    str n;
    struct counter_record* cnt_rec;

    if (unlikely(cnts_max_rows)) {
        /* too late */
        BUG("late attempt to register counter: %s.%s\n", group, name);
        goto error;
    }
    n.s = (char*)name;
    n.len = strlen(name);
    if (unlikely(group == 0 || *group == 0)) {
        BUG("attempt to register counter %s without a group\n", name);
        goto error;
    }
    grp.s = (char*)group;
    grp.len = strlen(group);
    cnt_rec = cnt_hash_lookup(&grp, &n);
    if (cnt_rec) {
        if (reg_flags & 1)
            goto found;
        else {
            if (handle) handle->id = 0;
            return -2;
        }
    } else
        cnt_rec = cnt_hash_get_create(&grp, &n, flags, cbk, cbk_param, doc);
    if (unlikely(cnt_rec == 0))
        goto error;
found:
    if (handle) *handle = cnt_rec->h;
    return 0;
error:
    if (handle) handle->id = 0;
    return -1;
}
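
/* Usage sketch (illustrative; "mymod" and replies_cnt are made-up names, not
 * part of this file):
 *
 *   static counter_handle_t replies_cnt;
 *
 *   static int mod_init(void)
 *   {
 *       if (counter_register(&replies_cnt, "mymod", "replies", 0,
 *                            0, 0, "replies processed by mymod", 0) < 0)
 *           return -1;
 *       return 0;
 *   }
 *
 * Once registered, the counter is updated with the helpers from counters.h
 * (e.g. counter_inc(replies_cnt) or counter_add(replies_cnt, 10), per the
 * "inc, add" note in the history above).
 */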

/** fill in the handle of an existing counter (str parameters).
 * @param handle - filled with the corresp. handle on success.
 * @param group - counter group name. If "" the first matching
 *                counter with the given name will be returned
 *                (k compat).
 * @param name - counter name.
 * @return 0 on success, < 0 on error
 */
int counter_lookup_str(counter_handle_t* handle, str* group, str* name)
{
    struct counter_record* cnt_rec;

    cnt_rec = cnt_hash_lookup(group, name);
    if (likely(cnt_rec)) {
        *handle = cnt_rec->h;
        return 0;
    }
    handle->id = 0;
    return -1;
}

/** fill in the handle of an existing counter (asciiz parameters).
 * @param handle - filled with the corresp. handle on success.
 * @param group - counter group name. If 0 or "" the first matching
 *                counter with the given name will be returned
 *                (k compat).
 * @param name - counter name.
 * @return 0 on success, < 0 on error
 */
int counter_lookup(counter_handle_t* handle,
                   const char* group, const char* name)
{
    str grp;
    str n;

    n.s = (char*)name;
    n.len = strlen(name);
    grp.s = (char*)group;
    grp.len = group ? strlen(group) : 0;
    return counter_lookup_str(handle, &grp, &n);
}

/** register all the counters declared in a null-terminated array.
 * @param group - counters group.
 * @param defs - null-terminated array containing counter definitions.
 * @return 0 on success, < 0 on error (-(idx of bad counter + 1)).
 */
int counter_register_array(const char* group, counter_def_t* defs)
{
    int r;

    for (r = 0; defs[r].name; r++)
        if (counter_register(defs[r].handle,
                             group, defs[r].name, defs[r].flags,
                             defs[r].get_cbk, defs[r].get_cbk_param,
                             defs[r].descr, 0) < 0)
            return -(r + 1); /* return -(idx of bad counter + 1) */
    return 0;
}
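
/* Usage sketch (illustrative; designated initializers are used so the example
 * does not depend on the exact field order of counter_def_t -- only the field
 * names accessed above are assumed; the counter names are made up):
 *
 *   static counter_handle_t rcv_cnt, fwd_cnt;
 *
 *   static counter_def_t my_cnt_defs[] = {
 *       {.handle = &rcv_cnt, .name = "received",  .descr = "messages received"},
 *       {.handle = &fwd_cnt, .name = "forwarded", .descr = "messages forwarded"},
 *       {0}  // null .name terminates the array
 *   };
 *
 *   if (counter_register_array("mymod", my_cnt_defs) < 0)
 *       goto error;
 */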

/** get the value of the counter, bypassing callbacks.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value.
 */
counter_val_t counter_get_raw_val(counter_handle_t handle)
{
    int r;
    counter_val_t ret;

    if (unlikely(_cnts_vals == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        return 0;
    }
    if (unlikely(handle.id >= cnts_no || (short)handle.id < 0)) {
        BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
        return 0;
    }
    ret = 0;
    for (r = 0; r < cnts_max_rows; r++)
        ret += counter_pprocess_val(r, handle);
    return ret;
}

/** get the value of the counter, using the callbacks (if defined).
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return counter value. */
counter_val_t counter_get_val(counter_handle_t handle)
{
    struct counter_record* cnt_rec;

    if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        return 0;
    }
    cnt_rec = cnt_id2record[handle.id];
    if (unlikely(cnt_rec->cbk))
        return cnt_rec->cbk(handle, cnt_rec->cbk_param);
    return counter_get_raw_val(handle);
}
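
/* Read-side sketch (illustrative; "mymod"/"replies" are placeholder names):
 *
 *   counter_handle_t h;
 *   counter_val_t v;
 *
 *   if (counter_lookup(&h, "mymod", "replies") == 0) {
 *       v = counter_get_val(h);     // honours the read callback, if any
 *       // counter_get_raw_val(h) would skip the callback and just sum
 *       // the per-process rows
 *   }
 */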

/** reset the counter.
 * Reset a counter, unless it has the CNT_F_NO_RESET flag set.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * Note: it's racy.
 */
void counter_reset(counter_handle_t handle)
{
    int r;

    if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        return;
    }
    if (unlikely(handle.id >= cnts_no)) {
        BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
        return;
    }
    if (unlikely(cnt_id2record[handle.id]->flags & CNT_F_NO_RESET))
        return;
    for (r = 0; r < cnts_max_rows; r++)
        counter_pprocess_val(r, handle) = 0;
    return;
}

/** return the name for counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_name(counter_handle_t handle)
{
    if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        goto error;
    }
    if (unlikely(handle.id >= cnts_no)) {
        BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
        goto error;
    }
    return cnt_id2record[handle.id]->name.s;
error:
    return 0;
}

/** return the group name for counter handle.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_group(counter_handle_t handle)
{
    if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        goto error;
    }
    if (unlikely(handle.id >= cnts_no)) {
        BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
        goto error;
    }
    return cnt_id2record[handle.id]->group.s;
error:
    return 0;
}

/** return the description (doc) string for a given counter.
 * @param handle - counter handle obtained using counter_lookup() or
 *                 counter_register().
 * @return asciiz pointer on success, 0 on error.
 */
char* counter_get_doc(counter_handle_t handle)
{
    if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
        /* not init yet */
        BUG("counters not fully initialized yet\n");
        goto error;
    }
    if (unlikely(handle.id >= cnts_no)) {
        BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
        goto error;
    }
    return cnt_id2record[handle.id]->doc.s;
error:
    return 0;
}

/** iterate over all the counter group names.
 * @param cbk - pointer to a callback function that will be called for each
 *              group name.
 * @param p - parameter that will be passed to the callback function
 *            (along with the group name).
 */
void counter_iterate_grp_names(void (*cbk)(void* p, str* grp_name), void* p)
{
    int r;

    for (r = 0; r < grp_sorted_crt_size; r++)
        cbk(p, &grp_sorted[r]->group);
}

/** iterate over all the variable names in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              variable name.
 * @param p - parameter that will be passed to the callback function
 *            (along with the variable name).
 */
void counter_iterate_grp_var_names(const char* group,
                                   void (*cbk)(void* p, str* var_name),
                                   void* p)
{
    struct counter_record* r;
    struct grp_record* g;
    str grp;

    grp.s = (char*)group;
    grp.len = strlen(group);
    g = grp_hash_lookup(&grp);
    if (g)
        for (r = g->first; r; r = r->grp_next)
            cbk(p, &r->name);
}

/** iterate over all the variable names and handles in a specified group.
 * @param group - group name.
 * @param cbk - pointer to a callback function that will be called for each
 *              [variable name, variable handle] pair.
 * @param p - parameter that will be passed to the callback function
 *            (along with the group name, variable name and variable handle).
 */
void counter_iterate_grp_vars(const char* group,
                              void (*cbk)(void* p, str* g, str* n,
                                          counter_handle_t h),
                              void* p)
{
    struct counter_record* r;
    struct grp_record* g;
    str grp;

    grp.s = (char*)group;
    grp.len = strlen(group);
    g = grp_hash_lookup(&grp);
    if (g)
        for (r = g->first; r; r = r->grp_next)
            cbk(p, &r->group, &r->name, r->h);
}
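
/* Iteration sketch (illustrative): dump every counter of one group, e.g. from
 * an RPC or debugging helper. "dump_var" and "dump_grp" are made-up names and
 * LM_INFO() stands in for whatever logging macro the caller normally uses.
 *
 *   static void dump_var(void* p, str* g, str* n, counter_handle_t h)
 *   {
 *       LM_INFO("%.*s.%.*s = %lld\n", g->len, g->s, n->len, n->s,
 *               (long long)counter_get_val(h));
 *   }
 *
 *   static void dump_grp(const char* grp)
 *   {
 *       counter_iterate_grp_vars(grp, dump_var, 0);
 *   }
 */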

/* vi: set ts=4 sw=4 tw=79:ai:cindent: */