counters.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812
  1. /*
  2. * Copyright (C) 2010 iptelorg GmbH
  3. *
  4. * Permission to use, copy, modify, and distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. /**
  17. * @brief counters/stats
  18. * @file
  19. * @ingroup: core
  20. */
  21. /*
  22. * History:
  23. * --------
  24. * 2010-08-06 initial version (andrei)
  25. * 2010-08-24 counters can be used (inc,add) before prefork_init (andrei)
  26. */
  27. #include "counters.h"
  28. #include "str_hash.h"
  29. #include "str.h"
  30. #include "compiler_opt.h"
  31. #include "mem/mem.h"
  32. #include "mem/shm_mem.h"
  33. #define CNT_HASH_SIZE 64
  34. /* group hash size (rpc use) */
  35. #define GRP_HASH_SIZE 16
  36. /* initial sorted groups array size (rpc use) */
  37. #define GRP_SORTED_SIZE 16
/* initial counter id to record array size */
  39. #define CNT_ID2RECORD_SIZE 64
  40. #define CACHELINE_PAD 128
  41. /* leave space for one flag */
  42. #define MAX_COUNTER_ID 32767
  43. /* size (number of entries) of the temporary array used for keeping stats
  44. pre-prefork init. Note: if more counters are registered then this size,
  45. the array will be dynamically increased (doubled each time). The value
  46. here is meant only to optimize startup/memory fragmentation. */
  47. #define PREINIT_CNTS_VALS_SIZE 128
  48. struct counter_record {
  49. str group;
  50. str name;
  51. counter_handle_t h;
  52. unsigned short flags;
  53. void* cbk_param;
  54. counter_cbk_f cbk;
  55. struct counter_record* grp_next; /* next in group */
  56. str doc;
  57. };
  58. struct grp_record {
  59. str group;
  60. struct counter_record* first;
  61. };
  62. /** hash table mapping a counter name to an id */
  63. static struct str_hash_table cnts_hash_table;
/** array mapping a counter id to its record */
  65. struct counter_record** cnt_id2record;
  66. static int cnt_id2record_size;
  67. /** hash table for groups (maps a group name to a counter list) */
  68. static struct str_hash_table grp_hash_table;
  69. /** array of groups, sorted */
  70. static struct grp_record** grp_sorted;
  71. static int grp_sorted_max_size;
  72. static int grp_sorted_crt_size;
  73. static int grp_no; /* number of groups */
  74. /** counters array. a[proc_no][counter_id] =>
  75. _cnst_vals[proc_no*cnts_no+counter_id] */
  76. counter_array_t* _cnts_vals = 0;
  77. int _cnts_row_len; /* number of elements per row */
  78. static int cnts_no; /* number of registered counters */
  79. static int cnts_max_rows; /* set to 0 if not yet fully init */
  80. int counters_initialized(void)
  81. {
  82. if (unlikely(_cnts_vals == 0)) {
  83. /* not init yet */
  84. return 0;
  85. }
  86. return 1;
  87. }
  88. /** init the coutner hash table(s).
  89. * @return 0 on success, -1 on error.
  90. */
  91. int init_counters()
  92. {
  93. if (str_hash_alloc(&cnts_hash_table, CNT_HASH_SIZE) < 0)
  94. goto error;
  95. str_hash_init(&cnts_hash_table);
  96. if (str_hash_alloc(&grp_hash_table, GRP_HASH_SIZE) < 0)
  97. goto error;
  98. str_hash_init(&grp_hash_table);
  99. cnts_no = 1; /* start at 1 (0 used only for invalid counters) */
  100. cnts_max_rows = 0; /* 0 initially, !=0 after full init
  101. (counters_prefork_init()) */
  102. grp_no = 0;
  103. cnt_id2record_size = CNT_ID2RECORD_SIZE;
  104. cnt_id2record = pkg_malloc(sizeof(*cnt_id2record) * cnt_id2record_size);
  105. if (cnt_id2record == 0)
  106. goto error;
  107. memset(cnt_id2record, 0, sizeof(*cnt_id2record) * cnt_id2record_size);
  108. grp_sorted_max_size = GRP_SORTED_SIZE;
  109. grp_sorted_crt_size = 0;
  110. grp_sorted = pkg_malloc(sizeof(*grp_sorted) * grp_sorted_max_size);
  111. if (grp_sorted == 0)
  112. goto error;
  113. memset(grp_sorted, 0, sizeof(*grp_sorted) * grp_sorted_max_size);
  114. return 0;
  115. error:
  116. destroy_counters();
  117. return -1;
  118. }
  119. void destroy_counters()
  120. {
  121. int r;
  122. struct str_hash_entry* e;
  123. struct str_hash_entry* bak;
  124. if (_cnts_vals) {
  125. if (cnts_max_rows)
  126. /* fully init => it is in shm */
  127. shm_free(_cnts_vals);
  128. else
  129. /* partially init (before prefork) => pkg */
  130. pkg_free(_cnts_vals);
  131. _cnts_vals = 0;
  132. }
  133. if (cnts_hash_table.table) {
  134. for (r=0; r< cnts_hash_table.size; r++) {
  135. clist_foreach_safe(&cnts_hash_table.table[r], e, bak, next) {
  136. pkg_free(e);
  137. }
  138. }
  139. pkg_free(cnts_hash_table.table);
  140. }
  141. if (grp_hash_table.table) {
  142. for (r=0; r< grp_hash_table.size; r++) {
  143. clist_foreach_safe(&grp_hash_table.table[r], e, bak, next) {
  144. pkg_free(e);
  145. }
  146. }
  147. pkg_free(grp_hash_table.table);
  148. }
  149. if (cnt_id2record)
  150. pkg_free(cnt_id2record);
  151. if (grp_sorted)
  152. pkg_free(grp_sorted);
  153. cnts_hash_table.table = 0;
  154. cnts_hash_table.size = 0;
  155. cnt_id2record = 0;
  156. grp_sorted = 0;
  157. grp_hash_table.table = 0;
  158. grp_hash_table.size = 0;
  159. grp_sorted_crt_size = 0;
  160. grp_sorted_max_size = 0;
  161. cnts_no = 0;
  162. _cnts_row_len = 0;
  163. cnts_max_rows = 0;
  164. grp_no = 0;
  165. }
  166. /** complete counter intialization, when the number of processes is known.
  167. * shm must be available.
  168. * @return 0 on success, < 0 on error
  169. */
  170. int counters_prefork_init(int max_process_no)
  171. {
  172. counter_array_t* old;
  173. int size, row_size;
  174. counter_handle_t h;
  175. /* round cnts_no so that cnts_no * sizeof(counter) it's a CACHELINE_PAD
  176. multiple */
  177. /* round-up row_size to a CACHELINE_PAD multiple if needed */
  178. row_size = ((sizeof(*_cnts_vals) * cnts_no - 1) / CACHELINE_PAD + 1) *
  179. CACHELINE_PAD;
  180. /* round-up the resulted row_siue to a sizeof(*_cnts_vals) multiple */
  181. row_size = ((row_size -1) / sizeof(*_cnts_vals) + 1) *
  182. sizeof(*_cnts_vals);
  183. /* get updated cnts_no (row length) */
  184. _cnts_row_len = row_size / sizeof(*_cnts_vals);
  185. size = max_process_no * row_size;
  186. /* replace the temporary pre-fork pkg array (with only 1 row) with
  187. the final shm version (with max_process_no rows) */
  188. old = _cnts_vals;
  189. _cnts_vals = shm_malloc(size);
  190. if (_cnts_vals == 0)
  191. return -1;
  192. memset(_cnts_vals, 0, size);
  193. cnts_max_rows = max_process_no;
  194. /* copy prefork values into the newly shm array */
  195. if (old) {
  196. for (h.id = 0; h.id < cnts_no; h.id++)
  197. counter_pprocess_val(process_no, h) = old[h.id].v;
  198. pkg_free(old);
  199. }
  200. return 0;
  201. }
/** adds a new group to the group hash table (no checks, internal version).
 * A single pkg allocation holds the hash entry, the grp_record (overlaid
 * on the entry's u.data) and a 0-terminated copy of the group name.
 * The group is also inserted, sorted by name, into the grp_sorted array
 * (grown 2x when full).
 * Note: group->s is assumed to be 0-terminated (len + 1 bytes copied).
 * @return pointer to the new group record on success, 0 on error.
 */
static struct grp_record* grp_hash_add(str* group)
{
	struct str_hash_entry* g;
	struct grp_record* grp_rec;
	struct grp_record** r;

	/* grp_rec copied at &g->u.data */
	g = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(g->u.data) +
			sizeof(*grp_rec) + group->len + 1);
	if (g == 0)
		goto error;
	grp_rec = (struct grp_record*)&g->u.data[0];
	/* the name copy lives immediately after the record */
	grp_rec->group.s = (char*)(grp_rec + 1);
	grp_rec->group.len = group->len;
	grp_rec->first = 0;
	memcpy(grp_rec->group.s, group->s, group->len + 1);
	g->key = grp_rec->group;
	g->flags = 0;
	/* insert group into the sorted group array */
	if (grp_sorted_max_size <= grp_sorted_crt_size) {
		/* must increase the array (double it) */
		r = pkg_realloc(grp_sorted, 2 * grp_sorted_max_size *
						sizeof(*grp_sorted));
		if (r == 0)
			goto error;
		grp_sorted = r;
		grp_sorted_max_size *= 2;
		/* zero only the newly allocated half */
		memset(&grp_sorted[grp_sorted_crt_size], 0,
				(grp_sorted_max_size - grp_sorted_crt_size) *
				sizeof(*grp_sorted));
	}
	/* find the insertion point (first entry sorting after the new name) */
	for (r = grp_sorted; r < (grp_sorted + grp_sorted_crt_size); r++)
		if (strcmp(grp_rec->group.s, (*r)->group.s) < 0)
			break;
	/* shift the tail one slot right to make room (overlapping => memmove) */
	if (r != (grp_sorted + grp_sorted_crt_size))
		memmove(r + 1, r, (int)(long)((char*)(grp_sorted + grp_sorted_crt_size) -
						(char*)r));
	grp_sorted_crt_size++;
	*r = grp_rec;
	/* insert into the hash only on success */
	str_hash_add(&grp_hash_table, g);
	return grp_rec;
error:
	/* g is 0 only when the initial pkg_malloc failed */
	if (g)
		pkg_free(g);
	return 0;
}
  251. /** lookup a group into the group hash (internal version).
  252. * @return pointer to grp_record on success, 0 on failure (not found).
  253. */
  254. static struct grp_record* grp_hash_lookup(str* group)
  255. {
  256. struct str_hash_entry* e;
  257. e = str_hash_get(&grp_hash_table, group->s, group->len);
  258. return e?(struct grp_record*)&e->u.data[0]:0;
  259. }
  260. /** lookup a group and if not found create a new group record.
  261. * @return pointer to grp_record on succes, 0 on failure ( not found and
  262. * failed to create new group record).
  263. */
  264. static struct grp_record* grp_hash_get_create(str* group)
  265. {
  266. struct grp_record* ret;
  267. ret = grp_hash_lookup(group);
  268. if (ret)
  269. return ret;
  270. return grp_hash_add(group);
  271. }
/** adds a new counter to the hash table (no checks, internal version).
 * Creates (or reuses) the counter's group, builds a single pkg allocation
 * holding the hash entry, the counter_record (overlaid on the entry's
 * u.data) and 0-terminated copies of group, name and doc, assigns the
 * next free counter id and links the record into the id->record array,
 * the name hash and the group's (name-sorted) list.
 * Must be called only before counters_prefork_init() (it may grow the
 * temporary pkg value row).
 * Note: on failure after the id was assigned, cnts_no is not rolled back.
 * @return pointer to the new record on success, 0 on error.
 */
static struct counter_record* cnt_hash_add(
					str* group, str* name,
					int flags, counter_cbk_f cbk,
					void* param, const char* doc)
{
	struct str_hash_entry* e;
	struct counter_record* cnt_rec;
	struct grp_record* grp_rec;
	struct counter_record** p;
	counter_array_t* v;
	int doc_len;
	int n;

	e = 0;
	if (cnts_no >= MAX_COUNTER_ID)
		/* too many counters */
		goto error;
	grp_rec = grp_hash_get_create(group);
	if (grp_rec == 0)
		/* non existing group and no new one could be created */
		goto error;
	doc_len = doc ? strlen(doc) : 0;
	/* cnt_rec copied at &e->u.data[0]; the three strings follow it */
	e = pkg_malloc(sizeof(struct str_hash_entry) - sizeof(e->u.data) +
			sizeof(*cnt_rec) + name->len + 1 + group->len + 1 +
			doc_len + 1);
	if (e == 0)
		goto error;
	cnt_rec = (struct counter_record*)&e->u.data[0];
	/* string layout after the record: group, name, doc (each 0-term.) */
	cnt_rec->group.s = (char*)(cnt_rec + 1);
	cnt_rec->group.len = group->len;
	cnt_rec->name.s = cnt_rec->group.s + group->len + 1;
	cnt_rec->name.len = name->len;
	cnt_rec->doc.s = cnt_rec->name.s + name->len + 1;
	cnt_rec->doc.len = doc_len;
	cnt_rec->h.id = cnts_no++; /* allocate the next free counter id */
	cnt_rec->flags = flags;
	cnt_rec->cbk_param = param;
	cnt_rec->cbk = cbk;
	cnt_rec->grp_next = 0;
	/* group->s and name->s are assumed 0-terminated (len + 1 copied) */
	memcpy(cnt_rec->group.s, group->s, group->len + 1);
	memcpy(cnt_rec->name.s, name->s, name->len + 1);
	if (doc)
		memcpy(cnt_rec->doc.s, doc, doc_len + 1);
	else
		cnt_rec->doc.s[0] = 0;
	e->key = cnt_rec->name;
	e->flags = 0;
	/* check to see if it fits in the prefork tmp. vals array.
	   This array contains only one "row", is allocated in pkg and
	   is used only until counters_prefork_init() (after that the
	   array is replaced with a shm version with all the needed rows).
	*/
	if (cnt_rec->h.id >= _cnts_row_len || _cnts_vals == 0) {
		/* array too small or not yet allocated => reallocate/allocate it
		   (min size PREINIT_CNTS_VALS_SIZE, then doubling, capped at
		   MAX_COUNTER_ID + 1)
		*/
		n = (cnt_rec->h.id < PREINIT_CNTS_VALS_SIZE) ?
			PREINIT_CNTS_VALS_SIZE :
			((2 * (cnt_rec->h.id + (cnt_rec->h.id == 0)) < MAX_COUNTER_ID) ?
				(2 * (cnt_rec->h.id + (cnt_rec->h.id == 0))) :
				MAX_COUNTER_ID + 1);
		v = pkg_realloc(_cnts_vals, n * sizeof(*_cnts_vals));
		if (v == 0)
			/* realloc/malloc error */
			goto error;
		_cnts_vals = v;
		/* zero only the newly allocated tail */
		memset(&_cnts_vals[_cnts_row_len], 0,
				(n - _cnts_row_len) * sizeof(*_cnts_vals));
		_cnts_row_len = n; /* record new length */
	}
	/* add a pointer to it in the id->record array, doubling the array
	   when the new id does not fit */
	if (cnt_id2record_size <= cnt_rec->h.id) {
		/* must increase the array */
		p = pkg_realloc(cnt_id2record,
						2 * cnt_id2record_size * sizeof(*cnt_id2record));
		if (p == 0)
			goto error;
		cnt_id2record = p;
		cnt_id2record_size *= 2;
		memset(&cnt_id2record[cnt_rec->h.id], 0,
				(cnt_id2record_size - cnt_rec->h.id) * sizeof(*cnt_id2record));
	}
	cnt_id2record[cnt_rec->h.id] = cnt_rec;
	/* add into the hash */
	str_hash_add(&cnts_hash_table, e);
	/* insert it sorted (by name) in the per group list */
	for (p = &grp_rec->first; *p; p = &((*p)->grp_next))
		if (strcmp(cnt_rec->name.s, (*p)->name.s) < 0)
			break;
	cnt_rec->grp_next = *p;
	*p = cnt_rec;
	return cnt_rec;
error:
	if (e)
		pkg_free(e);
	return 0;
}
/** lookup a (group, name) pair in the cnts hash (internal version).
 * @param group - counter group name. If "" (len == 0) the first matching
 *                counter with the given name will be returned (k compat).
 * @param name - counter name.
 * @return pointer to counter_record on success, 0 on failure (not found).
 */
static struct counter_record* cnt_hash_lookup(str* group, str* name)
{
	struct str_hash_entry* e;
	struct str_hash_entry* first;
	struct counter_record* cnt_rec;

	e = str_hash_get(&cnts_hash_table, name->s, name->len);
	/* fast path: the entry returned by the hash already matched the name,
	   so only the group has to be checked here */
	if (likely(e)) {
		cnt_rec = (struct counter_record*)&e->u.data[0];
		if (likely( group->len == 0 ||
					(cnt_rec->group.len == group->len &&
					memcmp(cnt_rec->group.s, group->s, group->len) == 0)))
			return cnt_rec;
	} else
		return 0;
	/* slow path: counters with the same name may exist in different
	   groups => walk the whole bucket ring looking for one whose group
	   also matches.
	   NOTE(review): the ring walk also passes through the bucket head
	   sentinel; presumably it never matches — confirm against the
	   str_hash implementation. */
	first = e;
	do {
		cnt_rec = (struct counter_record*)&e->u.data[0];
		if (cnt_rec->group.len == group->len &&
				cnt_rec->name.len == name->len &&
				memcmp(cnt_rec->group.s, group->s, group->len) == 0 &&
				memcmp(cnt_rec->name.s, name->s, name->len) == 0)
			/* found */
			return cnt_rec;
		e = e->next;
	} while(e != first);
	return 0;
}
  408. /** lookup a counter and if not found create a new counter record.
  409. * @return pointer to counter_record on succes, 0 on failure ( not found and
  410. * failed to create new group record).
  411. */
  412. static struct counter_record* cnt_hash_get_create(
  413. str* group, str* name,
  414. int flags,
  415. counter_cbk_f cbk,
  416. void* param, const char* doc)
  417. {
  418. struct counter_record* ret;
  419. ret = cnt_hash_lookup(group, name);
  420. if (ret)
  421. return ret;
  422. return cnt_hash_add(group, name, flags, cbk, param, doc);
  423. }
  424. /** register a new counter.
  425. * Can be called only before forking (e.g. from mod_init() or
  426. * init_child(PROC_INIT)).
  427. * @param handle - result parameter, it will be filled with the counter
  428. * handle on success (can be null if not needed).
  429. * @param group - group name
  430. * @param name - counter name (group.name must be unique).
  431. * @param flags - counter flags: one of CNT_F_*.
  432. * @param cbk - read callback function (if set it will be called each time
  433. * someone will call counter_get()).
  434. * @param cbk_param - callback param.
  435. * @param doc - description/documentation string.
  436. * @param reg_flags - register flags: 1 - don't fail if counter already
  437. * registered (act like counter_lookup(handle, group, name).
  438. * @return 0 on succes, < 0 on error (-1 not init or malloc error, -2 already
  439. * registered (and register_flags & 1 == 0).
  440. */
  441. int counter_register( counter_handle_t* handle, const char* group,
  442. const char* name, int flags,
  443. counter_cbk_f cbk, void* cbk_param,
  444. const char* doc,
  445. int reg_flags)
  446. {
  447. str grp;
  448. str n;
  449. struct counter_record* cnt_rec;
  450. if (unlikely(cnts_max_rows)) {
  451. /* too late */
  452. BUG("late attempt to register counter: %s.%s\n", group, name);
  453. goto error;
  454. }
  455. n.s = (char*)name;
  456. n.len = strlen(name);
  457. if (unlikely(group == 0 || *group == 0)) {
  458. BUG("attempt to register counter %s without a group\n", name);
  459. goto error;
  460. }
  461. grp.s = (char*)group;
  462. grp.len = strlen(group);
  463. cnt_rec = cnt_hash_lookup(&grp, &n);
  464. if (cnt_rec) {
  465. if (reg_flags & 1)
  466. goto found;
  467. else {
  468. if (handle) handle->id = 0;
  469. return -2;
  470. }
  471. } else
  472. cnt_rec = cnt_hash_get_create(&grp, &n, flags, cbk, cbk_param, doc);
  473. if (unlikely(cnt_rec == 0))
  474. goto error;
  475. found:
  476. if (handle) *handle = cnt_rec->h;
  477. return 0;
  478. error:
  479. if (handle) handle->id = 0;
  480. return -1;
  481. }
  482. /** fill in the handle of an existing counter (str parameters).
  483. * @param handle - filled with the corresp. handle on success.
  484. * @param group - counter group name. If "" the first matching
  485. * counter with the given name will be returned
  486. * (k compat).
  487. * @param name - counter name.
  488. * @return 0 on success, < 0 on error
  489. */
  490. int counter_lookup_str(counter_handle_t* handle, str* group, str* name)
  491. {
  492. struct counter_record* cnt_rec;
  493. cnt_rec = cnt_hash_lookup(group, name);
  494. if (likely(cnt_rec)) {
  495. *handle = cnt_rec->h;
  496. return 0;
  497. }
  498. handle->id = 0;
  499. return -1;
  500. }
  501. /** fill in the handle of an existing counter (asciiz parameters).
  502. * @param handle - filled with the corresp. handle on success.
  503. * @param group - counter group name. If 0 or "" the first matching
  504. * counter with the given name will be returned
  505. * (k compat).
  506. * @param name - counter name.
  507. * @return 0 on success, < 0 on error
  508. */
  509. int counter_lookup(counter_handle_t* handle,
  510. const char* group, const char* name)
  511. {
  512. str grp;
  513. str n;
  514. n.s = (char*)name;
  515. n.len = strlen(name);
  516. grp.s = (char*)group;
  517. grp.len = group?strlen(group):0;
  518. return counter_lookup_str(handle, &grp, &n);
  519. }
  520. /** register all the counters declared in a null-terminated array.
  521. * @param group - counters group.
  522. * @param defs - null terminated array containing counters definitions.
  523. * @return 0 on success, < 0 on error ( - (counter_number+1))
  524. */
  525. int counter_register_array(const char* group, counter_def_t* defs)
  526. {
  527. int r;
  528. for (r=0; defs[r].name; r++)
  529. if (counter_register( defs[r].handle,
  530. group, defs[r].name, defs[r].flags,
  531. defs[r].get_cbk, defs[r].get_cbk_param,
  532. defs[r].descr, 0) <0)
  533. return -(r+1); /* return - (idx of bad counter + 1) */
  534. return 0;
  535. }
  536. /** get the value of the counter, bypassing callbacks.
  537. * @param handle - counter handle obtained using counter_lookup() or
  538. * counter_register().
  539. * @return counter value.
  540. */
  541. counter_val_t counter_get_raw_val(counter_handle_t handle)
  542. {
  543. int r;
  544. counter_val_t ret;
  545. if (unlikely(_cnts_vals == 0)) {
  546. /* not init yet */
  547. BUG("counters not fully initialized yet\n");
  548. return 0;
  549. }
  550. if (unlikely(handle.id >= cnts_no || (short)handle.id < 0)) {
  551. BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
  552. return 0;
  553. }
  554. ret = 0;
  555. for (r = 0; r < cnts_max_rows; r++)
  556. ret += counter_pprocess_val(r, handle);
  557. return ret;
  558. }
  559. /** get the value of the counter, using the callbacks (if defined).
  560. * @param handle - counter handle obtained using counter_lookup() or
  561. * counter_register().
  562. * @return counter value. */
  563. counter_val_t counter_get_val(counter_handle_t handle)
  564. {
  565. struct counter_record* cnt_rec;
  566. if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
  567. /* not init yet */
  568. BUG("counters not fully initialized yet\n");
  569. return 0;
  570. }
  571. cnt_rec = cnt_id2record[handle.id];
  572. if (unlikely(cnt_rec->cbk))
  573. return cnt_rec->cbk(handle, cnt_rec->cbk_param);
  574. return counter_get_raw_val(handle);
  575. }
  576. /** reset the counter.
  577. * Reset a counter, unless it has the CNT_F_NO_RESET flag set.
  578. * @param handle - counter handle obtained using counter_lookup() or
  579. * counter_register().
  580. * Note: it's racy.
  581. */
  582. void counter_reset(counter_handle_t handle)
  583. {
  584. int r;
  585. if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
  586. /* not init yet */
  587. BUG("counters not fully initialized yet\n");
  588. return;
  589. }
  590. if (unlikely(handle.id >= cnts_no)) {
  591. BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
  592. return;
  593. }
  594. if (unlikely(cnt_id2record[handle.id]->flags & CNT_F_NO_RESET))
  595. return;
  596. for (r=0; r < cnts_max_rows; r++)
  597. counter_pprocess_val(r, handle) = 0;
  598. return;
  599. }
  600. /** return the name for counter handle.
  601. * @param handle - counter handle obtained using counter_lookup() or
  602. * counter_register().
  603. * @return asciiz pointer on success, 0 on error.
  604. */
  605. char* counter_get_name(counter_handle_t handle)
  606. {
  607. if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
  608. /* not init yet */
  609. BUG("counters not fully initialized yet\n");
  610. goto error;
  611. }
  612. if (unlikely(handle.id >= cnts_no)) {
  613. BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
  614. goto error;
  615. }
  616. return cnt_id2record[handle.id]->name.s;
  617. error:
  618. return 0;
  619. }
  620. /** return the group name for counter handle.
  621. * @param handle - counter handle obtained using counter_lookup() or
  622. * counter_register().
  623. * @return asciiz pointer on success, 0 on error.
  624. */
  625. char* counter_get_group(counter_handle_t handle)
  626. {
  627. if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
  628. /* not init yet */
  629. BUG("counters not fully initialized yet\n");
  630. goto error;
  631. }
  632. if (unlikely(handle.id >= cnts_no)) {
  633. BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
  634. goto error;
  635. }
  636. return cnt_id2record[handle.id]->group.s;
  637. error:
  638. return 0;
  639. }
  640. /** return the description (doc) string for a given counter.
  641. * @param handle - counter handle obtained using counter_lookup() or
  642. * counter_register().
  643. * @return asciiz pointer on success, 0 on error.
  644. */
  645. char* counter_get_doc(counter_handle_t handle)
  646. {
  647. if (unlikely(_cnts_vals == 0 || cnt_id2record == 0)) {
  648. /* not init yet */
  649. BUG("counters not fully initialized yet\n");
  650. goto error;
  651. }
  652. if (unlikely(handle.id >= cnts_no)) {
  653. BUG("invalid counter id %d (max %d)\n", handle.id, cnts_no - 1);
  654. goto error;
  655. }
  656. return cnt_id2record[handle.id]->doc.s;
  657. error:
  658. return 0;
  659. }
  660. /** iterate on all the counter group names.
  661. * @param cbk - pointer to a callback function that will be called for each
  662. * group name.
  663. * @param p - parameter that will be passed to the callback function
  664. * (along the group name).
  665. */
  666. void counter_iterate_grp_names(void (*cbk)(void* p, str* grp_name), void* p)
  667. {
  668. int r;
  669. for (r=0; r < grp_sorted_crt_size; r++)
  670. cbk(p, &grp_sorted[r]->group);
  671. }
  672. /** iterate on all the variable names in a specified group.
  673. * @param group - group name.
  674. * @param cbk - pointer to a callback function that will be called for each
  675. * variable name.
  676. * @param p - parameter that will be passed to the callback function
  677. * (along the variable name).
  678. */
  679. void counter_iterate_grp_var_names( const char* group,
  680. void (*cbk)(void* p, str* var_name),
  681. void* p)
  682. {
  683. struct counter_record* r;
  684. struct grp_record* g;
  685. str grp;
  686. grp.s = (char*)group;
  687. grp.len = strlen(group);
  688. g = grp_hash_lookup(&grp);
  689. if (g)
  690. for (r = g->first; r; r = r->grp_next)
  691. cbk(p, &r->name);
  692. }
  693. /** iterate on all the variable names and handles in a specified group.
  694. * @param group - group name.
  695. * @param cbk - pointer to a callback function that will be called for each
  696. * [variable name, variable handle] pair.
  697. * @param p - parameter that will be passed to the callback function
  698. * (along the group name, variable name and variable handle).
  699. */
  700. void counter_iterate_grp_vars(const char* group,
  701. void (*cbk)(void* p, str* g, str* n,
  702. counter_handle_t h),
  703. void *p)
  704. {
  705. struct counter_record* r;
  706. struct grp_record* g;
  707. str grp;
  708. grp.s = (char*)group;
  709. grp.len = strlen(group);
  710. g = grp_hash_lookup(&grp);
  711. if (g)
  712. for (r = g->first; r; r = r->grp_next)
  713. cbk(p, &r->group, &r->name, r->h);
  714. }
  715. /* vi: set ts=4 sw=4 tw=79:ai:cindent: */