@@ -98,6 +98,7 @@
  * 2003-06-27  timers are not unlinked if timerlist is 0 (andrei)
  * 2004-02-13  t->is_invite, t->local, t->noisy_ctimer replaced;
  *             timer_link.payload removed (bogdan)
+ * 2005-10-03  almost completely rewritten to use the new timers (andrei)
  */

 #include "defs.h"
@@ -116,125 +117,108 @@
 #include "../../config.h"
 #include "../../parser/parser_f.h"
 #include "../../ut.h"
+#include "../../timer_ticks.h"
 #include "t_funcs.h"
 #include "t_reply.h"
 #include "t_cancel.h"


-static struct timer_table *timertable=0;
-static struct timer detached_timer; /* just to have a value to compare with*/

-#define DETACHED_LIST (&detached_timer)
+int noisy_ctimer=0;

-#define is_in_timer_list2(_tl)  ( (_tl)->timer_list && \
-									((_tl)->timer_list!=DETACHED_LIST) )
+struct msgid_var user_fr_timeout;
+struct msgid_var user_fr_inv_timeout;

-int noisy_ctimer=0;

+/* default values of timeouts for all the timer lists */
+ticks_t fr_timeout = FR_TIME_OUT;
+ticks_t fr_inv_timeout = INV_FR_TIME_OUT;
+ticks_t wait_timeout = WT_TIME_OUT;
+ticks_t delete_timeout = DEL_TIME_OUT;
+ticks_t rt_t1_timeout = RETR_T1;
+ticks_t rt_t2_timeout = RETR_T2;

-int timer_group[NR_OF_TIMER_LISTS] =
+/* fix timer values to ticks */
+int tm_init_timers()
 {
-	TG_FR, TG_FR,
-	TG_WT,
-	TG_DEL,
-	TG_RT, TG_RT, TG_RT, TG_RT
-};
-
-/* default values of timeouts for all the timer list
-   (see timer.h for enumeration of timer lists)
-*/
-unsigned int timer_id2timeout[NR_OF_TIMER_LISTS] = {
-	FR_TIME_OUT,       /* FR_TIMER_LIST */
-	INV_FR_TIME_OUT,   /* FR_INV_TIMER_LIST */
-	WT_TIME_OUT,       /* WT_TIMER_LIST */
-	DEL_TIME_OUT,      /* DELETE_LIST */
-	RETR_T1,           /* RT_T1_TO_1 */
-	RETR_T1 << 1,      /* RT_T1_TO_2 */
-	RETR_T1 << 2,      /* RT_T1_TO_3 */
-	RETR_T2            /* RT_T2 */
-	                   /* NR_OF_TIMER_LISTS */
-};
+	fr_timeout=MS_TO_TICKS(fr_timeout);
+	fr_inv_timeout=MS_TO_TICKS(fr_inv_timeout);
+	wait_timeout=MS_TO_TICKS(wait_timeout);
+	delete_timeout=MS_TO_TICKS(delete_timeout);
+	rt_t1_timeout=MS_TO_TICKS(rt_t1_timeout);
+	rt_t2_timeout=MS_TO_TICKS(rt_t2_timeout);
+	/* fix 0 values to 1 tick (minimum possible wait time) */
+	if (fr_timeout==0) fr_timeout=1;
+	if (fr_inv_timeout==0) fr_inv_timeout=1;
+	if (wait_timeout==0) wait_timeout=1;
+	if (delete_timeout==0) delete_timeout=1;
+	if (rt_t2_timeout==0) rt_t2_timeout=1;
+	if (rt_t1_timeout==0) rt_t1_timeout=1;
+
+	memset(&user_fr_timeout, 0, sizeof(user_fr_timeout));
+	memset(&user_fr_inv_timeout, 0, sizeof(user_fr_inv_timeout));
+
+	DBG("tm: tm_init_timers: fr=%d fr_inv=%d wait=%d delete=%d t1=%d t2=%d\n",
+			fr_timeout, fr_inv_timeout, wait_timeout, delete_timeout,
+			rt_t1_timeout, rt_t2_timeout);
+	return 0;
+}
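
The conversion above is the whole job of the new tm_init_timers(): the configured timeout values are turned into ticks once at startup, and anything that truncates to 0 is bumped to 1 tick, the minimum possible wait time, since a 0 interval means "disabled" to the new timer code. A compilable sketch of the same idea; TICKS_HZ and ms_to_ticks() are illustrative stand-ins, not the actual timer_ticks.h names:

    /* Hypothetical sketch of the ms -> ticks conversion and the 1-tick
     * clamp; TICKS_HZ and ms_to_ticks() are invented names. */
    #include <stdio.h>

    typedef unsigned int ticks_t;
    enum { TICKS_HZ = 16 };                 /* assumed tick rate: 16/s */

    static ticks_t ms_to_ticks(ticks_t ms)
    {
        return (ms * TICKS_HZ) / 1000;      /* truncates: 30 ms -> 0 ticks */
    }

    int main(void)
    {
        ticks_t fr = ms_to_ticks(30000);    /* 30 s final response timer */
        ticks_t t1 = ms_to_ticks(30);       /* sub-tick value truncates to 0 */
        if (t1 == 0) t1 = 1;                /* clamp: 0 would mean disabled */
        printf("fr=%u ticks, t1=%u ticks\n", fr, t1);
        return 0;
    }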

/******************** handlers ***************************/


-static void unlink_timers( struct cell *t );

-static void delete_cell( struct cell *p_cell, int unlock )
+inline static void cleanup_localcancel_timers( struct cell *t )
 {
+	int i;
+	for (i=0; i<t->nr_of_outgoings; i++ )
+		stop_rb_timers(&t->uac[i].local_cancel);
+}

-#ifdef EXTRA_DEBUG
+
+
+inline static void unlink_timers( struct cell *t )
+{
 	int i;
-#endif

+	stop_rb_timers(&t->uas.response);
+	for (i=0; i<t->nr_of_outgoings; i++)
+		stop_rb_timers(&t->uac[i].request);
+	cleanup_localcancel_timers(t);
+}
+
+
+
+/* returns the number of ticks before retrying the delete, or 0 if the
+ * delete was successful */
+inline static ticks_t delete_cell( struct cell *p_cell, int unlock )
+{
 	/* there may still be FR/RETR timers, which have been reset
 	   (i.e., time_out==TIMER_DELETED) but are stilled linked to
 	   timer lists and must be removed from there before the
 	   structures are released
 	*/
 	unlink_timers( p_cell );
-
-#ifdef EXTRA_DEBUG
-
-	if (is_in_timer_list2(& p_cell->wait_tl )) {
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" still on WAIT, timeout=%d\n", p_cell, p_cell->wait_tl.time_out);
-		abort();
-	}
-	if (is_in_timer_list2(& p_cell->uas.response.retr_timer )) {
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" still on RETR (rep), timeout=%d\n",
-			p_cell, p_cell->uas.response.retr_timer.time_out);
-		abort();
-	}
-	if (is_in_timer_list2(& p_cell->uas.response.fr_timer )) {
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" still on FR (rep), timeout=%d\n", p_cell,
-			p_cell->uas.response.fr_timer.time_out);
-		abort();
-	}
-	for (i=0; i<p_cell->nr_of_outgoings; i++) {
-		if (is_in_timer_list2(& p_cell->uac[i].request.retr_timer)) {
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-				" still on RETR (req %d), timeout %d\n", p_cell, i,
-				p_cell->uac[i].request.retr_timer.time_out);
-			abort();
-		}
-		if (is_in_timer_list2(& p_cell->uac[i].request.fr_timer)) {
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-				" still on FR (req %d), timeout %d\n", p_cell, i,
-				p_cell->uac[i].request.fr_timer.time_out);
-			abort();
-		}
-		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.retr_timer)) {
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-				" still on RETR/cancel (req %d), timeout %d\n", p_cell, i,
-				p_cell->uac[i].request.retr_timer.time_out);
-			abort();
-		}
-		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.fr_timer)) {
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-				" still on FR/cancel (req %d), timeout %d\n", p_cell, i,
-				p_cell->uac[i].request.fr_timer.time_out);
-			abort();
-		}
-	}
-	/* reset_retr_timers( hash__XX_table, p_cell ); */
-#endif
 	/* still in use ... don't delete */
 	if ( IS_REFFED_UNSAFE(p_cell) ) {
 		if (unlock) UNLOCK_HASH(p_cell->hash_index);
-		DBG("DEBUG: delete_cell %p: can't delete -- still reffed\n",
-			p_cell);
-		/* it's added to del list for future del */
-		set_timer( &(p_cell->dele_tl), DELETE_LIST, 0 );
+		DBG("DEBUG: delete_cell %p: can't delete -- still reffed (%d)\n",
+				p_cell, p_cell->ref_count);
+		/* delay the delete */
+		/* TODO: change refcnts and delete on refcnt==0 */
+		return delete_timeout;
 	} else {
 		if (unlock) UNLOCK_HASH(p_cell->hash_index);
+#ifdef EXTRA_DEBUG
 		DBG("DEBUG: delete transaction %p\n", p_cell );
+#endif
 		free_cell( p_cell );
+		return 0;
 	}
 }
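
The new return type encodes the timer convention used throughout this rewrite: a handler returns 0 when it is done and the timer should stay stopped, or a tick count after which it wants to run again. That is how delete_cell() implements "retry the delete in delete_timeout ticks while the cell is still referenced". A toy scheduler showing just that contract (everything here is illustrative, not ser's timer core):

    /* Minimal model of "handler returns 0 to stop, or ticks to re-arm". */
    #include <stdio.h>

    typedef unsigned int ticks_t;
    typedef ticks_t (*timer_f)(ticks_t now, void *data);

    static void run(timer_f h, void *data, ticks_t now, int max_fires)
    {
        ticks_t next;
        while (max_fires-- > 0 && (next = h(now, data)) != 0) {
            now += next;                    /* pretend we slept 'next' ticks */
            printf("re-armed for t=%u\n", now);
        }
    }

    static ticks_t countdown(ticks_t now, void *data)
    {
        int *left = data;
        return (*left)-- > 0 ? 10 : 0;      /* re-arm every 10 ticks, 3 times */
    }

    int main(void)
    {
        int left = 3;
        run(countdown, &left, 0, 16);
        return 0;
    }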

+
+
 static void fake_reply(struct cell *t, int branch, int code )
 {
 	branch_bm_t cancel_bitmap;

@@ -283,92 +267,72 @@ static void fake_reply(struct cell *t, int branch, int code )

-
-inline static void retransmission_handler( struct timer_link *retr_tl )
+/* return (ticks_t)-1 on error/disable and 0 on success */
+inline static ticks_t retransmission_handler( struct retr_buf *r_buf )
 {
-	struct retr_buf* r_buf ;
-	enum lists id;
-
-	r_buf = get_retr_timer_payload(retr_tl);
 #ifdef EXTRA_DEBUG
-	if (r_buf->my_T->damocles) {
+	if (r_buf->my_T->flags & T_IN_AGONY) {
 		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" called from RETR timer\n",r_buf->my_T);
+			" called from RETR timer (flags %x)\n",
+			r_buf->my_T, r_buf->my_T->flags );
 		abort();
 	}
 #endif
-
-	/*the transaction is already removed from RETRANSMISSION_LIST by timer*/
-	/* retransmission */
 	if ( r_buf->activ_type==TYPE_LOCAL_CANCEL
 		|| r_buf->activ_type==TYPE_REQUEST ) {
+#ifdef EXTRA_DEBUG
 		DBG("DEBUG: retransmission_handler : "
			"request resending (t=%p, %.9s ... )\n",
 			r_buf->my_T, r_buf->buffer);
+#endif
 		if (SEND_BUFFER( r_buf )==-1) {
-			reset_timer( &r_buf->fr_timer );
+			/* disable retr. timers => return -1 */
 			fake_reply(r_buf->my_T, r_buf->branch, 503 );
-			return;
+			return (ticks_t)-1;
 		}
 	} else {
+#ifdef EXTRA_DEBUG
 		DBG("DEBUG: retransmission_handler : "
			"reply resending (t=%p, %.9s ... )\n",
 			r_buf->my_T, r_buf->buffer);
+#endif
 		t_retransmit_reply(r_buf->my_T);
 	}
-
-	id = r_buf->retr_list;
-	r_buf->retr_list = id < RT_T2 ? id + 1 : RT_T2;

-	retr_tl->timer_list= NULL; /* set to NULL so that set_timer will work */
-	set_timer( retr_tl, id < RT_T2 ? id + 1 : RT_T2, 0 );
-
-	DBG("DEBUG: retransmission_handler : done\n");
+	return 0;
 }
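
retransmission_handler() now reports a send failure through its return value: (ticks_t)-1, which on an unsigned type converts to the largest representable value, so it can never collide with a real retransmission interval and the caller can treat it as "no more retransmissions until the FR timer fires". A small demonstration of that sentinel convention (the names are invented):

    /* (ticks_t)-1 as a sentinel: on an unsigned type, -1 is the max value. */
    #include <stdio.h>

    typedef unsigned int ticks_t;

    #define RETR_DISABLED ((ticks_t)-1)     /* 0xffffffff for 32-bit ticks_t */

    static ticks_t send_or_disable(int send_failed)
    {
        return send_failed ? (ticks_t)-1 : 0;
    }

    int main(void)
    {
        ticks_t r = send_or_disable(1);
        if (r == RETR_DISABLED)
            printf("retr. disabled (sentinel=%u)\n", r);
        /* a later min(retr, fr) naturally picks the fr timeout, because
         * the sentinel compares greater than any real interval */
        return 0;
    }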

-
-inline static void final_response_handler( struct timer_link *fr_tl )
+inline static void final_response_handler( struct retr_buf* r_buf,
+											struct cell* t)
 {
-	int silent, reply_code;
-	struct retr_buf* r_buf;
-	struct cell *t;
-
-	if (fr_tl==0){
-		/* or BUG?, ignoring it for now */
-		LOG(L_CRIT, "ERROR: final_response_handler(0) called\n");
-		return;
-	}
-	r_buf = get_fr_timer_payload(fr_tl);
-	t=r_buf->my_T;
+	int silent;
+	int reply_code;

 #	ifdef EXTRA_DEBUG
-	if (t->damocles)
+	if (t->flags & T_IN_AGONY)
 	{
 		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" called from FR timer\n",r_buf->my_T);
+			" called from FR timer (flags %x)\n", t, t->flags);
 		abort();
 	}
 #	endif
-
-	reset_timer( &(r_buf->retr_timer) );
-
-	/* the transaction is already removed from FR_LIST by the timer */
-
 	/* FR for local cancels.... */
 	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
 	{
+#ifdef TIMER_DEBUG
 		DBG("DEBUG: final_response_handler: stop retr for Local Cancel\n");
+#endif
 		return;
 	}
-
 	/* FR for replies (negative INVITE replies) */
 	if (r_buf->activ_type>0) {
 #		ifdef EXTRA_DEBUG
 		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
			|| t->uas.status < 200 ) {
-			LOG(L_ERR, "ERROR: final_response_handler: unknown type reply buffer\n");
+			LOG(L_CRIT, "BUG: final_response_handler: unknown type reply"
+				" buffer\n");
 			abort();
 		}
 #		endif
@@ -402,15 +366,17 @@ inline static void final_response_handler( struct timer_link *fr_tl )
			&& has_noisy_ctimer(t) == 0;
 	if (silent) {
 		UNLOCK_REPLIES(t);
+#ifdef EXTRA_DEBUG
 		DBG("DEBUG: final_response_handler: transaction silently dropped (%p)\n",t);
+#endif
 		put_on_wait( t );
 		return;
 	}
-
+#ifdef EXTRA_DEBUG
 	DBG("DEBUG: final_response_handler:stop retr. and send CANCEL (%p)\n", t);
-
+#endif
 	if (is_invite(t) &&
-		r_buf->branch < MAX_BRANCHES && r_buf->branch >= 0 &&
+		r_buf->branch < MAX_BRANCHES && /* r_buf->branch is always >=0 */
		t->uac[r_buf->branch].last_received > 0) {
 		reply_code = 480; /* Request Terminated */
 	} else {
@@ -418,554 +384,143 @@ inline static void final_response_handler( struct timer_link *fr_tl )
 	}

 	fake_reply(t, r_buf->branch, reply_code );
-
-	DBG("DEBUG: final_response_handler : done\n");
-}
-
-
-
-void cleanup_localcancel_timers( struct cell *t )
-{
-	int i;
-	for (i=0; i<t->nr_of_outgoings; i++ ) {
-		reset_timer( &t->uac[i].local_cancel.retr_timer );
-		reset_timer( &t->uac[i].local_cancel.fr_timer );
-	}
-}
-
-
-inline static void wait_handler( struct timer_link *wait_tl )
-{
-	struct cell *p_cell;
-
-	p_cell = get_wait_timer_payload( wait_tl );
-#ifdef EXTRA_DEBUG
-	if (p_cell->damocles) {
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
-			" called from WAIT timer\n",p_cell);
-		abort();
-	}
-	DBG("DEBUG: WAIT timer hit\n");
-#endif
-
-	/* stop cancel timers if any running */
-	if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
-
-	/* the transaction is already removed from WT_LIST by the timer */
-	/* remove the cell from the hash table */
-	DBG("DEBUG: wait_handler : removing %p from table \n", p_cell );
-	LOCK_HASH( p_cell->hash_index );
-	remove_from_hash_table_unsafe( p_cell );
-	/* jku: no more here -- we do it when we put a transaction on wait */
-#ifdef EXTRA_DEBUG
-	p_cell->damocles = 1;
-#endif
-	/* delete (returns with UNLOCK-ed_HASH) */
-	delete_cell( p_cell, 1 /* unlock on return */ );
-	DBG("DEBUG: wait_handler : done\n");
-}
-
-
-
-inline static void delete_handler( struct timer_link *dele_tl )
-{
-	struct cell *p_cell;
-
-	p_cell = get_dele_timer_payload( dele_tl );
-	DBG("DEBUG: delete_handler : removing %p \n", p_cell );
-#ifdef EXTRA_DEBUG
-	if (p_cell->damocles==0) {
-		LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion"
-			" and called from DELETE timer\n",p_cell);
-		abort();
-	}
-#endif
-
-	/* we call delete now without any locking on hash/ref_count;
-	   we can do that because delete_handler is only entered after
-	   the delete timer was installed from wait_handler, which
-	   removed transaction from hash table and did not destroy it
-	   because some processes were using it; that means that the
-	   processes currently using the transaction can unref and no
-	   new processes can ref -- we can wait until ref_count is
-	   zero safely without locking
-	*/
-	delete_cell( p_cell, 0 /* don't unlock on return */ );
-	DBG("DEBUG: delete_handler : done\n");
 }

-/***********************************************************/

-struct timer_table *get_timertable()
+/* handles retransmissions and fr timers */
+/* the following assumptions are made (to avoid deleting/re-adding the timer):
+ *  retr_buf->retr_interval < (1<<(sizeof(ticks_t)*8-1))
+ *  if retr_buf->retr_interval==0 => timer disabled
+ *                            ==(ticks_t)-1 => retr. disabled (fr working)
+ *  retr_buf->retr_interval & (1<<(sizeof(ticks_t)*8-1)) => retr. & fr reset
+ *  (we never reset only retr, it's either reset both of them or retr
+ *   disabled & reset fr). In this case the fr_origin will contain the
+ *   "time" of the reset and the next retr should occur at
+ *   fr_origin+retr_interval (we also assume that we'll never reset retr
+ *   to a lower value than the current one)
+ */
+ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
 {
-	return timertable;
-}
-
+	struct retr_buf* rbuf ;
+	ticks_t fr_remainder;
+	ticks_t retr_remainder;
+	ticks_t retr_interval;
+	struct cell *t;

-void unlink_timer_lists()
-{
-	struct timer_link *tl, *end, *tmp;
-	enum lists i;
-
-	if (timertable==0) return; /* nothing to do */
-	/* remember the DELETE LIST */
-	tl = timertable->timers[DELETE_LIST].first_tl.next_tl;
-	end = & timertable->timers[DELETE_LIST].last_tl;
-	/* unlink the timer lists */
-	for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
-		reset_timer_list( i );
-	DBG("DEBUG: unlink_timer_lists : emptying DELETE list\n");
-	/* deletes all cells from DELETE_LIST list
-	   (they are no more accessible from entrys) */
-	while (tl!=end) {
-		tmp=tl->next_tl;
-		free_cell( get_dele_timer_payload(tl) );
-		tl=tmp;
-	}
+	rbuf=(struct retr_buf*)
+			((void*)tl-(void*)(&((struct retr_buf*)0)->timer));
+	t=rbuf->my_T;

-}
-
-struct timer_table *tm_init_timers()
-{
-	enum lists i;
-
-	timertable=(struct timer_table *) shm_malloc(sizeof(struct timer_table));
-	if (!timertable) {
-		LOG(L_ERR, "ERROR: tm_init_timers: no shmem for timer_Table\n");
-		goto error0;
-	}
-	memset(timertable, 0, sizeof (struct timer_table));
-
-
-	/* inits the timers*/
-	for( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
-		init_timer_list( i );
-
-	/* init. timer lists */
-	timertable->timers[RT_T1_TO_1].id = RT_T1_TO_1;
-	timertable->timers[RT_T1_TO_2].id = RT_T1_TO_2;
-	timertable->timers[RT_T1_TO_3].id = RT_T1_TO_3;
-	timertable->timers[RT_T2].id = RT_T2;
-	timertable->timers[FR_TIMER_LIST].id = FR_TIMER_LIST;
-	timertable->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
-	timertable->timers[WT_TIMER_LIST].id = WT_TIMER_LIST;
-	timertable->timers[DELETE_LIST].id = DELETE_LIST;
-
-	return timertable;
-
-error0:
-	return 0;
-}
-
-void free_timer_table()
-{
-	enum lists i;
-
-	if (timertable) {
-		/* the mutexs for sync the lists are released*/
-		for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
-			release_timerlist_lock( &timertable->timers[i] );
-		shm_free(timertable);
-	}
-
-}
-
-void reset_timer_list( enum lists list_id)
-{
-	timertable->timers[list_id].first_tl.next_tl =
-		&(timertable->timers[list_id].last_tl );
-	timertable->timers[list_id].last_tl.prev_tl =
-		&(timertable->timers[list_id].first_tl );
-	timertable->timers[list_id].first_tl.prev_tl =
-		timertable->timers[list_id].last_tl.next_tl = NULL;
-	timertable->timers[list_id].last_tl.time_out = -1;
-}
-
-
-void init_timer_list( /* struct s_table* ht, */ enum lists list_id)
-{
-	reset_timer_list( /* ht, */ list_id );
-	init_timerlist_lock( /* ht, */ list_id );
-}
-
-
-void print_timer_list( enum lists list_id)
-{
-	struct timer* timer_list=&(timertable->timers[ list_id ]);
-	struct timer_link *tl ;
-
-	tl = timer_list->first_tl.next_tl;
-	while (tl!=& timer_list->last_tl)
-	{
-		DBG("DEBUG: print_timer_list[%d]: %p, next=%p \n",
-			list_id, tl, tl->next_tl);
-		tl = tl->next_tl;
-	}
-}
-
-
-static void remove_timer_unsafe( struct timer_link* tl )
-{
-#ifdef EXTRA_DEBUG
-	if (tl && is_in_timer_list2(tl) &&
-		tl->timer_list->last_tl.prev_tl==0) {
-		LOG( L_CRIT,
-			"CRITICAL : Oh no, zero link in trailing timer element\n");
-		abort();
-	};
-#endif
-	if (is_in_timer_list2( tl )) {
-#ifdef EXTRA_DEBUG
-		DBG("DEBUG: unlinking timer: tl=%p, timeout=%d, group=%d\n",
-			tl, tl->time_out, tl->tg);
-#endif
-		tl->prev_tl->next_tl = tl->next_tl;
-		tl->next_tl->prev_tl = tl->prev_tl;
-		tl->next_tl = 0;
-		tl->prev_tl = 0;
-		tl->timer_list = NULL;
-	}
-}
-
-
-/* put a new cell into a list nr. list_id */
-static void insert_timer_unsafe( struct timer *timer_list, struct timer_link *tl,
-	unsigned int time_out )
-{
-	struct timer_link* ptr;
-
-	tl->time_out = time_out;
-	tl->timer_list = timer_list;
-
-	for(ptr = timer_list->last_tl.prev_tl;
-		ptr != &timer_list->first_tl;
-		ptr = ptr->prev_tl) {
-		if ((ptr->time_out != TIMER_DELETED) && (ptr->time_out <= time_out)) break;
-	}
-
-	tl->prev_tl = ptr;
-	tl->next_tl = ptr->next_tl;
-	tl->prev_tl->next_tl = tl;
-	tl->next_tl->prev_tl = tl;
-
-	DBG("DEBUG: add_to_tail_of_timer[%d]: %p\n",timer_list->id,tl);
-}
-
-
-#if 0 /* not used anymore */
-/* put a new cell into a list nr. list_id */
-static void add_timer_unsafe( struct timer *timer_list, struct timer_link *tl,
-	unsigned int time_out )
-{
-#ifdef EXTRA_DEBUG
-	if (timer_list->last_tl.prev_tl==0) {
-		LOG( L_CRIT,
-			"CRITICAL : Oh no, zero link in trailing timer element\n");
-		abort();
-	};
+#ifdef TIMER_DEBUG
+	DBG("tm: timer retr_buf_handler @%d (%p -> %p -> %p)\n",
+			ticks, tl, rbuf, t);
 #endif
-
-	tl->time_out = time_out;
-	tl->prev_tl = timer_list->last_tl.prev_tl;
-	tl->next_tl = & timer_list->last_tl;
-	timer_list->last_tl.prev_tl = tl;
-	tl->prev_tl->next_tl = tl;
-	tl->timer_list = timer_list;
-#ifdef EXTRA_DEBUG
-	if ( tl->tg != timer_group[ timer_list->id ] ) {
-		LOG( L_CRIT, "CRITICAL error: changing timer group\n");
-		abort();
-	}
+	/* overflow safe check (should work ok for fr_intervals < max ticks_t/2) */
+	if ((s_ticks_t)(rbuf->fr_expire-ticks)<=0){
+		/* final response */
+		final_response_handler(rbuf, t);
+		rbuf->t_active=0; /* mark the timer as removed
+							 (both timers disabled)
+							 a little race risk, but
+							 nothing bad would happen */
+		return 0;
+	}else{
+		/* 4 possible states: running (t1), t2, paused, disabled */
+		if ((s_ticks_t)(rbuf->retr_expire-ticks)<=0){
+			if (rbuf->flags & F_RB_RETR_DISABLED)
+				goto disabled;
+			/* retr_interval= min (2*ri, rt_t2) */
+			/* no branch version:
+				#ifdef CC_SIGNED_RIGHT_SHIFT
+					ri= rt_t2+((2*ri-rt_t2) &
+						((signed)(2*ri-rt_t2)>>(sizeof(ticks_t)*8-1)));
+				#else
+					ri=rt_t2+((2*ri-rt_t2)& -(2*ri<rt_t2));
+				#endif
+			*/
+
+			/* get the current interval from timer param. */
+			if ((rbuf->flags & F_RB_T2) ||
+					(((ticks_t)(unsigned long)p<<1)>rt_t2_timeout))
+				retr_interval=rt_t2_timeout;
+			else
+				retr_interval=(ticks_t)(unsigned long)p<<1;
+#ifdef TIMER_DEBUG
+			DBG("tm: timer: retr: new interval %d (max %d)\n",
+					retr_interval, rt_t2_timeout);
 #endif
-	DBG("DEBUG: add_timer_unsafe[%d]: %p\n",timer_list->id,tl);
-}
-#endif
-
-
-
-/* detach items passed by the time from timer list */
-static struct timer_link *check_and_split_time_list( struct timer *timer_list,
-	int time )
-{
-	struct timer_link *tl , *end, *ret;
-
-
-	/* quick check whether it is worth entering the lock */
-	if (timer_list->first_tl.next_tl==&timer_list->last_tl
-		|| ( /* timer_list->first_tl.next_tl
-			&& */ timer_list->first_tl.next_tl->time_out > time) )
-		return NULL;
-
-	/* the entire timer list is locked now -- noone else can manipulate it */
-	lock(timer_list->mutex);
-
-	end = &timer_list->last_tl;
-	tl = timer_list->first_tl.next_tl;
-	while( tl!=end && tl->time_out <= time) {
-		tl->timer_list = DETACHED_LIST;
-		tl=tl->next_tl;
-	}
-
-	/* nothing to delete found */
-	if (tl->prev_tl==&(timer_list->first_tl)) {
-		ret = NULL;
-	} else { /* we did find timers to be fired! */
-		/* the detached list begins with current beginning */
-		ret = timer_list->first_tl.next_tl;
-		/* and we mark the end of the split list */
-		tl->prev_tl->next_tl = NULL;
-		/* the shortened list starts from where we suspended */
-		timer_list->first_tl.next_tl = tl;
-		tl->prev_tl = & timer_list->first_tl;
+			/* we could race with the reply_received code, but the
+			 * worst thing that can happen is to delay a reset_to_t2
+			 * for crt_interval and send an extra retr.*/
+			rbuf->retr_expire=ticks+retr_interval;
+			/* set new interval to -1 on error, or retr_int. on success */
+			retr_remainder=retransmission_handler(rbuf) | retr_interval;
+			/* store the crt. retr. interval inside the timer struct,
+			 * in the data member */
+			tl->data=(void*)(unsigned long)retr_interval;
+		}else{
+			retr_remainder= rbuf->retr_expire-ticks;
+			DBG("tm: timer: retr: nothing to do, expire in %d\n",
+					retr_remainder);
+		}
 	}
-#ifdef EXTRA_DEBUG
-	if (timer_list->last_tl.prev_tl==0) {
-		LOG( L_CRIT,
-			"CRITICAL : Oh no, zero link in trailing timer element\n");
-		abort();
-	};
-#endif
-	/* give the list lock away */
-	unlock(timer_list->mutex);
-
-	return ret;
-}
-
-
-
-/* stop timer
- * WARNING: a reset'ed timer will be lost forever
- * (successive set_timer won't work unless you're lucky
- * an catch the race condition, the idea here is there is no
- * guarantee you can do anything after a timer_reset)*/
-void reset_timer( struct timer_link* tl )
-{
-	/* disqualify this timer from execution by setting its time_out
-	   to zero; it will stay in timer-list until the timer process
-	   starts removing outdated elements; then it will remove it
-	   but not execute; there is a race condition, though -- see
-	   timer.c for more details
-	*/
-	tl->time_out = TIMER_DELETED;
-#ifdef EXTRA_DEBUG
-	DBG("DEBUG: reset_timer (group %d, tl=%p)\n", tl->tg, tl );
+/* skip: */
+	/* return the minimum of the next retransmission timeout and the
+	 * final response timeout (side benefit: it properly cancels the timer
+	 * if ret==0 and sleeps for fr_remainder if retr. is canceled
+	 * [==(ticks_t)-1]) */
+	fr_remainder=rbuf->fr_expire-ticks; /* to be more precise use
+										   get_ticks_raw() instead of ticks
+										   (but make sure that
+										   crt. ticks < fr_expire) */
+#ifdef TIMER_DEBUG
+	DBG("tm: timer retr_buf_handler @%d (%p ->%p->%p) exiting min (%d, %d)\n",
+			ticks, tl, rbuf, t, retr_remainder, fr_remainder);
 #endif
+	if (retr_remainder<fr_remainder)
+		return retr_remainder;
+	else
+		return fr_remainder;
+disabled:
+	return rbuf->fr_expire-ticks;
 }
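
retr_buf_handler() leans on three C idioms: it recovers the enclosing retr_buf from the embedded timer_ln by subtracting the member offset (a hand-rolled container_of), it tests expiry with a signed cast of an unsigned tick difference so the comparison survives ticks_t wraparound (valid while intervals stay below half the ticks_t range, as the comment notes), and it doubles the retransmission interval while capping it at the T2 value. A self-contained toy of all three; the struct layout and numbers are invented for illustration:

    /* Toy versions of the three tricks used above. */
    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned int ticks_t;
    typedef int s_ticks_t;

    struct timer_ln { void *data; };
    struct retr_buf {
        int branch;
        struct timer_ln timer;      /* timer embedded in the larger struct */
    };

    /* container_of: step back from the member to the enclosing struct */
    #define rbuf_of(tl) \
        ((struct retr_buf*)((char*)(tl) - offsetof(struct retr_buf, timer)))

    /* expired iff expire <= now, correct across ticks_t wraparound as long
     * as the interval is less than half the ticks_t range */
    static int expired(ticks_t expire, ticks_t now)
    {
        return (s_ticks_t)(expire - now) <= 0;
    }

    int main(void)
    {
        struct retr_buf rb = { .branch = 2 };
        struct timer_ln *tl = &rb.timer;      /* what the timer core hands us */
        ticks_t t2 = 64, ri = 24;

        printf("branch=%d\n", rbuf_of(tl)->branch);        /* -> 2 */
        printf("wrapped: %d\n", expired(0xfffffff0u, 5));  /* now wrapped past
                                                              expire -> 1 */
        ri = (2*ri > t2) ? t2 : 2*ri;                      /* min(2*ri, t2) */
        printf("next interval=%u\n", ri);                  /* -> 48 */
        return 0;
    }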

-
-/* determine timer length and put on a correct timer list
- * WARNING: - don't try to use it to "move" a timer from one list
- *            to another, you'll run into races
- *          - reset_timer; set_timer might not work, a reset'ed timer
- *            has no set_timer guarantee, it might be lost;
- *            same for an expired timer: only it's handler can
- *            set it again, an external set_timer has no guarantee
- */
-void set_timer( struct timer_link *new_tl, enum lists list_id, unsigned int* ext_timeout )
+ticks_t wait_handler(ticks_t ti, struct timer_ln *wait_tl, void* data)
 {
-	unsigned int timeout;
-	struct timer* list;
-
-
-	if (list_id<FR_TIMER_LIST || list_id>=NR_OF_TIMER_LISTS) {
-		LOG(L_CRIT, "ERROR: set_timer: unknown list: %d\n", list_id);
-#ifdef EXTRA_DEBUG
-		abort();
-#endif
-		return;
-	}
-
-	if (!ext_timeout) {
-		timeout = timer_id2timeout[ list_id ];
-	} else {
-		timeout = *ext_timeout;
-	}
-
-	list= &(timertable->timers[ list_id ]);
-
-	lock(list->mutex);
-	/* check first if we are on the "detached" timer_routine list,
-	 * if so do nothing, the timer is not valid anymore
-	 * (sideffect: reset_timer ; set_timer is not safe, a reseted timer
-	 *  might be lost, depending on this race condition ) */
-	if (new_tl->timer_list==DETACHED_LIST){
-		LOG(L_CRIT, "WARNING: set_timer called on a \"detached\" timer"
-			" -- ignoring: %p\n", new_tl);
-		goto end;
-	}
-	/* make sure I'm not already on a list */
-	remove_timer_unsafe( new_tl );
-	/*
-	add_timer_unsafe( list, new_tl, get_ticks()+timeout);
-	*/
-	insert_timer_unsafe( list, new_tl, get_ticks()+timeout);
-end:
-	unlock(list->mutex);
-}
-
-/* similar to set_timer, except it allows only one-time
-   timer setting and all later attempts are ignored */
-void set_1timer( struct timer_link *new_tl, enum lists list_id, unsigned int* ext_timeout )
-{
-	unsigned int timeout;
-	struct timer* list;
-
+	struct cell *p_cell;
+	ticks_t ret;

-	if (list_id<FR_TIMER_LIST || list_id>=NR_OF_TIMER_LISTS) {
-		LOG(L_CRIT, "ERROR: set_timer: unknown list: %d\n", list_id);
-#ifdef EXTRA_DEBUG
-		abort();
+	p_cell=(struct cell*)data;
+#ifdef TIMER_DEBUG
+	DBG("DEBUG: WAIT timer hit @%d for %p (timer_ln %p)\n",
+			ti, p_cell, wait_tl);
 #endif
-		return;
-	}
-
-	if (!ext_timeout) {
-		timeout = timer_id2timeout[ list_id ];
-	} else {
-		timeout = *ext_timeout;
-	}
-
-	list= &(timertable->timers[ list_id ]);
-
-	lock(list->mutex);
-	if (!(new_tl->time_out>TIMER_DELETED)) {
-		/* make sure I'm not already on a list */
-		/* remove_timer_unsafe( new_tl ); */
-		/*
-		add_timer_unsafe( list, new_tl, get_ticks()+timeout);
-		*/
-		insert_timer_unsafe( list, new_tl, get_ticks()+timeout);
-
-		/* set_1timer is used only by WAIT -- that's why we can
-		   afford updating wait statistics; I admit its not nice
-		   but it greatly utilizes existing lock
-		*/
-	}
-	unlock(list->mutex);
-	t_stats_wait();
-}
-
-
-
-/* should be called only from timer process context,
- * else it's unsafe */
-static void unlink_timers( struct cell *t )
-{
-	int i;
-	int remove_fr, remove_retr;
-
-	remove_fr=0; remove_retr=0;
-
-	/* first look if we need to remove timers and play with
-	   costly locks at all
-
-	   note that is_in_timer_list2 is unsafe but it does not
-	   hurt -- transaction is already dead (wait state) so that
-	   noone else will install a FR/RETR timer and it can only
-	   be removed from timer process itself -> it is safe to
-	   use it without any protection
-	*/
-	if (is_in_timer_list2(&t->uas.response.fr_timer)) remove_fr=1;
-	else for (i=0; i<t->nr_of_outgoings; i++)
-		if (is_in_timer_list2(&t->uac[i].request.fr_timer)
-			|| is_in_timer_list2(&t->uac[i].local_cancel.fr_timer)) {
-			remove_fr=1;
-			break;
-		}
-	if (is_in_timer_list2(&t->uas.response.retr_timer)) remove_retr=1;
-	else for (i=0; i<t->nr_of_outgoings; i++)
-		if (is_in_timer_list2(&t->uac[i].request.retr_timer)
-			|| is_in_timer_list2(&t->uac[i].local_cancel.retr_timer)) {
-			remove_retr=1;
-			break;
-		}
-
-	/* do what we have to do....*/
-	if (remove_retr) {
-		/* RT_T1 lock is shared by all other RT timer
-		   lists -- we can safely lock just one
+	if (p_cell->flags & T_IN_AGONY){
+		/* delayed delete */
+		/* we call delete now without any locking on hash/ref_count;
+		   we can do that because delete_handler is only entered after
+		   the delete timer was installed from wait_handler, which
+		   removed transaction from hash table and did not destroy it
+		   because some processes were using it; that means that the
+		   processes currently using the transaction can unref and no
+		   new processes can ref -- we can wait until ref_count is
+		   zero safely without locking
 		*/
-		lock(timertable->timers[RT_T1_TO_1].mutex);
-		remove_timer_unsafe(&t->uas.response.retr_timer);
-		for (i=0; i<t->nr_of_outgoings; i++) {
-			remove_timer_unsafe(&t->uac[i].request.retr_timer);
-			remove_timer_unsafe(&t->uac[i].local_cancel.retr_timer);
-		}
-		unlock(timertable->timers[RT_T1_TO_1].mutex);
-	}
-	if (remove_fr) {
-		/* FR lock is shared by all other FR timer
-		   lists -- we can safely lock just one
-		*/
-		lock(timertable->timers[FR_TIMER_LIST].mutex);
-		remove_timer_unsafe(&t->uas.response.fr_timer);
-		for (i=0; i<t->nr_of_outgoings; i++) {
-			remove_timer_unsafe(&t->uac[i].request.fr_timer);
-			remove_timer_unsafe(&t->uac[i].local_cancel.fr_timer);
-		}
-		unlock(timertable->timers[FR_TIMER_LIST].mutex);
-	}
-}
-
-
-
-#define run_handler_for_each( _tl , _handler ) \
-	while ((_tl))\
-	{\
-		/* reset the timer list linkage */\
-		tmp_tl = (_tl)->next_tl;\
-		(_tl)->next_tl = (_tl)->prev_tl = 0;\
-		DBG("DEBUG: timer routine:%d,tl=%p next=%p\n",\
-			id,(_tl),tmp_tl);\
-		if ((_tl)->time_out>TIMER_DELETED) \
-			(_handler)( _tl );\
-		(_tl) = tmp_tl;\
-	}
-
-
-
-void timer_routine(unsigned int ticks , void * attr)
-{
-	/* struct timer_table *tt= (struct timer_table*)attr; */
-	struct timer_link *tl, *tmp_tl;
-	int id;
-
-	for( id=0 ; id<NR_OF_TIMER_LISTS ; id++ )
-	{
-		/* to waste as little time in lock as possible, detach list
-		   with expired items and process them after leaving the lock */
-		tl=check_and_split_time_list( &timertable->timers[ id ], ticks);
-		/* process items now */
-		switch (id)
-		{
-			case FR_TIMER_LIST:
-			case FR_INV_TIMER_LIST:
-				run_handler_for_each(tl,final_response_handler);
-				break;
-			case RT_T1_TO_1:
-			case RT_T1_TO_2:
-			case RT_T1_TO_3:
-			case RT_T2:
-				run_handler_for_each(tl,retransmission_handler);
-				break;
-			case WT_TIMER_LIST:
-				run_handler_for_each(tl,wait_handler);
-				break;
-			case DELETE_LIST:
-				run_handler_for_each(tl,delete_handler);
-				break;
-		}
+		ret=delete_cell( p_cell, 0 /* don't unlock on return */ );
+	}else{
+		/* stop cancel timers if any running */
+		if ( is_invite(p_cell) ) cleanup_localcancel_timers( p_cell );
+		/* remove the cell from the hash table */
+		LOCK_HASH( p_cell->hash_index );
+		remove_from_hash_table_unsafe( p_cell );
+		p_cell->flags |= T_IN_AGONY;
+		/* delete (returns with UNLOCK-ed_HASH) */
+		ret=delete_cell( p_cell, 1 /* unlock on return */ );
 	}
+	return ret;
 }
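
wait_handler() folds the old WAIT and DELETE lists into one timer: the first expiry unlinks the transaction from the hash table and flags it T_IN_AGONY; as long as delete_cell() reports outstanding references, the returned delete_timeout simply re-arms the same timer for another attempt. A toy model of that state machine (the struct, flag, and tick values are stand-ins, not the tm cell):

    /* Toy two-phase delete: first firing unlinks and flags, later firings
     * retry the delete until no references remain. Illustrative only. */
    #include <stdio.h>

    typedef unsigned int ticks_t;

    enum { IN_AGONY = 1 };                  /* stand-in for T_IN_AGONY */

    struct cell { int flags; int refs; int in_hash; };

    static ticks_t delete_timeout = 4;      /* assumed retry interval */

    static ticks_t try_delete(struct cell *c)
    {
        return (c->refs > 0) ? delete_timeout : 0;
    }

    static ticks_t wait_fired(struct cell *c)
    {
        if (!(c->flags & IN_AGONY)) {       /* first firing: leave the hash */
            c->in_hash = 0;
            c->flags |= IN_AGONY;
        }
        return try_delete(c);               /* 0 = freed, else re-arm */
    }

    int main(void)
    {
        struct cell c = { .refs = 1, .in_hash = 1 };
        ticks_t next = wait_fired(&c);      /* unlink + first delete attempt */
        printf("in_hash=%d next=%u\n", c.in_hash, next);
        c.refs = 0;                         /* last user unrefs */
        printf("next=%u (freed)\n", wait_fired(&c));
        return 0;
    }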