瀏覽代碼

- changed tm hash bucket lists (simplified & a little bit faster)
- inlined some used-a-lot functions
- don't allocate space for md5 if syn_branches is on (default) =>
32 bytes saved/transaction

Andrei Pelinescu-Onciul 18 年之前
父節點
當前提交
99e9192a48
共有 4 個文件被更改,包括 168 次插入、146 次刪除
  1. 27 83
      modules/tm/h_table.c
  2. 88 22
      modules/tm/h_table.h
  3. 1 1
      modules/tm/lock.c
  4. 52 40
      modules/tm/t_lookup.c

+ 27 - 83
modules/tm/h_table.c

@@ -43,6 +43,9 @@
  *             transactions (bogdan)
  * 2006-08-11  dns failover support (andrei)
  * 2007-05-16  callbacks called on destroy (andrei)
+ * 2007-06-06  don't allocate extra space for md5 if not used: syn_branch==1 
+ *              (andrei)
+ * 2007-06-06  switched tm bucket list to a simpler and faster clist (andrei)
  */
 
 #include <stdlib.h>
@@ -68,7 +71,7 @@ static enum kill_reason kr;
 
 /* pointer to the big table where all the transaction data
    lives */
-static struct s_table*  tm_table;
+struct s_table*  _tm_table;
 
 
 void reset_kr() {
@@ -88,22 +91,18 @@ enum kill_reason get_kr() {
 
 void lock_hash(int i) 
 {
-	lock(&tm_table->entrys[i].mutex);
+	lock(&_tm_table->entries[i].mutex);
 }
 
 
 void unlock_hash(int i) 
 {
-	unlock(&tm_table->entrys[i].mutex);
+	unlock(&_tm_table->entries[i].mutex);
 }
 
 
-struct s_table* get_tm_table()
-{
-	return tm_table;
-}
-
 
+#ifdef TM_HASH_STATS
 unsigned int transaction_count( void )
 {
 	unsigned int i;
@@ -111,9 +110,10 @@ unsigned int transaction_count( void )
 
 	count=0;	
 	for (i=0; i<TABLE_ENTRIES; i++) 
-		count+=tm_table->entrys[i].cur_entries;
+		count+=_tm_table->entries[i].cur_entries;
 	return count;
 }
+#endif
 
 
 
@@ -259,7 +259,9 @@ struct cell*  build_cell( struct sip_msg* p_msg )
 	avp_list_t* old;
 
 	/* allocs a new cell */
-	new_cell = (struct cell*)shm_malloc( sizeof( struct cell ) );
+	/* if syn_branch==0 add space for md5 (MD5_LEN -sizeof(struct cell.md5)) */
+	new_cell = (struct cell*)shm_malloc( sizeof( struct cell )+
+			((MD5_LEN-sizeof(((struct cell*)0)->md5))&((syn_branch!=0)-1)) );
 	if  ( !new_cell ) {
 		ser_error=E_OUT_OF_MEM;
 		return NULL;
@@ -348,21 +350,19 @@ void free_hash_table(  )
 	struct cell* tmp_cell;
 	int    i;
 
-	if (tm_table)
+	if (_tm_table)
 	{
 		/* remove the data contained by each entry */
 		for( i = 0 ; i<TABLE_ENTRIES; i++)
 		{
-			release_entry_lock( (tm_table->entrys)+i );
+			release_entry_lock( (_tm_table->entries)+i );
 			/* delete all synonyms at hash-collision-slot i */
-			p_cell=tm_table->entrys[i].first_cell;
-			for( ; p_cell; p_cell = tmp_cell )
-			{
-				tmp_cell = p_cell->next_cell;
-				free_cell( p_cell );
+			clist_foreach_safe(&_tm_table->entries[i], p_cell, tmp_cell,
+									next_c){
+				free_cell(p_cell);
 			}
 		}
-		shm_free(tm_table);
+		shm_free(_tm_table);
 	}
 }
 
@@ -376,26 +376,28 @@ struct s_table* init_hash_table()
 	int              i;
 
 	/*allocs the table*/
-	tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
-	if ( !tm_table) {
+	_tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
+	if ( !_tm_table) {
 		LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
 		goto error0;
 	}
 
-	memset( tm_table, 0, sizeof (struct s_table ) );
+	memset( _tm_table, 0, sizeof (struct s_table ) );
 
 	/* try first allocating all the structures needed for syncing */
 	if (lock_initialize()==-1)
 		goto error1;
 
-	/* inits the entrys */
+	/* inits the entriess */
 	for(  i=0 ; i<TABLE_ENTRIES; i++ )
 	{
-		init_entry_lock( tm_table, (tm_table->entrys)+i );
-		tm_table->entrys[i].next_label = rand();
+		init_entry_lock( _tm_table, (_tm_table->entries)+i );
+		_tm_table->entries[i].next_label = rand();
+		/* init cell list */
+		clist_init(&_tm_table->entries[i], next_c, prev_c);
 	}
 
-	return  tm_table;
+	return  _tm_table;
 
 error1:
 	free_hash_table( );
@@ -405,61 +407,3 @@ error0:
 
 
 
-
-/*  Takes an already created cell and links it into hash table on the
- *  appropriate entry. */
-void insert_into_hash_table_unsafe( struct cell * p_cell, unsigned int _hash )
-{
-	struct entry* p_entry;
-
-	p_cell->hash_index=_hash;
-
-	/* locates the appropriate entry */
-	p_entry = &tm_table->entrys[ _hash ];
-
-	p_cell->label = p_entry->next_label++;
-	if ( p_entry->last_cell )
-	{
-		p_entry->last_cell->next_cell = p_cell;
-		p_cell->prev_cell = p_entry->last_cell;
-	} else p_entry->first_cell = p_cell;
-
-	p_entry->last_cell = p_cell;
-
-	/* update stats */
-	p_entry->cur_entries++;
-	p_entry->acc_entries++;
-	t_stats_new( is_local(p_cell) );
-}
-
-
-/*  Un-link a  cell from hash_table, but the cell itself is not released */
-void remove_from_hash_table_unsafe( struct cell * p_cell)
-{
-	struct entry*  p_entry  = &(tm_table->entrys[p_cell->hash_index]);
-
-	/* unlink the cell from entry list */
-	/* lock( &(p_entry->mutex) ); */
-
-	if ( p_cell->prev_cell )
-		p_cell->prev_cell->next_cell = p_cell->next_cell;
-	else
-		p_entry->first_cell = p_cell->next_cell;
-
-	if ( p_cell->next_cell )
-		p_cell->next_cell->prev_cell = p_cell->prev_cell;
-	else
-		p_entry->last_cell = p_cell->prev_cell;
-	/* update stats */
-#	ifdef EXTRA_DEBUG
-	if (p_entry->cur_entries==0) {
-		LOG(L_CRIT, "BUG: bad things happened: cur_entries=0\n");
-		abort();
-	}
-#	endif
-	p_entry->cur_entries--;
-	t_stats_deleted( is_local(p_cell) );
-
-	/* unlock( &(p_entry->mutex) ); */
-}
-

+ 88 - 22
modules/tm/h_table.h

@@ -41,21 +41,30 @@
  *             UNREF if the ref_count reaches 0 (andrei)
  * 2007-06-01  support for different retransmissions intervals per transaction;
  *             added maximum inv. and non-inv. transaction life time (andrei)
+ * 2007-06-06  switched tm bucket list to a simpler and faster clist;
+ *              inlined often used functions (andrei)
  */
 
-#include "defs.h"
-
-
-#define TM_DEL_UNREF
-
 #ifndef _H_TABLE_H
 #define _H_TABLE_H
 
+#include "defs.h"
+#include "t_stats.h"
+
+#define TM_DEL_UNREF
+/* uncomment the next define if you wish to keep hash statistics*/
 /*
- #include <stdio.h>
- #include <stdlib.h>
+#define TM_HASH_STATS
 */
+/* use hash stats always in debug mode */
+#ifdef EXTRA_DEBUG
+#ifndef TM_HASH_STATS
+#define TM_HASH_STATS
+#endif
+#endif
 
+
+#include "../../clist.h"
 #include "../../parser/msg_parser.h"
 #include "../../types.h"
 #include "../../md5utils.h"
@@ -236,8 +245,10 @@ typedef unsigned short retr_timeout_t;
 typedef struct cell
 {
 	/* linking data */
-	struct cell*     next_cell;
-	struct cell*     prev_cell;
+	/* WARNING: don't move or change order of next_c or prev_c
+	 * or breakage will occur */
+	struct cell*     next_c;
+	struct cell*     prev_c;
 	/* tells in which hash table entry the cell lives */
 	unsigned int  hash_index;
 	/* sequence number within hash collision slot */
@@ -329,24 +340,39 @@ typedef struct cell
 	 /* The route to take for each downstream branch separately */
 	unsigned short on_branch;
 
-	/* MD5checksum  (meaningful only if syn_branch=0) */
-	char md5[MD5_LEN];
+	/* place holder for MD5checksum  (meaningful only if syn_branch=0) */
+	char md5[0]; /* if syn_branch==0 then MD5_LEN bytes are extra alloc'ed*/
 
 }cell_type;
 
 
+#if 0
+/* warning: padding too much => big size increase */
+#define ENTRY_PAD_TO  128 /* should be a multiple of cacheline size for 
+                             best performance*/
+#define ENTRY_PAD_BYTES	 \
+	(ENTRY_PAD_TO-2*sizeof(struct cell*)+sizeof(ser_lock_t)+sizeof(int)+ \
+	 				2*sizeof(long))
+#else
+#define ENTRY_PAD_BYTES 0
+#endif
 
 /* double-linked list of cells with hash synonyms */
 typedef struct entry
 {
-	struct cell*    first_cell;
-	struct cell*    last_cell;
-	/* currently highest sequence number in a synonym list */
-	unsigned int    next_label;
+	/* WARNING: don't move or change order of next_c or prev_c
+	 * or breakage will occur */
+	struct cell*    next_c; 
+	struct cell*    prev_c;
 	/* sync mutex */
 	ser_lock_t      mutex;
+	/* currently highest sequence number in a synonym list */
+	unsigned int    next_label;
+#ifdef TM_HASH_STATS
 	unsigned long acc_entries;
 	unsigned long cur_entries;
+#endif
+	char _pad[ENTRY_PAD_BYTES];
 }entry_type;
 
 
@@ -355,9 +381,12 @@ typedef struct entry
 struct s_table
 {
 	/* table of hash entries; each of them is a list of synonyms  */
-	struct entry   entrys[ TABLE_ENTRIES ];
+	struct entry   entries[ TABLE_ENTRIES ];
 };
 
+/* pointer to the big table where all the transaction data
+   lives */
+struct s_table*  _tm_table; /* private internal stuff, don't touch directly */
 
 #define list_entry(ptr, type, member) \
 	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
@@ -390,18 +419,55 @@ void reset_kr();
 void set_kr( enum kill_reason kr );
 enum kill_reason get_kr();
 
-struct s_table* get_tm_table();
+#define get_tm_table() (_tm_table)
+
 struct s_table* init_hash_table();
 void   free_hash_table( );
 void   free_cell( struct cell* dead_cell );
 struct cell*  build_cell( struct sip_msg* p_msg );
-void   remove_from_hash_table_unsafe( struct cell * p_cell);
-#ifdef OBSOLETED
-void   insert_into_hash_table( struct cell * p_cell, unsigned int _hash);
-#endif
-void   insert_into_hash_table_unsafe( struct cell * p_cell, unsigned int _hash );
 
+#ifdef TM_HASH_STATS
 unsigned int transaction_count( void );
+#endif
+
+
+/*  Takes an already created cell and links it into hash table on the
+ *  appropriate entry. */
+inline static void insert_into_hash_table_unsafe( struct cell * p_cell,
+													unsigned int hash )
+{
+	p_cell->label = _tm_table->entries[hash].next_label++;
+	p_cell->hash_index=hash;
+	/* insert at the beginning */
+	clist_insert(&_tm_table->entries[hash], p_cell, next_c, prev_c);
+
+	/* update stats */
+#ifdef TM_HASH_STATS
+	_tm_table->entries[hash].cur_entries++;
+	_tm_table->entries[hash].acc_entries++;
+#endif
+	t_stats_new( is_local(p_cell) );
+}
+
+
+
+/*  Un-link a  cell from hash_table, but the cell itself is not released */
+inline static void remove_from_hash_table_unsafe( struct cell * p_cell)
+{
+	clist_rm(p_cell, next_c, prev_c);
+#	ifdef EXTRA_DEBUG
+#ifdef TM_HASH_STATS
+	if (_tm_table->entries[p_cell->hash_index].cur_entries==0){
+		LOG(L_CRIT, "BUG: bad things happened: cur_entries=0\n");
+		abort();
+	}
+#endif
+#	endif
+#ifdef TM_HASH_STATS
+	_tm_table->entries[p_cell->hash_index].cur_entries--;
+#endif
+	t_stats_deleted( is_local(p_cell) );
+}
 
 #endif
 

+ 1 - 1
modules/tm/lock.c

@@ -223,7 +223,7 @@ int init_entry_lock( struct s_table* ht, struct entry *entry )
 	   many partitions as number of available semaphores allows
         */
 	entry->mutex.semaphore_set=entry_semaphore;
-	entry->mutex.semaphore_index = ( ((char *)entry - (char *)(ht->entrys ) )
+	entry->mutex.semaphore_index = ( ((char *)entry - (char *)(ht->entries ) )
                / sizeof(struct entry) ) % sem_nr;
 #endif
 	return 0;

+ 52 - 40
modules/tm/t_lookup.c

@@ -96,6 +96,7 @@
 *              added support for turning off 100 repl. sending on inv. (andrei)
  * 2007-06-01  support for different retransmissions intervals per transaction;
  *             added maximum inv. and non-inv. transaction life time (andrei)
+ * 2007-06-06  switched tm bucket list to a simpler and faster clist;
  */
 
 #include "defs.h"
@@ -317,6 +318,7 @@ static int matching_3261( struct sip_msg *p_msg, struct cell **trans,
 	int dlg_parsed;
 	int ret = 0;
 	struct cell *e2e_ack_trans;
+	struct entry* hash_bucket;
 
 	*cancel=0;
 	e2e_ack_trans=0;
@@ -327,9 +329,9 @@ static int matching_3261( struct sip_msg *p_msg, struct cell **trans,
 	via1->tid.s=via1->branch->value.s+MCOOKIE_LEN;
 	via1->tid.len=via1->branch->value.len-MCOOKIE_LEN;
 
-	for ( p_cell = get_tm_table()->entrys[p_msg->hash_index].first_cell;
-		p_cell; p_cell = p_cell->next_cell ) 
-	{
+	hash_bucket=&(get_tm_table()->entries[p_msg->hash_index]);
+	clist_foreach(hash_bucket, p_cell, next_c){
+		prefetch_loc_r(p_cell->next_c, 1);
 		t_msg=p_cell->uas.request;
 		if (!t_msg) continue;  /* don't try matching UAC transactions */
 		/* we want to set *cancel for transaction for which there is
@@ -384,10 +386,10 @@ static int matching_3261( struct sip_msg *p_msg, struct cell **trans,
 			}
 			if (skip_method & t_msg->REQ_METHOD) continue;
 		}
+		prefetch_w(p_cell); /* great chance of modifiying it */
 		/* all matched -- we found the transaction ! */
 		DBG("DEBUG: RFC3261 transaction matched, tid=%.*s\n",
 			via1->tid.len, via1->tid.s);
-
 		*trans=p_cell;
 		return 1;
 	}
@@ -420,9 +422,10 @@ int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked,
 	struct via_param *branch;
 	int match_status;
 	struct cell *e2e_ack_trans;
+	struct entry* hash_bucket;
 
 	/* parse all*/
-	if (check_transaction_quadruple(p_msg)==0)
+	if (unlikely(check_transaction_quadruple(p_msg)==0))
 	{
 		LOG(L_ERR, "ERROR: TM module: t_lookup_request: too few headers\n");
 		set_t(0);	
@@ -465,7 +468,7 @@ int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked,
 				cancel);
 		switch(match_status) {
 				case 0:	goto notfound;	/* no match */
-				case 1:	goto found; 	/* match */
+				case 1:	 goto found; 	/* match */
 				case 2:	goto e2e_ack;	/* e2e proxy ACK */
 		}
 	}
@@ -478,10 +481,10 @@ int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked,
 	/* lock the whole entry*/
 	LOCK_HASH(p_msg->hash_index);
 
+	hash_bucket=&(get_tm_table()->entries[p_msg->hash_index]);
 	/* all the transactions from the entry are compared */
-	for ( p_cell = get_tm_table()->entrys[p_msg->hash_index].first_cell;
-		  p_cell; p_cell = p_cell->next_cell ) 
-	{
+	clist_foreach(hash_bucket, p_cell, next_c){
+		prefetch_loc_r(p_cell->next_c, 1);
 		t_msg = p_cell->uas.request;
 
 		if (!t_msg) continue; /* skip UAC transactions */
@@ -618,6 +621,7 @@ struct cell* t_lookupOriginalT(  struct sip_msg* p_msg )
 	unsigned int     hash_index;
 	struct sip_msg  *t_msg;
 	struct via_param *branch;
+	struct entry* hash_bucket;
 	int foo;
 	int ret;
 
@@ -664,16 +668,16 @@ struct cell* t_lookupOriginalT(  struct sip_msg* p_msg )
 
 	LOCK_HASH(hash_index);
 
+	hash_bucket=&(get_tm_table()->entries[hash_index]);
 	/* all the transactions from the entry are compared */
-	for (p_cell=get_tm_table()->entrys[hash_index].first_cell;
-		p_cell; p_cell = p_cell->next_cell )
-	{
+	clist_foreach(hash_bucket, p_cell, next_c){
+		prefetch_loc_r(p_cell->next_c, 1);
 		t_msg = p_cell->uas.request;
 
 		if (!t_msg) continue; /* skip UAC transactions */
 
 		/* we don't cancel CANCELs ;-) */
-		if (t_msg->REQ_METHOD==METHOD_CANCEL)
+		if (unlikely(t_msg->REQ_METHOD==METHOD_CANCEL))
 			continue;
 
 		/* check lengths now */	
@@ -753,6 +757,7 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
 	unsigned int entry_label  = 0;
 	unsigned int branch_id    = 0;
 	char  *hashi, *branchi, *p, *n;
+	struct entry* hash_bucket;
 	int hashl, branchl;
 	int scan_space;
 	str cseq_method;
@@ -823,12 +828,12 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
 	branchi=p;
 
 	/* sanity check */
-	if (reverse_hex2int(hashi, hashl, &hash_index)<0
+	if (unlikely(reverse_hex2int(hashi, hashl, &hash_index)<0
 		||hash_index>=TABLE_ENTRIES
 		|| reverse_hex2int(branchi, branchl, &branch_id)<0
 		||branch_id>=MAX_BRANCHES
 		|| (syn_branch ? (reverse_hex2int(syni, synl, &entry_label))<0 
-			: loopl!=MD5_LEN )
+			: loopl!=MD5_LEN ))
 	) {
 		DBG("DEBUG: t_reply_matching: poor reply labels %d label %d "
 			"branch %d\n", hash_index, entry_label, branch_id );
@@ -847,12 +852,12 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
 	is_cancel=cseq_method.len==CANCEL_LEN 
 		&& memcmp(cseq_method.s, CANCEL, CANCEL_LEN)==0;
 	LOCK_HASH(hash_index);
-	for (p_cell = get_tm_table()->entrys[hash_index].first_cell; p_cell; 
-		p_cell=p_cell->next_cell) {
-
+	hash_bucket=&(get_tm_table()->entries[hash_index]);
+	/* all the transactions from the entry are compared */
+	clist_foreach(hash_bucket, p_cell, next_c){
+		prefetch_loc_r(p_cell->next_c, 1);
 		/* first look if branch matches */
-
-		if (syn_branch) {
+		if (likely(syn_branch)) {
 			if (p_cell->label != entry_label) 
 				continue;
 		} else {
@@ -861,7 +866,7 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
 		}
 
 		/* sanity check ... too high branch ? */
-		if ( branch_id>=p_cell->nr_of_outgoings )
+		if (unlikely(branch_id>=p_cell->nr_of_outgoings))
 			continue;
 
 		/* does method match ? (remember -- CANCELs have the same branch
@@ -953,6 +958,7 @@ int t_check( struct sip_msg* p_msg , int *param_branch )
 		/* transaction lookup */
 		if ( p_msg->first_line.type==SIP_REQUEST ) {
 			/* force parsing all the needed headers*/
+			prefetch_loc_r(p_msg->unparsed+64, 1);
 			if (parse_headers(p_msg, HDR_EOH_F, 0 )==-1) {
 				LOG(L_ERR, "ERROR: t_check: parsing error\n");
 				return -1;
@@ -1380,21 +1386,23 @@ int t_get_trans_ident(struct sip_msg* p_msg, unsigned int* hash_index, unsigned
     return 1;
 }
 
-int t_lookup_ident(struct cell ** trans, unsigned int hash_index, unsigned int label)
+int t_lookup_ident(struct cell ** trans, unsigned int hash_index, 
+					unsigned int label)
 {
-    struct cell* p_cell;
+	struct cell* p_cell;
+	struct entry* hash_bucket;
 
-    if(hash_index >= TABLE_ENTRIES){
+	if(unlikely(hash_index >= TABLE_ENTRIES)){
 		LOG(L_ERR,"ERROR: t_lookup_ident: invalid hash_index=%u\n",hash_index);
 		return -1;
-    }
-
-    LOCK_HASH(hash_index);
-
-    /* all the transactions from the entry are compared */
-    for ( p_cell = get_tm_table()->entrys[hash_index].first_cell;
-	  p_cell; p_cell = p_cell->next_cell ) 
-    {
+	}
+	
+	LOCK_HASH(hash_index);
+	
+	hash_bucket=&(get_tm_table()->entries[hash_index]);
+	/* all the transactions from the entry are compared */
+	clist_foreach(hash_bucket, p_cell, next_c){
+		prefetch_loc_r(p_cell->next_c, 1);
 		if(p_cell->label == label){
 			REF_UNSAFE(p_cell);
     			UNLOCK_HASH(hash_index);
@@ -1436,6 +1444,7 @@ int t_is_local(struct sip_msg* p_msg)
 int t_lookup_callid(struct cell ** trans, str callid, str cseq) {
 	struct cell* p_cell;
 	unsigned hash_index;
+	struct entry* hash_bucket;
 
 	/* I use MAX_HEADER, not sure if this is a good choice... */
 	char callid_header[MAX_HEADER];
@@ -1454,7 +1463,7 @@ int t_lookup_callid(struct cell ** trans, str callid, str cseq) {
 	/* lookup the hash index where the transaction is stored */
 	hash_index=hash(callid, cseq);
 
-	if(hash_index >= TABLE_ENTRIES){
+	if(unlikely(hash_index >= TABLE_ENTRIES)){
 		LOG(L_ERR,"ERROR: t_lookup_callid: invalid hash_index=%u\n",hash_index);
 		return -1;
 	}
@@ -1471,15 +1480,18 @@ int t_lookup_callid(struct cell ** trans, str callid, str cseq) {
 	LOCK_HASH(hash_index);
 	DBG("just locked hash index %u, looking for transactions there:\n", hash_index);
 
+	hash_bucket=&(get_tm_table()->entries[hash_index]);
 	/* all the transactions from the entry are compared */
-	for ( p_cell = get_tm_table()->entrys[hash_index].first_cell;
-	  p_cell; p_cell = p_cell->next_cell ) {
+	clist_foreach(hash_bucket, p_cell, next_c){
 		
-		/* compare complete header fields, casecmp to make sure invite=INVITE */
-		if ( (strncmp(callid_header, p_cell->callid.s, p_cell->callid.len) == 0)
-			&& (strncasecmp(cseq_header, p_cell->cseq_n.s, p_cell->cseq_n.len) == 0) ) {
-			DBG("we have a match: callid=>>%.*s<< cseq=>>%.*s<<\n", p_cell->callid.len, 
-				p_cell->callid.s, p_cell->cseq_n.len, p_cell->cseq_n.s);
+		prefetch_loc_r(p_cell->next_c, 1);
+		/* compare complete header fields, casecmp to make sure invite=INVITE*/
+		if ((strncmp(callid_header, p_cell->callid.s, p_cell->callid.len) == 0)
+			&& (strncasecmp(cseq_header, p_cell->cseq_n.s, p_cell->cseq_n.len)
+				== 0)) {
+			DBG("we have a match: callid=>>%.*s<< cseq=>>%.*s<<\n",
+					p_cell->callid.len, p_cell->callid.s, p_cell->cseq_n.len,
+					p_cell->cseq_n.s);
 			REF_UNSAFE(p_cell);
 			UNLOCK_HASH(hash_index);
 			set_t(p_cell);