Bladeren bron

- improved qm_malloc (added free lst hashtable)
- quick i386 locking (in the dirty hack state)

Andrei Pelinescu-Onciul 23 jaren geleden
bovenliggende
commit
eca7f442ce
9 gewijzigde bestanden met toevoegingen van 305 en 123 verwijderingen
  1. 3 1
      Makefile.defs
  2. 66 0
      fastlock.h
  3. 165 94
      mem/q_malloc.c
  4. 31 2
      mem/q_malloc.h
  5. 19 1
      mem/shm_mem.c
  6. 16 1
      mem/shm_mem.h
  7. 1 1
      modules/tm/lock.c
  8. 3 3
      test/stateless.cfg
  9. 1 20
      test/th-uri.cfg

+ 3 - 1
Makefile.defs

@@ -53,7 +53,9 @@ ARCH = $(shell uname -s)
 
 DEFS+= -DNAME='"$(NAME)"' -DVERSION='"$(RELEASE)"' -DARCH='"$(ARCH)"' \
 	 -DDNS_IP_HACK  -DPKG_MALLOC -DSHM_MEM  -DSHM_MMAP \
-	-DVQ_MALLOC -DUSE_SYNONIM #-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
+	 -DUSE_SYNONIM \
+	 -DFAST_LOCK -Di386
+	 #-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
 	#-DEXTRA_DEBUG -DBRUT_HACK \
 	#-DVQ_MALLOC  -DDBG_LOCK  #-DSTATS
 	  #-DDBG_QM_MALLOC #-DVQ_MALLOC #-DNO_DEBUG

+ 66 - 0
fastlock.h

@@ -0,0 +1,66 @@
+/*
+ * fast architecture specific locking
+ *
+ * $Id$
+ *
+ * 
+ */
+
+
+
+#ifndef fastlock_h
+#define fastlock_h
+
+
+#include <sched.h>
+
+
+#ifdef i386
+
+
+typedef  volatile int lock_t;
+
+
+
+#define init_lock( l ) (l)=0
+
+
+
+/*test and set lock, ret 1 if lock held by someone else, 0 otherwise*/
+inline static int tsl(lock_t* lock)
+{
+	volatile char val;
+	
+	val=1;
+	asm volatile( 
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
+	);
+	return val;
+}
+
+
+
+inline static void get_lock(lock_t* lock)
+{
+	
+	while(tsl(lock)){
+		sched_yield();
+	}
+}
+
+
+
+inline static void release_lock(lock_t* lock)
+{
+	char val;
+
+	val=0;
+	asm volatile(
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
+	);
+}
+
+#endif
+
+
+#endif

+ 165 - 94
mem/q_malloc.c

@@ -26,6 +26,34 @@
 #define PREV_FRAG_END(f) \
 	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))
 
+
+#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
+
+
+#define ROUNDUP(s)		(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
+#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
+
+
+
+	/* finds the hash value for s, s=ROUNDTO multiple*/
+#define GET_HASH(s)   ( ((s)<QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
+						QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
+							QM_MALLOC_OPTIMIZE_FACTOR+1 )
+
+
+/* computes hash number for big buckets*/
+inline static int big_hash_idx(int s)
+{
+	int idx;
+	/* s is rounded => s = k*2^n (ROUNDTO=2^n) 
+	 * index= i such that 2^(i+1) > s >= 2^i
+	 *
+	 * => index = number of the first non null bit in s*/
+	for (idx=31; !(s&0x80000000) ; s<<=1, idx--);
+	return idx;
+}
+
+
 #ifdef DBG_QM_MALLOC
 #define ST_CHECK_PATTERN   0xf0f0f0f0
 #define END_CHECK_PATTERN1 0xc0c0c0c0
@@ -60,6 +88,27 @@ static  void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
 
 
 
+static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
+{
+	struct qm_frag* f;
+	struct qm_frag* prev;
+	int hash;
+	
+	hash=GET_HASH(frag->size);
+	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
+			f=f->u.nxt_free){
+		if (frag->size <= f->size) break;
+	}
+	/*insert it here*/
+	prev=FRAG_END(f)->prev_free;
+	prev->u.nxt_free=frag;
+	FRAG_END(frag)->prev_free=prev;
+	frag->u.nxt_free=f;
+	FRAG_END(f)->prev_free=frag;
+}
+
+
+
 /* init malloc and return a qm_block*/
 struct qm_block* qm_malloc_init(char* address, unsigned int size)
 {
@@ -67,17 +116,24 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
 	char* end;
 	struct qm_block* qm;
 	unsigned int init_overhead;
+	int h;
 	
 	/* make address and size a multiple of ROUNDTO*/
-	start=(char*)( ((unsigned int)address%8)?((unsigned int)address+8)/8*8:
-			(unsigned int)address);
+	start=(char*)ROUNDUP((unsigned int) address);
+	printf("qm_malloc_init: QM_OPTIMIZE=%d, /ROUNDTO=%d\n",
+			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
+	printf("qm_malloc_init: QM_HASH_SIZE=%d, qm_block size=%d\n",
+			QM_HASH_SIZE, sizeof(struct qm_block));
+	printf("qm_malloc_init(%x, %d), start=%x\n", address, size, start);
 	if (size<start-address) return 0;
 	size-=(start-address);
-	if (size <8) return 0;
-	size=(size%8)?(size-8)/8*8:size;
+	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
+	size=ROUNDDOWN(size);
 	
 	init_overhead=sizeof(struct qm_block)+sizeof(struct qm_frag)+
 		sizeof(struct qm_frag_end);
+	printf("qm_malloc_init: size= %d, init_overhead=%d\n", size, init_overhead);
+	
 	if (size < init_overhead)
 	{
 		/* not enough mem to create our control structures !!!*/
@@ -95,42 +151,34 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
 	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
 	/* init initial fragment*/
 	qm->first_frag->size=size;
-	qm->first_frag->u.nxt_free=&(qm->free_lst);
 	qm->last_frag_end->size=size;
-	qm->last_frag_end->prev_free=&(qm->free_lst);
+	
 #ifdef DBG_QM_MALLOC
 	qm->first_frag->check=ST_CHECK_PATTERN;
 	qm->last_frag_end->check1=END_CHECK_PATTERN1;
 	qm->last_frag_end->check2=END_CHECK_PATTERN2;
 #endif
-	/* init free_lst* */
-	qm->free_lst.u.nxt_free=qm->first_frag;
-	qm->free_lst_end.prev_free=qm->first_frag;
-	qm->free_lst.size=0;
-	qm->free_lst_end.size=0;
+	/* init free_hash* */
+	for (h=0; h<QM_HASH_SIZE;h++){
+		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
+		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
+		qm->free_hash[h].head.size=0;
+		qm->free_hash[h].tail.size=0;
+	}
+	
+	/* link initial fragment into the free list*/
+	
+	qm_insert_free(qm, qm->first_frag);
+	
+	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
+	  qm->last_frag_end->prev_free=&(qm->free_lst);
+	*/
 	
 	
 	return qm;
 }
 
 
-static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
-{
-	struct qm_frag* f;
-	struct qm_frag* prev;
-
-	for(f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
-		if (frag->size < f->size) break;
-	}
-	/*insert it here*/
-	prev=FRAG_END(f)->prev_free;
-	prev->u.nxt_free=frag;
-	FRAG_END(frag)->prev_free=prev;
-	frag->u.nxt_free=f;
-	FRAG_END(f)->prev_free=frag;
-}
-
-
 
 static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
 {
@@ -148,6 +196,28 @@ static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
 
 
 
+static inline struct qm_frag* qm_find_free(struct qm_block* qm, 
+										unsigned int size)
+{
+	int hash;
+	struct qm_frag* f;
+
+	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
+		for (f=qm->free_hash[hash].head.u.nxt_free; 
+					f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
+	#ifdef DBG_QM_MALLOC
+			list_cntr++;
+	#endif
+			if (f->size>=size) return f;
+		}
+	/*try in a bigger bucket*/
+	}
+	/* not found */
+	return 0;
+}
+
+
+
 #ifdef DBG_QM_MALLOC
 void* qm_malloc(struct qm_block* qm, unsigned int size, char* file, char* func,
 					unsigned int line)
@@ -159,7 +229,6 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
 	struct qm_frag_end* end;
 	struct qm_frag* n;
 	unsigned int rest;
-	unsigned int overhead;
 	
 #ifdef DBG_QM_MALLOC
 	unsigned int list_cntr;
@@ -169,69 +238,63 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
 			line);
 #endif
 	/*size must be a multiple of ROUNDTO*/
-	size=(size%8)?(size+8)/8*8:size;
+	size=ROUNDUP(size);
 	if (size>(qm->size-qm->real_used)) return 0;
-	if (qm->free_lst.u.nxt_free==&(qm->free_lst)) return 0;
 	/*search for a suitable free frag*/
-	for (f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
-#ifdef DBG_QM_MALLOC
-		list_cntr++;
-#endif
-		
-		if (f->size>=size){
-			/* we found it!*/
-			/*detach it from the free list*/
+	f=qm_find_free(qm, size);
+
+	if ((f=qm_find_free(qm, size))!=0){
+		/* we found it!*/
+		/*detach it from the free list*/
 #ifdef DBG_QM_MALLOC
 			qm_debug_frag(qm, f);
 #endif
-			qm_detach_free(qm, f);
-			/*mark it as "busy"*/
-			f->u.is_free=0;
-			
-			/*see if we'll use full frag, or we'll split it in 2*/
-			rest=f->size-size;
-			overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
-			if (rest>overhead){
-				f->size=size;
-				/*split the fragment*/
-				end=FRAG_END(f);
-				end->size=size;
-				n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
-				n->size=rest-overhead;
-				FRAG_END(n)->size=n->size;
-				qm->real_used+=overhead;
+		qm_detach_free(qm, f);
+		/*mark it as "busy"*/
+		f->u.is_free=0;
+		
+		/*see if we'll use full frag, or we'll split it in 2*/
+		rest=f->size-size;
+		if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
+			f->size=size;
+			/*split the fragment*/
+			end=FRAG_END(f);
+			end->size=size;
+			n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
+			n->size=rest-FRAG_OVERHEAD;
+			FRAG_END(n)->size=n->size;
+			qm->real_used+=FRAG_OVERHEAD;
 #ifdef DBG_QM_MALLOC
-				end->check1=END_CHECK_PATTERN1;
-				end->check2=END_CHECK_PATTERN2;
-				/* frag created by malloc, mark it*/
-				n->file=file;
-				n->func="frag. from qm_malloc";
-				n->line=line;
-				n->check=ST_CHECK_PATTERN;
-/*				FRAG_END(n)->check1=END_CHECK_PATTERN1;
-				FRAG_END(n)->check2=END_CHECK_PATTERN2; */
+			end->check1=END_CHECK_PATTERN1;
+			end->check2=END_CHECK_PATTERN2;
+			/* frag created by malloc, mark it*/
+			n->file=file;
+			n->func="frag. from qm_malloc";
+			n->line=line;
+			n->check=ST_CHECK_PATTERN;
+/*			FRAG_END(n)->check1=END_CHECK_PATTERN1;
+			FRAG_END(n)->check2=END_CHECK_PATTERN2; */
 #endif
-				/* reinsert n in free list*/
-				qm_insert_free(qm, n);
-			}else{
-				/* we cannot split this fragment any more => alloc all of it*/
-			}
-			qm->real_used+=f->size;
-			qm->used+=f->size;
-			if (qm->max_real_used<qm->real_used)
-				qm->max_real_used=qm->real_used;
+			/* reinsert n in free list*/
+			qm_insert_free(qm, n);
+		}else{
+			/* we cannot split this fragment any more => alloc all of it*/
+		}
+		qm->real_used+=f->size;
+		qm->used+=f->size;
+		if (qm->max_real_used<qm->real_used)
+			qm->max_real_used=qm->real_used;
 #ifdef DBG_QM_MALLOC
-			f->file=file;
-			f->func=func;
-			f->line=line;
-			f->check=ST_CHECK_PATTERN;
+		f->file=file;
+		f->func=func;
+		f->line=line;
+		f->check=ST_CHECK_PATTERN;
 		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
 			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
-	DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
+		DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
 			(char*)f+sizeof(struct qm_frag), list_cntr );
 #endif
-			return (char*)f+sizeof(struct qm_frag);
-		}
+		return (char*)f+sizeof(struct qm_frag);
 	}
 	return 0;
 }
@@ -249,7 +312,6 @@ void qm_free(struct qm_block* qm, void* p)
 	struct qm_frag* prev;
 	struct qm_frag* next;
 	struct qm_frag_end *end;
-	unsigned int overhead;
 	unsigned int size;
 
 #ifdef DBG_QM_MALLOC
@@ -261,7 +323,7 @@ void qm_free(struct qm_block* qm, void* p)
 	}
 #endif
 	if (p==0) {
-		DBG("WARNING:qm_free: free(0) called\n");
+		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
 		return;
 	}
 	prev=next=0;
@@ -277,7 +339,6 @@ void qm_free(struct qm_block* qm, void* p)
 	DBG("qm_free: freeing block alloc'ed from %s: %s(%d)\n", f->file, f->func,
 			f->line);
 #endif
-	overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
 	next=FRAG_NEXT(f);
 	size=f->size;
 	qm->used-=size;
@@ -285,11 +346,15 @@ void qm_free(struct qm_block* qm, void* p)
 #ifdef DBG_QM_MALLOC
 	qm_debug_frag(qm, f);
 #endif
+
+#ifdef QM_JOIN_FREE
+	/* join adjacent free fragments if possible*/
+
 	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
 		/* join */
 		qm_detach_free(qm, next);
-		size+=next->size+overhead;
-		qm->real_used-=overhead;
+		size+=next->size+FRAG_OVERHEAD;
+		qm->real_used-=FRAG_OVERHEAD;
 	}
 	
 	if (f > qm->first_frag){
@@ -302,13 +367,14 @@ void qm_free(struct qm_block* qm, void* p)
 		if (prev->u.is_free){
 			/*join*/
 			qm_detach_free(qm, prev);
-			size+=prev->size+overhead;
-			qm->real_used-=overhead;
+			size+=prev->size+FRAG_OVERHEAD;
+			qm->real_used-=FRAG_OVERHEAD;
 			f=prev;
 		}
 	}
 	f->size=size;
 	FRAG_END(f)->size=f->size;
+#endif /* QM_JOIN_FREE*/
 #ifdef DBG_QM_MALLOC
 	f->file=file;
 	f->func=func;
@@ -322,7 +388,8 @@ void qm_free(struct qm_block* qm, void* p)
 void qm_status(struct qm_block* qm)
 {
 	struct qm_frag* f;
-	int i;
+	int i,j;
+	int h;
 
 	LOG(L_INFO, "qm_status (%x):\n", qm);
 	if (!qm) return;
@@ -345,15 +412,19 @@ void qm_status(struct qm_block* qm)
 				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
 #endif
 	}
-	DBG("dumping free list:\n");
-	for (f=qm->free_lst.u.nxt_free,i=0; f!=&(qm->free_lst); f=f->u.nxt_free,
-			i++){
-		DBG("    %3d. %c  address=%x  size=%d\n", i, (f->u.is_free)?'a':'N',
-				(char*)f+sizeof(struct qm_frag), f->size);
+	LOG(L_INFO, "dumping free list:\n");
+	for(h=0,i=0;h<QM_HASH_SIZE;h++){
+		
+		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
+				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
+			LOG(L_INFO, "   %5d.[%3d:%3d] %c  address=%x  size=%d\n", i, h, j,
+					(f->u.is_free)?'a':'N',
+					(char*)f+sizeof(struct qm_frag), f->size);
 #ifdef DBG_QM_MALLOC
-		DBG("            %s from %s: %s(%d)\n", 
+			DBG("            %s from %s: %s(%d)\n", 
 				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
 #endif
+		}
 	}
 	LOG(L_INFO, "-----------------------------\n");
 }

+ 31 - 2
mem/q_malloc.h

@@ -7,6 +7,28 @@
 #define q_malloc_h
 
 
+
+/* defs*/
+
+#define ROUNDTO		16 /* size we round to, must be = 2^n */
+#define MIN_FRAG_SIZE	ROUNDTO
+
+
+
+#define QM_MALLOC_OPTIMIZE_FACTOR 10 /*used below */
+#define QM_MALLOC_OPTIMIZE  (1<<QM_MALLOC_OPTIMIZE_FACTOR)
+								/* size to optimize for,
+									(most allocs < this size),
+									must be 2^k */
+
+#define QM_HASH_SIZE (QM_MALLOC_OPTIMIZE/ROUNDTO + \
+		(32-QM_MALLOC_OPTIMIZE_FACTOR)+1)
+
+/* hash structure:
+ * 0 .... QM_MALLOC_OPTIMIZE/ROUNDTO  - small buckets, size increases with
+ *                            ROUNDTO from bucket to bucket
+ * +1 .... end -  size = 2^k, big buckets */
+
 struct qm_frag{
 	unsigned int size;
 	union{
@@ -31,6 +53,13 @@ struct qm_frag_end{
 };
 
 
+
+struct qm_frag_full{
+	struct qm_frag head;
+	struct qm_frag_end tail;
+};
+
+
 struct qm_block{
 	unsigned int size; /* total size */
 	unsigned int used; /* alloc'ed size*/
@@ -40,8 +69,8 @@ struct qm_block{
 	struct qm_frag* first_frag;
 	struct qm_frag_end* last_frag_end;
 	
-	struct qm_frag free_lst;
-	struct qm_frag_end free_lst_end;
+	struct qm_frag_full free_hash[QM_HASH_SIZE];
+	/*struct qm_frag_end free_lst_end;*/
 };
 
 

+ 19 - 1
mem/shm_mem.c

@@ -18,6 +18,10 @@
 
 #endif
 
+#ifdef FAST_LOCK
+#include "../fastlock.h"
+#endif
+
 
 /* define semun */
 #if defined(__GNU_LIBRARY__) && !defined(_SEM_SEMUN_UNDEFINED)
@@ -38,8 +42,12 @@
 static int shm_shmid=-1; /*shared memory id*/
 #endif
 
-
+#ifdef FAST_LOCK
+lock_t* mem_lock=0;
+#else
 int shm_semid=-1; /*semaphore id*/
+#endif
+
 static void* shm_mempool=(void*)-1;
 #ifdef VQ_MALLOC
 	struct vqm_block* shm_block;
@@ -153,6 +161,8 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+
+#ifndef FAST_LOCK
 	/* alloc a semaphore (for malloc)*/
 	shm_semid=semget(IPC_PRIVATE, 1, 0700);
 	if (shm_semid==-1){
@@ -170,6 +180,7 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+#endif
 	/* init it for malloc*/
 #	ifdef VQ_MALLOC
 		shm_block=vqm_malloc_init(shm_mempool, SHM_MEM_SIZE);
@@ -182,6 +193,11 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+#ifdef FAST_LOCK
+	mem_lock=shm_malloc_unsafe(sizeof(lock_t));
+	init_lock(*mem_lock);
+#endif
+	
 	DBG("shm_mem_init: success\n");
 	
 	return 0;
@@ -210,10 +226,12 @@ void shm_mem_destroy()
 		shm_shmid=-1;
 	}
 #endif
+#ifndef FAST_LOCK
 	if (shm_semid!=-1) {
 		semctl(shm_semid, 0, IPC_RMID, (union semun)0);
 		shm_semid=-1;
 	}
+#endif
 }
 
 

+ 16 - 1
mem/shm_mem.h

@@ -40,16 +40,30 @@
 #	define MY_FREE qm_free
 #	define MY_STATUS qm_status
 #endif
-extern int shm_semid;
+
+#ifdef FAST_LOCK
+#include "../fastlock.h"
+	
+	extern lock_t* mem_lock;
+#else
+extern  int shm_semid;
+#endif
+
 
 int shm_mem_init();
 void shm_mem_destroy();
 
 
+#ifdef FAST_LOCK
 
+#define shm_lock()    get_lock(mem_lock)
+#define shm_unlock()  release_lock(mem_lock)
+
+#else
 /* inline functions (do not move them to *.c, they won't be inlined anymore) */
 static inline void shm_lock()
 {
+
 	struct sembuf sop;
 	
 	sop.sem_num=0;
@@ -98,6 +112,7 @@ again:
 }
 
 /* ret -1 on error*/
+#endif
 
 
 

+ 1 - 1
modules/tm/lock.c

@@ -232,7 +232,6 @@ void lock_cleanup()
 
 
 
-
 /* lock semaphore s */
 #ifdef DBG_LOCK
 inline int _lock( ser_lock_t s , char *file, char *function, unsigned int line )
@@ -255,6 +254,7 @@ inline int _unlock( ser_lock_t s )
 #ifdef DBG_LOCK
 	DBG("DEBUG: unlock : entered from %s, %s:%d\n", file, function, line );
 #endif
+	
 	return change_semaphore( s, +1 );
 }
 

+ 3 - 3
test/stateless.cfg

@@ -5,7 +5,7 @@
 #
 
 
-debug=1          # debug level (cmd line: -dddddddddd)
+debug=3          # debug level (cmd line: -dddddddddd)
 #fork=yes          # (cmd. line: -D)
 fork=no
 log_stderror=yes # (cmd line: -E)
@@ -17,8 +17,8 @@ check_via=no     # (cmd. line: -v)
 dns=on           # (cmd. line: -r)
 rev_dns=yes      # (cmd. line: -R)
 #port=5070
-#listen=127.0.0.1
-listen=192.168.57.33
+listen=127.0.0.1
+#listen=192.168.57.33
 loop_checks=0
 # for more info: sip_router -h
 

+ 1 - 20
test/th-uri.cfg

@@ -5,7 +5,7 @@
 #
 
 
-debug=9          # debug level (cmd line: -dddddddddd)
+debug=3          # debug level (cmd line: -dddddddddd)
 #fork=yes          # (cmd. line: -D)
 fork=no
 log_stderror=yes # (cmd line: -E)
@@ -27,7 +27,6 @@ loop_checks=0
 loadmodule "modules/tm/tm.so"
 loadmodule "modules/rr/rr.so"
 loadmodule "modules/maxfwd/maxfwd.so"
-loadmodule "modules/cpl/cpl.so"
 
 
 route{
@@ -46,24 +45,6 @@ route{
                    mf_add_maxfwd_header( "10" );
              };
 
-             if (method=="INVITE")
-             {
-                log("SER : runing CPL!! :)\n");
-                if ( !cpl_run_script() )
-                {
-                   log("SER : Error during running CPL script!\n");
-                }else{
-                   if ( cpl_is_response_reject() )
-                   {
-                       t_add_transaction();
-                       t_send_reply("486","I am not available!");
-                       drop();
-                   }else if ( cpl_is_response_redirect() ) {
-                         log("SER : redirect\n");
-                   };
-                };
-             };
-
              #if ( !rewriteFromRoute() )
              #{
                 log( " SER : no route found!\n");