- added sf_malloc: a multi-process safe (internal locking), multi-pool
f_malloc version. Should perform better on machines with many CPUs once
properly tuned (experimental for now)
- added ll_malloc: same as above, but most operations are lockless
(except for "big" fragment handling). For now it needs tuning and
it still keeps too many debugging statistics. (experimental for now)

Andrei Pelinescu-Onciul 18 years ago
parent commit fbe119140b
10 changed files with 2699 additions and 11 deletions
  1. Makefile.defs (+11 -1)
  2. main.c (+2 -0)
  3. mem/ll_malloc.c (+1095 -0)
  4. mem/ll_malloc.h (+178 -0)
  5. mem/sf_malloc.c (+1114 -0)
  6. mem/sf_malloc.h (+177 -0)
  7. mem/shm_mem.c (+16 -1)
  8. mem/shm_mem.h (+74 -7)
  9. pt.c (+2 -0)
  10. version.h (+30 -2)

+ 11 - 1
Makefile.defs

@@ -75,7 +75,7 @@ MAIN_NAME=ser
 VERSION = 2
 PATCHLEVEL = 1
 SUBLEVEL =  0
-EXTRAVERSION = -dev7-tm
+EXTRAVERSION = -dev8-sf_malloc
 
 
 SER_VER = $(shell expr $(VERSION) \* 1000000 + $(PATCHLEVEL) \* 1000 + \
 			$(SUBLEVEL) )
@@ -318,6 +318,14 @@ endif
 #		an even faster malloc, not recommended for debugging
 # -DDL_MALLOC
 #		a malloc implementation based on Doug Lea's dl_malloc
+# -DSF_MALLOC 
+#		an experimental multi-CPU, pool based, multi-process safe version of 
+#		F_MALLOC. Should give better performance on machines with lots of CPUs
+#		after some tuning.
+# -DLL_MALLOC
+#		an experimental multi-CPU, pool based, multi-process safe, mostly
+#		lockless version of SF_MALLOC/F_MALLOC. Not for production use for
+#		now.
 # -DDBG_MALLOC
 #		issues additional debugging information if lock/unlock is called
 # -DFAST_LOCK
@@ -413,6 +421,8 @@ DEFS+= $(extra_defs) \
 	 -DUSE_DNS_FAILOVER \
 	 -DUSE_DST_BLACKLIST \
 	 -DDBG_QM_MALLOC \
+	 #-DLL_MALLOC \
+	 #-DSF_MALLOC \
 	 #-DDL_MALLOC \
 	 #-DF_MALLOC \
 	 #-DDBG_F_MALLOC \

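The hunk above documents the new flags and adds them, commented out, to the DEFS block. A minimal way to try one of them, assuming the usual ser build conventions (the active -DDBG_QM_MALLOC line would likely need to be commented out first, since the allocator defines are meant to be mutually exclusive):

	make clean
	make all extra_defs=-DSF_MALLOC
	# or, for the mostly lockless variant:
	make all extra_defs=-DLL_MALLOC
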
+ 2 - 0
main.c

@@ -453,10 +453,12 @@ char* pgid_file = 0;
 void cleanup(show_status)
 {
 	/*clean-up*/
+#ifndef SHM_SAFE_MALLOC
 	if (mem_lock)
 		shm_unlock(); /* hack: force-unlock the shared memory lock in case
 					 some process crashed and left it locked; this will
 					 allow an almost graceful shutdown */
+#endif
 	destroy_modules();
 #ifdef USE_DNS_CACHE
 	destroy_dns_cache();

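The #ifndef above means the force-unlock is skipped entirely when SHM_SAFE_MALLOC is defined: the new allocators lock internally, so there is no single global shm lock left to release after a crash. The shm_mem.h hunk is not reproduced on this page, but presumably the selection works along these lines (hypothetical sketch, not the commit's actual code):

	/* hypothetical sketch of the shm_mem.h allocator selection */
	#ifdef SF_MALLOC
	#	include "sf_malloc.h"
	#	define SHM_SAFE_MALLOC	/* locks internally, no global shm lock */
	#elif defined LL_MALLOC
	#	include "ll_malloc.h"
	#	define SHM_SAFE_MALLOC
	#endif
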
+ 1095 - 0
mem/ll_malloc.c

@@ -0,0 +1,1095 @@
+/* $Id$
+ *
+ * shared memory, multi-process safe, pool based, mostly lockless version of 
+ *  f_malloc
+ *
+ * Copyright (C) 2007 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * History:
+ * --------
+ *              created by andrei
+ *  2003-07-06  added fm_realloc (andrei)
+ *  2004-07-19  fragment bookkeeping code and support for 64 bit
+ *               memory blocks (64 bit machine & size >= 2^32)
+ *              GET_HASH s/</<=/ (avoids waste of 1 hash cell)   (andrei)
+ *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
+ *  2005-03-02  added fm_info() (andrei)
+ *  2005-12-12  fixed realloc shrink real_used accounting (andrei)
+ *              fixed initial size (andrei)
+ *  2006-02-03  fixed realloc out of mem. free bug (andrei)
+ *  2006-04-07  s/DBG/MDBG (andrei)
+ *  2007-02-23  added fm_available() (andrei)
+ *  2007-06-09  forked from the fm_malloc code (andrei)
+ *  2007-06-11  forked from the sfm_malloc code (andrei)
+ */
+
+
+#ifdef LL_MALLOC
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "ll_malloc.h"
+#include "../dprint.h"
+#include "../globals.h"
+#include "memdbg.h"
+
+#define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
+#define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */
+
+/*useful macros*/
+
+#define FRAG_NEXT(f) \
+	((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size ))
+
+
+/* SF_ROUNDTO= 2^k so the following works */
+#define ROUNDTO_MASK	(~((unsigned long)SF_ROUNDTO-1))
+#define ROUNDUP(s)		(((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
+#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)
+
+#define FRAG_OVERHEAD	(sizeof(struct sfm_frag))
+#define INIT_OVERHEAD	\
+	(ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))
+
+
+
+/* finds hash if s <=SF_MALLOC_OPTIMIZE */
+#define GET_SMALL_HASH(s) (unsigned long)(s)/SF_ROUNDTO
+/* finds hash if s > SF_MALLOC_OPTIMIZE */
+#define GET_BIG_HASH(s) \
+	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)
+
+/* finds the hash value for s, s=SF_ROUNDTO multiple*/
+#define GET_HASH(s)   ( ((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
+							GET_SMALL_HASH(s): GET_BIG_HASH(s) )
+
+
+#define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
+#define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
+							SF_MALLOC_OPTIMIZE_FACTOR-1))
+
+#define UN_HASH(h)	( ((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
+						UN_HASH_SMALL(h): UN_HASH_BIG(h) )
+
+#define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
+#define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/ BITMAP_BITS)
+/* only for "small" hashes (up to HASH(SF_MALLOC_OPTIMIZE)) */
+#define HASH_BIT_POS(h) (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
+#define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
+#define BIT_TO_HASH(b) ((b)*BITMAP_BLOCK_SIZE)
+
+
+
+/* mark/test used/unused frags */
+#define FRAG_MARK_USED(f)
+#define FRAG_CLEAR_USED(f)
+#define FRAG_WAS_USED(f)   (1)
+
+/* other frag related defines:
+ * MEM_COALESCE_FRAGS 
+ * MEM_FRAG_AVOIDANCE
+ */
+#define MEM_FRAG_AVOIDANCE
+
+
+#define SFM_REALLOC_REMALLOC
+
+/* computes hash number for big buckets*/
+inline static unsigned long big_hash_idx(unsigned long s)
+{
+	unsigned long idx;
+	/* s is rounded => s = k*2^n (SF_ROUNDTO=2^n)
+	 * index = i such that 2^(i+1) > s >= 2^i
+	 *
+	 * => index = position of the most significant set bit in s */
+	idx=sizeof(long)*8-1;
+	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
+	return idx;
+}
+
+
+#ifdef DBG_F_MALLOC
+#define ST_CHECK_PATTERN   0xf0f0f0f0
+#define END_CHECK_PATTERN1 0xc0c0c0c0
+#define END_CHECK_PATTERN2 0xabcdefed
+#endif
+
+
+#ifdef SFM_ONE_LOCK
+
+#define SFM_MAIN_HASH_LOCK(qm, hash) lock_get(&(qm)->lock)
+#define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
+#define SFM_POOL_LOCK(p, hash) lock_get(&(p)->lock)
+#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->lock)
+
+#warning "degraded performance, only one lock"
+
+#elif defined SFM_LOCK_PER_BUCKET
+
+#define SFM_MAIN_HASH_LOCK(qm, hash) \
+	lock_get(&(qm)->free_hash[(hash)].lock)
+#define SFM_MAIN_HASH_UNLOCK(qm, hash)  \
+	lock_release(&(qm)->free_hash[(hash)].lock)
+#define SFM_POOL_LOCK(p, hash) lock_get(&(p)->pool_hash[(hash)].lock)
+#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)
+#else
+#error no locks defined
+#endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */
+
+#define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
+#define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)
+
+static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
+										 searching further) */
+static unsigned long pool_id=(unsigned long)-1;
+
+
+/* call for each child */
+int sfm_pool_reset()
+{
+	pool_id=(unsigned long)-1;
+	return 0;
+}
+
+
+#define sfm_fix_pool_id(qm) \
+	do{ \
+		if (unlikely(pool_id>=SFM_POOLS_NO)) \
+			pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
+	}while(0)
+
+
+
+static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
+{
+	register struct sfm_frag* old;
+	register struct sfm_frag* crt;
+	
+	crt=(void*)atomic_get_long(head);
+	do{
+		frag->u.nxt_free=crt;
+		old=crt;
+		membar_write_atomic_op();
+		crt=(void*)atomic_cmpxchg_long((void*)head, (long)old, (long)frag);
+	}while(crt!=old);
+}
+
+
+static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
+{
+	register struct sfm_frag* old;
+	register struct sfm_frag* crt;
+	register struct sfm_frag* nxt;
+	
+	crt=(void*)atomic_get_long(head);
+	do{
+		/* if circular list, test not needed */
+		nxt=crt?crt->u.nxt_free:0;
+		old=crt;
+		membar_read_atomic_op();
+		crt=(void*)atomic_cmpxchg_long((void*)head, (long)old, (long)nxt);
+	}while(crt!=old);
+	return crt;
+}
+
+
+static inline void sfm_pool_insert (struct sfm_pool* pool, int hash,
+								struct sfm_frag* frag)
+{
+	unsigned long hash_bit;
+
+	frag_push(&pool->pool_hash[hash].first, frag);
+	atomic_inc_long((long*)&pool->pool_hash[hash].no);
+	/* set it only if not already set (avoids an expensive
+	 * cache thrashing atomic write op) */
+	hash_bit=HASH_TO_BITMAP(hash);
+	if  (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
+		atomic_or_long((long*)&pool->bitmap, hash_bit);
+}
+
+
+
+/* returns 1 if it's ok to add a fragm. to pool p_id @ hash, 0 otherwise */
+static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
+									int hash, int split)
+{
+	/* TODO: come up with something better
+	 * if fragment is some  split/rest from an allocation, that is
+	 *  >= requested size, accept it, else
+	 *  look at misses and current fragments and decide based on them */
+	return (p_id<SFM_POOLS_NO) && (split ||
+			( (qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
+			  ((qm->pool[p_id].pool_hash[hash].misses > 
+				 qm->pool[p_id].pool_hash[hash].no) &&
+				(qm->pool[p_id].pool_hash[hash].no<MAX_POOL_FRAGS) ) ) );
+}
+
+
+/* chooses the pool to which a freed fragment should be added;
+ * returns the pool idx, or -1 if it should go back to the main hash */
+static inline unsigned long  sfm_choose_pool(struct sfm_block* qm,
+												struct sfm_frag* frag,
+												int hash, int split)
+{
+	/* check original pool first */
+	if (sfm_check_pool(qm, frag->id, hash, split))
+		return frag->id;
+	else{
+		/* check if our pool is properly set */
+		sfm_fix_pool_id(qm);
+		/* check if my pool needs some frags */
+		if ((pool_id!=frag->id) && (sfm_check_pool(qm,  pool_id, hash, 0))){
+			frag->id=pool_id;
+			return pool_id;
+		}
+	}
+	/* else add it back to main */
+	frag->id=(unsigned long)(-1);
+	return frag->id;
+}
+
+
+static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
+									int split)
+{
+	struct sfm_frag** f;
+	unsigned long p_id;
+	int hash;
+	unsigned long hash_bit;
+	
+	if (likely(frag->size<=SF_POOL_MAX_SIZE)){
+		hash=GET_SMALL_HASH(frag->size);
+		if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
+					(unsigned long)-1)){
+			/* add it back to the "main" hash */
+				frag->id=(unsigned long)(-1); /* main hash marker */
+				/*insert it here*/
+				frag_push(&(qm->free_hash[hash].first), frag);
+				atomic_inc_long((long*)&qm->free_hash[hash].no);
+				/* set it only if not already set (avoids an expensive
+		 		* cache thrashing atomic write op) */
+				hash_bit=HASH_TO_BITMAP(hash);
+				if  (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
+					atomic_or_long((long*)&qm->bitmap, hash_bit);
+		}else{
+			/* add it to one of the pools */
+			sfm_pool_insert(&qm->pool[p_id], hash, frag);
+		}
+	}else{
+		hash=GET_BIG_HASH(frag->size);
+		SFM_MAIN_HASH_LOCK(qm, hash);
+			f=&(qm->free_hash[hash].first);
+			for(; *f; f=&((*f)->u.nxt_free))
+				if (frag->size <= (*f)->size) break;
+			frag->id=(unsigned long)(-1); /* main hash marker */
+			/*insert it here*/
+			frag->u.nxt_free=*f;
+			*f=frag;
+			qm->free_hash[hash].no++;
+			/* inc. big hash free size ? */
+		SFM_MAIN_HASH_UNLOCK(qm, hash);
+	}
+	
+}
+
+
+
+ /* size should be already rounded-up */
+static inline
+#ifdef DBG_F_MALLOC 
+void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
+					unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
+					unsigned long size)
+#endif
+{
+	unsigned long rest;
+	struct sfm_frag* n;
+	int bigger_rest;
+	
+	rest=frag->size-size;
+#ifdef MEM_FRAG_AVOIDANCE
+	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
+		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
+		bigger_rest=1;
+#else
+	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
+		bigger_rest=rest>=(size+FRAG_OVERHEAD);
+#endif
+		frag->size=size;
+		/*split the fragment*/
+		n=FRAG_NEXT(frag);
+		n->size=rest-FRAG_OVERHEAD;
+		n->id=pool_id;
+		FRAG_CLEAR_USED(n); /* never used */
+#ifdef DBG_F_MALLOC
+		/* frag created by malloc, mark it*/
+		n->file=file;
+		n->func="frag. from sfm_malloc";
+		n->line=line;
+		n->check=ST_CHECK_PATTERN;
+#endif
+		/* reinsert n in free list*/
+		sfm_insert_free(qm, n, bigger_rest);
+	}else{
+		/* we cannot split this fragment any more => alloc all of it*/
+	}
+}
+
+
+
+/* init malloc and return a sfm_block*/
+struct sfm_block* sfm_malloc_init(char* address, unsigned long size)
+{
+	char* start;
+	char* end;
+	struct sfm_block* qm;
+	unsigned long init_overhead;
+	int r;
+#ifdef SFM_LOCK_PER_BUCKET
+	int i;
+#endif
+	
+	/* make address and size multiple of 8*/
+	start=(char*)ROUNDUP((unsigned long) address);
+	DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
+			SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
+	DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
+			SF_HASH_SIZE, (long)sizeof(struct sfm_block));
+	DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);
+
+	if (size<start-address) return 0;
+	size-=(start-address);
+	if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
+	size=ROUNDDOWN(size);
+
+	init_overhead=INIT_OVERHEAD;
+	
+	
+	if (size < init_overhead)
+	{
+		/* not enough mem to create our control structures !!!*/
+		return 0;
+	}
+	end=start+size;
+	qm=(struct sfm_block*)start;
+	memset(qm, 0, sizeof(struct sfm_block));
+	qm->size=size;
+	size-=init_overhead;
+	
+	qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
+	qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
+	/* init initial fragment*/
+	qm->first_frag->size=size;
+	qm->first_frag->id=(unsigned long)-1; /* not in a pool */
+	qm->last_frag->size=0;
+	
+#ifdef DBG_F_MALLOC
+	qm->first_frag->check=ST_CHECK_PATTERN;
+	qm->last_frag->check=END_CHECK_PATTERN1;
+#endif
+	
+	/* link initial fragment into the free list*/
+	
+	sfm_insert_free(qm, qm->first_frag, 0);
+	sfm_max_hash=GET_HASH(size);
+	
+	/* init locks */
+	if (lock_init(&qm->get_and_split)==0)
+		goto error;
+#ifdef SFM_ONE_LOCK
+	if (lock_init(&qm->lock)==0){
+		lock_destroy(&qm->get_and_split);
+		goto error;
+	}
+	for (r=0; r<SFM_POOLS_NO; r++){
+		if (lock_init(&qm->pool[r].lock)==0){
+			for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
+			lock_destroy(&qm->lock);
+			lock_destroy(&qm->get_and_split);
+			goto error;
+		}
+	}
+#elif defined(SFM_LOCK_PER_BUCKET)
+	for (r=0; r<SF_HASH_SIZE; r++)
+		if (lock_init(&qm->free_hash[r].lock)==0){
+			for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
+			lock_destroy(&qm->get_and_split);
+			goto error;
+		}
+	for (i=0; i<SFM_POOLS_NO; i++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++)
+			if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
+				for(;r>0; r--)
+					lock_destroy(&qm->pool[i].pool_hash[r-1].lock);
+				for(; i>0; i--){
+					for (r=0; r<SF_HASH_POOL_SIZE; r++)
+						lock_destroy(&qm->pool[i-1].pool_hash[r].lock);
+				}
+				for (r=0; r<SF_HASH_SIZE; r++)
+					lock_destroy(&qm->free_hash[r].lock);
+				lock_destroy(&qm->get_and_split);
+				goto error;
+			}
+	}
+#endif
+	qm->is_init=1;
+	return qm;
+error:
+	return 0;
+}
+
+
+
+/* cleanup */
+void sfm_malloc_destroy(struct sfm_block* qm)
+{
+	int r, i;
+	/* destroy all the locks */
+	if (!qm || !qm->is_init)
+		return; /* nothing to do */
+	lock_destroy(&qm->get_and_split);
+#ifdef SFM_ONE_LOCK
+	lock_destroy(&qm->lock);
+	for (r=0; r<SFM_POOLS_NO; r++){
+		lock_destroy(&qm->pool[r].lock);
+	}
+#elif defined(SFM_LOCK_PER_BUCKET)
+	for (r=0; r<SF_HASH_SIZE; r++)
+		lock_destroy(&qm->free_hash[r].lock);
+	for (i=0; i<SFM_POOLS_NO; i++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++)
+			lock_destroy(&qm->pool[i].pool_hash[r].lock);
+	}
+#endif
+	qm->is_init=0;
+
+}
+
+
+/* returns next set bit in bitmap, starts at b
+ * if b is set, returns b
+ * if not found returns BITMAP_BITS */
+static inline unsigned long _next_set_bit(unsigned long b,
+											unsigned long* bitmap)
+{
+	for (; !((1UL<<b)& *bitmap) && b<BITMAP_BITS; b++);
+	return b;
+}
+
+/* returns start of block b and sets *end
+ * (handles also the "rest" block at the end ) */
+static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
+{
+	unsigned long s;
+	
+	if ((unlikely(b>=BITMAP_BITS))){
+		s=BIT_TO_HASH(BITMAP_BITS);
+		*end=SF_HASH_POOL_SIZE; /* last, possible rest block */
+	}else{
+		s=BIT_TO_HASH(b);
+		*end=s+BITMAP_BLOCK_SIZE;
+	}
+	return s;
+}
+
+
+#ifdef DBG_F_MALLOC
+static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
+						struct sfm_pool*  pool, int hash, unsigned long size,
+						const char* file, const char* func, unsigned int line)
+#else
+static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
+											struct sfm_pool*  pool,
+											int hash, unsigned long size)
+#endif
+{
+	int r;
+	int next_block;
+	struct sfm_frag* volatile* f;
+	struct sfm_frag* frag;
+	unsigned long b;
+	unsigned long eob;
+
+	/* special case for r=hash */
+	r=hash;
+	f=&pool->pool_hash[r].first;
+
+	/* detach it from the free list */
+	if ((frag=frag_pop((struct sfm_frag**)f))==0)
+		goto not_found;
+found:
+	atomic_dec_long((long*)&pool->pool_hash[r].no);
+	frag->u.nxt_free=0; /* mark it as 'taken' */
+	frag->id=pool_id;
+#ifdef DBG_F_MALLOC
+	sfm_split_frag(qm, frag, size, file, func, line);
+#else
+	sfm_split_frag(qm, frag, size);
+#endif
+	if (&qm->pool[pool_id]==pool)
+		atomic_inc_long((long*)&pool->hits);
+	return frag;
+	
+not_found:
+	atomic_inc_long((long*)&pool->pool_hash[r].misses);
+	r++;
+	b=HASH_BIT_POS(r);
+	
+	while(r<SF_HASH_POOL_SIZE){
+		b=_next_set_bit(b, &pool->bitmap);
+		next_block=_hash_range(b, &eob);
+		r=(r<next_block)?next_block:r;
+		for (; r<eob; r++){
+			f=&pool->pool_hash[r].first;
+			if ((frag=frag_pop((struct sfm_frag**)f))!=0)
+				goto found;
+			atomic_inc_long((long*)&pool->pool_hash[r].misses);
+		}
+		b++;
+	}
+	atomic_inc_long((long*)&pool->missed);
+	return 0;
+}
+
+
+
+#ifdef DBG_F_MALLOC
+static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
+						unsigned long size,
+						const char* file, const char* func, unsigned int line)
+#else
+static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
+												unsigned long size)
+#endif
+{
+	int r;
+	int next_block;
+	struct sfm_frag* volatile* f;
+	struct sfm_frag* frag;
+	unsigned long b;
+	unsigned long eob;
+
+	r=hash;
+	b=HASH_BIT_POS(r);
+	while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
+			b=_next_set_bit(b, &qm->bitmap);
+			next_block=_hash_range(b, &eob);
+			r=(r<next_block)?next_block:r;
+			for (; r<eob; r++){
+				f=&qm->free_hash[r].first;
+				if ((frag=frag_pop((struct sfm_frag**)f))!=0){
+					atomic_dec_long((long*)&qm->free_hash[r].no);
+					frag->u.nxt_free=0; /* mark it as 'taken' */
+					frag->id=pool_id;
+#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, frag, size, file, func, line);
+#else
+					sfm_split_frag(qm, frag, size);
+#endif
+					return frag;
+				}
+			}
+			b++;
+	}
+	/* big fragments */
+	SFM_BIG_GET_AND_SPLIT_LOCK(qm);
+	for (; r<= sfm_max_hash ; r++){
+		f=&qm->free_hash[r].first;
+		if (*f){
+			SFM_MAIN_HASH_LOCK(qm, r);
+			if (unlikely((*f)==0)){
+				/* not found */
+				SFM_MAIN_HASH_UNLOCK(qm, r);
+				continue; 
+			}
+			for(;(*f); f=&((*f)->u.nxt_free))
+				if ((*f)->size>=size){
+					/* found, detach it from the free list*/
+					frag=*f;
+					*f=frag->u.nxt_free;
+					frag->u.nxt_free=0; /* mark it as 'taken' */
+					qm->free_hash[r].no--;
+					SFM_MAIN_HASH_UNLOCK(qm, r);
+					frag->id=pool_id;
+#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, frag, size, file, func, line);
+#else
+					sfm_split_frag(qm, frag, size);
+#endif
+					SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
+					return frag;
+				};
+			SFM_MAIN_HASH_UNLOCK(qm, r);
+			/* try in a bigger bucket */
+		}
+	}
+	SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
+	return 0;
+}
+
+
+
+#ifdef DBG_F_MALLOC
+void* sfm_malloc(struct sfm_block* qm, unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void* sfm_malloc(struct sfm_block* qm, unsigned long size)
+#endif
+{
+	struct sfm_frag* frag;
+	int hash;
+	unsigned int i;
+	
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
+			line);
+#endif
+	/*size must be a multiple of 8*/
+	size=ROUNDUP(size);
+/*	if (size>(qm->size-qm->real_used)) return 0; */
+
+	/* check if our pool id is set */
+	sfm_fix_pool_id(qm);
+	
+	/*search for a suitable free frag*/
+	if (likely(size<=SF_POOL_MAX_SIZE)){
+		hash=GET_SMALL_HASH(size);
+		/* try first in our pool */
+#ifdef DBG_F_MALLOC
+		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
+										file, func, line))!=0))
+			goto found;
+		/* try in the "main" free hash, go through all the hash */
+		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
+			goto found;
+		/* really low mem, try in other pools */
+		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
+			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
+										file, func, line))!=0)
+				goto found;
+		}
+#else
+		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
+					!=0 ))
+			goto found;
+		/* try in the "main" free hash, go through all the hash */
+		if (likely((frag=main_get_frag(qm, hash, size))!=0))
+			goto found;
+		/* really low mem, try in other pools */
+		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
+			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
+					!=0 )
+				goto found;
+		}
+#endif
+		/* not found, bad! */
+		return 0;
+	}else{
+		hash=GET_BIG_HASH(size);
+#ifdef DBG_F_MALLOC
+		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
+			return 0; /* not found, bad! */
+#else
+		if ((frag=main_get_frag(qm, hash, size))==0)
+			return 0; /* not found, bad! */
+#endif
+	}
+
+found:
+	/* we found it!*/
+#ifdef DBG_F_MALLOC
+	frag->file=file;
+	frag->func=func;
+	frag->line=line;
+	frag->check=ST_CHECK_PATTERN;
+	MDBG("sfm_malloc(%p, %lu) returns address %p \n", qm, size,
+		(char*)frag+sizeof(struct sfm_frag));
+#endif
+	FRAG_MARK_USED(frag); /* mark it as used */
+	return (char*)frag+sizeof(struct sfm_frag);
+}
+
+
+
+#ifdef DBG_F_MALLOC
+void sfm_free(struct sfm_block* qm, void* p, const char* file,
+				const char* func, unsigned int line)
+#else
+void sfm_free(struct sfm_block* qm, void* p)
+#endif
+{
+	struct sfm_frag* f;
+
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
+				line);
+	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
+		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
+				"aborting\n", p);
+		abort();
+	}
+#endif
+	if (unlikely(p==0)) {
+		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
+		return;
+	}
+	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
+			f->file, f->func, f->line);
+#endif
+#ifdef DBG_F_MALLOC
+	f->file=file;
+	f->func=func;
+	f->line=line;
+#endif
+	sfm_insert_free(qm, f, 0);
+}
+
+
+#ifdef DBG_F_MALLOC
+void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
+#endif
+{
+	struct sfm_frag *f;
+	unsigned long orig_size;
+	void *ptr;
+#ifndef SFM_REALLOC_REMALLOC
+	struct sfm_frag *n;
+	struct sfm_frag **pf;
+	unsigned long diff;
+	unsigned long p_id;
+	int hash;
+	unsigned long n_size;
+	struct sfm_pool * pool;
+#endif
+	
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
+			file, func, line);
+	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
+		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
+				"aborting\n", p);
+		abort();
+	}
+#endif
+	if (size==0) {
+		if (p)
+#ifdef DBG_F_MALLOC
+			sfm_free(qm, p, file, func, line);
+#else
+			sfm_free(qm, p);
+#endif
+		return 0;
+	}
+	if (p==0)
+#ifdef DBG_F_MALLOC
+		return sfm_malloc(qm, size, file, func, line);
+#else
+		return sfm_malloc(qm, size);
+#endif
+	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
+			f, f->file, f->func, f->line);
+#endif
+	size=ROUNDUP(size);
+	orig_size=f->size;
+	if (f->size > size){
+		/* shrink */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
+		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
+#else
+		sfm_split_frag(qm, f, size);
+#endif
+	}else if (f->size<size){
+		/* grow */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
+#endif
+#ifndef SFM_REALLOC_REMALLOC
+/* should set a magic value in list head and in push/pop if magic value =>
+ * lock and wait */
+#error LL_MALLOC realloc not finished yet
+		diff=size-f->size;
+		n=FRAG_NEXT(f);
+		if (((char*)n < (char*)qm->last_frag) && 
+				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
+			/* join  */
+			/* detach n from the free list */
+try_again:
+			p_id=n->id;
+			n_size=n->size;
+			if ((unlikely(p_id >=SFM_POOLS_NO))){
+				hash=GET_HASH(n_size);
+				SFM_MAIN_HASH_LOCK(qm, hash);
+				if (unlikely((n->u.nxt_free==0) ||
+							((n->size+FRAG_OVERHEAD)<diff))){ 
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					goto not_found;
+				}
+				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
+					/* fragment still free, but changed, either 
+					 * moved to another pool or has a diff. size */
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					goto try_again;
+				}
+				pf=&(qm->free_hash[hash].first);
+				/* find it */
+				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
+				if (*pf==0){
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					/* not found, bad! */
+					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
+							    "free " "list (hash=%d)\n", n, hash);
+					/* somebody is in the process of changing it ? */
+					goto not_found;
+				}
+				/* detach */
+				*pf=n->u.nxt_free;
+				n->u.nxt_free=0; /* mark it immediately as detached */
+				qm->free_hash[hash].no--;
+				SFM_MAIN_HASH_UNLOCK(qm, hash);
+				/* join */
+				f->size+=n->size+FRAG_OVERHEAD;
+				/* split it if necessary */
+				if (f->size > size){
+			#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, f, size, file, "fragm. from "
+									"sfm_realloc", line);
+			#else
+					sfm_split_frag(qm, f, size);
+			#endif
+				}
+			}else{ /* p_id < SFM_POOLS_NO (=> in a pool )*/
+				hash=GET_SMALL_HASH(n_size);
+				pool=&qm->pool[p_id];
+				SFM_POOL_LOCK(pool, hash);
+				if (unlikely((n->u.nxt_free==0) ||
+							((n->size+FRAG_OVERHEAD)<diff))){
+					SFM_POOL_UNLOCK(pool, hash);
+					goto not_found;
+				}
+				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
+					/* fragment still free, but changed, either 
+					 * moved to another pool or has a diff. size */
+					SFM_POOL_UNLOCK(pool, hash);
+					goto try_again;
+				}
+				pf=&(pool->pool_hash[hash].first);
+				/* find it */
+				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
+				if (*pf==0){
+					SFM_POOL_UNLOCK(pool, hash);
+					/* not found, bad! */
+					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
+							    "free " "list (hash=%d)\n", n, hash);
+					/* somebody is in the process of changing it ? */
+					goto not_found;
+				}
+				/* detach */
+				*pf=n->u.nxt_free;
+				n->u.nxt_free=0; /* mark it immediately as detached */
+				pool->pool_hash[hash].no--;
+				SFM_POOL_UNLOCK(pool, hash);
+				/* join */
+				f->size+=n->size+FRAG_OVERHEAD;
+				/* split it if necessary */
+				if (f->size > size){
+			#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, f, size, file, "fragm. from "
+									"sfm_realloc", line);
+			#else
+					sfm_split_frag(qm, f, size);
+			#endif
+				}
+			}
+		}else{
+not_found:
+			/* could not join => realloc */
+#else/* SFM_REALLOC_REMALLOC */ 
+		{
+#endif /* SFM_REALLOC_REMALLOC */
+	#ifdef DBG_F_MALLOC
+			ptr=sfm_malloc(qm, size, file, func, line);
+	#else
+			ptr=sfm_malloc(qm, size);
+	#endif
+			if (ptr){
+				/* copy, needed by libssl */
+				memcpy(ptr, p, orig_size);
+	#ifdef DBG_F_MALLOC
+				sfm_free(qm, p, file, func, line);
+	#else
+				sfm_free(qm, p);
+	#endif
+			}
+			p=ptr;
+		}
+	}else{
+		/* do nothing */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n", 
+				f->size, size);
+#endif
+	}
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc: returning %p\n", p);
+#endif
+	return p;
+}
+
+
+
+void sfm_status(struct sfm_block* qm)
+{
+	struct sfm_frag* f;
+	int i,j;
+	int h;
+	int unused;
+	unsigned long size;
+	int k;
+
+#warning "ll_status does not work (might crash if used)"
+
+	LOG(memlog, "sfm_status (%p):\n", qm);
+	if (!qm) return;
+
+	LOG(memlog, " heap size= %ld\n", qm->size);
+	LOG(memlog, "dumping free list:\n");
+	for(h=0,i=0,size=0;h<=sfm_max_hash;h++){
+		SFM_MAIN_HASH_LOCK(qm, h);
+		unused=0;
+		for (f=qm->free_hash[h].first,j=0; f;
+				size+=f->size,f=f->u.nxt_free,i++,j++){
+			if (!FRAG_WAS_USED(f)){
+				unused++;
+#ifdef DBG_F_MALLOC
+				LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
+							" address %p size %lu, created from %s: %s(%ld)\n",
+						    h, f, (char*)f+sizeof(struct sfm_frag), f->size,
+							f->file, f->func, f->line);
+#endif
+			};
+		}
+		if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
+							" bucket size: %9lu - %9lu (first %9lu)\n",
+							h, j, unused, UN_HASH(h),
+						((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)* UN_HASH(h),
+							qm->free_hash[h].first->size
+				);
+		if (j!=qm->free_hash[h].no){
+			LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
+					" for hash %3d\n", j, qm->free_hash[h].no, h);
+		}
+		SFM_MAIN_HASH_UNLOCK(qm, h);
+	}
+	for (k=0; k<SFM_POOLS_NO; k++){
+		for(h=0;h<SF_HASH_POOL_SIZE;h++){
+			SFM_POOL_LOCK(&qm->pool[k], h);
+			unused=0;
+			for (f=qm->pool[k].pool_hash[h].first,j=0; f;
+					size+=f->size,f=f->u.nxt_free,i++,j++){
+				if (!FRAG_WAS_USED(f)){
+					unused++;
+#ifdef DBG_F_MALLOC
+					LOG(memlog, "[%2d] unused fragm.: hash = %3d, fragment %p,"
+								" address %p size %lu, created from %s: "
+								"%s(%ld)\n", k,
+								h, f, (char*)f+sizeof(struct sfm_frag),
+								f->size, f->file, f->func, f->line);
+#endif
+				};
+			}
+			if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
+								"%5d\n\t\t bucket size: %9lu - %9lu "
+								"(first %9lu)\n",
+								k, h, j, unused, UN_HASH(h),
+							((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2) *
+								UN_HASH(h),
+								qm->pool[k].pool_hash[h].first->size
+					);
+			if (j!=qm->pool[k].pool_hash[h].no){
+				LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
+							" count: %d!=%ld for hash %3d\n",
+							k, j, qm->pool[k].pool_hash[h].no, h);
+			}
+			SFM_POOL_UNLOCK(&qm->pool[k], h);
+		}
+	}
+	LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
+	LOG(memlog, "-----------------------------\n");
+}
+
+
+
+/* fills a malloc info structure with info about the block
+ * if a parameter is not supported, it will be filled with 0 */
+void sfm_info(struct sfm_block* qm, struct mem_info* info)
+{
+	int r, k;
+	unsigned long total_frags;
+	struct sfm_frag* f;
+	
+	memset(info,0, sizeof(*info));
+	total_frags=0;
+	info->total_size=qm->size;
+	info->min_frag=SF_MIN_FRAG_SIZE;
+	/* we'll have to compute it all */
+	for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
+		info->free+=qm->free_hash[r].no*UN_HASH(r);
+		total_frags+=qm->free_hash[r].no;
+	}
+	for(;r<=sfm_max_hash; r++){
+		total_frags+=qm->free_hash[r].no;
+		SFM_MAIN_HASH_LOCK(qm, r);
+		for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
+			info->free+=f->size;
+		}
+		SFM_MAIN_HASH_UNLOCK(qm, r);
+	}
+	for (k=0; k<SFM_POOLS_NO; k++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++){
+			info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
+			total_frags+=qm->pool[k].pool_hash[r].no;
+		}
+	}
+	info->real_used=info->total_size-info->free;
+	info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
+				-FRAG_OVERHEAD;
+	info->max_used=0; /* we don't really know */
+	info->total_frags=total_frags;
+}
+
+
+
+/* returns how much free memory is available
+ * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
+unsigned long sfm_available(struct sfm_block* qm)
+{
+	/* we don't know how much free memory we have and it's too expensive
+	 * to compute it */
+	return ((unsigned long)-1);
+}
+
+#endif

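The "mostly lockless" behaviour of ll_malloc.c above comes from frag_push()/frag_pop(), which spin on an atomic compare-and-swap of the free-list head instead of taking a bucket lock. A minimal sketch of the same pattern in portable C11 atomics (illustrative only; ser uses its own atomic_ops wrappers, and the names below are hypothetical):

	#include <stdatomic.h>

	struct node { struct node *next; };

	/* push: point the new node at the observed head, then CAS the head;
	 * a failed CAS reloads the current head and the loop retries */
	static void stack_push(struct node *_Atomic *head, struct node *n)
	{
		struct node *crt = atomic_load(head);
		do {
			n->next = crt;
		} while (!atomic_compare_exchange_weak(head, &crt, n));
	}

	/* pop: try to swing the head to head->next; dereferencing crt here is
	 * only safe because allocator fragments live in a shared memory block
	 * that is never unmapped - the classic ABA caveat still applies */
	static struct node *stack_pop(struct node *_Atomic *head)
	{
		struct node *crt = atomic_load(head);
		while (crt && !atomic_compare_exchange_weak(head, &crt, crt->next))
			;
		return crt;
	}
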
+ 178 - 0
mem/ll_malloc.h

@@ -0,0 +1,178 @@
+/* $Id$
+ *
+ * shared memory, multi-process safe, pool based, mostly lockless version of 
+ *  f_malloc
+ *
+ * Copyright (C) 2007 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * History:
+ * --------
+ *  2003-05-21  on sparc64 roundto 8 even in debugging mode (so malloc'ed
+ *               long longs will be 64 bit aligned) (andrei)
+ *  2004-07-19  support for 64 bit (2^64 mem. block) and more info
+ *               for the future de-fragmentation support (andrei)
+ *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
+ *  2007-06-11  forked from the sf_malloc code (andrei)
+ */
+
+
+#if !defined(ll_malloc_h)  
+#define ll_malloc_h
+
+
+#include "meminfo.h"
+
+#include "../lock_ops.h"
+#include "../atomic_ops.h"
+#include "../compiler_opt.h"
+/* defs*/
+
+
+#ifdef GEN_LOCK_T_UNLIMITED
+#define SFM_LOCK_PER_BUCKET
+#else
+#define SFM_ONE_LOCK
+#endif
+
+#ifdef DBG_SF_MALLOC
+#if defined(__CPU_sparc64) || defined(__CPU_sparc)
+/* tricky, on sun in 32 bits mode long long must be 64 bits aligned
+ * but long can be 32 bits aligned => malloc should return long long
+ * aligned memory */
+	#define SF_ROUNDTO	sizeof(long long)
+#else
+	#define SF_ROUNDTO	sizeof(void*) /* size we round to, must be = 2^n, and
+                      sizeof(sfm_frag) must be multiple of SF_ROUNDTO !*/
+#endif
+#else /* DBG_SF_MALLOC */
+	#define SF_ROUNDTO 8UL
+#endif
+#define SF_MIN_FRAG_SIZE	SF_ROUNDTO
+
+#define SFM_POOLS_NO 4U /* the more the better, but higher initial
+                            mem. consumption */
+
+#define SF_MALLOC_OPTIMIZE_FACTOR 14UL /*used below */
+#define SF_MALLOC_OPTIMIZE  (1UL<<SF_MALLOC_OPTIMIZE_FACTOR)
+								/* size to optimize for,
+									(most allocs <= this size),
+									must be 2^k */
+
+#define SF_HASH_POOL_SIZE	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO + 1)
+#define SF_POOL_MAX_SIZE	SF_MALLOC_OPTIMIZE
+
+#define SF_HASH_SIZE (SF_MALLOC_OPTIMIZE/SF_ROUNDTO + \
+		(sizeof(long)*8-SF_MALLOC_OPTIMIZE_FACTOR)+1)
+
+/* hash structure:
+ * 0 .... SF_MALLOC_OPTIMIZE/SF_ROUNDTO  - small buckets, size increases with
+ *                            SF_ROUNDTO from bucket to bucket
+ * +1 .... end -  size = 2^k, big buckets */
+
+struct sfm_frag{
+	union{
+		struct sfm_frag* nxt_free;
+		long reserved;
+	}u;
+	unsigned long size;
+	unsigned long id; /* TODO better optimize the size */
+	/* pad to SF_ROUNDTO multiple */
+	char _pad[((3*sizeof(long)+SF_ROUNDTO-1)&~(SF_ROUNDTO-1))-3*sizeof(long)];
+#ifdef DBG_SF_MALLOC
+	const char* file;
+	const char* func;
+	unsigned long line;
+	unsigned long check;
+#endif
+};
+
+struct sfm_frag_lnk{
+	struct sfm_frag* first;
+#ifdef SFM_LOCK_PER_BUCKET
+	gen_lock_t lock;
+#endif
+	unsigned long no;
+};
+
+struct sfm_pool_head{
+	struct sfm_frag* first;
+#ifdef SFM_LOCK_PER_BUCKET
+	gen_lock_t lock;
+#endif
+	unsigned long no;
+	unsigned long misses;
+};
+
+struct sfm_pool{
+#ifdef SFM_ONE_LOCK
+	gen_lock_t lock;
+#endif
+	unsigned long missed;
+	unsigned long hits; /* debugging only TODO: remove */
+	unsigned long bitmap;
+	struct sfm_pool_head pool_hash[SF_HASH_POOL_SIZE];
+};
+
+struct sfm_block{
+#ifdef SFM_ONE_LOCK
+	gen_lock_t lock;
+#endif
+	atomic_t crt_id; /* current pool */
+	unsigned long size; /* total size */
+	/* stats are kept now per bucket */
+	struct sfm_frag* first_frag;
+	struct sfm_frag* last_frag;
+	unsigned long bitmap; /* only up to SF_MALLOC_OPTIMIZE */
+	struct sfm_frag_lnk free_hash[SF_HASH_SIZE];
+	struct sfm_pool pool[SFM_POOLS_NO];
+	int is_init;
+	gen_lock_t get_and_split;
+	char _pad[256];
+};
+
+
+
+struct sfm_block* sfm_malloc_init(char* address, unsigned long size);
+void sfm_malloc_destroy(struct sfm_block* qm);
+int sfm_pool_reset();
+
+#ifdef DBG_SF_MALLOC
+void* sfm_malloc(struct sfm_block*, unsigned long size,
+					const char* file, const char* func, unsigned int line);
+#else
+void* sfm_malloc(struct sfm_block*, unsigned long size);
+#endif
+
+#ifdef DBG_SF_MALLOC
+void  sfm_free(struct sfm_block*, void* p, const char* file, const char* func, 
+				unsigned int line);
+#else
+void  sfm_free(struct sfm_block*, void* p);
+#endif
+
+#ifdef DBG_SF_MALLOC
+void*  sfm_realloc(struct sfm_block*, void* p, unsigned long size, 
+					const char* file, const char* func, unsigned int line);
+#else
+void*  sfm_realloc(struct sfm_block*, void* p, unsigned long size);
+#endif
+
+void  sfm_status(struct sfm_block*);
+void  sfm_info(struct sfm_block*, struct mem_info*);
+
+unsigned long sfm_available(struct sfm_block*);
+
+#endif

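The header above is the entire public API of the new allocator. A minimal usage sketch on a private buffer (illustrative only, assuming a non-debug build without DBG_SF_MALLOC; in ser the block actually lives in the shared memory segment managed by mem/shm_mem.c):

	#include "mem/ll_malloc.h"

	static char pool_mem[1024*1024];	/* stand-in for the shm segment */

	int demo(void)
	{
		struct sfm_block* qm;
		void* p;

		/* carve an allocator block out of the raw buffer */
		qm=sfm_malloc_init(pool_mem, sizeof(pool_mem));
		if (qm==0)
			return -1;
		sfm_pool_reset();	/* reset the per-process pool id (call in each child) */
		p=sfm_malloc(qm, 128);	/* rounded up to an SF_ROUNDTO multiple */
		if (p)
			p=sfm_realloc(qm, p, 256);
		sfm_free(qm, p);	/* sfm_free(qm, 0) is tolerated with a warning */
		sfm_malloc_destroy(qm);
		return 0;
	}
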
+ 1114 - 0
mem/sf_malloc.c

@@ -0,0 +1,1114 @@
+/* $Id$
+ *
+ * shared memory, multi-process safe, pool based version of f_malloc
+ *
+ * Copyright (C) 2007 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * History:
+ * --------
+ *              created by andrei
+ *  2003-07-06  added fm_realloc (andrei)
+ *  2004-07-19  fragment bookkeeping code and support for 64 bit
+ *               memory blocks (64 bit machine & size >= 2^32)
+ *              GET_HASH s/</<=/ (avoids waste of 1 hash cell)   (andrei)
+ *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
+ *  2005-03-02  added fm_info() (andrei)
+ *  2005-12-12  fixed realloc shrink real_used accounting (andrei)
+ *              fixed initial size (andrei)
+ *  2006-02-03  fixed realloc out of mem. free bug (andrei)
+ *  2006-04-07  s/DBG/MDBG (andrei)
+ *  2007-02-23  added fm_available() (andrei)
+ *  2007-06-09  forked from the fm_malloc code (andrei)
+ */
+
+
+#ifdef SF_MALLOC
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "sf_malloc.h"
+#include "../dprint.h"
+#include "../globals.h"
+#include "memdbg.h"
+
+#define MAX_POOL_FRAGS 10000 /* max fragments per pool hash bucket */
+#define MIN_POOL_FRAGS 10    /* min fragments per pool hash bucket */
+
+/*useful macros*/
+
+#define FRAG_NEXT(f) \
+	((struct sfm_frag*)((char*)(f)+sizeof(struct sfm_frag)+(f)->size ))
+
+
+/* SF_ROUNDTO= 2^k so the following works */
+#define ROUNDTO_MASK	(~((unsigned long)SF_ROUNDTO-1))
+#define ROUNDUP(s)		(((s)+(SF_ROUNDTO-1))&ROUNDTO_MASK)
+#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)
+
+#define FRAG_OVERHEAD	(sizeof(struct sfm_frag))
+#define INIT_OVERHEAD	\
+	(ROUNDUP(sizeof(struct sfm_block))+sizeof(struct sfm_frag))
+
+
+
+/* finds hash if s <=SF_MALLOC_OPTIMIZE */
+#define GET_SMALL_HASH(s) (unsigned long)(s)/SF_ROUNDTO
+/* finds hash if s > SF_MALLOC_OPTIMIZE */
+#define GET_BIG_HASH(s) \
+	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO+big_hash_idx((s))-SF_MALLOC_OPTIMIZE_FACTOR+1)
+
+/* finds the hash value for s, s=SF_ROUNDTO multiple*/
+#define GET_HASH(s)   ( ((unsigned long)(s)<=SF_MALLOC_OPTIMIZE)?\
+							GET_SMALL_HASH(s): GET_BIG_HASH(s) )
+
+
+#define UN_HASH_SMALL(h) ((unsigned long)(h)*SF_ROUNDTO)
+#define UN_HASH_BIG(h) (1UL<<((unsigned long)(h)-SF_MALLOC_OPTIMIZE/SF_ROUNDTO+\
+							SF_MALLOC_OPTIMIZE_FACTOR-1))
+
+#define UN_HASH(h)	( ((unsigned long)(h)<=(SF_MALLOC_OPTIMIZE/SF_ROUNDTO))?\
+						UN_HASH_SMALL(h): UN_HASH_BIG(h) )
+
+#define BITMAP_BITS (sizeof(((struct sfm_block*)0)->bitmap)*8)
+#define BITMAP_BLOCK_SIZE ((SF_MALLOC_OPTIMIZE/SF_ROUNDTO)/ BITMAP_BITS)
+/* only for "small" hashes (up to HASH(SF_MALLOC_OPTIMIZE)) */
+#define HASH_BIT_POS(h) (((unsigned long)(h))/BITMAP_BLOCK_SIZE)
+#define HASH_TO_BITMAP(h) (1UL<<HASH_BIT_POS(h))
+#define BIT_TO_HASH(b) ((b)*BITMAP_BLOCK_SIZE)
+
+
+
+/* mark/test used/unused frags */
+#define FRAG_MARK_USED(f)
+#define FRAG_CLEAR_USED(f)
+#define FRAG_WAS_USED(f)   (1)
+
+/* other frag related defines:
+ * MEM_COALESCE_FRAGS 
+ * MEM_FRAG_AVOIDANCE
+ */
+#define MEM_FRAG_AVOIDANCE
+
+
+#define SFM_REALLOC_REMALLOC
+
+/* computes hash number for big buckets*/
+inline static unsigned long big_hash_idx(unsigned long s)
+{
+	unsigned long idx;
+	/* s is rounded => s = k*2^n (SF_ROUNDTO=2^n)
+	 * index = i such that 2^(i+1) > s >= 2^i
+	 *
+	 * => index = position of the most significant set bit in s */
+	idx=sizeof(long)*8-1;
+	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
+	return idx;
+}
+
+
+#ifdef DBG_F_MALLOC
+#define ST_CHECK_PATTERN   0xf0f0f0f0
+#define END_CHECK_PATTERN1 0xc0c0c0c0
+#define END_CHECK_PATTERN2 0xabcdefed
+#endif
+
+
+#ifdef SFM_ONE_LOCK
+
+#define SFM_MAIN_HASH_LOCK(qm, hash) lock_get(&(qm)->lock)
+#define SFM_MAIN_HASH_UNLOCK(qm, hash) lock_release(&(qm)->lock)
+#define SFM_POOL_LOCK(p, hash) lock_get(&(p)->lock)
+#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->lock)
+
+#warning "degraded performance, only one lock"
+
+#elif defined SFM_LOCK_PER_BUCKET
+
+#define SFM_MAIN_HASH_LOCK(qm, hash) \
+	lock_get(&(qm)->free_hash[(hash)].lock)
+#define SFM_MAIN_HASH_UNLOCK(qm, hash)  \
+	lock_release(&(qm)->free_hash[(hash)].lock)
+#define SFM_POOL_LOCK(p, hash) lock_get(&(p)->pool_hash[(hash)].lock)
+#define SFM_POOL_UNLOCK(p, hash) lock_release(&(p)->pool_hash[(hash)].lock)
+#else
+#error no locks defined
+#endif /* SFM_ONE_LOCK/SFM_LOCK_PER_BUCKET */
+
+#define SFM_BIG_GET_AND_SPLIT_LOCK(qm)   lock_get(&(qm)->get_and_split)
+#define SFM_BIG_GET_AND_SPLIT_UNLOCK(qm) lock_release(&(qm)->get_and_split)
+
+static unsigned long sfm_max_hash=0; /* maximum hash value (no point in
+										 searching further) */
+static unsigned long pool_id=(unsigned long)-1;
+
+
+/* call for each child */
+int sfm_pool_reset()
+{
+	pool_id=(unsigned long)-1;
+	return 0;
+}
+
+
+#define sfm_fix_pool_id(qm) \
+	do{ \
+		if (unlikely(pool_id>=SFM_POOLS_NO)) \
+			pool_id=((unsigned)atomic_add(&(qm)->crt_id, 1))%SFM_POOLS_NO; \
+	}while(0)
+
+
+
+static inline void frag_push(struct sfm_frag** head, struct sfm_frag* frag)
+{
+	frag->u.nxt_free=*head;
+	*head=frag;
+}
+
+
+static inline struct sfm_frag* frag_pop(struct sfm_frag** head)
+{
+	struct sfm_frag* frag;
+	frag=*head;
+	*head=frag->u.nxt_free;
+	return frag;
+}
+
+static inline void sfm_pool_insert (struct sfm_pool* pool, int hash,
+								struct sfm_frag* frag)
+{
+	unsigned long hash_bit;
+
+	SFM_POOL_LOCK(pool, hash);
+	frag_push(&pool->pool_hash[hash].first, frag);
+	pool->pool_hash[hash].no++;
+	/* set it only if not already set (avoids an expensive
+	 * cache thrashing atomic write op) */
+	hash_bit=HASH_TO_BITMAP(hash);
+	if  (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
+		atomic_or_long((long*)&pool->bitmap, hash_bit);
+	SFM_POOL_UNLOCK(pool, hash);
+}
+
+
+
+/* returns 1 if it's ok to add a fragm. to pool p_id @ hash, 0 otherwise */
+static inline int sfm_check_pool(struct sfm_block* qm, unsigned long p_id,
+									int hash, int split)
+{
+	/* TODO: come up with something better
+	 * if fragment is some  split/rest from an allocation, that is
+	 *  >= requested size, accept it, else
+	 *  look at misses and current fragments and decide based on them */
+	return (p_id<SFM_POOLS_NO) && (split ||
+			( (qm->pool[p_id].pool_hash[hash].no < MIN_POOL_FRAGS) ||
+			  ((qm->pool[p_id].pool_hash[hash].misses > 
+				 qm->pool[p_id].pool_hash[hash].no) &&
+				(qm->pool[p_id].pool_hash[hash].no<MAX_POOL_FRAGS) ) ) );
+}
+
+
+/* chooses the pool to which a freed fragment should be added;
+ * returns the pool idx, or -1 if it should go back to the main hash */
+static inline unsigned long  sfm_choose_pool(struct sfm_block* qm,
+												struct sfm_frag* frag,
+												int hash, int split)
+{
+	/* check original pool first */
+	if (sfm_check_pool(qm, frag->id, hash, split))
+		return frag->id;
+	else{
+		/* check if our pool is properly set */
+		sfm_fix_pool_id(qm);
+		/* check if my pool needs some frags */
+		if ((pool_id!=frag->id) && (sfm_check_pool(qm,  pool_id, hash, 0))){
+			frag->id=pool_id;
+			return pool_id;
+		}
+	}
+	/* else add it back to main */
+	frag->id=(unsigned long)(-1);
+	return frag->id;
+}
+
+
+static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
+									int split)
+{
+	struct sfm_frag** f;
+	unsigned long p_id;
+	int hash;
+	unsigned long hash_bit;
+	
+	if (likely(frag->size<=SF_POOL_MAX_SIZE)){
+		hash=GET_SMALL_HASH(frag->size);
+		if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
+					(unsigned long)-1)){
+			/* add it back to the "main" hash */
+			SFM_MAIN_HASH_LOCK(qm, hash);
+				frag->id=(unsigned long)(-1); /* main hash marker */
+				/*insert it here*/
+				frag_push(&(qm->free_hash[hash].first), frag);
+				qm->free_hash[hash].no++;
+				/* set it only if not already set (avoids an expensive
+		 		* cache thrashing atomic write op) */
+				hash_bit=HASH_TO_BITMAP(hash);
+				if  (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
+					atomic_or_long((long*)&qm->bitmap, hash_bit);
+			SFM_MAIN_HASH_UNLOCK(qm, hash);
+		}else{
+			/* add it to one of the pools */
+			sfm_pool_insert(&qm->pool[p_id], hash, frag);
+		}
+	}else{
+		hash=GET_BIG_HASH(frag->size);
+		SFM_MAIN_HASH_LOCK(qm, hash);
+			f=&(qm->free_hash[hash].first);
+			for(; *f; f=&((*f)->u.nxt_free))
+				if (frag->size <= (*f)->size) break;
+			frag->id=(unsigned long)(-1); /* main hash marker */
+			/*insert it here*/
+			frag->u.nxt_free=*f;
+			*f=frag;
+			qm->free_hash[hash].no++;
+			/* inc. big hash free size ? */
+		SFM_MAIN_HASH_UNLOCK(qm, hash);
+	}
+	
+}
+
+
+
+ /* size should be already rounded-up */
+static inline
+#ifdef DBG_F_MALLOC 
+void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
+					unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
+					unsigned long size)
+#endif
+{
+	unsigned long rest;
+	struct sfm_frag* n;
+	int bigger_rest;
+	
+	rest=frag->size-size;
+#ifdef MEM_FRAG_AVOIDANCE
+	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
+		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
+		bigger_rest=1;
+#else
+	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
+		bigger_rest=rest>=(size+FRAG_OVERHEAD);
+#endif
+		frag->size=size;
+		/*split the fragment*/
+		n=FRAG_NEXT(frag);
+		n->size=rest-FRAG_OVERHEAD;
+		n->id=pool_id;
+		FRAG_CLEAR_USED(n); /* never used */
+#ifdef DBG_F_MALLOC
+		/* frag created by malloc, mark it*/
+		n->file=file;
+		n->func="frag. from sfm_malloc";
+		n->line=line;
+		n->check=ST_CHECK_PATTERN;
+#endif
+		/* reinsert n in free list*/
+		sfm_insert_free(qm, n, bigger_rest);
+	}else{
+		/* we cannot split this fragment any more => alloc all of it*/
+	}
+}
+
+
+
+/* init malloc and return a sfm_block*/
+struct sfm_block* sfm_malloc_init(char* address, unsigned long size)
+{
+	char* start;
+	char* end;
+	struct sfm_block* qm;
+	unsigned long init_overhead;
+	int r;
+#ifdef SFM_LOCK_PER_BUCKET
+	int i;
+#endif
+	
+	/* make address and size multiple of 8*/
+	start=(char*)ROUNDUP((unsigned long) address);
+	DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
+			SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
+	DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
+			SF_HASH_SIZE, (long)sizeof(struct sfm_block));
+	DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);
+
+	if (size<start-address) return 0;
+	size-=(start-address);
+	if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
+	size=ROUNDDOWN(size);
+
+	init_overhead=INIT_OVERHEAD;
+	
+	
+	if (size < init_overhead)
+	{
+		/* not enough mem to create our control structures !!!*/
+		return 0;
+	}
+	end=start+size;
+	qm=(struct sfm_block*)start;
+	memset(qm, 0, sizeof(struct sfm_block));
+	qm->size=size;
+	size-=init_overhead;
+	
+	qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
+	qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
+	/* init initial fragment*/
+	qm->first_frag->size=size;
+	qm->first_frag->id=(unsigned long)-1; /* not in a pool */
+	qm->last_frag->size=0;
+	
+#ifdef DBG_F_MALLOC
+	qm->first_frag->check=ST_CHECK_PATTERN;
+	qm->last_frag->check=END_CHECK_PATTERN1;
+#endif
+	
+	/* link initial fragment into the free list*/
+	
+	sfm_insert_free(qm, qm->first_frag, 0);
+	sfm_max_hash=GET_HASH(size);
+	
+	/* init locks */
+	if (lock_init(&qm->get_and_split)==0)
+		goto error;
+#ifdef SFM_ONE_LOCK
+	if (lock_init(&qm->lock)==0){
+		lock_destroy(&qm->get_and_split);
+		goto error;
+	}
+	for (r=0; r<SFM_POOLS_NO; r++){
+		if (lock_init(&qm->pool[r].lock)==0){
+			for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
+			lock_destroy(&qm->lock);
+			lock_destroy(&qm->get_and_split);
+			goto error;
+		}
+	}
+#elif defined(SFM_LOCK_PER_BUCKET)
+	for (r=0; r<SF_HASH_SIZE; r++)
+		if (lock_init(&qm->free_hash[r].lock)==0){
+			for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
+			lock_destroy(&qm->get_and_split);
+			goto error;
+		}
+	for (i=0; i<SFM_POOLS_NO; i++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++)
+			if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
+				for(;r>0; r--)
+					lock_destroy(&qm->pool[i].pool_hash[r-1].lock);
+				for(; i>0; i--){
+					for (r=0; r<SF_HASH_POOL_SIZE; r++)
+						lock_destroy(&qm->pool[i-1].pool_hash[r].lock);
+				}
+				for (r=0; r<SF_HASH_SIZE; r++)
+					lock_destroy(&qm->free_hash[r].lock);
+				lock_destroy(&qm->get_and_split);
+				goto error;
+			}
+	}
+#endif
+	qm->is_init=1;
+	return qm;
+error:
+	return 0;
+}
+
+
+
+/* cleanup */
+void sfm_malloc_destroy(struct sfm_block* qm)
+{
+	int r, i;
+	/* destroy all the locks */
+	if (!qm || !qm->is_init)
+		return; /* nothing to do */
+	lock_destroy(&qm->get_and_split);
+#ifdef SFM_ONE_LOCK
+	lock_destroy(&qm->lock);
+	for (r=0; r<SFM_POOLS_NO; r++){
+		lock_destroy(&qm->pool[r].lock);
+	}
+#elif defined(SFM_LOCK_PER_BUCKET)
+	for (r=0; r<SF_HASH_SIZE; r++)
+		lock_destroy(&qm->free_hash[r].lock);
+	for (i=0; i<SFM_POOLS_NO; i++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++)
+			lock_destroy(&qm->pool[i].pool_hash[r].lock);
+	}
+#endif
+	qm->is_init=0;
+
+}
+
+
+/* returns next set bit in bitmap, starts at b
+ * if b is set, returns b
+ * if not found returns BITMAP_BITS */
+static inline unsigned long _next_set_bit(unsigned long b,
+											unsigned long* bitmap)
+{
+	for (; !((1UL<<b)& *bitmap) && b<BITMAP_BITS; b++);
+	return b;
+}
+
+/* returns start of block b and sets *end
+ * (handles also the "rest" block at the end ) */
+static inline unsigned long _hash_range(unsigned long b, unsigned long* end)
+{
+	unsigned long s;
+	
+	if ((unlikely(b>=BITMAP_BITS))){
+		s=BIT_TO_HASH(BITMAP_BITS);
+		*end=SF_HASH_POOL_SIZE; /* last, possible rest block */
+	}else{
+		s=BIT_TO_HASH(b);
+		*end=s+BITMAP_BLOCK_SIZE;
+	}
+	return s;
+}
+
+
+#ifdef DBG_F_MALLOC
+static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
+						struct sfm_pool*  pool, int hash, unsigned long size,
+						const char* file, const char* func, unsigned int line)
+#else
+static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
+											struct sfm_pool*  pool,
+											int hash, unsigned long size)
+#endif
+{
+	int r;
+	int next_block;
+	struct sfm_frag* volatile* f;
+	struct sfm_frag* frag;
+	unsigned long b;
+	unsigned long eob;
+
+	/* special case for r=hash */
+	r=hash;
+	f=&pool->pool_hash[r].first;
+	if (*f==0) 
+		goto not_found;
+	SFM_POOL_LOCK(pool, r);
+	if (unlikely(*f==0)){
+		SFM_POOL_UNLOCK(pool, r);
+		goto not_found;
+	}
+found:
+	/* detach it from the free list*/
+	frag=frag_pop((struct sfm_frag**)f);
+	frag->u.nxt_free=0; /* mark it as 'taken' */
+	frag->id=pool_id;
+	pool->pool_hash[r].no--;
+	SFM_POOL_UNLOCK(pool, r);
+#ifdef DBG_F_MALLOC
+	sfm_split_frag(qm, frag, size, file, func, line);
+#else
+	sfm_split_frag(qm, frag, size);
+#endif
+	if (&qm->pool[pool_id]==pool)
+		atomic_inc_long((long*)&pool->hits);
+	return frag;
+	
+not_found:
+	atomic_inc_long((long*)&pool->pool_hash[r].misses);
+	r++;
+	b=HASH_BIT_POS(r);
+	
+	while(r<SF_HASH_POOL_SIZE){
+		b=_next_set_bit(b, &pool->bitmap);
+		next_block=_hash_range(b, &eob);
+		r=(r<next_block)?next_block:r;
+		for (; r<eob; r++){
+			f=&pool->pool_hash[r].first;
+			if (*f){
+				SFM_POOL_LOCK(pool, r);
+				if (unlikely(*f==0)){
+					/* not found */
+					SFM_POOL_UNLOCK(pool, r);
+				}else
+					goto found;
+			}
+			atomic_inc_long((long*)&pool->pool_hash[r].misses);
+		}
+		b++;
+	}
+#if 0 /* EXPENSIVE BUG CHECK */
+	for (r=hash; r<SF_HASH_POOL_SIZE; r++){
+		f=&pool->pool_hash[r].first;
+		if (*f){
+				SFM_POOL_LOCK(pool, r);
+				if (unlikely(*f==0)){
+					/* not found */
+					SFM_POOL_UNLOCK(pool, r);
+				}else{
+					b=_next_set_bit(HASH_BIT_POS(r), &pool->bitmap);
+					next_block=_hash_range(b, &eob);
+					BUG("pool_get_frag: found fragm. %d at %d (bit %ld range %ld-%ld), next set bit=%ld"
+							" bitmap %ld (%p)\n", hash, r, HASH_BIT_POS(r),
+							next_block, eob, b, pool->bitmap, &pool->bitmap);
+					goto found;
+				}
+		}
+	}
+#endif
+	atomic_inc_long((long*)&pool->missed);
+	return 0;
+}
+
+
+
+#ifdef DBG_F_MALLOC
+static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
+						unsigned long size,
+						const char* file, const char* func, unsigned int line)
+#else
+static inline struct sfm_frag* main_get_frag(struct sfm_block* qm, int hash,
+												unsigned long size)
+#endif
+{
+	int r;
+	int next_block;
+	struct sfm_frag* volatile* f;
+	struct sfm_frag* frag;
+	unsigned long b;
+	unsigned long eob;
+
+	r=hash;
+	b=HASH_BIT_POS(r);
+	while(r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO){
+			b=_next_set_bit(b, &qm->bitmap);
+			next_block=_hash_range(b, &eob);
+			r=(r<next_block)?next_block:r;
+			for (; r<eob; r++){
+				f=&qm->free_hash[r].first;
+				if (*f){
+					SFM_MAIN_HASH_LOCK(qm, r);
+					if (unlikely(*f==0)){
+						/* not found, somebody stole it */
+						SFM_MAIN_HASH_UNLOCK(qm, r);
+						continue; 
+					}
+					/* detach it from the free list*/
+					frag=frag_pop((struct sfm_frag**)f);
+					frag->u.nxt_free=0; /* mark it as 'taken' */
+					qm->free_hash[r].no--;
+					SFM_MAIN_HASH_UNLOCK(qm, r);
+					frag->id=pool_id;
+#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, frag, size, file, func, line);
+#else
+					sfm_split_frag(qm, frag, size);
+#endif
+					return frag;
+				}
+			}
+			b++;
+	}
+	/* big fragments */
+	SFM_BIG_GET_AND_SPLIT_LOCK(qm);
+	for (; r<= sfm_max_hash ; r++){
+		f=&qm->free_hash[r].first;
+		if (*f){
+			SFM_MAIN_HASH_LOCK(qm, r);
+			if (unlikely((*f)==0)){
+				/* not found */
+				SFM_MAIN_HASH_UNLOCK(qm, r);
+				continue; 
+			}
+			for(;(*f); f=&((*f)->u.nxt_free))
+				if ((*f)->size>=size){
+					/* found, detach it from the free list*/
+					frag=*f;
+					*f=frag->u.nxt_free;
+					frag->u.nxt_free=0; /* mark it as 'taken' */
+					qm->free_hash[r].no--;
+					SFM_MAIN_HASH_UNLOCK(qm, r);
+					frag->id=pool_id;
+#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, frag, size, file, func, line);
+#else
+					sfm_split_frag(qm, frag, size);
+#endif
+					SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
+					return frag;
+				};
+			SFM_MAIN_HASH_UNLOCK(qm, r);
+			/* try in a bigger bucket */
+		}
+	}
+	SFM_BIG_GET_AND_SPLIT_UNLOCK(qm);
+	return 0;
+}
+
+
+
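+/* allocation strategy: for sizes <= SF_POOL_MAX_SIZE try this process'
+ * own pool first, then the main free hash and, only when memory is really
+ * low, steal from the other pools; bigger requests go directly to the
+ * main hash */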
+#ifdef DBG_F_MALLOC
+void* sfm_malloc(struct sfm_block* qm, unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void* sfm_malloc(struct sfm_block* qm, unsigned long size)
+#endif
+{
+	struct sfm_frag* frag;
+	int hash;
+	unsigned int i;
+	
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
+			line);
+#endif
+	/*size must be a multiple of 8*/
+	size=ROUNDUP(size);
+/*	if (size>(qm->size-qm->real_used)) return 0; */
+
+	/* check if our pool id is set */
+	sfm_fix_pool_id(qm);
+	
+	/*search for a suitable free frag*/
+	if (likely(size<=SF_POOL_MAX_SIZE)){
+		hash=GET_SMALL_HASH(size);
+		/* try first in our pool */
+#ifdef DBG_F_MALLOC
+		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
+										file, func, line))!=0))
+			goto found;
+		/* try in the "main" free hash, go through all the hash */
+		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
+			goto found;
+		/* really low mem, try in the other pools */
+		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
+			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
+										file, func, line))!=0)
+				goto found;
+		}
+#else
+		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
+					!=0 ))
+			goto found;
+		/* try in the "main" free hash, go through all the hash */
+		if (likely((frag=main_get_frag(qm, hash, size))!=0))
+			goto found;
+		/* really low mem, try in the other pools */
+		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
+			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
+					!=0 )
+				goto found;
+		}
+#endif
+		/* not found, bad! */
+		return 0;
+	}else{
+		hash=GET_BIG_HASH(size);
+#ifdef DBG_F_MALLOC
+		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
+			return 0; /* not found, bad! */
+#else
+		if ((frag=main_get_frag(qm, hash, size))==0)
+			return 0; /* not found, bad! */
+#endif
+	}
+
+found:
+	/* we found it!*/
+#ifdef DBG_F_MALLOC
+	frag->file=file;
+	frag->func=func;
+	frag->line=line;
+	frag->check=ST_CHECK_PATTERN;
+	MDBG("sfm_malloc(%p, %lu) returns address %p \n", qm, size,
+		(char*)frag+sizeof(struct sfm_frag));
+#endif
+	FRAG_MARK_USED(frag); /* mark it as used */
+	return (char*)frag+sizeof(struct sfm_frag);
+}
+
+
+
+#ifdef DBG_F_MALLOC
+void sfm_free(struct sfm_block* qm, void* p, const char* file,
+				const char* func, unsigned int line)
+#else
+void sfm_free(struct sfm_block* qm, void* p)
+#endif
+{
+	struct sfm_frag* f;
+
+	if (unlikely(p==0)) {
+		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
+		return;
+	}
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
+				line);
+	/* check the pointer only after the null check, else free(0) would
+	 * abort here instead of just warning */
+	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
+		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
+				"aborting\n", p);
+		abort();
+	}
+#endif
+	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
+			f->file, f->func, f->line);
+#endif
+#ifdef DBG_F_MALLOC
+	f->file=file;
+	f->func=func;
+	f->line=line;
+#endif
+	sfm_insert_free(qm, f, 0);
+}
+
+
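+/* shrinks in place, or tries to grow by joining with the next fragment if
+ * that one is free and big enough; n->id and n->size are sampled without a
+ * lock and re-checked under the owning bucket lock (retrying via try_again
+ * if the neighbour changed meanwhile); as a last resort it falls back to
+ * malloc + copy + free */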
+#ifdef DBG_F_MALLOC
+void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
+					const char* file, const char* func, unsigned int line)
+#else
+void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
+#endif
+{
+	struct sfm_frag *f;
+	unsigned long orig_size;
+	void *ptr;
+#ifndef SFM_REALLOC_REMALLOC
+	struct sfm_frag *n;
+	struct sfm_frag **pf;
+	unsigned long diff;
+	unsigned long p_id;
+	int hash;
+	unsigned long n_size;
+	struct sfm_pool * pool;
+#endif
+	
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
+			file, func, line);
+	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
+		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
+				"aborting\n", p);
+		abort();
+	}
+#endif
+	if (size==0) {
+		if (p)
+#ifdef DBG_F_MALLOC
+			sfm_free(qm, p, file, func, line);
+#else
+			sfm_free(qm, p);
+#endif
+		return 0;
+	}
+	if (p==0)
+#ifdef DBG_F_MALLOC
+		return sfm_malloc(qm, size, file, func, line);
+#else
+		return sfm_malloc(qm, size);
+#endif
+	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
+			f, f->file, f->func, f->line);
+#endif
+	size=ROUNDUP(size);
+	orig_size=f->size;
+	if (f->size > size){
+		/* shrink */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
+		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
+#else
+		sfm_split_frag(qm, f, size);
+#endif
+	}else if (f->size<size){
+		/* grow */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
+#endif
+#ifndef SFM_REALLOC_REMALLOC
+		diff=size-f->size;
+		n=FRAG_NEXT(f);
+		if (((char*)n < (char*)qm->last_frag) && 
+				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
+			/* join  */
+			/* detach n from the free list */
+try_again:
+			p_id=n->id;
+			n_size=n->size;
+			if ((unlikely(p_id >=SFM_POOLS_NO))){
+				hash=GET_HASH(n_size);
+				SFM_MAIN_HASH_LOCK(qm, hash);
+				if (unlikely((n->u.nxt_free==0) ||
+							((n->size+FRAG_OVERHEAD)<diff))){ 
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					goto not_found;
+				}
+				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
+					/* fragment still free, but changed, either 
+					 * moved to another pool or has a diff. size */
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					goto try_again;
+				}
+				pf=&(qm->free_hash[hash].first);
+				/* find it */
+				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
+				if (*pf==0){
+					SFM_MAIN_HASH_UNLOCK(qm, hash);
+					/* not found, bad! */
+					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
+							    "free list (hash=%d)\n", n, hash);
+					/* somebody is in the process of changing it ? */
+					goto not_found;
+				}
+				/* detach */
+				*pf=n->u.nxt_free;
+				n->u.nxt_free=0; /* mark it immediately as detached */
+				qm->free_hash[hash].no--;
+				SFM_MAIN_HASH_UNLOCK(qm, hash);
+				/* join */
+				f->size+=n->size+FRAG_OVERHEAD;
+				/* split it if necessary */
+				if (f->size > size){
+			#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, f, size, file, "fragm. from "
+									"sfm_realloc", line);
+			#else
+					sfm_split_frag(qm, f, size);
+			#endif
+				}
+			}else{ /* p_id < SFM_POOLS_NO (=> in a pool )*/
+				hash=GET_SMALL_HASH(n_size);
+				pool=&qm->pool[p_id];
+				SFM_POOL_LOCK(pool, hash);
+				if (unlikely((n->u.nxt_free==0) ||
+							((n->size+FRAG_OVERHEAD)<diff))){
+					SFM_POOL_UNLOCK(pool, hash);
+					goto not_found;
+				}
+				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
+					/* fragment still free, but changed, either 
+					 * moved to another pool or has a diff. size */
+					SFM_POOL_UNLOCK(pool, hash);
+					goto try_again;
+				}
+				pf=&(pool->pool_hash[hash].first);
+				/* find it */
+				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
+				if (*pf==0){
+					SFM_POOL_UNLOCK(pool, hash);
+					/* not found, bad! */
+					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
+							    "free list (hash=%d)\n", n, hash);
+					/* somebody is in the process of changing it ? */
+					goto not_found;
+				}
+				/* detach */
+				*pf=n->u.nxt_free;
+				n->u.nxt_free=0; /* mark it immediately as detached */
+				pool->pool_hash[hash].no--;
+				SFM_POOL_UNLOCK(pool, hash);
+				/* join */
+				f->size+=n->size+FRAG_OVERHEAD;
+				/* split it if necessary */
+				if (f->size > size){
+			#ifdef DBG_F_MALLOC
+					sfm_split_frag(qm, f, size, file, "fragm. from "
+									"sfm_realloc", line);
+			#else
+					sfm_split_frag(qm, f, size);
+			#endif
+				}
+			}
+		}else{
+not_found:
+			/* could not join => realloc */
+#else/* SFM_REALLOC_REMALLOC */ 
+		{
+#endif /* SFM_REALLOC_REMALLOC */
+	#ifdef DBG_F_MALLOC
+			ptr=sfm_malloc(qm, size, file, func, line);
+	#else
+			ptr=sfm_malloc(qm, size);
+	#endif
+			if (ptr){
+				/* copy, needed by libssl */
+				memcpy(ptr, p, orig_size);
+	#ifdef DBG_F_MALLOC
+				sfm_free(qm, p, file, func, line);
+	#else
+				sfm_free(qm, p);
+	#endif
+			}
+			p=ptr;
+		}
+	}else{
+		/* do nothing */
+#ifdef DBG_F_MALLOC
+		MDBG("sfm_realloc: doing nothing, same size: %lu - %lu\n", 
+				f->size, size);
+#endif
+	}
+#ifdef DBG_F_MALLOC
+	MDBG("sfm_realloc: returning %p\n", p);
+#endif
+	return p;
+}
+
+
+
+void sfm_status(struct sfm_block* qm)
+{
+	struct sfm_frag* f;
+	int i,j;
+	int h;
+	int unused;
+	unsigned long size;
+	int k;
+
+	LOG(memlog, "sfm_status (%p):\n", qm);
+	if (!qm) return;
+
+	LOG(memlog, " heap size= %ld\n", qm->size);
+	LOG(memlog, "dumping free list:\n");
+	for(h=0,i=0,size=0;h<=sfm_max_hash;h++){
+		SFM_MAIN_HASH_LOCK(qm, h);
+		unused=0;
+		for (f=qm->free_hash[h].first,j=0; f;
+				size+=f->size,f=f->u.nxt_free,i++,j++){
+			if (!FRAG_WAS_USED(f)){
+				unused++;
+#ifdef DBG_F_MALLOC
+				LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
+							" address %p size %lu, created from %s: %s(%ld)\n",
+						    h, f, (char*)f+sizeof(struct sfm_frag), f->size,
+							f->file, f->func, f->line);
+#endif
+			};
+		}
+		if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
+							" bucket size: %9lu - %9lu (first %9lu)\n",
+							h, j, unused, UN_HASH(h),
+						((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2)* UN_HASH(h),
+							qm->free_hash[h].first->size
+				);
+		if (j!=qm->free_hash[h].no){
+			LOG(L_CRIT, "BUG: sfm_status: different free frag. count: %d!=%ld"
+					" for hash %3d\n", j, qm->free_hash[h].no, h);
+		}
+		SFM_MAIN_HASH_UNLOCK(qm, h);
+	}
+	for (k=0; k<SFM_POOLS_NO; k++){
+		for(h=0;h<SF_HASH_POOL_SIZE;h++){
+			SFM_POOL_LOCK(&qm->pool[k], h);
+			unused=0;
+			for (f=qm->pool[k].pool_hash[h].first,j=0; f;
+					size+=f->size,f=f->u.nxt_free,i++,j++){
+				if (!FRAG_WAS_USED(f)){
+					unused++;
+#ifdef DBG_F_MALLOC
+					LOG(memlog, "[%2d] unused fragm.: hash = %3d, fragment %p,"
+								" address %p size %lu, created from %s: "
+								"%s(%ld)\n",
+								k, h, f, (char*)f+sizeof(struct sfm_frag),
+								f->size, f->file, f->func, f->line);
+#endif
+				};
+			}
+			if (j) LOG(memlog, "[%2d] hash = %3d fragments no.: %5d, unused: "
+								"%5d\n\t\t bucket size: %9lu - %9lu "
+								"(first %9lu)\n",
+								k, h, j, unused, UN_HASH(h),
+							((h<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO)?1:2) *
+								UN_HASH(h),
+								qm->pool[k].pool_hash[h].first->size
+					);
+			if (j!=qm->pool[k].pool_hash[h].no){
+				LOG(L_CRIT, "BUG: sfm_status: [%d] different free frag."
+							" count: %d!=%ld for hash %3d\n",
+							k, j, qm->pool[k].pool_hash[h].no, h);
+			}
+			SFM_POOL_UNLOCK(&qm->pool[k], h);
+		}
+	}
+	LOG(memlog, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
+	LOG(memlog, "-----------------------------\n");
+}
+
+
+
+/* fills a malloc info structure with info about the block
+ * if a parameter is not supported, it will be filled with 0 */
+void sfm_info(struct sfm_block* qm, struct mem_info* info)
+{
+	int r, k;
+	unsigned long total_frags;
+	struct sfm_frag* f;
+	
+	memset(info,0, sizeof(*info));
+	total_frags=0;
+	info->total_size=qm->size;
+	info->min_frag=SF_MIN_FRAG_SIZE;
+	/* we'll have to compute it all */
+	for (r=0; r<=SF_MALLOC_OPTIMIZE/SF_ROUNDTO; r++){
+		info->free+=qm->free_hash[r].no*UN_HASH(r);
+		total_frags+=qm->free_hash[r].no;
+	}
+	for(;r<=sfm_max_hash; r++){
+		total_frags+=qm->free_hash[r].no;
+		SFM_MAIN_HASH_LOCK(qm, r);
+		for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
+			info->free+=f->size;
+		}
+		SFM_MAIN_HASH_UNLOCK(qm, r);
+	}
+	for (k=0; k<SFM_POOLS_NO; k++){
+		for (r=0; r<SF_HASH_POOL_SIZE; r++){
+			info->free+=qm->pool[k].pool_hash[r].no*UN_HASH(r);
+			total_frags+=qm->pool[k].pool_hash[r].no;
+		}
+	}
+	info->real_used=info->total_size-info->free;
+	info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD
+				-FRAG_OVERHEAD;
+	info->max_used=0; /* we don't really know */
+	info->total_frags=total_frags;
+}
+
+
+
+/* returns how much free memory is available
+ * on error (not compiled with bookkeeping code) returns (unsigned long)(-1) */
+unsigned long sfm_available(struct sfm_block* qm)
+{
+	/* we don't know how much free memory we have and it's too expensive
+	 * to compute it */
+	return ((unsigned long)-1);
+}
+
+#endif

+ 177 - 0
mem/sf_malloc.h

@@ -0,0 +1,177 @@
+/* $Id$
+ *
+ * shared memory, multi-process safe, pool based version of f_malloc
+ *
+ * Copyright (C) 2007 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * History:
+ * --------
+ *  2003-05-21  on sparc64 roundto 8 even in debugging mode (so malloc'ed
+ *               long longs will be 64 bit aligned) (andrei)
+ *  2004-07-19  support for 64 bit (2^64 mem. block) and more info
+ *               for the future de-fragmentation support (andrei)
+ *  2004-11-10  support for > 4Gb mem., switched to long (andrei)
+ *  2007-06-09  forked from the f_malloc code (andrei)
+ */
+
+
+#if !defined(sf_malloc_h)  
+#define sf_malloc_h
+
+
+#include "meminfo.h"
+
+#include "../lock_ops.h"
+#include "../atomic_ops.h"
+#include "../compiler_opt.h"
+/* defs*/
+
+
+#ifdef GEN_LOCK_T_UNLIMITED
+#define SFM_LOCK_PER_BUCKET
+#else
+#define SFM_ONE_LOCK
+#endif
+
+#ifdef DBG_SF_MALLOC
+#if defined(__CPU_sparc64) || defined(__CPU_sparc)
+/* tricky, on sun in 32 bits mode long long must be 64 bits aligned
+ * but long can be 32 bits aligned => malloc should return long long
+ * aligned memory */
+	#define SF_ROUNDTO	sizeof(long long)
+#else
+	#define SF_ROUNDTO	sizeof(void*) /* size we round to, must be = 2^n, and
+                      sizeof(sfm_frag) must be multiple of SF_ROUNDTO !*/
+#endif
+#else /* DBG_SF_MALLOC */
+	#define SF_ROUNDTO 8UL
+#endif
+#define SF_MIN_FRAG_SIZE	SF_ROUNDTO
+
+#define SFM_POOLS_NO 4U /* the more the better, but higher initial
+                            mem. consumption */
+
+#define SF_MALLOC_OPTIMIZE_FACTOR 14UL /*used below */
+#define SF_MALLOC_OPTIMIZE  (1UL<<SF_MALLOC_OPTIMIZE_FACTOR)
+								/* size to optimize for,
+									(most allocs <= this size),
+									must be 2^k */
+
+#define SF_HASH_POOL_SIZE	(SF_MALLOC_OPTIMIZE/SF_ROUNDTO + 1)
+#define SF_POOL_MAX_SIZE	SF_MALLOC_OPTIMIZE
+
+#define SF_HASH_SIZE (SF_MALLOC_OPTIMIZE/SF_ROUNDTO + \
+		(sizeof(long)*8-SF_MALLOC_OPTIMIZE_FACTOR)+1)
+
+/* hash structure:
+ * 0 .... SF_MALLOC_OPTIMIZE/SF_ROUNDTO  - small buckets, size increases with
+ *                            SF_ROUNDTO from bucket to bucket
+ * +1 .... end -  size = 2^k, big buckets */
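+/* e.g. for the non-debug build (SF_ROUNDTO==8, SF_MALLOC_OPTIMIZE==2^14):
+ * bucket r, r<=2048, holds fragments of exactly r*8 bytes; the buckets
+ * after it hold the power of 2 ranges 2^14+1..2^15, 2^15+1..2^16 a.s.o. */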
+
+struct sfm_frag{
+	union{
+		struct sfm_frag* nxt_free;
+		long reserved;
+	}u;
+	unsigned long size;
+	unsigned long id; /* TODO: optimize the size */
+	/* pad to SF_ROUNDTO multiple */
+	char _pad[((3*sizeof(long)+SF_ROUNDTO-1)&~(SF_ROUNDTO-1))-3*sizeof(long)];
+#ifdef DBG_SF_MALLOC
+	const char* file;
+	const char* func;
+	unsigned long line;
+	unsigned long check;
+#endif
+};
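+/* note: _pad rounds the 3 longs above (u, size, id) up to a SF_ROUNDTO
+ * multiple, so that the user data following the header stays SF_ROUNDTO
+ * aligned */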
+
+struct sfm_frag_lnk{
+	struct sfm_frag* first;
+#ifdef SFM_LOCK_PER_BUCKET
+	gen_lock_t lock;
+#endif
+	unsigned long no;
+};
+
+struct sfm_pool_head{
+	struct sfm_frag* first;
+#ifdef SFM_LOCK_PER_BUCKET
+	gen_lock_t lock;
+#endif
+	unsigned long no;
+	unsigned long misses;
+};
+
+struct sfm_pool{
+#ifdef SFM_ONE_LOCK
+	gen_lock_t lock;
+#endif
+	unsigned long missed;
+	unsigned long hits; /* debugging only TODO: remove */
+	unsigned long bitmap;
+	struct sfm_pool_head pool_hash[SF_HASH_POOL_SIZE];
+};
+
+struct sfm_block{
+#ifdef SFM_ONE_LOCK
+	gen_lock_t lock;
+#endif
+	atomic_t crt_id; /* current pool */
+	unsigned long size; /* total size */
+	/* stats are kept now per bucket */
+	struct sfm_frag* first_frag;
+	struct sfm_frag* last_frag;
+	unsigned long bitmap; /* only up to SF_MALLOC_OPTIMIZE */
+	struct sfm_frag_lnk free_hash[SF_HASH_SIZE];
+	struct sfm_pool pool[SFM_POOLS_NO];
+	int is_init;
+	gen_lock_t get_and_split;
+	char _pad[256];
+};
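+/* the trailing 256 byte _pad keeps whatever is allocated right after the
+ * block header off the header's cache lines (very likely a false sharing
+ * precaution) */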
+
+
+
+struct sfm_block* sfm_malloc_init(char* address, unsigned long size);
+void sfm_malloc_destroy(struct sfm_block* qm);
+int sfm_pool_reset(void);
+
+#ifdef DBG_SF_MALLOC
+void* sfm_malloc(struct sfm_block*, unsigned long size,
+					const char* file, const char* func, unsigned int line);
+#else
+void* sfm_malloc(struct sfm_block*, unsigned long size);
+#endif
+
+#ifdef DBG_SF_MALLOC
+void  sfm_free(struct sfm_block*, void* p, const char* file, const char* func, 
+				unsigned int line);
+#else
+void  sfm_free(struct sfm_block*, void* p);
+#endif
+
+#ifdef DBG_SF_MALLOC
+void*  sfm_realloc(struct sfm_block*, void* p, unsigned long size, 
+					const char* file, const char* func, unsigned int line);
+#else
+void*  sfm_realloc(struct sfm_block*, void* p, unsigned long size);
+#endif
+
+void  sfm_status(struct sfm_block*);
+void  sfm_info(struct sfm_block*, struct mem_info*);
+
+unsigned long sfm_available(struct sfm_block*);
+
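+/* usage sketch (illustrative only, non-debug build; "pool_addr" and
+ * "pool_size" stand for an already set up shared memory region):
+ *
+ *	struct sfm_block* qm;
+ *	void* p;
+ *
+ *	qm=sfm_malloc_init(pool_addr, pool_size);
+ *	if (qm==0) goto error;
+ *	p=sfm_malloc(qm, 128);
+ *	p=sfm_realloc(qm, p, 256);
+ *	sfm_free(qm, p);
+ *	sfm_malloc_destroy(qm);
+ */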
+#endif

+ 16 - 1
mem/shm_mem.c

@@ -32,6 +32,7 @@
  *               (andrei)
  *  2004-07-27  ANON mmap support, needed on darwin (andrei)
  *  2004-09-19  shm_mem_destroy: destroy first the lock & then unmap (andrei)
+ *  2007-06-10   support for sfm_malloc & shm_malloc_destroy() (andrei)
  */
 
 
@@ -60,10 +61,16 @@
 static int shm_shmid=-1; /*shared memory id*/
 #endif
 
+#ifndef SHM_SAFE_MALLOC
 gen_lock_t* mem_lock=0;
+#endif
 
 static void* shm_mempool=(void*)-1;
-#ifdef VQ_MALLOC
+#ifdef LL_MALLOC
+	struct sfm_block* shm_block;
+#elif SF_MALLOC
+	struct sfm_block* shm_block;
+#elif VQ_MALLOC
 	struct vqm_block* shm_block;
 #elif F_MALLOC
 	struct fm_block* shm_block;
@@ -194,6 +201,7 @@ int shm_mem_init_mallocs(void* mempool, unsigned long pool_size)
 		shm_mem_destroy();
 		return -1;
 	}
+#ifndef SHM_SAFE_MALLOC
 	mem_lock=shm_malloc_unsafe(sizeof(gen_lock_t)); /* skip lock_alloc, 
 													   race cond*/
 	if (mem_lock==0){
@@ -206,6 +214,7 @@ int shm_mem_init_mallocs(void* mempool, unsigned long pool_size)
 		shm_mem_destroy();
 		return -1;
 	}
+#endif  /* SHM_SAFE_MALLOC */
 	
 	DBG("shm_mem_init: success\n");
 	
@@ -232,10 +241,16 @@ void shm_mem_destroy()
 #endif
 	
 	DBG("shm_mem_destroy\n");
+#ifndef SHM_SAFE_MALLOC
 	if (mem_lock){
 		DBG("destroying the shared memory lock\n");
 		lock_destroy(mem_lock); /* we don't need to dealloc it*/
 	}
+#endif  /* SHM_SAFE_MALLOC */
+	if (shm_block){
+		shm_malloc_destroy(shm_block);
+		shm_block=0;
+	}
 	if (shm_mempool && (shm_mempool!=(void*)-1)) {
 #ifdef SHM_MMAP
 		munmap(shm_mempool, /* SHM_MEM_SIZE */ shm_mem_size );

+ 74 - 7
mem/shm_mem.h

@@ -33,6 +33,7 @@
  *               realloc causes terrible fragmentation  (andrei)
  *  2005-03-02   added shm_info() & re-eneabled locking on shm_status (andrei)
  *  2007-02-23   added shm_available() (andrei)
+ *  2007-06-10   support for sf_malloc (andrei)
  */
 
 
@@ -74,13 +75,67 @@
 #include "../dprint.h"
 #include "../lock_ops.h" /* we don't include locking.h on purpose */
 
-#ifdef VQ_MALLOC
+#ifdef LL_MALLOC
+#	include "ll_malloc.h"
+#	define SHM_SAFE_MALLOC /* no need to lock */
+	extern struct sfm_block* shm_block;
+#ifdef __SUNPRO_C
+#	define shm_malloc(...) sfm_malloc(shm_block, __VA_ARGS__)
+#	define shm_free(...) sfm_free(shm_block, __VA_ARGS__)
+#	define shm_realloc(...) sfm_realloc(shm_block, __VA_ARGS__)
+	/* WARNING: test, especially if switched to real realloc */
+#	define shm_resize(...)	sfm_realloc(shm_block, __VA_ARGS__)
+#	define shm_info(...) sfm_info(shm_block, __VA_ARGS__)
+#else /* __SUNPRO_C */
+#	define shm_malloc(args...) sfm_malloc(shm_block, ## args)
+#	define shm_free(args...) sfm_free(shm_block, ## args)
+#	define shm_realloc(args...) sfm_realloc(shm_block, ## args)
+	/* WARNING: test, especially if switched to real realloc */
+#	define shm_resize(args...)	sfm_realloc(shm_block, ## args)
+#	define shm_info(args...) sfm_info(shm_block, ## args)
+#endif /* __SUNPRO_C */
+#	define shm_malloc_unsafe  shm_malloc
+#	define shm_free_unsafe shm_free
+#	define shm_available	sfm_available(shm_block)
+#	define shm_status() sfm_status(shm_block)
+#	define shm_malloc_init sfm_malloc_init
+#	define shm_malloc_destroy(b) sfm_malloc_destroy(b)
+#	define shm_malloc_on_fork()	sfm_pool_reset()
+#elif SF_MALLOC
+#	include "sf_malloc.h"
+#	define SHM_SAFE_MALLOC /* no need to lock */
+	extern struct sfm_block* shm_block;
+#ifdef __SUNPRO_C
+#	define shm_malloc(...) sfm_malloc(shm_block, __VA_ARGS__)
+#	define shm_free(...) sfm_free(shm_block, __VA_ARGS__)
+#	define shm_realloc(...) sfm_realloc(shm_block, __VA_ARGS__)
+	/* WARNING: test, especially if switched to real realloc */
+#	define shm_resize(...)	sfm_realloc(shm_block, __VA_ARGS__)
+#	define shm_info(...) sfm_info(shm_block, __VA_ARGS__)
+#else /* __SUNPRO_C */
+#	define shm_malloc(args...) sfm_malloc(shm_block, ## args)
+#	define shm_free(args...) sfm_free(shm_block, ## args)
+#	define shm_realloc(args...) sfm_realloc(shm_block, ## args)
+	/* WARNING: test, especially if switched to real realloc */
+#	define shm_resize(args...)	sfm_realloc(shm_block, ## args)
+#	define shm_info(args...) sfm_info(shm_block, ## args)
+#endif /* __SUNPRO_C */
+#	define shm_malloc_unsafe  shm_malloc
+#	define shm_free_unsafe shm_free
+#	define shm_available	sfm_available(shm_block)
+#	define shm_status() sfm_status(shm_block)
+#	define shm_malloc_init sfm_malloc_init
+#	define shm_malloc_destroy(b) sfm_malloc_destroy(b)
+#	define shm_malloc_on_fork()	sfm_pool_reset()
+#elif VQ_MALLOC
 #	include "vq_malloc.h"
 	extern struct vqm_block* shm_block;
 #	define MY_MALLOC vqm_malloc
 #	define MY_FREE vqm_free
 #	define MY_STATUS vqm_status
 #	define  shm_malloc_init vqm_malloc_init
+#	define shm_malloc_destroy(b) do{}while(0)
+#	define shm_malloc_on_fork() do{}while(0)
 #	warn "no proper vq_realloc implementation, try another memory allocator"
 #elif defined F_MALLOC
 #	include "f_malloc.h"
@@ -91,7 +146,9 @@
 #	define MY_STATUS fm_status
 #	define MY_MEMINFO	fm_info
 #	define  shm_malloc_init fm_malloc_init
+#	define shm_malloc_destroy(b) do{}while(0)
 #	define shm_available() fm_available(shm_block)
+#	define shm_malloc_on_fork() do{}while(0)
 #elif defined DL_MALLOC
 #	include "dl_malloc.h"
 	extern mspace shm_block;
@@ -101,6 +158,8 @@
 #	define MY_STATUS(...) 0
 #	define MY_MEMINFO	mspace_info
 #	define  shm_malloc_init(buf, len) create_mspace_with_base(buf, len, 0)
+#	define shm_malloc_destroy(b) do{}while(0)
+#	define shm_malloc_on_fork() do{}while(0)
 #else
 #	include "q_malloc.h"
 	extern struct qm_block* shm_block;
@@ -110,11 +169,14 @@
 #	define MY_STATUS qm_status
 #	define MY_MEMINFO	qm_info
 #	define  shm_malloc_init qm_malloc_init
+#	define shm_malloc_destroy(b) do{}while(0)
 #	define shm_available() qm_available(shm_block)
+#	define shm_malloc_on_fork() do{}while(0)
 #endif
 
-	
+#ifndef SHM_SAFE_MALLOC
 	extern gen_lock_t* mem_lock;
+#endif
 
 
 int shm_mem_init(int); /* calls shm_getmem & shm_mem_init_mallocs */
@@ -126,17 +188,21 @@ void shm_mem_destroy();
 
 
 
+#ifdef SHM_SAFE_MALLOC
+#define shm_lock() do{}while(0)
+#define shm_unlock() do{}while(0)
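+/* the sf/ll allocators are internally synchronized (or mostly lockless),
+ * so the global shm lock can safely degenerate to a no-op here */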
+
+#else /* ! SHM_SAFE_MALLOC */
+
 #define shm_lock()    lock_get(mem_lock)
 #define shm_unlock()  lock_release(mem_lock)
 
-
 #ifdef DBG_QM_MALLOC
 
 #ifdef __SUNPRO_C
 		#define __FUNCTION__ ""  /* gcc specific */
 #endif
 
-
 #define shm_malloc_unsafe(_size ) \
 	MY_MALLOC(shm_block, (_size), __FILE__, __FUNCTION__, __LINE__ )
 
@@ -234,7 +300,7 @@ void* _shm_resize(void* ptr, unsigned int size);
 /*#define shm_resize(_p, _s) shm_realloc( (_p), (_s))*/
 
 
-#endif
+#endif  /* DBG_QM_MALLOC */
 
 
 #define shm_status() \
@@ -252,8 +318,9 @@ do{\
 	shm_unlock(); \
 }while(0)
 
+#endif /* ! SHM_SAFE_MALLOC */
 
-#endif
+#endif /* shm_mem_h */
 
-#endif
+#endif /* SHM_MEM */
 

+ 2 - 0
pt.c

@@ -197,6 +197,7 @@ int fork_process(int child_id, char *desc, int make_sock)
 		process_no=child_process_no;
 		srand(new_seed1);
 		srandom(new_seed2+time(0));
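+		/* per-child allocator hook: maps to sfm_pool_reset() for
+		 * sf_malloc/ll_malloc and to a no-op for the other allocators
+		 * (see mem/shm_mem.h) */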
+		shm_malloc_on_fork();
 #ifdef PROFILING
 		monstartup((u_long) &_start, (u_long) &etext);
 #endif
@@ -330,6 +331,7 @@ int fork_tcp_process(int child_id, char *desc, int r, int *reader_fd_1)
 		process_no=child_process_no;
 		srand(new_seed1);
 		srandom(new_seed2+time(0));
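+		/* same per-child allocator hook as in fork_process() */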
+		shm_malloc_on_fork();
 #ifdef PROFILING
 		monstartup((u_long) &_start, (u_long) &etext);
 #endif

+ 30 - 2
version.h

@@ -143,6 +143,18 @@
 #define DL_MALLOC_STR ""
 #endif
 
+#ifdef SF_MALLOC
+#define SF_MALLOC_STR ", SF_MALLOC"
+#else
+#define SF_MALLOC_STR ""
+#endif
+
+#ifdef LL_MALLOC
+#define LL_MALLOC_STR ", LL_MALLOC"
+#else
+#define LL_MALLOC_STR ""
+#endif
+
 #ifdef USE_SHM_MEM
 #ifdef USE_SHM_MEM
 #define USE_SHM_MEM_STR ", USE_SHM_MEM"
 #else
 #define DEBUG_DMALLOC_STR ""
 #define DEBUG_DMALLOC_STR ""
 #endif
 
+#define DBG_SF_MALLOC_STR ", DBG_SF_MALLOC"
+#else
+#define DBG_SF_MALLOC_STR ""
+#endif
+
+#ifdef DBG_LL_MALLOC
+#define DBG_LL_MALLOC_STR ", DBG_LL_MALLOC"
+#else
+#define DBG_LL_MALLOC_STR ""
+#endif
+
 #ifdef TIMER_DEBUG
 #define TIMER_DEBUG_STR ", TIMER_DEBUG"
 #else
@@ -259,8 +283,12 @@
 	CORE_TLS_STR TLS_HOOKS_STR \
 	USE_STUN_STR DISABLE_NAGLE_STR USE_MCAST_STR NO_DEBUG_STR NO_LOG_STR \
 	NO_SIG_DEBUG_STR DNS_IP_HACK_STR  SHM_MEM_STR SHM_MMAP_STR PKG_MALLOC_STR \
-	VQ_MALLOC_STR F_MALLOC_STR DL_MALLOC_STR USE_SHM_MEM_STR DBG_QM_MALLOC_STR \
-	DBG_F_MALLOC_STR DEBUG_DMALLOC_STR TIMER_DEBUG_STR USE_FUTEX_STR \
+	VQ_MALLOC_STR F_MALLOC_STR DL_MALLOC_STR SF_MALLOC_STR  LL_MALLOC_STR \
+	USE_SHM_MEM_STR \
+	DBG_QM_MALLOC_STR \
+	DBG_F_MALLOC_STR DEBUG_DMALLOC_STR DBG_SF_MALLOC_STR DBG_LL_MALLOC_STR \
+	TIMER_DEBUG_STR \
+	USE_FUTEX_STR \
 	FAST_LOCK_STR NOSMP_STR USE_PTHREAD_MUTEX_STR USE_POSIX_SEM_STR \
 	USE_SYSV_SEM_STR USE_COMP_STR USE_DNS_CACHE_STR USE_DNS_FAILOVER_STR \
 	USE_DST_BLACKLIST_STR