simplify names

commit fd6bce0c13 by flashmob, 6 years ago
8 changed files with 87 additions and 87 deletions
  1. chunk/buffer.go (+19 -19)
  2. chunk/decoder.go (+1 -1)
  3. chunk/processor.go (+5 -5)
  4. chunk/reader.go (+7 -7)
  5. chunk/s_chunksaver_test.go (+7 -7)
  6. chunk/store.go (+7 -7)
  7. chunk/store_memory.go (+26 -26)
  8. chunk/store_sql.go (+15 -15)

chunk/buffer.go (+19 -19)

@@ -12,13 +12,13 @@ import (
 
 type flushEvent func() error
 
-type chunkedBytesBuffer struct {
+type chunkingBuffer struct {
 	buf          []byte
 	flushTrigger flushEvent
 }
 
 // Flush signals that it's time to write the buffer out to storage
-func (c *chunkedBytesBuffer) Flush() error {
+func (c *chunkingBuffer) Flush() error {
 	if len(c.buf) == 0 {
 		return nil
 	}
@@ -33,13 +33,13 @@ func (c *chunkedBytesBuffer) Flush() error {
 }
 
 // Reset sets the length back to 0, making it re-usable
-func (c *chunkedBytesBuffer) Reset() {
+func (c *chunkingBuffer) Reset() {
 	c.buf = c.buf[:0] // set the length back to 0
 }
 
 // Write takes a p slice of bytes and writes it to the buffer.
 // It will never grow the buffer, flushing it as soon as it's full.
-func (c *chunkedBytesBuffer) Write(p []byte) (i int, err error) {
+func (c *chunkingBuffer) Write(p []byte) (i int, err error) {
 	remaining := len(p)
 	bufCap := cap(c.buf)
 	for {
@@ -66,25 +66,25 @@ func (c *chunkedBytesBuffer) Write(p []byte) (i int, err error) {
 }
 
 // CapTo caps the internal buffer to specified number of bytes, sets the length back to 0
-func (c *chunkedBytesBuffer) CapTo(n int) {
+func (c *chunkingBuffer) CapTo(n int) {
 	if cap(c.buf) == n {
 		return
 	}
 	c.buf = make([]byte, 0, n)
 }
 
-// ChunkedBytesBufferMime decorates chunkedBytesBuffer, specifying that to do when a flush event is triggered
-type ChunkedBytesBufferMime struct {
-	chunkedBytesBuffer
+// ChunkingBufferMime decorates chunkingBuffer, specifying that to do when a flush event is triggered
+type ChunkingBufferMime struct {
+	chunkingBuffer
 	current  *mime.Part
 	Info     PartsInfo
 	md5      hash.Hash
-	database ChunkSaverStorage
+	database Storage
 }
 
-func NewChunkedBytesBufferMime() *ChunkedBytesBufferMime {
-	b := new(ChunkedBytesBufferMime)
-	b.chunkedBytesBuffer.flushTrigger = func() error {
+func NewChunkedBytesBufferMime() *ChunkingBufferMime {
+	b := new(ChunkingBufferMime)
+	b.chunkingBuffer.flushTrigger = func() error {
 		return b.onFlush()
 	}
 	b.md5 = md5.New()
@@ -92,14 +92,14 @@ func NewChunkedBytesBufferMime() *ChunkedBytesBufferMime {
 	return b
 }
 
-func (b *ChunkedBytesBufferMime) SetDatabase(database ChunkSaverStorage) {
+func (b *ChunkingBufferMime) SetDatabase(database Storage) {
 	b.database = database
 }
 
 // onFlush is called whenever the flush event fires.
 // - It saves the chunk to disk and adds the chunk's hash to the list.
 // - It builds the b.Info.Parts structure
-func (b *ChunkedBytesBufferMime) onFlush() error {
+func (b *ChunkingBufferMime) onFlush() error {
 	b.md5.Write(b.buf)
 	var chash HashKey
 	copy(chash[:], b.md5.Sum([]byte{}))
@@ -130,7 +130,7 @@ func (b *ChunkedBytesBufferMime) onFlush() error {
 	return nil
 }
 
-func (b *ChunkedBytesBufferMime) fillInfo(cp *ChunkedPart, index int) {
+func (b *ChunkingBufferMime) fillInfo(cp *ChunkedPart, index int) {
 	if cp.ContentType == "" && b.current.ContentType != nil {
 		cp.ContentType = b.current.ContentType.String()
 	}
@@ -155,13 +155,13 @@ func (b *ChunkedBytesBufferMime) fillInfo(cp *ChunkedPart, index int) {
 	}
 }
 
-// Reset decorates the Reset method of the chunkedBytesBuffer
-func (b *ChunkedBytesBufferMime) Reset() {
+// Reset decorates the Reset method of the chunkingBuffer
+func (b *ChunkingBufferMime) Reset() {
 	b.md5.Reset()
-	b.chunkedBytesBuffer.Reset()
+	b.chunkingBuffer.Reset()
 }
 
-func (b *ChunkedBytesBufferMime) CurrentPart(cp *mime.Part) {
+func (b *ChunkingBufferMime) CurrentPart(cp *mime.Part) {
 	if b.current == nil {
 		b.Info = *NewPartsInfo()
 		b.Info.Parts = make([]ChunkedPart, 0, 3)

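The renamed chunkingBuffer keeps the old contract: the buffer never grows past its capacity, and flushTrigger fires every time it fills. A minimal, standalone sketch of that flush-on-full pattern (illustrative only; the names below are stand-ins, not the package source):

package main

import "fmt"

type flushEvent func() error

// fixedBuffer mirrors the shape of chunkingBuffer for illustration.
type fixedBuffer struct {
	buf          []byte
	flushTrigger flushEvent
}

// capTo allocates a fresh zero-length buffer with capacity n.
func (c *fixedBuffer) capTo(n int) {
	if cap(c.buf) != n {
		c.buf = make([]byte, 0, n)
	}
}

// write copies p into the buffer and flushes whenever the buffer fills,
// so it never grows beyond the capacity set by capTo.
func (c *fixedBuffer) write(p []byte) (int, error) {
	written := 0
	for len(p) > 0 {
		free := cap(c.buf) - len(c.buf)
		n := len(p)
		if n > free {
			n = free
		}
		c.buf = append(c.buf, p[:n]...)
		p = p[n:]
		written += n
		if len(c.buf) == cap(c.buf) {
			if err := c.flushTrigger(); err != nil {
				return written, err
			}
			c.buf = c.buf[:0] // reset: length back to 0, capacity kept
		}
	}
	return written, nil
}

func main() {
	b := &fixedBuffer{}
	b.capTo(8) // must be called before write, or capacity stays 0
	b.flushTrigger = func() error {
		fmt.Printf("flush %d bytes: %q\n", len(b.buf), b.buf)
		return nil
	}
	_, _ = b.write([]byte("0123456789abcdefxyz")) // flushes twice; "xyz" stays buffered
}
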
chunk/decoder.go (+1 -1)

@@ -22,7 +22,7 @@ type chunkPartDecoder struct {
 	r io.Reader
 }
 
-// db ChunkSaverStorage, email *ChunkSaverEmail, part int)
+// db Storage, email *Email, part int)
 /*
 
 r, err := NewChunkMailReader(db, email, part)

chunk/processor.go (+5 -5)

@@ -63,9 +63,9 @@ func Chunksaver() *backends.StreamDecorator {
 		func(sp backends.StreamProcessor, a ...interface{}) backends.StreamProcessor {
 			var (
 				envelope    *mail.Envelope
-				chunkBuffer *ChunkedBytesBufferMime
+				chunkBuffer *ChunkingBufferMime
 				msgPos      uint
-				database    ChunkSaverStorage
+				database    Storage
 				written     int64
 
 				// just some headers from the first mime-part
@@ -79,10 +79,10 @@ func Chunksaver() *backends.StreamDecorator {
 			var config *ChunkSaverConfig
 			// optional dependency injection
 			for i := range a {
-				if db, ok := a[i].(ChunkSaverStorage); ok {
+				if db, ok := a[i].(Storage); ok {
 					database = db
 				}
-				if buff, ok := a[i].(*ChunkedBytesBufferMime); ok {
+				if buff, ok := a[i].(*ChunkingBufferMime); ok {
 					chunkBuffer = buff
 				}
 			}
@@ -101,7 +101,7 @@ func Chunksaver() *backends.StreamDecorator {
 				// configure storage if none was injected
 				if database == nil {
 					if config.StorageEngine == "memory" {
-						db := new(ChunkSaverMemory)
+						db := new(StoreMemory)
						db.CompressLevel = config.CompressLevel
						database = db
					} else {

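Chunksaver's "optional dependency injection" is just a type switch over the decorator's variadic arguments: a Storage or a *ChunkingBufferMime passed in overrides whatever the config would otherwise build. A standalone sketch of that pattern with stand-in types (not the package code):

package main

import "fmt"

// Storage and buffer are stand-ins for the package's Storage and ChunkingBufferMime.
type Storage interface{ Name() string }

type memStore struct{}

func (memStore) Name() string { return "memory" }

type buffer struct{ size int }

// configure picks out injected dependencies by type assertion,
// falling back to a default when nothing was injected.
func configure(a ...interface{}) {
	var db Storage
	var buf *buffer
	for i := range a {
		if d, ok := a[i].(Storage); ok { // injected storage wins over config
			db = d
		}
		if b, ok := a[i].(*buffer); ok {
			buf = b
		}
	}
	if db == nil {
		db = memStore{} // fall back to the configured default engine
	}
	fmt.Println(db.Name(), buf != nil)
}

func main() {
	configure(memStore{}, &buffer{size: 64}) // prints "memory true"
	configure()                              // prints "memory false"
}
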
chunk/reader.go (+7 -7)

@@ -7,8 +7,8 @@ import (
 )
 
 type chunkMailReader struct {
-	db    ChunkSaverStorage
-	email *ChunkSaverEmail
+	db    Storage
+	email *Email
 	// part requests a part. If 0, all the parts are read sequentially
 	part int
 	i, j int
@@ -18,7 +18,7 @@ type chunkMailReader struct {
 
 // NewChunkMailReader loads the email and selects which mime-part Read will read, starting from 1
 // if part is 0, Read will read in the entire message. 1 selects the first part, 2 2nd, and so on..
-func NewChunkMailReader(db ChunkSaverStorage, email *ChunkSaverEmail, part int) (*chunkMailReader, error) {
+func NewChunkMailReader(db Storage, email *Email, part int) (*chunkMailReader, error) {
 	r := new(chunkMailReader)
 	r.db = db
 	r.part = part
@@ -50,9 +50,9 @@ func (r *chunkMailReader) SeekPart(part int) error {
 }
 
 type cachedChunks struct {
-	chunks    []*ChunkSaverChunk
+	chunks    []*Chunk
 	hashIndex map[int]HashKey
-	db        ChunkSaverStorage
+	db        Storage
 }
 
 const chunkCachePreload = 2
@@ -64,7 +64,7 @@ func (c *cachedChunks) warm(hashes ...HashKey) (int, error) {
 		c.hashIndex = make(map[int]HashKey, len(hashes))
 	}
 	if c.chunks == nil {
-		c.chunks = make([]*ChunkSaverChunk, 0, 100)
+		c.chunks = make([]*Chunk, 0, 100)
 	}
 	if len(c.chunks) > 0 {
 		// already been filled
@@ -93,7 +93,7 @@ func (c *cachedChunks) warm(hashes ...HashKey) (int, error) {
 
 // get returns a chunk. If the chunk doesn't exist, it gets it and pre-loads the next few
 // also removes the previous chunks that now have become stale
-func (c *cachedChunks) get(i int) (*ChunkSaverChunk, error) {
+func (c *cachedChunks) get(i int) (*Chunk, error) {
 	if i > len(c.chunks) {
 		return nil, errors.New("not enough chunks")
 	}

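From a caller's point of view, the renamed reader API reads a stored message back roughly like this. A hedged sketch only: the chunk import path, the dumpMessage helper, writing to os.Stdout, and the returned reader satisfying io.Reader are assumptions for illustration.

package example

import (
	"io"
	"os"

	"github.com/flashmob/go-guerrilla/chunk" // assumed import path
)

// dumpMessage is a hypothetical helper showing the renamed types in use.
func dumpMessage(db chunk.Storage, mailID uint64) error {
	email, err := db.GetEmail(mailID) // *chunk.Email after the rename
	if err != nil {
		return err
	}
	// part 0 reads the entire message; 1 selects the first MIME part, and so on.
	r, err := chunk.NewChunkMailReader(db, email, 0)
	if err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, r) // stream the reassembled message
	return err
}
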
chunk/s_chunksaver_test.go (+7 -7)

@@ -13,7 +13,7 @@ import (
 func TestChunkedBytesBuffer(t *testing.T) {
 	var in string
 
-	var buf chunkedBytesBuffer
+	var buf chunkingBuffer
 	buf.CapTo(64)
 
 	// the data to write is over-aligned
@@ -24,7 +24,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is aligned
-	var buf2 chunkedBytesBuffer
+	var buf2 chunkingBuffer
 	buf2.CapTo(64)
 	in = `123456789012345678901234567890123456789012345678901234567890abcde12345678901234567890123456789012345678901234567890123456789abcd` // len == 128
 	i, _ = buf2.Write([]byte(in[:]))
@@ -33,7 +33,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is under-aligned
-	var buf3 chunkedBytesBuffer
+	var buf3 chunkingBuffer
 	buf3.CapTo(64)
 	in = `123456789012345678901234567890123456789012345678901234567890abcde12345678901234567890123456789012345678901234567890123456789ab` // len == 126
 	i, _ = buf3.Write([]byte(in[:]))
@@ -42,7 +42,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is smaller than the buffer
-	var buf4 chunkedBytesBuffer
+	var buf4 chunkingBuffer
 	buf4.CapTo(64)
 	in = `1234567890` // len == 10
 	i, _ = buf4.Write([]byte(in[:]))
@@ -52,7 +52,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 
 	// what if the buffer already contains stuff before Write is called
 	// and the buffer len is smaller than the len of the slice of bytes we pass it?
-	var buf5 chunkedBytesBuffer
+	var buf5 chunkingBuffer
 	buf5.CapTo(5)
 	buf5.buf = append(buf5.buf, []byte{'a', 'b', 'c'}...)
 	in = `1234567890` // len == 10
@@ -346,7 +346,7 @@ func TestChunkSaverWrite(t *testing.T) {
 	e.RcptTo = append(e.RcptTo, to)
 	e.MailFrom, _ = mail.NewAddress("[email protected]")
 
-	store := new(ChunkSaverMemory)
+	store := new(StoreMemory)
 	chunkBuffer := NewChunkedBytesBufferMime()
 	//chunkBuffer.setDatabase(store)
 	// instantiate the chunk saver
@@ -357,7 +357,7 @@ func TestChunkSaverWrite(t *testing.T) {
 	// and chain it with mimeanalyzer.
 	// Call order: mimeanalyzer -> chunksaver -> default (terminator)
 	// This will also set our Open, Close and Initialize functions
-	// we also inject a ChunkSaverStorage and a ChunkedBytesBufferMime
+	// we also inject a Storage and a ChunkingBufferMime
 
 	stream := mimeanalyzer.Decorate(chunksaver.Decorate(backends.DefaultStreamProcessor{}, store, chunkBuffer))
 

chunk/store.go (+7 -7)

@@ -7,8 +7,8 @@ import (
 	"time"
 )
 
-// ChunkSaverStorage defines an interface to the storage layer (the database)
-type ChunkSaverStorage interface {
+// Storage defines an interface to the storage layer (the database)
+type Storage interface {
 	// OpenMessage is used to begin saving an email. An email id is returned and used to call CloseMessage later
 	OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error)
 	// CloseMessage finalizes the writing of an email. Additional data collected while parsing the email is saved
@@ -16,17 +16,17 @@ type ChunkSaverStorage interface {
 	// AddChunk saves a chunk of bytes to a given hash key
 	AddChunk(data []byte, hash []byte) error
 	// GetEmail returns an email that's been saved
-	GetEmail(mailID uint64) (*ChunkSaverEmail, error)
+	GetEmail(mailID uint64) (*Email, error)
 	// GetChunks loads in the specified chunks of bytes from storage
-	GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error)
+	GetChunks(hash ...HashKey) ([]*Chunk, error)
 	// Initialize is called when the backend is started
 	Initialize(cfg backends.BackendConfig) error
 	// Shutdown is called when the backend gets shutdown.
 	Shutdown() (err error)
 }
 
-// ChunkSaverEmail represents an email
-type ChunkSaverEmail struct {
+// Email represents an email
+type Email struct {
 	mailID     uint64
 	createdAt  time.Time
 	size       int64
@@ -43,7 +43,7 @@ type ChunkSaverEmail struct {
 	isTLS      bool       // isTLS is true when TLS was used to connect
 }
 
-type ChunkSaverChunk struct {
+type Chunk struct {
 	modifiedAt     time.Time
 	referenceCount uint // referenceCount counts how many emails reference this chunk
 	data           io.Reader

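After the rename, a custom store only has to satisfy chunk.Storage. A hedged sketch of a do-nothing implementation, the kind of stub one might use in tests; the import paths and the nopStore name are assumptions for illustration, not part of the commit:

package example

import (
	"net"

	"github.com/flashmob/go-guerrilla/backends" // assumed import paths
	"github.com/flashmob/go-guerrilla/chunk"
)

// nopStore is a hypothetical no-op chunk.Storage implementation.
type nopStore struct{}

func (nopStore) OpenMessage(from, helo, recipient string, ip net.IPAddr, returnPath string, isTLS bool) (uint64, error) {
	return 1, nil // pretend mail ID
}

func (nopStore) CloseMessage(mailID uint64, size int64, partsInfo *chunk.PartsInfo, subject, deliveryID, to, from string) error {
	return nil
}

func (nopStore) AddChunk(data []byte, hash []byte) error                 { return nil }
func (nopStore) GetEmail(mailID uint64) (*chunk.Email, error)            { return &chunk.Email{}, nil }
func (nopStore) GetChunks(hash ...chunk.HashKey) ([]*chunk.Chunk, error) { return nil, nil }
func (nopStore) Initialize(cfg backends.BackendConfig) error             { return nil }
func (nopStore) Shutdown() (err error)                                   { return nil }

// compile-time assertion that nopStore satisfies the renamed interface
var _ chunk.Storage = nopStore{}
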
chunk/store_memory.go (+26 -26)

@@ -9,15 +9,15 @@ import (
 	"time"
 )
 
-type ChunkSaverMemory struct {
-	chunks        map[HashKey]*chunkSaverMemoryChunk
-	emails        []*chunkSaverMemoryEmail
+type StoreMemory struct {
+	chunks        map[HashKey]*memoryChunk
+	emails        []*memoryEmail
 	nextID        uint64
 	IDOffset      uint64
 	CompressLevel int
 }
 
-type chunkSaverMemoryEmail struct {
+type memoryEmail struct {
 	mailID     uint64
 	createdAt  time.Time
 	size       int64
@@ -34,21 +34,21 @@ type chunkSaverMemoryEmail struct {
 	isTLS      bool
 }
 
-type chunkSaverMemoryChunk struct {
+type memoryChunk struct {
 	modifiedAt     time.Time
 	referenceCount uint
 	data           []byte
 }
 
-// OpenMessage implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
+// OpenMessage implements the Storage interface
+func (m *StoreMemory) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
 	var ip4, ip6 net.IPAddr
 	if ip := ipAddress.IP.To4(); ip != nil {
 		ip4 = ipAddress
 	} else {
 		ip6 = ipAddress
 	}
-	email := chunkSaverMemoryEmail{
+	email := memoryEmail{
 		mailID:     m.nextID,
 		createdAt:  time.Now(),
 		from:       from,
@@ -64,8 +64,8 @@ func (m *ChunkSaverMemory) OpenMessage(from string, helo string, recipient strin
 	return email.mailID, nil
 }
 
-// CloseMessage implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
+// CloseMessage implements the Storage interface
+func (m *StoreMemory) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
 	if email := m.emails[mailID-m.IDOffset]; email == nil {
 		return errors.New("email not found")
 	} else {
@@ -84,8 +84,8 @@ func (m *ChunkSaverMemory) CloseMessage(mailID uint64, size int64, partsInfo *Pa
 	return nil
 }
 
-// AddChunk implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
+// AddChunk implements the Storage interface
+func (m *StoreMemory) AddChunk(data []byte, hash []byte) error {
 	var key HashKey
 	if len(hash) != hashByteSize {
 		return errors.New("invalid hash")
@@ -108,7 +108,7 @@ func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
 			return err
 		}
 		// add a new chunk
-		newChunk := chunkSaverMemoryChunk{
+		newChunk := memoryChunk{
 			modifiedAt:     time.Now(),
 			referenceCount: 1,
 			data:           compressed.Bytes(),
@@ -118,25 +118,25 @@ func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
 	return nil
 }
 
-// Initialize implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) Initialize(cfg backends.BackendConfig) error {
+// Initialize implements the Storage interface
+func (m *StoreMemory) Initialize(cfg backends.BackendConfig) error {
 	m.IDOffset = 1
 	m.nextID = m.IDOffset
-	m.emails = make([]*chunkSaverMemoryEmail, 0, 100)
-	m.chunks = make(map[HashKey]*chunkSaverMemoryChunk, 1000)
+	m.emails = make([]*memoryEmail, 0, 100)
+	m.chunks = make(map[HashKey]*memoryChunk, 1000)
 	m.CompressLevel = zlib.NoCompression
 	return nil
 }
 
-// Shutdown implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) Shutdown() (err error) {
+// Shutdown implements the Storage interface
+func (m *StoreMemory) Shutdown() (err error) {
 	m.emails = nil
 	m.chunks = nil
 	return nil
 }
 
-// GetEmail implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
+// GetEmail implements the Storage interface
+func (m *StoreMemory) GetEmail(mailID uint64) (*Email, error) {
 	if size := uint64(len(m.emails)) - m.IDOffset; size > mailID-m.IDOffset {
 		return nil, errors.New("mail not found")
 	}
@@ -145,7 +145,7 @@ func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
 	if err := pi.UnmarshalJSONZlib(email.partsInfo); err != nil {
 		return nil, err
 	}
-	return &ChunkSaverEmail{
+	return &Email{
 		mailID:     email.mailID,
 		createdAt:  email.createdAt,
 		size:       email.size,
@@ -163,9 +163,9 @@ func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
 	}, nil
 }
 
-// GetChunk implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error) {
-	result := make([]*ChunkSaverChunk, 0, len(hash))
+// GetChunk implements the Storage interface
+func (m *StoreMemory) GetChunks(hash ...HashKey) ([]*Chunk, error) {
+	result := make([]*Chunk, 0, len(hash))
 	var key HashKey
 	for i := range hash {
 		key = hash[i]
@@ -174,7 +174,7 @@ func (m *ChunkSaverMemory) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error
 			if err != nil {
 				return nil, err
 			}
-			result = append(result, &ChunkSaverChunk{
+			result = append(result, &Chunk{
 				modifiedAt:     c.modifiedAt,
 				referenceCount: c.referenceCount,
 				data:           zwr,

chunk/store_sql.go (+15 -15)

@@ -8,7 +8,7 @@ import (
 	"net"
 )
 
-type chunkSaverSQLConfig struct {
+type sqlConfig struct {
 	EmailTable  string `json:"chunksaver_email_table,omitempty"`
 	ChunkTable  string `json:"chunksaver_chunk_table,omitempty"`
 	Driver      string `json:"chunksaver_sql_driver,omitempty"`
@@ -16,9 +16,9 @@ type chunkSaverSQLConfig struct {
 	PrimaryHost string `json:"chunksaver_primary_mail_host,omitempty"`
 }
 
-// ChunkSaverSQL implements the ChunkSaverStorage interface
+// ChunkSaverSQL implements the Storage interface
 type ChunkSaverSQL struct {
-	config     *chunkSaverSQLConfig
+	config     *sqlConfig
 	statements map[string]*sql.Stmt
 	db         *sql.DB
 }
@@ -120,7 +120,7 @@ func (c *ChunkSaverSQL) prepareSql() error {
 	return nil
 }
 
-// OpenMessage implements the ChunkSaverStorage interface
+// OpenMessage implements the Storage interface
 func (c *ChunkSaverSQL) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
 
 	// if it's ipv4 then we want ipv6 to be 0, and vice-versa
@@ -142,7 +142,7 @@ func (c *ChunkSaverSQL) OpenMessage(from string, helo string, recipient string,
 	return uint64(id), err
 }
 
-// AddChunk implements the ChunkSaverStorage interface
+// AddChunk implements the Storage interface
 func (c *ChunkSaverSQL) AddChunk(data []byte, hash []byte) error {
 	// attempt to increment the reference_count (it means the chunk is already in there)
 	r, err := c.statements["chunkReferenceIncr"].Exec(hash)
@@ -163,7 +163,7 @@ func (c *ChunkSaverSQL) AddChunk(data []byte, hash []byte) error {
 	return nil
 }
 
-// CloseMessage implements the ChunkSaverStorage interface
+// CloseMessage implements the Storage interface
 func (c *ChunkSaverSQL) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
 	partsInfoJson, err := json.Marshal(partsInfo)
 	if err != nil {
@@ -178,12 +178,12 @@ func (c *ChunkSaverSQL) CloseMessage(mailID uint64, size int64, partsInfo *Parts
 
 // Initialize loads the specific database config, connects to the db, prepares statements
 func (c *ChunkSaverSQL) Initialize(cfg backends.BackendConfig) error {
-	configType := backends.BaseConfig(&chunkSaverSQLConfig{})
+	configType := backends.BaseConfig(&sqlConfig{})
 	bcfg, err := backends.Svc.ExtractConfig(cfg, configType)
 	if err != nil {
 		return err
 	}
-	c.config = bcfg.(*chunkSaverSQLConfig)
+	c.config = bcfg.(*sqlConfig)
 	c.db, err = c.connect()
 	if err != nil {
 		return err
@@ -195,7 +195,7 @@ func (c *ChunkSaverSQL) Initialize(cfg backends.BackendConfig) error {
 	return nil
 }
 
-// Shutdown implements the ChunkSaverStorage interface
+// Shutdown implements the Storage interface
 func (c *ChunkSaverSQL) Shutdown() (err error) {
 	defer func() {
 		closeErr := c.db.Close()
@@ -212,13 +212,13 @@ func (c *ChunkSaverSQL) Shutdown() (err error) {
 	return err
 }
 
-// GetEmail implements the ChunkSaverStorage interface
-func (c *ChunkSaverSQL) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
-	return &ChunkSaverEmail{}, nil
+// GetEmail implements the Storage interface
+func (c *ChunkSaverSQL) GetEmail(mailID uint64) (*Email, error) {
+	return &Email{}, nil
 }
 
-// GetChunk implements the ChunkSaverStorage interface
-func (c *ChunkSaverSQL) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error) {
-	result := make([]*ChunkSaverChunk, 0, len(hash))
+// GetChunk implements the Storage interface
+func (c *ChunkSaverSQL) GetChunks(hash ...HashKey) ([]*Chunk, error) {
+	result := make([]*Chunk, 0, len(hash))
 	return result, nil
 }