Browse Source

simplify names

flashmob 6 years ago
parent
commit
fd6bce0c13
8 changed files with 87 additions and 87 deletions
  1. 19 19
      chunk/buffer.go
  2. 1 1
      chunk/decoder.go
  3. 5 5
      chunk/processor.go
  4. 7 7
      chunk/reader.go
  5. 7 7
      chunk/s_chunksaver_test.go
  6. 7 7
      chunk/store.go
  7. 26 26
      chunk/store_memory.go
  8. 15 15
      chunk/store_sql.go

+ 19 - 19
chunk/buffer.go

@@ -12,13 +12,13 @@ import (
 
 type flushEvent func() error
 
-type chunkedBytesBuffer struct {
+type chunkingBuffer struct {
 	buf          []byte
 	flushTrigger flushEvent
 }
 
 // Flush signals that it's time to write the buffer out to storage
-func (c *chunkedBytesBuffer) Flush() error {
+func (c *chunkingBuffer) Flush() error {
 	if len(c.buf) == 0 {
 		return nil
 	}
@@ -33,13 +33,13 @@ func (c *chunkedBytesBuffer) Flush() error {
 }
 
 // Reset sets the length back to 0, making it re-usable
-func (c *chunkedBytesBuffer) Reset() {
+func (c *chunkingBuffer) Reset() {
 	c.buf = c.buf[:0] // set the length back to 0
 }
 
 // Write takes a p slice of bytes and writes it to the buffer.
 // It will never grow the buffer, flushing it as soon as it's full.
-func (c *chunkedBytesBuffer) Write(p []byte) (i int, err error) {
+func (c *chunkingBuffer) Write(p []byte) (i int, err error) {
 	remaining := len(p)
 	bufCap := cap(c.buf)
 	for {
@@ -66,25 +66,25 @@ func (c *chunkedBytesBuffer) Write(p []byte) (i int, err error) {
 }
 
 // CapTo caps the internal buffer to specified number of bytes, sets the length back to 0
-func (c *chunkedBytesBuffer) CapTo(n int) {
+func (c *chunkingBuffer) CapTo(n int) {
 	if cap(c.buf) == n {
 		return
 	}
 	c.buf = make([]byte, 0, n)
 }
 
-// ChunkedBytesBufferMime decorates chunkedBytesBuffer, specifying that to do when a flush event is triggered
-type ChunkedBytesBufferMime struct {
-	chunkedBytesBuffer
+// ChunkingBufferMime decorates chunkingBuffer, specifying what to do when a flush event is triggered
+type ChunkingBufferMime struct {
+	chunkingBuffer
 	current  *mime.Part
 	Info     PartsInfo
 	md5      hash.Hash
-	database ChunkSaverStorage
+	database Storage
 }
 
-func NewChunkedBytesBufferMime() *ChunkedBytesBufferMime {
-	b := new(ChunkedBytesBufferMime)
-	b.chunkedBytesBuffer.flushTrigger = func() error {
+func NewChunkedBytesBufferMime() *ChunkingBufferMime {
+	b := new(ChunkingBufferMime)
+	b.chunkingBuffer.flushTrigger = func() error {
 		return b.onFlush()
 	}
 	b.md5 = md5.New()
@@ -92,14 +92,14 @@ func NewChunkedBytesBufferMime() *ChunkedBytesBufferMime {
 	return b
 }
 
-func (b *ChunkedBytesBufferMime) SetDatabase(database ChunkSaverStorage) {
+func (b *ChunkingBufferMime) SetDatabase(database Storage) {
 	b.database = database
 }
 
 // onFlush is called whenever the flush event fires.
 // - It saves the chunk to disk and adds the chunk's hash to the list.
 // - It builds the b.Info.Parts structure
-func (b *ChunkedBytesBufferMime) onFlush() error {
+func (b *ChunkingBufferMime) onFlush() error {
 	b.md5.Write(b.buf)
 	var chash HashKey
 	copy(chash[:], b.md5.Sum([]byte{}))
@@ -130,7 +130,7 @@ func (b *ChunkedBytesBufferMime) onFlush() error {
 	return nil
 }
 
-func (b *ChunkedBytesBufferMime) fillInfo(cp *ChunkedPart, index int) {
+func (b *ChunkingBufferMime) fillInfo(cp *ChunkedPart, index int) {
 	if cp.ContentType == "" && b.current.ContentType != nil {
 		cp.ContentType = b.current.ContentType.String()
 	}
@@ -155,13 +155,13 @@ func (b *ChunkedBytesBufferMime) fillInfo(cp *ChunkedPart, index int) {
 	}
 }
 
-// Reset decorates the Reset method of the chunkedBytesBuffer
-func (b *ChunkedBytesBufferMime) Reset() {
+// Reset decorates the Reset method of the chunkingBuffer
+func (b *ChunkingBufferMime) Reset() {
 	b.md5.Reset()
-	b.chunkedBytesBuffer.Reset()
+	b.chunkingBuffer.Reset()
 }
 
-func (b *ChunkedBytesBufferMime) CurrentPart(cp *mime.Part) {
+func (b *ChunkingBufferMime) CurrentPart(cp *mime.Part) {
 	if b.current == nil {
 		b.Info = *NewPartsInfo()
 		b.Info.Parts = make([]ChunkedPart, 0, 3)

+ 1 - 1
chunk/decoder.go

@@ -22,7 +22,7 @@ type chunkPartDecoder struct {
 	r io.Reader
 }
 
-// db ChunkSaverStorage, email *ChunkSaverEmail, part int)
+// db Storage, email *Email, part int)
 /*
 
 r, err := NewChunkMailReader(db, email, part)

+ 5 - 5
chunk/processor.go

@@ -63,9 +63,9 @@ func Chunksaver() *backends.StreamDecorator {
 		func(sp backends.StreamProcessor, a ...interface{}) backends.StreamProcessor {
 			var (
 				envelope    *mail.Envelope
-				chunkBuffer *ChunkedBytesBufferMime
+				chunkBuffer *ChunkingBufferMime
 				msgPos      uint
-				database    ChunkSaverStorage
+				database    Storage
 				written     int64
 
 				// just some headers from the first mime-part
@@ -79,10 +79,10 @@ func Chunksaver() *backends.StreamDecorator {
 			var config *ChunkSaverConfig
 			// optional dependency injection
 			for i := range a {
-				if db, ok := a[i].(ChunkSaverStorage); ok {
+				if db, ok := a[i].(Storage); ok {
 					database = db
 				}
-				if buff, ok := a[i].(*ChunkedBytesBufferMime); ok {
+				if buff, ok := a[i].(*ChunkingBufferMime); ok {
 					chunkBuffer = buff
 				}
 			}
@@ -101,7 +101,7 @@ func Chunksaver() *backends.StreamDecorator {
 				// configure storage if none was injected
 				if database == nil {
 					if config.StorageEngine == "memory" {
-						db := new(ChunkSaverMemory)
+						db := new(StoreMemory)
 						db.CompressLevel = config.CompressLevel
 						database = db
 					} else {

+ 7 - 7
chunk/reader.go

@@ -7,8 +7,8 @@ import (
 )
 
 type chunkMailReader struct {
-	db    ChunkSaverStorage
-	email *ChunkSaverEmail
+	db    Storage
+	email *Email
 	// part requests a part. If 0, all the parts are read sequentially
 	part int
 	i, j int
@@ -18,7 +18,7 @@ type chunkMailReader struct {
 
 // NewChunkMailReader loads the email and selects which mime-part Read will read, starting from 1
 // if part is 0, Read will read in the entire message. 1 selects the first part, 2 2nd, and so on..
-func NewChunkMailReader(db ChunkSaverStorage, email *ChunkSaverEmail, part int) (*chunkMailReader, error) {
+func NewChunkMailReader(db Storage, email *Email, part int) (*chunkMailReader, error) {
 	r := new(chunkMailReader)
 	r.db = db
 	r.part = part
@@ -50,9 +50,9 @@ func (r *chunkMailReader) SeekPart(part int) error {
 }
 
 type cachedChunks struct {
-	chunks    []*ChunkSaverChunk
+	chunks    []*Chunk
 	hashIndex map[int]HashKey
-	db        ChunkSaverStorage
+	db        Storage
 }
 
 const chunkCachePreload = 2
@@ -64,7 +64,7 @@ func (c *cachedChunks) warm(hashes ...HashKey) (int, error) {
 		c.hashIndex = make(map[int]HashKey, len(hashes))
 	}
 	if c.chunks == nil {
-		c.chunks = make([]*ChunkSaverChunk, 0, 100)
+		c.chunks = make([]*Chunk, 0, 100)
 	}
 	if len(c.chunks) > 0 {
 		// already been filled
@@ -93,7 +93,7 @@ func (c *cachedChunks) warm(hashes ...HashKey) (int, error) {
 
 // get returns a chunk. If the chunk doesn't exist, it gets it and pre-loads the next few
 // also removes the previous chunks that now have become stale
-func (c *cachedChunks) get(i int) (*ChunkSaverChunk, error) {
+func (c *cachedChunks) get(i int) (*Chunk, error) {
 	if i > len(c.chunks) {
 		return nil, errors.New("not enough chunks")
 	}

+ 7 - 7
chunk/s_chunksaver_test.go

@@ -13,7 +13,7 @@ import (
 func TestChunkedBytesBuffer(t *testing.T) {
 	var in string
 
-	var buf chunkedBytesBuffer
+	var buf chunkingBuffer
 	buf.CapTo(64)
 
 	// the data to write is over-aligned
@@ -24,7 +24,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is aligned
-	var buf2 chunkedBytesBuffer
+	var buf2 chunkingBuffer
 	buf2.CapTo(64)
 	in = `123456789012345678901234567890123456789012345678901234567890abcde12345678901234567890123456789012345678901234567890123456789abcd` // len == 128
 	i, _ = buf2.Write([]byte(in[:]))
@@ -33,7 +33,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is under-aligned
-	var buf3 chunkedBytesBuffer
+	var buf3 chunkingBuffer
 	buf3.CapTo(64)
 	in = `123456789012345678901234567890123456789012345678901234567890abcde12345678901234567890123456789012345678901234567890123456789ab` // len == 126
 	i, _ = buf3.Write([]byte(in[:]))
@@ -42,7 +42,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 
 	// the data to write is smaller than the buffer
-	var buf4 chunkedBytesBuffer
+	var buf4 chunkingBuffer
 	buf4.CapTo(64)
 	in = `1234567890` // len == 10
 	i, _ = buf4.Write([]byte(in[:]))
@@ -52,7 +52,7 @@ func TestChunkedBytesBuffer(t *testing.T) {
 
 	// what if the buffer already contains stuff before Write is called
 	// and the buffer len is smaller than the len of the slice of bytes we pass it?
-	var buf5 chunkedBytesBuffer
+	var buf5 chunkingBuffer
 	buf5.CapTo(5)
 	buf5.buf = append(buf5.buf, []byte{'a', 'b', 'c'}...)
 	in = `1234567890` // len == 10
@@ -346,7 +346,7 @@ func TestChunkSaverWrite(t *testing.T) {
 	e.RcptTo = append(e.RcptTo, to)
 	e.MailFrom, _ = mail.NewAddress("[email protected]")
 
-	store := new(ChunkSaverMemory)
+	store := new(StoreMemory)
 	chunkBuffer := NewChunkedBytesBufferMime()
 	//chunkBuffer.setDatabase(store)
 	// instantiate the chunk saver
@@ -357,7 +357,7 @@ func TestChunkSaverWrite(t *testing.T) {
 	// and chain it with mimeanalyzer.
 	// Call order: mimeanalyzer -> chunksaver -> default (terminator)
 	// This will also set our Open, Close and Initialize functions
-	// we also inject a ChunkSaverStorage and a ChunkedBytesBufferMime
+	// we also inject a Storage and a ChunkingBufferMime
 
 	stream := mimeanalyzer.Decorate(chunksaver.Decorate(backends.DefaultStreamProcessor{}, store, chunkBuffer))
 

+ 7 - 7
chunk/store.go

@@ -7,8 +7,8 @@ import (
 	"time"
 )
 
-// ChunkSaverStorage defines an interface to the storage layer (the database)
-type ChunkSaverStorage interface {
+// Storage defines an interface to the storage layer (the database)
+type Storage interface {
 	// OpenMessage is used to begin saving an email. An email id is returned and used to call CloseMessage later
 	OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error)
 	// CloseMessage finalizes the writing of an email. Additional data collected while parsing the email is saved
@@ -16,17 +16,17 @@ type ChunkSaverStorage interface {
 	// AddChunk saves a chunk of bytes to a given hash key
 	AddChunk(data []byte, hash []byte) error
 	// GetEmail returns an email that's been saved
-	GetEmail(mailID uint64) (*ChunkSaverEmail, error)
+	GetEmail(mailID uint64) (*Email, error)
 	// GetChunks loads in the specified chunks of bytes from storage
-	GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error)
+	GetChunks(hash ...HashKey) ([]*Chunk, error)
 	// Initialize is called when the backend is started
 	Initialize(cfg backends.BackendConfig) error
 	// Shutdown is called when the backend gets shutdown.
 	Shutdown() (err error)
 }
 
-// ChunkSaverEmail represents an email
-type ChunkSaverEmail struct {
+// Email represents an email
+type Email struct {
 	mailID     uint64
 	createdAt  time.Time
 	size       int64
@@ -43,7 +43,7 @@ type ChunkSaverEmail struct {
 	isTLS      bool       // isTLS is true when TLS was used to connect
 }
 
-type ChunkSaverChunk struct {
+type Chunk struct {
 	modifiedAt     time.Time
 	referenceCount uint // referenceCount counts how many emails reference this chunk
 	data           io.Reader

+ 26 - 26
chunk/store_memory.go

@@ -9,15 +9,15 @@ import (
 	"time"
 )
 
-type ChunkSaverMemory struct {
-	chunks        map[HashKey]*chunkSaverMemoryChunk
-	emails        []*chunkSaverMemoryEmail
+type StoreMemory struct {
+	chunks        map[HashKey]*memoryChunk
+	emails        []*memoryEmail
 	nextID        uint64
 	IDOffset      uint64
 	CompressLevel int
 }
 
-type chunkSaverMemoryEmail struct {
+type memoryEmail struct {
 	mailID     uint64
 	createdAt  time.Time
 	size       int64
@@ -34,21 +34,21 @@ type chunkSaverMemoryEmail struct {
 	isTLS      bool
 }
 
-type chunkSaverMemoryChunk struct {
+type memoryChunk struct {
 	modifiedAt     time.Time
 	referenceCount uint
 	data           []byte
 }
 
-// OpenMessage implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
+// OpenMessage implements the Storage interface
+func (m *StoreMemory) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
 	var ip4, ip6 net.IPAddr
 	if ip := ipAddress.IP.To4(); ip != nil {
 		ip4 = ipAddress
 	} else {
 		ip6 = ipAddress
 	}
-	email := chunkSaverMemoryEmail{
+	email := memoryEmail{
 		mailID:     m.nextID,
 		createdAt:  time.Now(),
 		from:       from,
@@ -64,8 +64,8 @@ func (m *ChunkSaverMemory) OpenMessage(from string, helo string, recipient strin
 	return email.mailID, nil
 }
 
-// CloseMessage implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
+// CloseMessage implements the Storage interface
+func (m *StoreMemory) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
 	if email := m.emails[mailID-m.IDOffset]; email == nil {
 		return errors.New("email not found")
 	} else {
@@ -84,8 +84,8 @@ func (m *ChunkSaverMemory) CloseMessage(mailID uint64, size int64, partsInfo *Pa
 	return nil
 }
 
-// AddChunk implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
+// AddChunk implements the Storage interface
+func (m *StoreMemory) AddChunk(data []byte, hash []byte) error {
 	var key HashKey
 	if len(hash) != hashByteSize {
 		return errors.New("invalid hash")
@@ -108,7 +108,7 @@ func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
 			return err
 		}
 		// add a new chunk
-		newChunk := chunkSaverMemoryChunk{
+		newChunk := memoryChunk{
 			modifiedAt:     time.Now(),
 			referenceCount: 1,
 			data:           compressed.Bytes(),
@@ -118,25 +118,25 @@ func (m *ChunkSaverMemory) AddChunk(data []byte, hash []byte) error {
 	return nil
 }
 
-// Initialize implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) Initialize(cfg backends.BackendConfig) error {
+// Initialize implements the Storage interface
+func (m *StoreMemory) Initialize(cfg backends.BackendConfig) error {
 	m.IDOffset = 1
 	m.nextID = m.IDOffset
-	m.emails = make([]*chunkSaverMemoryEmail, 0, 100)
-	m.chunks = make(map[HashKey]*chunkSaverMemoryChunk, 1000)
+	m.emails = make([]*memoryEmail, 0, 100)
+	m.chunks = make(map[HashKey]*memoryChunk, 1000)
 	m.CompressLevel = zlib.NoCompression
 	return nil
 }
 
-// Shutdown implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) Shutdown() (err error) {
+// Shutdown implements the Storage interface
+func (m *StoreMemory) Shutdown() (err error) {
 	m.emails = nil
 	m.chunks = nil
 	return nil
 }
 
-// GetEmail implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
+// GetEmail implements the Storage interface
+func (m *StoreMemory) GetEmail(mailID uint64) (*Email, error) {
 	if size := uint64(len(m.emails)) - m.IDOffset; size > mailID-m.IDOffset {
 		return nil, errors.New("mail not found")
 	}
@@ -145,7 +145,7 @@ func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
 	if err := pi.UnmarshalJSONZlib(email.partsInfo); err != nil {
 		return nil, err
 	}
-	return &ChunkSaverEmail{
+	return &Email{
 		mailID:     email.mailID,
 		createdAt:  email.createdAt,
 		size:       email.size,
@@ -163,9 +163,9 @@ func (m *ChunkSaverMemory) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
 	}, nil
 }
 
-// GetChunk implements the ChunkSaverStorage interface
-func (m *ChunkSaverMemory) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error) {
-	result := make([]*ChunkSaverChunk, 0, len(hash))
+// GetChunks implements the Storage interface
+func (m *StoreMemory) GetChunks(hash ...HashKey) ([]*Chunk, error) {
+	result := make([]*Chunk, 0, len(hash))
 	var key HashKey
 	for i := range hash {
 		key = hash[i]
@@ -174,7 +174,7 @@ func (m *ChunkSaverMemory) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error
 			if err != nil {
 				return nil, err
 			}
-			result = append(result, &ChunkSaverChunk{
+			result = append(result, &Chunk{
 				modifiedAt:     c.modifiedAt,
 				referenceCount: c.referenceCount,
 				data:           zwr,

+ 15 - 15
chunk/store_sql.go

@@ -8,7 +8,7 @@ import (
 	"net"
 )
 
-type chunkSaverSQLConfig struct {
+type sqlConfig struct {
 	EmailTable  string `json:"chunksaver_email_table,omitempty"`
 	ChunkTable  string `json:"chunksaver_chunk_table,omitempty"`
 	Driver      string `json:"chunksaver_sql_driver,omitempty"`
@@ -16,9 +16,9 @@ type chunkSaverSQLConfig struct {
 	PrimaryHost string `json:"chunksaver_primary_mail_host,omitempty"`
 }
 
-// ChunkSaverSQL implements the ChunkSaverStorage interface
+// ChunkSaverSQL implements the Storage interface
 type ChunkSaverSQL struct {
-	config     *chunkSaverSQLConfig
+	config     *sqlConfig
 	statements map[string]*sql.Stmt
 	db         *sql.DB
 }
@@ -120,7 +120,7 @@ func (c *ChunkSaverSQL) prepareSql() error {
 	return nil
 }
 
-// OpenMessage implements the ChunkSaverStorage interface
+// OpenMessage implements the Storage interface
 func (c *ChunkSaverSQL) OpenMessage(from string, helo string, recipient string, ipAddress net.IPAddr, returnPath string, isTLS bool) (mailID uint64, err error) {
 
 	// if it's ipv4 then we want ipv6 to be 0, and vice-versa
@@ -142,7 +142,7 @@ func (c *ChunkSaverSQL) OpenMessage(from string, helo string, recipient string,
 	return uint64(id), err
 }
 
-// AddChunk implements the ChunkSaverStorage interface
+// AddChunk implements the Storage interface
 func (c *ChunkSaverSQL) AddChunk(data []byte, hash []byte) error {
 	// attempt to increment the reference_count (it means the chunk is already in there)
 	r, err := c.statements["chunkReferenceIncr"].Exec(hash)
@@ -163,7 +163,7 @@ func (c *ChunkSaverSQL) AddChunk(data []byte, hash []byte) error {
 	return nil
 }
 
-// CloseMessage implements the ChunkSaverStorage interface
+// CloseMessage implements the Storage interface
 func (c *ChunkSaverSQL) CloseMessage(mailID uint64, size int64, partsInfo *PartsInfo, subject string, deliveryID string, to string, from string) error {
 	partsInfoJson, err := json.Marshal(partsInfo)
 	if err != nil {
@@ -178,12 +178,12 @@ func (c *ChunkSaverSQL) CloseMessage(mailID uint64, size int64, partsInfo *Parts
 
 // Initialize loads the specific database config, connects to the db, prepares statements
 func (c *ChunkSaverSQL) Initialize(cfg backends.BackendConfig) error {
-	configType := backends.BaseConfig(&chunkSaverSQLConfig{})
+	configType := backends.BaseConfig(&sqlConfig{})
 	bcfg, err := backends.Svc.ExtractConfig(cfg, configType)
 	if err != nil {
 		return err
 	}
-	c.config = bcfg.(*chunkSaverSQLConfig)
+	c.config = bcfg.(*sqlConfig)
 	c.db, err = c.connect()
 	if err != nil {
 		return err
@@ -195,7 +195,7 @@ func (c *ChunkSaverSQL) Initialize(cfg backends.BackendConfig) error {
 	return nil
 }
 
-// Shutdown implements the ChunkSaverStorage interface
+// Shutdown implements the Storage interface
 func (c *ChunkSaverSQL) Shutdown() (err error) {
 	defer func() {
 		closeErr := c.db.Close()
@@ -212,13 +212,13 @@ func (c *ChunkSaverSQL) Shutdown() (err error) {
 	return err
 }
 
-// GetEmail implements the ChunkSaverStorage interface
-func (c *ChunkSaverSQL) GetEmail(mailID uint64) (*ChunkSaverEmail, error) {
-	return &ChunkSaverEmail{}, nil
+// GetEmail implements the Storage interface
+func (c *ChunkSaverSQL) GetEmail(mailID uint64) (*Email, error) {
+	return &Email{}, nil
 }
 
-// GetChunk implements the ChunkSaverStorage interface
-func (c *ChunkSaverSQL) GetChunks(hash ...HashKey) ([]*ChunkSaverChunk, error) {
-	result := make([]*ChunkSaverChunk, 0, len(hash))
+// GetChunks implements the Storage interface
+func (c *ChunkSaverSQL) GetChunks(hash ...HashKey) ([]*Chunk, error) {
+	result := make([]*Chunk, 0, len(hash))
 	return result, nil
 }