Browse Source

decoder wip

flashmob 6 years ago
parent
commit
392dbf8c4b
2 changed files with 94 additions and 5 deletions
  1. 68 2
      backends/s_chunksaver.go
  2. 26 3
      backends/s_chunksaver_test.go

+ 68 - 2
backends/s_chunksaver.go

@@ -45,8 +45,8 @@ import (
type chunkSaverConfig struct {
	// ChunkMaxBytes controls the maximum buffer size for saving
	// 16KB default.
	ChunkMaxBytes int `json:"chunksaver_chunk_size,omitempty"`
	// StorageEngine selects which ChunkSaverStorage backend is used
	// to persist chunks.
	StorageEngine string `json:"chunksaver_storage_engine,omitempty"`
	// CompressLevel sets the compression level applied to stored chunks.
	CompressLevel int `json:"chunksaver_compress_level,omitempty"`
}
 
@@ -824,6 +824,72 @@ func (r *chunkMailReader) Read(p []byte) (n int, err error) {
 	return n, err
 }
 
// chunkPartDecoder decodes base64 and quoted-printable transfer encodings,
// then converts the part's charset to UTF-8.
// NOTE(review): work in progress — only the header-scanning state machine
// exists in Read so far; no actual decoding or charset conversion yet.
type chunkPartDecoder struct {
	*chunkMailReader
	// buf is a scratch buffer that Read fills from the underlying reader.
	buf []byte
	// state tracks progress of the header scan in Read
	// (0 = searching, 1 = in header, 2/3 = header located).
	state int
}
+
+func NewChunkPartDecoder(db ChunkSaverStorage, email *ChunkSaverEmail, part int) (*chunkPartDecoder, error) {
+	r, err := NewChunkMailReader(db, email, part)
+	if err != nil {
+		return nil, err
+	}
+	decoder := new(chunkPartDecoder)
+	decoder.chunkMailReader = r
+	return decoder, nil
+}
+
+const chunkSaverNL = '\n'
+
+func (r *chunkPartDecoder) Read(p []byte) (n int, err error) {
+	var part *ChunkedPart
+	//if cap(p) != cap(r.buf) {
+	r.buf = make([]byte, len(p), cap(p))
+	//} else {
+	//	r.buf = r.buf[:0] // length back to 0
+	//}
+	part = &r.email.partsInfo.Parts[r.part]
+	_ = part
+	var offset int
+
+	for {
+		n, err = r.chunkMailReader.Read(r.buf)
+		if n == 0 {
+			return
+		}
+		switch r.state {
+		case 0:
+			// finding the start of the header
+			if i := bytes.IndexByte(r.buf, chunkSaverNL); i != -1 {
+				if i+1 < len(r.buf) {
+					if r.buf[i+1] == chunkSaverNL {
+						r.state = 3 // found the header
+					}
+				}
+				r.state = 1
+			}
+			// a new []byte will be loaded on next iteration
+
+		case 1:
+
+			if i := bytes.Index(r.buf, []byte("\n")); i != -1 {
+				r.state = 2
+			}
+		}
+		//offset++
+		if offset > len(p) {
+			break
+		}
+	}
+
+	return
+
+	//if r.email.partsInfo.Parts[r.part].
+
+}
+
 const chunkMaxBytes = 1024 * 16 // 16Kb is the default, change using chunksaver_chunk_size config setting
 /**
 *

+ 26 - 3
backends/s_chunksaver_test.go

@@ -204,15 +204,38 @@ func TestChunkSaverWrite(t *testing.T) {
 
 		// this should read all parts
 		r, err := NewChunkMailReader(store, email, 0)
-		io.Copy(os.Stdout, r)
+		if w, err := io.Copy(os.Stdout, r); err != nil {
+			t.Error(err)
+		} else if w != email.size {
+			t.Error("email.size != number of bytes copied from reader")
+		}
 
 		// test the seek feature
 		r, err = NewChunkMailReader(store, email, 1)
+		if err != nil {
+			t.Error(err)
+			t.FailNow()
+		}
+		// we start from 1 because if the start from 0, all the parts will be read
 		for i := 1; i < len(email.partsInfo.Parts); i++ {
 			fmt.Println("seeking to", i)
-			r.SeekPart(i)
-			io.Copy(os.Stdout, r)
+			err = r.SeekPart(i)
+			if err != nil {
+				t.Error(err)
+			}
+			w, err := io.Copy(os.Stdout, r)
+			if err != nil {
+				t.Error(err)
+			}
+			if w != int64(email.partsInfo.Parts[i].Size) {
+				t.Error("incorrect size, expecting", email.partsInfo.Parts[i].Size, "but read:", w)
+			}
 		}
 
+		dr, err := NewChunkPartDecoder(store, email, 5)
+		_ = dr
+		var decoded bytes.Buffer
+		io.Copy(&decoded, dr)
+
 	}
 }