Browse Source

milestone: email transforming working

flashmob 5 years ago
parent
commit
276f2e433e
7 changed files with 231 additions and 89 deletions
  1. 92 32
      backends/s_transformer.go
  2. 1 3
      chunk/buffer.go
  3. 5 4
      chunk/chunk.go
  4. 78 20
      chunk/chunk_test.go
  5. 40 23
      chunk/processor.go
  6. 9 5
      chunk/reader.go
  7. 6 2
      mail/mime/mime.go

+ 92 - 32
backends/s_transformer.go

@@ -2,11 +2,11 @@ package backends
 
 import (
 	"bytes"
-	"github.com/flashmob/go-guerrilla/chunk/transfer"
 	"io"
 	"regexp"
 	"sync"
 
+	"github.com/flashmob/go-guerrilla/chunk/transfer"
 	"github.com/flashmob/go-guerrilla/mail"
 	"github.com/flashmob/go-guerrilla/mail/mime"
 )
@@ -75,15 +75,17 @@ func (t *Transform) unswap() {
 
 var regexpCharset = regexp.MustCompile("(?i)charset=\"?(.+)\"?") // (?i) is a flag for case-insensitive
 
-// todo: we may optimize this by looking at t.partsCachedOriginal, implement a Reader for it, re-write the header as we read from it
-
-func (t *Transform) ReWrite(b []byte, last bool) (count int, err error) {
+func (t *Transform) ReWrite(b []byte, last bool, offset uint) (count int, err error) {
 	defer func() {
 		count = len(b)
 	}()
 	if !t.isBody {
+		// Header re-write, how it works
 		// we place the partial header's bytes on a buffer from which we can read one line at a time
-		// then we match and replace the lines we want
+		// then we match and replace the lines we want, output replaced live.
+		// The following re-writes are made:
+		// - base64 => 8bit
+		// - supported non-utf8 charset => utf8
 		if i, err := io.Copy(&t.buf, bytes.NewReader(b)); err != nil {
 			return int(i), err
 		}
@@ -130,10 +132,18 @@ func (t *Transform) ReWrite(b []byte, last bool) (count int, err error) {
 	} else {
 
 		if ct := t.current.ContentType.Supertype(); ct == "multipart" || ct == "message" {
+			_, err = io.Copy(t.parser, bytes.NewReader(b))
 			return
 		}
 
-		// do body decode here
+		// Body Decode, how it works:
+		// First, the decoder is setup, depending on the source encoding type.
+		// Next, since the decoder is an io.Reader, we need to use a pipe to connect it.
+		// Subsequent calls write to the pipe in a goroutine and the parent-thread copies the result to the output stream
+		// The routine stops feeding the decoder data at EndingPosBody; nothing after that point is decoded,
+		// but the un-decoded remainder is still written to the output.
+		// The decoder is destroyed at the end of the body (when last == true)
+
 		t.pr, t.pw = io.Pipe()
 		if t.decoder == nil {
 			t.buf.Reset()
@@ -156,10 +166,22 @@ func (t *Transform) ReWrite(b []byte, last bool) (count int, err error) {
 		wg := sync.WaitGroup{}
 		wg.Add(1)
 
+		// out is the slice that will be decoded
+		var out []byte
+		// remainder will not be decoded. Typically, this contains the boundary marker, and we want to preserve it
+		var remainder []byte
+		if t.current.EndingPosBody > 0 {
+			size := t.current.EndingPosBody - t.current.StartingPosBody - 1 // -1 since we do not want \n
+			out = b[:size]
+			remainder = b[size:]
+		} else {
+			// use the entire slice
+			out = b
+		}
 		go func() {
 			// stream our slice to the pipe
 			defer wg.Done()
-			_, pRrr := io.Copy(t.pw, bytes.NewReader(b))
+			_, pRrr := io.Copy(t.pw, bytes.NewReader(out))
 			if pRrr != nil {
 				_ = t.pw.CloseWithError(err)
 				return
@@ -170,13 +192,24 @@ func (t *Transform) ReWrite(b []byte, last bool) (count int, err error) {
 		var i int64
 		i, err = io.Copy(t.parser, t.decoder)
 		// wait for the pipe to finish
-		_ = i
 		wg.Wait()
 		_ = t.pr.Close()
 
 		if last {
 			t.decoder = nil
 		}
+		count += int(i)
+		if err != nil {
+			return
+		}
+		// flush any remainder
+		if len(remainder) > 0 {
+			i, err = io.Copy(t.parser, bytes.NewReader(remainder))
+			count += int(i)
+			if err != nil {
+				return
+			}
+		}
 	}
 	return count, err
 }
@@ -200,22 +233,32 @@ func Transformer() *StreamDecorator {
 		return nil
 	}))
 
-	var msgPos uint
-	var progress int
+	var (
+		msgPos   uint
+		progress int
+	)
 	reWriter := Transform{}
 
 	sd := &StreamDecorator{}
 	sd.Decorate =
 
 		func(sp StreamProcessor, a ...interface{}) StreamProcessor {
-			var envelope *mail.Envelope
+			var (
+				envelope *mail.Envelope
+				// total is the total number of bytes written
+				total int64
+				// pos tracks the current position of the output slice
+				pos int
+				// written is the number of bytes written out in this call
+				written int
+			)
+
 			if reWriter.sp == nil {
 				reWriter.sp = sp
 			}
 
 			sd.Open = func(e *mail.Envelope) error {
 				envelope = e
-				_ = envelope
 				if reWriter.parser == nil {
 					reWriter.parser = mime.NewMimeParserWriter(sp)
 					reWriter.parser.Open()
@@ -224,51 +267,68 @@ func Transformer() *StreamDecorator {
 				return nil
 			}
 
+			sd.Close = func() error {
+				total = 0
+				return reWriter.parser.Close()
+			}
+
+			end := func(part *mime.Part, offset uint, p []byte, start uint) (int, error) {
+				var err error
+				var count int
+
+				count, err = reWriter.ReWrite(p[pos:start-offset], true, offset)
+
+				written += count
+				if err != nil {
+					return count, err
+				}
+				reWriter.current = part
+				pos += count
+				return count, nil
+			}
+
 			return StreamProcessWith(func(p []byte) (count int, err error) {
-				var total int
+				pos = 0
+				written = 0
 				if parts, ok := envelope.Values["MimeParts"].(*mime.Parts); ok && len(*parts) > 0 {
 
 					// we are going to change envelope.Values["MimeParts"] to our own copy with our own counts
 					envelope.Values["MimeParts"] = reWriter.swap()
-					defer reWriter.unswap()
-					var pos int
+					defer func() {
+						reWriter.unswap()
+						total += int64(written)
+					}()
 
 					offset := msgPos
 					reWriter.current = (*parts)[0]
 					for i := progress; i < len(*parts); i++ {
 						part := (*parts)[i]
-
 						// break chunk on new part
-						if part.StartingPos > 0 && part.StartingPos > msgPos {
-							cbLen := len(part.ContentBoundary) + 3
-							count, err = reWriter.ReWrite(p[pos:part.StartingPos-offset-uint(cbLen)], true)
-
-							total += count
+						if part.StartingPos > 0 && part.StartingPos >= msgPos {
+							count, err = end(part, offset, p, part.StartingPos)
 							if err != nil {
 								break
 							}
-							reWriter.current = part
-							pos += count
 							msgPos = part.StartingPos
 							reWriter.isBody = false
+
 						}
 						// break chunk on header (found the body)
 						if part.StartingPosBody > 0 && part.StartingPosBody >= msgPos {
-							count, err = reWriter.ReWrite(p[pos:part.StartingPosBody-offset], true)
-							total += count
+							count, err = end(part, offset, p, part.StartingPosBody)
 							if err != nil {
 								break
 							}
-							_, _ = reWriter.parser.Write([]byte{'\n'}) // send an end of header to the parser
 							reWriter.isBody = true
-							reWriter.current = part
-							pos += count
-							msgPos = part.StartingPosBody
+							msgPos += uint(count)
+
 						}
+
 						// if on the latest (last) part, and yet there is still data to be written out
 						if len(*parts)-1 == i && len(p)-1 > pos {
-							count, err = reWriter.ReWrite(p[pos:], false)
-							total += count
+							count, err = reWriter.ReWrite(p[pos:], false, offset)
+
+							written += count
 							if err != nil {
 								break
 							}
@@ -286,7 +346,7 @@ func Transformer() *StreamDecorator {
 				}
 				// note that in this case, ReWrite method will output the stream to further processors down the line
 				// here we just return back with the result
-				return total, err
+				return written, err
 			})
 		}
 	return sd

+ 1 - 3
chunk/buffer.go

@@ -3,7 +3,6 @@ package chunk
 import (
 	"crypto/md5"
 	"errors"
-	"fmt"
 	"hash"
 	"strings"
 
@@ -22,7 +21,6 @@ func (c *chunkingBuffer) Flush() error {
 	if len(c.buf) == 0 {
 		return nil
 	}
-	fmt.Print(string(c.buf))
 	if c.flushTrigger != nil {
 		if err := c.flushTrigger(); err != nil {
 			return err
@@ -40,7 +38,7 @@ func (c *chunkingBuffer) Reset() {
 // Write takes a p slice of bytes and writes it to the buffer.
 // It will never grow the buffer, flushing it as soon as it's full.
 func (c *chunkingBuffer) Write(p []byte) (i int, err error) {
-	remaining := len(p)
+	remaining := len(p) // number of bytes remaining to write
 	bufCap := cap(c.buf)
 	for {
 		free := bufCap - len(c.buf)

+ 5 - 4
chunk/chunk.go

@@ -54,9 +54,10 @@ type PartsInfo struct {
 	Parts       []ChunkedPart `json:"p"`   // info describing a mime-part
 	CBoundaries []string      `json:"cbl"` // content boundaries list
 
-	bp sync.Pool // bytes.buffer pool
 }
 
+var bp sync.Pool // bytes.buffer pool
+
 // ChunkedPart contains header information about a mime-part, including keys pointing to where the data is stored at
 type ChunkedPart struct {
 	PartId             string    `json:"i"`
@@ -71,7 +72,7 @@ type ChunkedPart struct {
 
 func NewPartsInfo() *PartsInfo {
 	pi := new(PartsInfo)
-	pi.bp = sync.Pool{
+	bp = sync.Pool{
 		// if not available, then create a new one
 		New: func() interface{} {
 			var b bytes.Buffer
@@ -118,11 +119,11 @@ func (info *PartsInfo) MarshalJSONZlib() ([]byte, error) {
 		return buf, err
 	}
 	// borrow a buffer from the pool
-	compressed := info.bp.Get().(*bytes.Buffer)
+	compressed := bp.Get().(*bytes.Buffer)
 	// put back in the pool
 	defer func() {
 		compressed.Reset()
-		info.bp.Put(compressed)
+		bp.Put(compressed)
 	}()
 
 	zlibw, err := zlib.NewWriterLevel(compressed, 9)

+ 78 - 20
chunk/chunk_test.go

@@ -65,6 +65,76 @@ func TestChunkedBytesBuffer(t *testing.T) {
 	}
 }
 
+var n1 = `From:  Al Gore <[email protected]>
+To:  White House Transportation Coordinator <[email protected]>
+Subject: [Fwd: Map of Argentina with Description]
+MIME-Version: 1.0
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed; s=ncr424; d=reliancegeneral.co.in;
+ h=List-Unsubscribe:MIME-Version:From:To:Reply-To:Date:Subject:Content-Type:Content-Transfer-Encoding:Message-ID; [email protected];
+ bh=F4UQPGEkpmh54C7v3DL8mm2db1QhZU4gRHR1jDqffG8=;
+ b=MVltcq6/I9b218a370fuNFLNinR9zQcdBSmzttFkZ7TvV2mOsGrzrwORT8PKYq4KNJNOLBahswXf
+   GwaMjDKT/5TXzegdX/L3f/X4bMAEO1einn+nUkVGLK4zVQus+KGqm4oP7uVXjqp70PWXScyWWkbT
+   1PGUwRfPd/HTJG5IUqs=
+Content-Type: multipart/mixed;
+ boundary="D7F------------D7FD5A0B8AB9C65CCDBFA872"
+
+This is a multi-part message in MIME format.
+--D7F------------D7FD5A0B8AB9C65CCDBFA872
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 7bit
+
+Fred,
+
+Fire up Air Force One!  We're going South!
+
+Thanks,
+Al
+--D7F------------D7FD5A0B8AB9C65CCDBFA872
+Content-Type: message/rfc822
+Content-Transfer-Encoding: 7bit
+Content-Disposition: inline
+
+Return-Path: <[email protected]>
+Received: from mailhost.whitehouse.gov ([192.168.51.200])
+ by heartbeat.whitehouse.gov (8.8.8/8.8.8) with ESMTP id SAA22453
+ for <[email protected]>;
+ Mon, 13 Aug 1998 l8:14:23 +1000
+Received: from the_big_box.whitehouse.gov ([192.168.51.50])
+ by mailhost.whitehouse.gov (8.8.8/8.8.7) with ESMTP id RAA20366
+ for [email protected]; Mon, 13 Aug 1998 17:42:41 +1000
+ Date: Mon, 13 Aug 1998 17:42:41 +1000
+Message-Id: <[email protected]>
+From: Bill Clinton <[email protected]>
+To: A1 (The Enforcer) Gore <[email protected]>
+Subject:  Map of Argentina with Description
+MIME-Version: 1.0
+Content-Type: multipart/mixed;
+ boundary="DC8------------DC8638F443D87A7F0726DEF7"
+
+This is a multi-part message in MIME format.
+--DC8------------DC8638F443D87A7F0726DEF7
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 7bit
+
+Hi A1,
+
+I finally figured out this MIME thing.  Pretty cool.  I'll send you
+some sax music in .au files next week!
+
+Anyway, the attached image is really too small to get a good look at
+Argentina.  Try this for a much better map:
+
+http://www.1one1yp1anet.com/dest/sam/graphics/map-arg.htm
+
+Then again, shouldn't the CIA have something like that?
+
+Bill
+--DC8------------DC8638F443D87A7F0726DEF7
+Content-Type: image/png; name="three.png"
+Content-Transfer-Encoding: 8bit
+
+`
+
 var email = `From:  Al Gore <[email protected]>
 To:  White House Transportation Coordinator <[email protected]>
 Subject: [Fwd: Map of Argentina with Description]
@@ -130,24 +200,12 @@ Then again, shouldn't the CIA have something like that?
 
 Bill
 --DC8------------DC8638F443D87A7F0726DEF7
-Content-Type: image/gif; name="map_of_Argentina.gif"
+Content-Type: image/gif; name="three.gif"
 Content-Transfer-Encoding: base64
-Content-Disposition: attachment; filename="map_of_Argentina.gif"
-
-iVBORw0KGgoAAAANSUhEUgAAAG4AAAAyCAIAAAAydXkgAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAA
-B3RJTUUH1gYEExgGfYkXIAAAAAd0RVh0QXV0aG9yAKmuzEgAAAAMdEVYdERlc2NyaXB0aW9uABMJ
-ISMAAAAKdEVYdENvcHlyaWdodACsD8w6AAAADnRFWHRDcmVhdGlvbiB0aW1lADX3DwkAAAAJdEVY
-dFNvZnR3YXJlAF1w/zoAAAALdEVYdERpc2NsYWltZXIAt8C0jwAAAAh0RVh0V2FybmluZwDAG+aH
-AAAAB3RFWHRTb3VyY2UA9f+D6wAAAAh0RVh0Q29tbWVudAD2zJa/AAAABnRFWHRUaXRsZQCo7tIn
-AAABAElEQVR4nO2ZUY6DIBCG66YH88FGvQLHEI+hHsMriPFw7AMJIYAwoO269v+eSDPDmKn5HOXx
-AAAAAAAAAPxblmWRZJZlSU3RCCE451Z1IUQ00c1ScM7p15zHT1J0URSpwUkpmrquh2HY60uA1+vl
-/b2qKkp63tUCcA8otrK8k+dKr7+I1V0tEEUppRRCZDcnzZUZHLdP6g6uFomiBACYeHUTTnF9ZwV4
-3dp1HaW0V5dRUR6ZJU3e7kqLaK+9ZpymKamKOV3uTZrhigCAU1wZhV7aRE2IlKn2tq60WNeVHtz3
-vV7Xdc05b5pmL0ADVwLg5QOu3BNZhhxVwH1cmYoluwDqX2zbj2bPFgAAAMdJREFUNnUruBIALxmu
-dF1mBXhlSimtPzW6O5hfIQOJB7mcK72NSzrk2bYt+ku0IvhL8PCKwxhTi3meT9s06aBGOSjjpduF
-Ut1UnlnUUmG4kDtj6j5aa5c3noOfhX4ND1eXhvJMOYZFGYYxNs8zY6wsS73O3u2rUY1jjOkOBlp5
-uSf4NTn/fsw4Bz/oSnMMCm9laU4FuzMj5ZpN6K58JrVSfnAEW9d127ZxHInVLZM2TSOlpL/C72He
-j2c+wQEAAAAAAAAAfB2/3ihTGANzPd8AAAAASUVORK5CYII=
+Content-Disposition: attachment; filename="three.gif"
+
+R0lGODlhEAAeAPAAAP///wAAACH5BAEAAAAALAAAAAAQAB4AAAIfhI+py+0Po5y0onCD3lzbD15K
+R4ZmiAHk6p3uC8dWAQA7
 --DC8------------DC8638F443D87A7F0726DEF7--
 
 --D7F------------D7FD5A0B8AB9C65CCDBFA872--
@@ -475,7 +533,7 @@ func TestChunkSaverWrite(t *testing.T) {
 	store, chunksaver, mimeanalyzer, stream := initTestStream(true)
 	var out bytes.Buffer
 	buf := make([]byte, 128)
-	if written, err := io.CopyBuffer(stream, bytes.NewBuffer([]byte(email3)), buf); err != nil {
+	if written, err := io.CopyBuffer(stream, bytes.NewBuffer([]byte(email)), buf); err != nil {
 		t.Error(err)
 	} else {
 		_ = mimeanalyzer.Close()
@@ -498,8 +556,8 @@ func TestChunkSaverWrite(t *testing.T) {
 			t.Error(err)
 		} else if w != email.size {
 			t.Error("email.size != number of bytes copied from reader", w, email.size)
-		} else if !strings.Contains(out.String(), "</html>") {
-			t.Error("The email didn't decode properly, expecting </html>")
+		} else if !strings.Contains(out.String(), "GIF89") {
+			t.Error("The email didn't decode properly, expecting GIF89")
 		}
 		out.Reset()
 

+ 40 - 23
chunk/processor.go

@@ -2,7 +2,6 @@ package chunk
 
 import (
 	"errors"
-	"fmt"
 	"net"
 
 	"github.com/flashmob/go-guerrilla/backends"
@@ -129,6 +128,9 @@ func Chunksaver() *backends.StreamDecorator {
 				return err
 			}))
 
+			var writeTo uint
+			var pos int
+
 			sd.Open = func(e *mail.Envelope) error {
 				// create a new entry & grab the id
 				written = 0
@@ -201,17 +203,44 @@ func Chunksaver() *backends.StreamDecorator {
 							}
 						}
 					}
-
 				}
 				return subject, to, from
 			}
 
+			// end() triggers a buffer flush, at the end of a header or part-boundary
+			end := func(part *mime.Part, offset uint, p []byte, start uint) (int, error) {
+				var err error
+				var count int
+				// write out any unwritten bytes
+				writeTo = start - offset
+				size := uint(len(p))
+				if writeTo > size {
+					writeTo = size
+				}
+				if writeTo > 0 {
+					count, err = chunkBuffer.Write(p[pos:writeTo])
+					written += int64(count)
+					pos += count
+					if err != nil {
+						return count, err
+					}
+				} else {
+					count = 0
+				}
+				err = chunkBuffer.Flush()
+				if err != nil {
+					return count, err
+				}
+				chunkBuffer.CurrentPart(part)
+				return count, nil
+			}
+
 			return backends.StreamProcessWith(func(p []byte) (count int, err error) {
+				pos = 0
 				if envelope.Values == nil {
 					return count, errors.New("no message headers found")
 				}
 				if parts, ok := envelope.Values["MimeParts"].(*mime.Parts); ok && len(*parts) > 0 {
-					var pos int
 
 					subject, to, from = fillVars(parts, subject, to, from)
 					offset := msgPos
@@ -220,41 +249,29 @@ func Chunksaver() *backends.StreamDecorator {
 						part := (*parts)[i]
 
 						// break chunk on new part
-						if part.StartingPos > 0 && part.StartingPos > msgPos {
-							count, _ = chunkBuffer.Write(p[pos : part.StartingPos-offset])
-							written += int64(count)
-
-							err = chunkBuffer.Flush()
+						if part.StartingPos > 0 && part.StartingPos >= msgPos {
+							count, err = end(part, offset, p, part.StartingPos)
 							if err != nil {
 								return count, err
 							}
-							chunkBuffer.CurrentPart(part)
 							// end of a part here
-							fmt.Println("->N")
-							pos += count
+							//fmt.Println("->N --end of part ---")
+
 							msgPos = part.StartingPos
 						}
 						// break chunk on header
 						if part.StartingPosBody > 0 && part.StartingPosBody >= msgPos {
-							to := part.StartingPosBody - offset
-							if lenp := len(p); int(to) > lenp {
-								to = uint(lenp)
-							}
-							count, _ = chunkBuffer.Write(p[pos:to])
-							written += int64(count)
 
-							err = chunkBuffer.Flush()
+							count, err = end(part, offset, p, part.StartingPosBody)
 							if err != nil {
 								return count, err
 							}
-							chunkBuffer.CurrentPart(part)
 							// end of a header here
-							fmt.Println("->H")
-							pos += count
-							msgPos = part.StartingPosBody
+							//fmt.Println("->H --end of header --")
+							msgPos += uint(count)
 						}
 						// if on the latest (last) part, and yet there is still data to be written out
-						if len(*parts)-1 == i && len(p)-1 > pos {
+						if len(*parts)-1 == i && len(p) > pos {
 							count, _ = chunkBuffer.Write(p[pos:])
 							written += int64(count)
 							pos += count

+ 9 - 5
chunk/reader.go

@@ -22,7 +22,6 @@ type chunkedReader struct {
 // if part is 0, Read will read in the entire message. 1 selects the first part, 2 2nd, and so on..
 func NewChunkedReader(db Storage, email *Email, part int) (*chunkedReader, error) {
 	r := new(chunkedReader)
-	fmt.Println("new reader")
 	r.db = db
 	if email == nil {
 		return nil, errors.New("nil email")
@@ -56,11 +55,16 @@ func (r *chunkedReader) SeekPart(part int) error {
 }
 
 type cachedChunks struct {
-	chunks    []*Chunk
+	// chunks stores the cached chunks. It stores the latest chunk being read
+	// and the next few chunks that are yet to be read
+	// (see the chunkCachePreload constant)
+	chunks []*Chunk
+	// hashIndex is a look-up table that returns the hash of a given index
 	hashIndex map[int]HashKey
 	db        Storage
 }
 
+// chunkCachePreload controls how many chunks to pre-load into the cache ahead of the current read position
 const chunkCachePreload = 2
 
 // warm allocates the chunk cache, and gets the first few and stores them in the cache
@@ -97,7 +101,7 @@ func (c *cachedChunks) warm(hashes ...HashKey) (int, error) {
 	return len(c.chunks), nil
 }
 
-// get returns a chunk. If the chunk doesn't exist, it gets it and pre-loads the next few
+// get returns a previously saved chunk and pre-loads the next few
 // also removes the previous chunks that now have become stale
 func (c *cachedChunks) get(i int) (*Chunk, error) {
 	if i > len(c.chunks) {
@@ -114,7 +118,7 @@ func (c *cachedChunks) get(i int) (*Chunk, error) {
 			return nil, errors.New(fmt.Sprintf("hash for key [%s] not found", key))
 		}
 		// make a list of chunks to load (extra ones to be pre-loaded)
-		for to := i + 1; to < len(c.chunks) || to > chunkCachePreload+i; to++ {
+		for to := i + 1; to < len(c.chunks) && to < chunkCachePreload+i; to++ {
 			if key, ok := c.hashIndex[to]; ok {
 				toGet = append(toGet, key)
 			}
@@ -123,7 +127,7 @@ func (c *cachedChunks) get(i int) (*Chunk, error) {
 			return nil, err
 		} else {
 			// cache the pre-loaded chunks
-			for j := i; j < len(c.chunks); j++ {
+			for j := i; j-i < len(chunks); j++ {
 				c.chunks[j] = chunks[j-i]
 				c.hashIndex[j] = toGet[j-i]
 			}

+ 6 - 2
mail/mime/mime.go

@@ -83,6 +83,8 @@ type Parser struct {
 	maxNodes int // the desired number of maximum nodes the parser is limited to
 
 	w io.Writer // underlying io.Writer
+
+	temp string
 }
 
 type Parts []*Part
@@ -114,8 +116,8 @@ type Part struct {
 	ContentType *contentType
 	// ContentBase is typically a url
 	ContentBase string
-	// DispositionFi1eName what file-nme to use for the part, eg. image.jpeg
-	DispositionFi1eName string
+	// DispositionFileName is the file-name to use for the part, eg. image.jpeg
+	DispositionFileName string
 	// ContentDisposition describes how to display the part, eg. attachment
 	ContentDisposition string
 	// ContentName as name implies
@@ -985,6 +987,8 @@ func (p *Parser) Close() error {
 }
 
 func (p *Parser) Write(buf []byte) (int, error) {
+	p.temp = p.temp + string(buf)
+
 	if err := p.Parse(buf); err != nil {
 		return len(buf), err
 	}