Browse Source

query logging wip

Ask Bjørn Hansen 9 years ago
parent
commit
0aab8e6380

+ 6 - 1
Godeps/Godeps.json

@@ -1,6 +1,6 @@
 {
 {
 	"ImportPath": "github.com/abh/geodns",
 	"ImportPath": "github.com/abh/geodns",
-	"GoVersion": "go1.6",
+	"GoVersion": "go1.7",
 	"GodepVersion": "v74",
 	"GodepVersion": "v74",
 	"Deps": [
 	"Deps": [
 		{
 		{
@@ -58,6 +58,11 @@
 			"ImportPath": "gopkg.in/gcfg.v1/types",
 			"ImportPath": "gopkg.in/gcfg.v1/types",
 			"Comment": "v1.0.0",
 			"Comment": "v1.0.0",
 			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
 			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
+		},
+		{
+			"ImportPath": "gopkg.in/natefinch/lumberjack.v2",
+			"Comment": "v1.0-21-g514cbda",
+			"Rev": "514cbda263a734ae8caac038dadf05f8f3f9f738"
 		}
 		}
 	]
 	]
 }
 }

+ 3 - 0
Makefile

@@ -4,6 +4,9 @@ all: templates.go
 templates.go: templates/*.html monitor.go
 templates.go: templates/*.html monitor.go
 	go generate
 	go generate
 
 
+test:
+	go test -race $(go list ./... | grep -v /vendor/)
+
 devel:
 devel:
 	go build -tags devel
 	go build -tags devel
 
 

+ 11 - 0
countries/countries.go

@@ -251,3 +251,14 @@ var CountryContinent = map[string]string{
 	"zm": "africa",
 	"zm": "africa",
 	"zw": "africa",
 	"zw": "africa",
 }
 }
+
+var ContinentCountries = map[string][]string{}
+
+func init() {
+	for cc, co := range CountryContinent {
+		if _, ok := ContinentCountries[co]; !ok {
+			ContinentCountries[co] = []string{}
+		}
+		ContinentCountries[co] = append(ContinentCountries[co], cc)
+	}
+}

+ 65 - 57
countries/regiongroups.go

@@ -4,72 +4,80 @@ import (
 	"log"
 	"log"
 )
 )
 
 
-func CountryRegionGroup(country, region string) string {
+var RegionGroups = map[string]string{
+	"us-ak": "us-west",
+	"us-az": "us-west",
+	"us-ca": "us-west",
+	"us-co": "us-west",
+	"us-hi": "us-west",
+	"us-id": "us-west",
+	"us-mt": "us-west",
+	"us-nm": "us-west",
+	"us-nv": "us-west",
+	"us-or": "us-west",
+	"us-ut": "us-west",
+	"us-wa": "us-west",
+	"us-wy": "us-west",
 
 
-	if country != "us" {
-		return ""
-	}
+	"us-ar": "us-central",
+	"us-ia": "us-central",
+	"us-il": "us-central",
+	"us-in": "us-central",
+	"us-ks": "us-central",
+	"us-la": "us-central",
+	"us-mn": "us-central",
+	"us-mo": "us-central",
+	"us-nd": "us-central",
+	"us-ne": "us-central",
+	"us-ok": "us-central",
+	"us-sd": "us-central",
+	"us-tx": "us-central",
+	"us-wi": "us-central",
+
+	"us-al": "us-east",
+	"us-ct": "us-east",
+	"us-dc": "us-east",
+	"us-de": "us-east",
+	"us-fl": "us-east",
+	"us-ga": "us-east",
+	"us-ky": "us-east",
+	"us-ma": "us-east",
+	"us-md": "us-east",
+	"us-me": "us-east",
+	"us-mi": "us-east",
+	"us-ms": "us-east",
+	"us-nc": "us-east",
+	"us-nh": "us-east",
+	"us-nj": "us-east",
+	"us-ny": "us-east",
+	"us-oh": "us-east",
+	"us-pa": "us-east",
+	"us-ri": "us-east",
+	"us-sc": "us-east",
+	"us-tn": "us-east",
+	"us-va": "us-east",
+	"us-vt": "us-east",
+	"us-wv": "us-east",
+}
 
 
-	regions := map[string]string{
-		"us-ak": "us-west",
-		"us-az": "us-west",
-		"us-ca": "us-west",
-		"us-co": "us-west",
-		"us-hi": "us-west",
-		"us-id": "us-west",
-		"us-mt": "us-west",
-		"us-nm": "us-west",
-		"us-nv": "us-west",
-		"us-or": "us-west",
-		"us-ut": "us-west",
-		"us-wa": "us-west",
-		"us-wy": "us-west",
+var RegionGroupRegions = map[string][]string{}
 
 
-		"us-ar": "us-central",
-		"us-ia": "us-central",
-		"us-il": "us-central",
-		"us-in": "us-central",
-		"us-ks": "us-central",
-		"us-la": "us-central",
-		"us-mn": "us-central",
-		"us-mo": "us-central",
-		"us-nd": "us-central",
-		"us-ne": "us-central",
-		"us-ok": "us-central",
-		"us-sd": "us-central",
-		"us-tx": "us-central",
-		"us-wi": "us-central",
+func CountryRegionGroup(country, region string) string {
 
 
-		"us-al": "us-east",
-		"us-ct": "us-east",
-		"us-dc": "us-east",
-		"us-de": "us-east",
-		"us-fl": "us-east",
-		"us-ga": "us-east",
-		"us-ky": "us-east",
-		"us-ma": "us-east",
-		"us-md": "us-east",
-		"us-me": "us-east",
-		"us-mi": "us-east",
-		"us-ms": "us-east",
-		"us-nc": "us-east",
-		"us-nh": "us-east",
-		"us-nj": "us-east",
-		"us-ny": "us-east",
-		"us-oh": "us-east",
-		"us-pa": "us-east",
-		"us-ri": "us-east",
-		"us-sc": "us-east",
-		"us-tn": "us-east",
-		"us-va": "us-east",
-		"us-vt": "us-east",
-		"us-wv": "us-east",
+	if country != "us" {
+		return ""
 	}
 	}
 
 
-	if group, ok := regions[region]; ok {
+	if group, ok := RegionGroups[region]; ok {
 		return group
 		return group
 	}
 	}
 
 
 	log.Printf("Did not find a region group for '%s'/'%s'", country, region)
 	log.Printf("Did not find a region group for '%s'/'%s'", country, region)
 	return ""
 	return ""
 }
 }
+
+func init() {
+	for ccrc, rg := range RegionGroups {
+		RegionGroupRegions[rg] = append(RegionGroupRegions[rg], ccrc)
+	}
+}

+ 2 - 1
geodns.go

@@ -33,7 +33,7 @@ import (
 )
 )
 
 
 // VERSION is the current version of GeoDNS
 // VERSION is the current version of GeoDNS
-var VERSION string = "2.6.0"
+var VERSION string = "2.7.0"
 var buildTime string
 var buildTime string
 var gitVersion string
 var gitVersion string
 
 
@@ -95,6 +95,7 @@ func main() {
 	}
 	}
 
 
 	srv := Server{}
 	srv := Server{}
+	srv.SetQueryLogger("log/queries.log")
 
 
 	if len(*flagLogFile) > 0 {
 	if len(*flagLogFile) > 0 {
 		logToFileOpen(*flagLogFile)
 		logToFileOpen(*flagLogFile)

+ 51 - 0
querylog/querylog.go

@@ -0,0 +1,51 @@
+package querylog
+
+import (
+	"encoding/json"
+
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+type QueryLogger interface {
+	Write(*Entry) error
+}
+
+// easyjson:json
+type Entry struct {
+	Time       int64
+	Origin     string
+	Name       string
+	Qtype      uint16
+	Rcode      int
+	Answers    int
+	Targets    []string
+	LabelName  string
+	RemoteAddr string
+	ClientAddr string
+	HasECS     bool
+}
+
+type FileLogger struct {
+	logger lumberjack.Logger
+}
+
+func NewFileLogger(filename string) (*FileLogger, error) {
+	fl := &FileLogger{}
+	fl.logger = lumberjack.Logger{
+		Filename:   filename,
+		MaxSize:    500, // megabytes
+		MaxBackups: 3,
+		MaxAge:     28, //days
+	}
+	return fl, nil
+}
+
+func (l *FileLogger) Write(e *Entry) error {
+	js, err := json.Marshal(e)
+	if err != nil {
+		return err
+	}
+	js = append(js, []byte("\n")...)
+	_, err = l.logger.Write(js)
+	return err
+}

+ 207 - 0
querylog/querylog_easyjson.go

@@ -0,0 +1,207 @@
+// AUTOGENERATED FILE: easyjson marshaller/unmarshallers.
+
+package querylog
+
+import (
+	json "encoding/json"
+	jlexer "github.com/mailru/easyjson/jlexer"
+	jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+var _ = json.RawMessage{} // suppress unused package warning
+
+func easyjson_320612c2_decode_github_com_abh_geodns_querylog_FileLogger(in *jlexer.Lexer, out *FileLogger) {
+	if in.IsNull() {
+		in.Skip()
+		return
+	}
+	in.Delim('{')
+	for !in.IsDelim('}') {
+		key := in.UnsafeString()
+		in.WantColon()
+		if in.IsNull() {
+			in.Skip()
+			in.WantComma()
+			continue
+		}
+		switch key {
+		default:
+			in.SkipRecursive()
+		}
+		in.WantComma()
+	}
+	in.Delim('}')
+}
+func easyjson_320612c2_encode_github_com_abh_geodns_querylog_FileLogger(out *jwriter.Writer, in FileLogger) {
+	out.RawByte('{')
+	first := true
+	_ = first
+	out.RawByte('}')
+}
+func (v FileLogger) MarshalJSON() ([]byte, error) {
+	w := jwriter.Writer{}
+	easyjson_320612c2_encode_github_com_abh_geodns_querylog_FileLogger(&w, v)
+	return w.Buffer.BuildBytes(), w.Error
+}
+func (v FileLogger) MarshalEasyJSON(w *jwriter.Writer) {
+	easyjson_320612c2_encode_github_com_abh_geodns_querylog_FileLogger(w, v)
+}
+func (v *FileLogger) UnmarshalJSON(data []byte) error {
+	r := jlexer.Lexer{Data: data}
+	easyjson_320612c2_decode_github_com_abh_geodns_querylog_FileLogger(&r, v)
+	return r.Error()
+}
+func (v *FileLogger) UnmarshalEasyJSON(l *jlexer.Lexer) {
+	easyjson_320612c2_decode_github_com_abh_geodns_querylog_FileLogger(l, v)
+}
+func easyjson_320612c2_decode_github_com_abh_geodns_querylog_Entry(in *jlexer.Lexer, out *Entry) {
+	if in.IsNull() {
+		in.Skip()
+		return
+	}
+	in.Delim('{')
+	for !in.IsDelim('}') {
+		key := in.UnsafeString()
+		in.WantColon()
+		if in.IsNull() {
+			in.Skip()
+			in.WantComma()
+			continue
+		}
+		switch key {
+		case "Time":
+			out.Time = int64(in.Int64())
+		case "Origin":
+			out.Origin = string(in.String())
+		case "Name":
+			out.Name = string(in.String())
+		case "Qtype":
+			out.Qtype = uint16(in.Uint16())
+		case "Rcode":
+			out.Rcode = int(in.Int())
+		case "Answers":
+			out.Answers = int(in.Int())
+		case "Targets":
+			in.Delim('[')
+			if !in.IsDelim(']') {
+				out.Targets = make([]string, 0, 4)
+			} else {
+				out.Targets = nil
+			}
+			for !in.IsDelim(']') {
+				var v1 string
+				v1 = string(in.String())
+				out.Targets = append(out.Targets, v1)
+				in.WantComma()
+			}
+			in.Delim(']')
+		case "LabelName":
+			out.LabelName = string(in.String())
+		case "RemoteAddr":
+			out.RemoteAddr = string(in.String())
+		case "ClientAddr":
+			out.ClientAddr = string(in.String())
+		case "HasECS":
+			out.HasECS = bool(in.Bool())
+		default:
+			in.SkipRecursive()
+		}
+		in.WantComma()
+	}
+	in.Delim('}')
+}
+func easyjson_320612c2_encode_github_com_abh_geodns_querylog_Entry(out *jwriter.Writer, in Entry) {
+	out.RawByte('{')
+	first := true
+	_ = first
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Time\":")
+	out.Int64(int64(in.Time))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Origin\":")
+	out.String(string(in.Origin))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Name\":")
+	out.String(string(in.Name))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Qtype\":")
+	out.Uint16(uint16(in.Qtype))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Rcode\":")
+	out.Int(int(in.Rcode))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Answers\":")
+	out.Int(int(in.Answers))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"Targets\":")
+	out.RawByte('[')
+	for v2, v3 := range in.Targets {
+		if v2 > 0 {
+			out.RawByte(',')
+		}
+		out.String(string(v3))
+	}
+	out.RawByte(']')
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"LabelName\":")
+	out.String(string(in.LabelName))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"RemoteAddr\":")
+	out.String(string(in.RemoteAddr))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"ClientAddr\":")
+	out.String(string(in.ClientAddr))
+	if !first {
+		out.RawByte(',')
+	}
+	first = false
+	out.RawString("\"HasECS\":")
+	out.Bool(bool(in.HasECS))
+	out.RawByte('}')
+}
+func (v Entry) MarshalJSON() ([]byte, error) {
+	w := jwriter.Writer{}
+	easyjson_320612c2_encode_github_com_abh_geodns_querylog_Entry(&w, v)
+	return w.Buffer.BuildBytes(), w.Error
+}
+func (v Entry) MarshalEasyJSON(w *jwriter.Writer) {
+	easyjson_320612c2_encode_github_com_abh_geodns_querylog_Entry(w, v)
+}
+func (v *Entry) UnmarshalJSON(data []byte) error {
+	r := jlexer.Lexer{Data: data}
+	easyjson_320612c2_decode_github_com_abh_geodns_querylog_Entry(&r, v)
+	return r.Error()
+}
+func (v *Entry) UnmarshalEasyJSON(l *jlexer.Lexer) {
+	easyjson_320612c2_decode_github_com_abh_geodns_querylog_Entry(l, v)
+}

+ 50 - 3
serve.go

@@ -10,6 +10,7 @@ import (
 	"strings"
 	"strings"
 	"time"
 	"time"
 
 
+	"github.com/abh/geodns/querylog"
 	"github.com/miekg/dns"
 	"github.com/miekg/dns"
 	"github.com/rcrowley/go-metrics"
 	"github.com/rcrowley/go-metrics"
 )
 )
@@ -20,11 +21,24 @@ func getQuestionName(z *Zone, req *dns.Msg) string {
 	return strings.ToLower(strings.Join(ql, "."))
 	return strings.ToLower(strings.Join(ql, "."))
 }
 }
 
 
-func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
+func (srv *Server) serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 
 
+	qname := req.Question[0].Name
 	qtype := req.Question[0].Qtype
 	qtype := req.Question[0].Qtype
 
 
-	logPrintf("[zone %s] incoming  %s %s (id %d) from %s\n", z.Origin, req.Question[0].Name,
+	var qle *querylog.Entry
+
+	if srv.queryLogger != nil {
+		qle = &querylog.Entry{
+			Time:   time.Now().UnixNano(),
+			Origin: z.Origin,
+			Name:   qname,
+			Qtype:  qtype,
+		}
+		defer srv.queryLogger.Write(qle)
+	}
+
+	logPrintf("[zone %s] incoming  %s %s (id %d) from %s\n", z.Origin, qname,
 		dns.TypeToString[qtype], req.Id, w.RemoteAddr())
 		dns.TypeToString[qtype], req.Id, w.RemoteAddr())
 
 
 	// Global meter
 	// Global meter
@@ -49,6 +63,9 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 		realIP = make(net.IP, len(addr.IP))
 		realIP = make(net.IP, len(addr.IP))
 		copy(realIP, addr.IP)
 		copy(realIP, addr.IP)
 	}
 	}
+	if qle != nil {
+		qle.RemoteAddr = realIP.String()
+	}
 
 
 	z.Metrics.ClientStats.Add(realIP.String())
 	z.Metrics.ClientStats.Add(realIP.String())
 
 
@@ -71,6 +88,11 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 					if e.Address != nil {
 					if e.Address != nil {
 						edns = e
 						edns = e
 						ip = e.Address
 						ip = e.Address
+
+						if qle != nil {
+							qle.HasECS = true
+							qle.ClientAddr = fmt.Sprintf("%s/%d", ip, e.SourceNetmask)
+						}
 					}
 					}
 				}
 				}
 			}
 			}
@@ -79,11 +101,26 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 
 
 	if len(ip) == 0 { // no edns subnet
 	if len(ip) == 0 { // no edns subnet
 		ip = realIP
 		ip = realIP
+		if qle != nil {
+			qle.ClientAddr = fmt.Sprintf("%s/%d", ip, len(ip)*8)
+		}
 	}
 	}
 
 
 	targets, netmask := z.Options.Targeting.GetTargets(ip)
 	targets, netmask := z.Options.Targeting.GetTargets(ip)
 
 
+	if qle != nil {
+		qle.Targets = targets
+	}
+
 	m := new(dns.Msg)
 	m := new(dns.Msg)
+
+	if qle != nil {
+		defer func() {
+			qle.Rcode = m.Rcode
+			qle.Answers = len(m.Answer)
+		}()
+	}
+
 	m.SetReply(req)
 	m.SetReply(req)
 	if e := m.IsEdns0(); e != nil {
 	if e := m.IsEdns0(); e != nil {
 		m.SetEdns0(4096, e.Do())
 		m.SetEdns0(4096, e.Do())
@@ -112,6 +149,10 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 
 
 		firstLabel := (strings.Split(label, "."))[0]
 		firstLabel := (strings.Split(label, "."))[0]
 
 
+		if qle != nil {
+			qle.LabelName = firstLabel
+		}
+
 		if permitDebug && firstLabel == "_status" {
 		if permitDebug && firstLabel == "_status" {
 			if qtype == dns.TypeANY || qtype == dns.TypeTXT {
 			if qtype == dns.TypeANY || qtype == dns.TypeTXT {
 				m.Answer = statusRR(label + "." + z.Origin + ".")
 				m.Answer = statusRR(label + "." + z.Origin + ".")
@@ -145,6 +186,7 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 			}
 			}
 
 
 			m.Authoritative = true
 			m.Authoritative = true
+
 			w.WriteMsg(m)
 			w.WriteMsg(m)
 			return
 			return
 		}
 		}
@@ -163,7 +205,7 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 		var rrs []dns.RR
 		var rrs []dns.RR
 		for _, record := range servers {
 		for _, record := range servers {
 			rr := dns.Copy(record.RR)
 			rr := dns.Copy(record.RR)
-			rr.Header().Name = req.Question[0].Name
+			rr.Header().Name = qname
 			rrs = append(rrs, rr)
 			rrs = append(rrs, rr)
 		}
 		}
 		m.Answer = rrs
 		m.Answer = rrs
@@ -176,6 +218,11 @@ func serve(w dns.ResponseWriter, req *dns.Msg, z *Zone) {
 
 
 	logPrintln(m)
 	logPrintln(m)
 
 
+	if qle != nil {
+		qle.LabelName = labels.Label
+		qle.Answers = len(m.Answer)
+		qle.Rcode = m.Rcode
+	}
 	err := w.WriteMsg(m)
 	err := w.WriteMsg(m)
 	if err != nil {
 	if err != nil {
 		// if Pack'ing fails the Write fails. Return SERVFAIL.
 		// if Pack'ing fails the Write fails. Return SERVFAIL.

+ 19 - 3
server.go

@@ -3,14 +3,30 @@ package main
 import (
 import (
 	"log"
 	"log"
 	"time"
 	"time"
+
+	"github.com/abh/geodns/querylog"
+	"github.com/miekg/dns"
 )
 )
-import "github.com/miekg/dns"
 
 
-type Server struct{}
+type Server struct {
+	queryLogger querylog.QueryLogger
+}
+
+func NewServer() *Server {
+	return &Server{}
+}
+
+// Setup the QueryLogger. For now it only supports writing to a file (and all
+// zones get logged to the same file).
+func (srv *Server) SetQueryLogger(file string) error {
+	var err error
+	srv.queryLogger, err = querylog.NewFileLogger(file)
+	return err
+}
 
 
 func (srv *Server) setupServerFunc(Zone *Zone) func(dns.ResponseWriter, *dns.Msg) {
 func (srv *Server) setupServerFunc(Zone *Zone) func(dns.ResponseWriter, *dns.Msg) {
 	return func(w dns.ResponseWriter, r *dns.Msg) {
 	return func(w dns.ResponseWriter, r *dns.Msg) {
-		serve(w, r, Zone)
+		srv.serve(w, r, Zone)
 	}
 	}
 }
 }
 
 

+ 23 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore

@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test

+ 21 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Nate Finch 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 166 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/README.md

@@ -0,0 +1,166 @@
+# lumberjack  [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
+
+### Lumberjack is a Go package for writing logs to rolling files.
+
+Package lumberjack provides a rolling logger.
+
+Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+thusly:
+
+    import "gopkg.in/natefinch/lumberjack.v2"
+
+The package name remains simply lumberjack, and the code resides at
+https://github.com/natefinch/lumberjack under the v2.0 branch.
+
+Lumberjack is intended to be one part of a logging infrastructure.
+It is not an all-in-one solution, but instead is a pluggable
+component at the bottom of the logging stack that simply controls the files
+to which logs are written.
+
+Lumberjack plays well with any logging package that can write to an
+io.Writer, including the standard library's log package.
+
+Lumberjack assumes that only one process is writing to the output files.
+Using the same lumberjack configuration from multiple processes on the same
+machine will result in improper behavior.
+
+
+**Example**
+
+To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
+
+Code:
+
+```go
+log.SetOutput(&lumberjack.Logger{
+    Filename:   "/var/log/myapp/foo.log",
+    MaxSize:    500, // megabytes
+    MaxBackups: 3,
+    MaxAge:     28, //days
+})
+```
+
+
+
+## type Logger
+``` go
+type Logger struct {
+    // Filename is the file to write logs to.  Backup log files will be retained
+    // in the same directory.  It uses <processname>-lumberjack.log in
+    // os.TempDir() if empty.
+    Filename string `json:"filename" yaml:"filename"`
+
+    // MaxSize is the maximum size in megabytes of the log file before it gets
+    // rotated. It defaults to 100 megabytes.
+    MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+    // MaxAge is the maximum number of days to retain old log files based on the
+    // timestamp encoded in their filename.  Note that a day is defined as 24
+    // hours and may not exactly correspond to calendar days due to daylight
+    // savings, leap seconds, etc. The default is not to remove old log files
+    // based on age.
+    MaxAge int `json:"maxage" yaml:"maxage"`
+
+    // MaxBackups is the maximum number of old log files to retain.  The default
+    // is to retain all old log files (though MaxAge may still cause them to get
+    // deleted.)
+    MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+    // LocalTime determines if the time used for formatting the timestamps in
+    // backup files is the computer's local time.  The default is to use UTC
+    // time.
+    LocalTime bool `json:"localtime" yaml:"localtime"`
+    // contains filtered or unexported fields
+}
+```
+Logger is an io.WriteCloser that writes to the specified filename.
+
+Logger opens or creates the logfile on first Write.  If the file exists and
+is less than MaxSize megabytes, lumberjack will open and append to that file.
+If the file exists and its size is >= MaxSize megabytes, the file is renamed
+by putting the current time in a timestamp in the name immediately before the
+file's extension (or the end of the filename if there's no extension). A new
+log file is then created using the original filename.
+
+Whenever a write would cause the current log file to exceed MaxSize megabytes,
+the current file is closed, renamed, and a new log file created with the
+original name. Thus, the filename you give Logger is always the "current" log
+file.
+
+### Cleaning Up Old Log Files
+Whenever a new logfile gets created, old log files may be deleted.  The most
+recent files according to the encoded timestamp will be retained, up to a
+number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
+with an encoded timestamp older than MaxAge days are deleted, regardless of
+MaxBackups.  Note that the time encoded in the timestamp is the rotation
+time, which may differ from the last time that file was written to.
+
+If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+
+
+
+
+
+
+
+
+
+
+
+### func (\*Logger) Close
+``` go
+func (l *Logger) Close() error
+```
+Close implements io.Closer, and closes the current logfile.
+
+
+
+### func (\*Logger) Rotate
+``` go
+func (l *Logger) Rotate() error
+```
+Rotate causes Logger to close the existing log file and immediately create a
+new one.  This is a helper function for applications that want to initiate
+rotations outside of the normal rotation rules, such as in response to
+SIGHUP.  After rotating, this initiates a cleanup of old log files according
+to the normal rules.
+
+**Example**
+
+Example of how to rotate in response to SIGHUP.
+
+Code:
+
+```go
+l := &lumberjack.Logger{}
+log.SetOutput(l)
+c := make(chan os.Signal, 1)
+signal.Notify(c, syscall.SIGHUP)
+
+go func() {
+    for {
+        <-c
+        l.Rotate()
+    }
+}()
+```
+
+### func (\*Logger) Write
+``` go
+func (l *Logger) Write(p []byte) (n int, err error)
+```
+Write implements io.Writer.  If a write would cause the log file to be larger
+than MaxSize, the file is closed, renamed to include a timestamp of the
+current time, and a new log file is created using the original log file name.
+If the length of the write is greater than MaxSize, an error is returned.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

+ 11 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/chown.go

@@ -0,0 +1,11 @@
+// +build !linux
+
+package lumberjack
+
+import (
+	"os"
+)
+
+func chown(_ string, _ os.FileInfo) error {
+	return nil
+}

+ 19 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go

@@ -0,0 +1,19 @@
+package lumberjack
+
+import (
+	"os"
+	"syscall"
+)
+
+// os_Chown is a var so we can mock it out during tests.
+var os_Chown = os.Chown
+
+func chown(name string, info os.FileInfo) error {
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
+	if err != nil {
+		return err
+	}
+	f.Close()
+	stat := info.Sys().(*syscall.Stat_t)
+	return os_Chown(name, int(stat.Uid), int(stat.Gid))
+}

+ 417 - 0
vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go

@@ -0,0 +1,417 @@
+// Package lumberjack provides a rolling logger.
+//
+// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+// thusly:
+//
+//   import "gopkg.in/natefinch/lumberjack.v2"
+//
+// The package name remains simply lumberjack, and the code resides at
+// https://github.com/natefinch/lumberjack under the v2.0 branch.
+//
+// Lumberjack is intended to be one part of a logging infrastructure.
+// It is not an all-in-one solution, but instead is a pluggable
+// component at the bottom of the logging stack that simply controls the files
+// to which logs are written.
+//
+// Lumberjack plays well with any logging package that can write to an
+// io.Writer, including the standard library's log package.
+//
+// Lumberjack assumes that only one process is writing to the output files.
+// Using the same lumberjack configuration from multiple processes on the same
+// machine will result in improper behavior.
+package lumberjack
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
const (
	// backupTimeFormat is the reference layout used to embed the rotation
	// time (with millisecond precision) in backup file names.
	backupTimeFormat = "2006-01-02T15-04-05.000"
	// defaultMaxSize is the rotation threshold, in megabytes, applied when
	// Logger.MaxSize is zero.
	defaultMaxSize   = 100
)

// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)
+
// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write.  If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize
// megabytes, the current file is closed, renamed, and a new log file created
// with the original name. Thus, the filename you give Logger is always the
// "current" log file.
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted.  The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups.  Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// size is the number of bytes written to the current log file.
	size int64
	// file is the currently open log file; nil until the first Write, or
	// after Close.
	file *os.File
	// mu guards size and file.
	mu   sync.Mutex
}
+
var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// os_Stat exists so it can be mocked out by tests.
	os_Stat = os.Stat

	// megabyte is the conversion factor between MaxSize and bytes.  It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)
+
+// Write implements io.Writer.  If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
+func (l *Logger) Write(p []byte) (n int, err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	writeLen := int64(len(p))
+	if writeLen > l.max() {
+		return 0, fmt.Errorf(
+			"write length %d exceeds maximum file size %d", writeLen, l.max(),
+		)
+	}
+
+	if l.file == nil {
+		if err = l.openExistingOrNew(len(p)); err != nil {
+			return 0, err
+		}
+	}
+
+	if l.size+writeLen > l.max() {
+		if err := l.rotate(); err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = l.file.Write(p)
+	l.size += int64(n)
+
+	return n, err
+}
+
// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.close()
}

// close closes the file if it is open.  Callers must hold l.mu.
func (l *Logger) close() error {
	if l.file == nil {
		return nil
	}
	err := l.file.Close()
	// Drop the handle even if Close failed, so a later Write reopens.
	l.file = nil
	return err
}
+
// Rotate causes Logger to close the existing log file and immediately create a
// new one.  This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP.  After rotating, this initiates a cleanup of old log files according
// to the normal rules.
func (l *Logger) Rotate() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.rotate()
}

// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// cleanup.  Callers must hold l.mu.
func (l *Logger) rotate() error {
	if err := l.close(); err != nil {
		return err
	}

	// openNew performs the rename of the old file and creates the new one.
	if err := l.openNew(); err != nil {
		return err
	}
	return l.cleanup()
}
+
// openNew opens a new log file for writing, moving any old log file out of the
// way.  This method assumes the file has already been closed.
func (l *Logger) openNew() error {
	err := os.MkdirAll(l.dir(), 0744)
	if err != nil {
		return fmt.Errorf("can't make directories for new logfile: %s", err)
	}

	name := l.filename()
	mode := os.FileMode(0644)
	info, err := os_Stat(name)
	if err == nil {
		// Copy the mode off the old logfile.
		mode = info.Mode()
		// move the existing file
		newname := backupName(name, l.LocalTime)
		if err := os.Rename(name, newname); err != nil {
			return fmt.Errorf("can't rename log file: %s", err)
		}

		// this is a no-op anywhere but linux; on linux it recreates name
		// (empty) with the old file's mode and owner (see chown_linux.go).
		if err := chown(name, info); err != nil {
			return err
		}
	}

	// we use truncate here because this should only get called when we've moved
	// the file ourselves. if someone else creates the file in the meantime,
	// just wipe out the contents.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("can't open new logfile: %s", err)
	}
	l.file = f
	l.size = 0
	return nil
}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+	dir := filepath.Dir(name)
+	filename := filepath.Base(name)
+	ext := filepath.Ext(filename)
+	prefix := filename[:len(filename)-len(ext)]
+	t := currentTime()
+	if !local {
+		t = t.UTC()
+	}
+
+	timestamp := t.Format(backupTimeFormat)
+	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize.  If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
	filename := l.filename()
	info, err := os_Stat(filename)
	if os.IsNotExist(err) {
		return l.openNew()
	}
	if err != nil {
		return fmt.Errorf("error getting log file info: %s", err)
	}

	// NOTE: this uses >= where Write uses >, so a write that would land
	// exactly on the limit still triggers a rotation here.
	if info.Size()+int64(writeLen) >= l.max() {
		return l.rotate()
	}

	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		// if we fail to open the old log file for some reason, just ignore
		// it and open a new log file.
		return l.openNew()
	}
	l.file = file
	l.size = info.Size()
	return nil
}
+
+// genFilename generates the name of the logfile from the current time.
+func (l *Logger) filename() string {
+	if l.Filename != "" {
+		return l.Filename
+	}
+	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
+	return filepath.Join(os.TempDir(), name)
+}
+
// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.  The actual removal happens
// asynchronously on a separate goroutine; cleanup itself only decides what
// to delete.
func (l *Logger) cleanup() error {
	if l.MaxBackups == 0 && l.MaxAge == 0 {
		return nil
	}

	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var deletes []logInfo

	// First trim by count: everything beyond the newest MaxBackups goes.
	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		deletes = files[l.MaxBackups:]
		files = files[:l.MaxBackups]
	}
	// Then, among the survivors, drop anything whose encoded rotation time
	// is older than MaxAge days.
	if l.MaxAge > 0 {
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))

		cutoff := currentTime().Add(-1 * diff)

		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				deletes = append(deletes, f)
			}
		}
	}

	if len(deletes) == 0 {
		return nil
	}

	go deleteAll(l.dir(), deletes)

	return nil
}
+
+func deleteAll(dir string, files []logInfo) {
+	// remove files on a separate goroutine
+	for _, f := range files {
+		// what am I going to do, log this?
+		_ = os.Remove(filepath.Join(dir, f.Name()))
+	}
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by ModTime
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+	files, err := ioutil.ReadDir(l.dir())
+	if err != nil {
+		return nil, fmt.Errorf("can't read log file directory: %s", err)
+	}
+	logFiles := []logInfo{}
+
+	prefix, ext := l.prefixAndExt()
+
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		name := l.timeFromName(f.Name(), prefix, ext)
+		if name == "" {
+			continue
+		}
+		t, err := time.Parse(backupTimeFormat, name)
+		if err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+		}
+		// error parsing means that the suffix at the end was not generated
+		// by lumberjack, and therefore it's not a backup file.
+	}
+
+	sort.Sort(byFormatTime(logFiles))
+
+	return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) string {
+	if !strings.HasPrefix(filename, prefix) {
+		return ""
+	}
+	filename = filename[len(prefix):]
+
+	if !strings.HasSuffix(filename, ext) {
+		return ""
+	}
+	filename = filename[:len(filename)-len(ext)]
+	return filename
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+	if l.MaxSize == 0 {
+		return int64(defaultMaxSize * megabyte)
+	}
+	return int64(l.MaxSize) * int64(megabyte)
+}
+
// dir returns the directory for the current filename.
func (l *Logger) dir() string {
	return filepath.Dir(l.filename())
}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+	filename := filepath.Base(l.filename())
+	ext = filepath.Ext(filename)
+	prefix = filename[:len(filename)-len(ext)] + "-"
+	return prefix, ext
+}
+
+// logInfo is a convenience struct to return the filename and its embedded
+// timestamp.
+type logInfo struct {
+	timestamp time.Time
+	os.FileInfo
+}
+
+// byFormatTime sorts by newest time formatted in the name.
+type byFormatTime []logInfo
+
+func (b byFormatTime) Less(i, j int) bool {
+	return b[i].timestamp.After(b[j].timestamp)
+}
+
+func (b byFormatTime) Swap(i, j int) {
+	b[i], b[j] = b[j], b[i]
+}
+
+func (b byFormatTime) Len() int {
+	return len(b)
+}