Browse Source

Update dependencies

Ask Bjørn Hansen 6 years ago
parent
commit
fbc5e0067f
100 changed files with 3433 additions and 6275 deletions
  1. 40 132
      Gopkg.lock
  2. 0 8
      Gopkg.toml
  3. 1 1
      Makefile
  4. 2 2
      geodns-logs/process-stats.go
  5. 1 1
      geodns.go
  6. 2 2
      server/server.go
  7. 1 1
      vendor/github.com/davecgh/go-spew/LICENSE
  8. 90 97
      vendor/github.com/davecgh/go-spew/spew/bypass.go
  9. 1 1
      vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
  10. 1 1
      vendor/github.com/davecgh/go-spew/spew/common.go
  11. 5 5
      vendor/github.com/davecgh/go-spew/spew/dump.go
  12. 2 2
      vendor/github.com/davecgh/go-spew/spew/format.go
  13. 9 4
      vendor/github.com/golang/geo/r1/interval.go
  14. 4 4
      vendor/github.com/golang/geo/s1/angle.go
  15. 1 1
      vendor/github.com/golang/geo/s2/cellunion.go
  16. 39 6
      vendor/github.com/golang/geo/s2/contains_point_query.go
  17. 239 0
      vendor/github.com/golang/geo/s2/convex_hull_query.go
  18. 13 11
      vendor/github.com/golang/geo/s2/edge_crossings.go
  19. 11 7
      vendor/github.com/golang/geo/s2/edge_distances.go
  20. 167 0
      vendor/github.com/golang/geo/s2/edge_tessellator.go
  21. 20 10
      vendor/github.com/golang/geo/s2/loop.go
  22. 41 0
      vendor/github.com/golang/geo/s2/point_vector.go
  23. 158 56
      vendor/github.com/golang/geo/s2/polygon.go
  24. 83 11
      vendor/github.com/golang/geo/s2/polyline.go
  25. 119 13
      vendor/github.com/golang/geo/s2/predicates.go
  26. 2 2
      vendor/github.com/golang/geo/s2/projections.go
  27. 47 3
      vendor/github.com/golang/geo/s2/regioncoverer.go
  28. 13 21
      vendor/github.com/golang/geo/s2/shape.go
  29. 39 10
      vendor/github.com/golang/geo/s2/shapeindex.go
  30. 1 1
      vendor/github.com/golang/geo/s2/shapeutil.go
  31. 0 3
      vendor/github.com/golang/protobuf/LICENSE
  32. 0 1
      vendor/github.com/golang/protobuf/proto/decode.go
  33. 63 0
      vendor/github.com/golang/protobuf/proto/deprecated.go
  34. 0 18
      vendor/github.com/golang/protobuf/proto/encode.go
  35. 2 1
      vendor/github.com/golang/protobuf/proto/equal.go
  36. 71 7
      vendor/github.com/golang/protobuf/proto/extensions.go
  37. 72 28
      vendor/github.com/golang/protobuf/proto/lib.go
  38. 2 135
      vendor/github.com/golang/protobuf/proto/message_set.go
  39. 4 1
      vendor/github.com/golang/protobuf/proto/pointer_reflect.go
  40. 10 5
      vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
  41. 24 23
      vendor/github.com/golang/protobuf/proto/properties.go
  42. 162 67
      vendor/github.com/golang/protobuf/proto/table_marshal.go
  43. 141 55
      vendor/github.com/golang/protobuf/proto/table_unmarshal.go
  44. 2 2
      vendor/github.com/golang/protobuf/proto/text.go
  45. 3 3
      vendor/github.com/golang/protobuf/proto/text_parser.go
  46. 9 0
      vendor/github.com/google/uuid/CONTRIBUTORS
  47. 27 0
      vendor/github.com/google/uuid/LICENSE
  48. 80 0
      vendor/github.com/google/uuid/dce.go
  49. 12 0
      vendor/github.com/google/uuid/doc.go
  50. 53 0
      vendor/github.com/google/uuid/hash.go
  51. 37 0
      vendor/github.com/google/uuid/marshal.go
  52. 90 0
      vendor/github.com/google/uuid/node.go
  53. 12 0
      vendor/github.com/google/uuid/node_js.go
  54. 33 0
      vendor/github.com/google/uuid/node_net.go
  55. 59 0
      vendor/github.com/google/uuid/sql.go
  56. 123 0
      vendor/github.com/google/uuid/time.go
  57. 43 0
      vendor/github.com/google/uuid/util.go
  58. 245 0
      vendor/github.com/google/uuid/uuid.go
  59. 44 0
      vendor/github.com/google/uuid/version1.go
  60. 38 0
      vendor/github.com/google/uuid/version4.go
  61. 0 20
      vendor/github.com/influxdata/influxdb/LICENSE
  62. 0 61
      vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
  63. 0 662
      vendor/github.com/influxdata/influxdb/client/v2/client.go
  64. 0 112
      vendor/github.com/influxdata/influxdb/client/v2/udp.go
  65. 0 48
      vendor/github.com/influxdata/influxdb/models/consistency.go
  66. 0 32
      vendor/github.com/influxdata/influxdb/models/inline_fnv.go
  67. 0 44
      vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
  68. 0 2455
      vendor/github.com/influxdata/influxdb/models/points.go
  69. 0 62
      vendor/github.com/influxdata/influxdb/models/rows.go
  70. 0 42
      vendor/github.com/influxdata/influxdb/models/statistic.go
  71. 0 74
      vendor/github.com/influxdata/influxdb/models/time.go
  72. 0 7
      vendor/github.com/influxdata/influxdb/models/uint_support.go
  73. 0 115
      vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
  74. 0 21
      vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
  75. 0 21
      vendor/github.com/kr/pretty/License
  76. 0 265
      vendor/github.com/kr/pretty/diff.go
  77. 0 328
      vendor/github.com/kr/pretty/formatter.go
  78. 0 108
      vendor/github.com/kr/pretty/pretty.go
  79. 0 41
      vendor/github.com/kr/pretty/zero.go
  80. 0 19
      vendor/github.com/kr/text/License
  81. 0 3
      vendor/github.com/kr/text/doc.go
  82. 0 74
      vendor/github.com/kr/text/indent.go
  83. 0 86
      vendor/github.com/kr/text/wrap.go
  84. 56 0
      vendor/github.com/miekg/dns/acceptfunc.go
  85. 45 210
      vendor/github.com/miekg/dns/client.go
  86. 2 6
      vendor/github.com/miekg/dns/clientconfig.go
  87. 0 198
      vendor/github.com/miekg/dns/compress_generate.go
  88. 106 16
      vendor/github.com/miekg/dns/defaults.go
  89. 49 12
      vendor/github.com/miekg/dns/dns.go
  90. 30 40
      vendor/github.com/miekg/dns/dnssec.go
  91. 132 77
      vendor/github.com/miekg/dns/dnssec_keyscan.go
  92. 8 7
      vendor/github.com/miekg/dns/dnssec_privkey.go
  93. 52 55
      vendor/github.com/miekg/dns/doc.go
  94. 20 7
      vendor/github.com/miekg/dns/duplicate.go
  95. 10 24
      vendor/github.com/miekg/dns/duplicate_generate.go
  96. 68 39
      vendor/github.com/miekg/dns/edns.go
  97. 7 1
      vendor/github.com/miekg/dns/format.go
  98. 186 113
      vendor/github.com/miekg/dns/generate.go
  99. 5 8
      vendor/github.com/miekg/dns/labels.go
  100. 44 0
      vendor/github.com/miekg/dns/listen_go111.go

+ 40 - 132
Gopkg.lock

@@ -3,296 +3,204 @@
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:ad61071aaffb5343a2b2eaa9df42308409a233edea13bffc8c1837a6da208738"
   name = "github.com/abh/errorutil"
   name = "github.com/abh/errorutil"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
   revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
   revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
   name = "github.com/beorn7/perks"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
   packages = ["quantile"]
-  pruneopts = "NUT"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
   name = "github.com/davecgh/go-spew"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
   packages = ["spew"]
-  pruneopts = "NUT"
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
   name = "github.com/fsnotify/fsnotify"
   name = "github.com/fsnotify/fsnotify"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
   revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
   revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
   version = "v1.4.7"
   version = "v1.4.7"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:ca7e30de06a78980af00a12b0a9d80736cea5c7f4186275c473a334ece008a84"
   name = "github.com/golang/geo"
   name = "github.com/golang/geo"
   packages = [
   packages = [
     "r1",
     "r1",
     "r2",
     "r2",
     "r3",
     "r3",
     "s1",
     "s1",
-    "s2",
+    "s2"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "e41ca803f92c4c1770133cfa5b4fc8249a7dbe82"
+  revision = "476085157cff9aaeef4d4f124649436542d4114a"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:15042ad3498153684d09f393bbaec6b216c8eec6d61f63dff711de7d64ed8861"
   name = "github.com/golang/protobuf"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
   packages = ["proto"]
-  pruneopts = "NUT"
-  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
-  version = "v1.1.0"
+  revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+  version = "v1.3.1"
+
+[[projects]]
+  name = "github.com/google/uuid"
+  packages = ["."]
+  revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
+  version = "v1.1.1"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:0c5b1e3165c7a4c70c1e26c30c5483ee8350791849ace0c493bd058d5d801011"
   name = "github.com/hpcloud/tail"
   name = "github.com/hpcloud/tail"
   packages = [
   packages = [
     ".",
     ".",
     "ratelimiter",
     "ratelimiter",
     "util",
     "util",
     "watch",
     "watch",
-    "winfile",
+    "winfile"
   ]
   ]
-  pruneopts = "NUT"
   revision = "7d02b9cfe313d6d68d4a184d56d490b5a8ba4163"
   revision = "7d02b9cfe313d6d68d4a184d56d490b5a8ba4163"
   source = "https://github.com/abh/tail.git"
   source = "https://github.com/abh/tail.git"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
-  digest = "1:37efe318465073201023f1dda8e9e8fe32407c6a3d9fef2b2e75544e43750e40"
-  name = "github.com/influxdata/influxdb"
-  packages = [
-    "client/v2",
-    "models",
-    "pkg/escape",
-  ]
-  pruneopts = "NUT"
-  revision = "d977c0ac2494a59d72f41dc277771a3d297b8e98"
-
-[[projects]]
-  branch = "master"
-  digest = "1:7b21c7fc5551b46d1308b4ffa9e9e49b66c7a8b0ba88c0130474b0e7a20d859f"
-  name = "github.com/kr/pretty"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712"
-
-[[projects]]
-  digest = "1:c3a7836b5904db0f8b609595b619916a6831cb35b8b714aec39f96d00c6155d8"
-  name = "github.com/kr/text"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f"
-  version = "v0.1.0"
-
-[[projects]]
-  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
   name = "github.com/matttproud/golang_protobuf_extensions"
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
   packages = ["pbutil"]
-  pruneopts = "NUT"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"
   version = "v1.0.1"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
-  digest = "1:11eb12a259ff9792c596b53252c3955f264b8f0d57bfce6a1d65061dd11792eb"
   name = "github.com/miekg/dns"
   name = "github.com/miekg/dns"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "3e6e47bc11bc7f93f9e2f1c7bd6481ba4802808b"
+  revision = "73601d4aed9d844322611759d7f3619110b7c88e"
+  version = "v1.1.8"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:b07ac3d21c03b19da0bec9c83d5e9625d3d4c7d62df62d5c23e9b4f172d173ab"
   name = "github.com/oschwald/geoip2-golang"
   name = "github.com/oschwald/geoip2-golang"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "7118115686e16b77967cdbf55d1b944fe14ad312"
+  revision = "42d566f218c8e6131d26c31d07a294c4c7eecfd8"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:c503e149d838cd0852612a6211eb72225ae4888e0a23a26262512eef42370a82"
   name = "github.com/oschwald/maxminddb-golang"
   name = "github.com/oschwald/maxminddb-golang"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
   revision = "c5bec84d1963260297932a1b7a1753c8420717a7"
   revision = "c5bec84d1963260297932a1b7a1753c8420717a7"
   version = "v1.3.0"
   version = "v1.3.0"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:cce3a18fb0b96b5015cd8ca03a57d20a662679de03c4dc4b6ff5f17ea2050fa6"
   name = "github.com/pborman/uuid"
   name = "github.com/pborman/uuid"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
-  version = "v1.1"
+  revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
+  version = "v1.2"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
   packages = ["difflib"]
-  pruneopts = "NUT"
   revision = "792786c7400a136282c1664665ae0a8db921c6c2"
   revision = "792786c7400a136282c1664665ae0a8db921c6c2"
   version = "v1.0.0"
   version = "v1.0.0"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:43b2d599cf31e3998d0ecac54fc45c0c0c2a2c363b62aaad9b664f01b3a84ac7"
   name = "github.com/prometheus/client_golang"
   name = "github.com/prometheus/client_golang"
   packages = [
   packages = [
     "prometheus",
     "prometheus",
-    "prometheus/promhttp",
+    "prometheus/internal",
+    "prometheus/promhttp"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "c5b7fccd204277076155f10851dad72b76a49317"
-  version = "v0.8.0"
+  revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
+  version = "v0.9.2"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a"
   name = "github.com/prometheus/client_model"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   packages = ["go"]
-  pruneopts = "NUT"
-  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
+  revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
-  digest = "1:3bbebf77a04cda10bd07834d1686141608aca01c1aeb400d504d8aa026793b5a"
   name = "github.com/prometheus/common"
   name = "github.com/prometheus/common"
   packages = [
   packages = [
     "expfmt",
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
     "internal/bitbucket.org/ww/goautoneg",
-    "model",
+    "model"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
+  revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
+  version = "v0.2.0"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:37e418257b05a9e9fabbf836df2d8f3613313e80a909da6b9597b759ebca61cd"
   name = "github.com/prometheus/procfs"
   name = "github.com/prometheus/procfs"
-  packages = [
-    ".",
-    "internal/util",
-    "nfs",
-    "xfs",
-  ]
-  pruneopts = "NUT"
-  revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
+  packages = ["."]
+  revision = "ea9eea63887261e4d8ed8315f4078e88d540c725"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:a7cb71e7cef4a320ae6d91424df8f991ed4161aede6dea7ba8d8f3af1b589a6c"
   name = "github.com/stretchr/testify"
   name = "github.com/stretchr/testify"
   packages = [
   packages = [
     "assert",
     "assert",
-    "require",
+    "require"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
+  revision = "34c6fa2dc70986bccbbffcc6130f6920a924b075"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:d5891c5bca9c62e5d394ca26491d2b710a1dc08cedeb0ca8f9ac4c3305120b02"
   name = "golang.org/x/crypto"
   name = "golang.org/x/crypto"
   packages = [
   packages = [
     "ed25519",
     "ed25519",
-    "ed25519/internal/edwards25519",
+    "ed25519/internal/edwards25519"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
+  revision = "38d8ce5564a5b71b2e3a00553993f1b9a7ae852f"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:809955aecc90b55dbed131c7a47d523f347b7dc64726732cfb5e437748729798"
   name = "golang.org/x/net"
   name = "golang.org/x/net"
   packages = [
   packages = [
     "bpf",
     "bpf",
     "internal/iana",
     "internal/iana",
     "internal/socket",
     "internal/socket",
     "ipv4",
     "ipv4",
-    "ipv6",
+    "ipv6"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "d0887baf81f4598189d4e12a37c6da86f0bba4d0"
+  revision = "eb5bcb51f2a31c7d5141d810b70815c05d9c9146"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
-  digest = "1:64107e3c8f52f341891a565d117bf263ed396fe0c16bad19634b25be25debfaa"
   name = "golang.org/x/sys"
   name = "golang.org/x/sys"
   packages = [
   packages = [
     "unix",
     "unix",
-    "windows",
+    "windows"
   ]
   ]
-  pruneopts = "NUT"
-  revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4"
+  revision = "4b34438f7a67ee5f45cc6132e2bad873a20324e9"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:9679d687b448ea889cec3abd07d1117b03afed3ae845ef1dce91e4070f75dee0"
   name = "gopkg.in/gcfg.v1"
   name = "gopkg.in/gcfg.v1"
   packages = [
   packages = [
     ".",
     ".",
     "scanner",
     "scanner",
     "token",
     "token",
-    "types",
+    "types"
   ]
   ]
-  pruneopts = "NUT"
   revision = "61b2c08bc8f6068f7c5ca684372f9a6cb1c45ebe"
   revision = "61b2c08bc8f6068f7c5ca684372f9a6cb1c45ebe"
   version = "v1.2.3"
   version = "v1.2.3"
 
 
 [[projects]]
 [[projects]]
-  branch = "v2.0"
-  digest = "1:e65976b7642a86b0375bed80613dffd753f8fac463703d2d57051512f8508e6a"
   name = "gopkg.in/natefinch/lumberjack.v2"
   name = "gopkg.in/natefinch/lumberjack.v2"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
-  revision = "aee4629129445bbdfb69aa565537dcfa16544311"
+  revision = "7d6a1875575e09256dc552b4c0e450dcd02bd10e"
+  version = "v2.0.0"
 
 
 [[projects]]
 [[projects]]
   branch = "v1"
   branch = "v1"
-  digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1"
   name = "gopkg.in/tomb.v1"
   name = "gopkg.in/tomb.v1"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
   revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
   revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
 
 
 [[projects]]
 [[projects]]
-  digest = "1:b233ad4ec87ac916e7bf5e678e98a2cb9e8b52f6de6ad3e11834fc7a71b8e3bf"
   name = "gopkg.in/warnings.v0"
   name = "gopkg.in/warnings.v0"
   packages = ["."]
   packages = ["."]
-  pruneopts = "NUT"
   revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
   revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
   version = "v0.1.2"
   version = "v0.1.2"
 
 
 [solve-meta]
 [solve-meta]
   analyzer-name = "dep"
   analyzer-name = "dep"
   analyzer-version = 1
   analyzer-version = 1
-  input-imports = [
-    "github.com/abh/errorutil",
-    "github.com/fsnotify/fsnotify",
-    "github.com/golang/geo/s2",
-    "github.com/hpcloud/tail",
-    "github.com/influxdata/influxdb/client/v2",
-    "github.com/kr/pretty",
-    "github.com/miekg/dns",
-    "github.com/oschwald/geoip2-golang",
-    "github.com/pborman/uuid",
-    "github.com/prometheus/client_golang/prometheus",
-    "github.com/prometheus/client_golang/prometheus/promhttp",
-    "github.com/stretchr/testify/assert",
-    "github.com/stretchr/testify/require",
-    "gopkg.in/gcfg.v1",
-    "gopkg.in/natefinch/lumberjack.v2",
-  ]
+  inputs-digest = "05c26536c9dbe29c5d47910fece572eab1118cf3f2c3b700802211e0e9a7afcd"
   solver-name = "gps-cdcl"
   solver-name = "gps-cdcl"
   solver-version = 1
   solver-version = 1

+ 0 - 8
Gopkg.toml

@@ -12,14 +12,6 @@
   source = "https://github.com/abh/tail.git"
   source = "https://github.com/abh/tail.git"
   name = "github.com/hpcloud/tail"
   name = "github.com/hpcloud/tail"
 
 
-[[constraint]]
-  branch = "master"
-  name = "github.com/influxdata/influxdb"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/kr/pretty"
-
 [[constraint]]
 [[constraint]]
   branch = "master"
   branch = "master"
   name = "github.com/miekg/dns"
   name = "github.com/miekg/dns"

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 
 
 # where to rsync builds
 # where to rsync builds
 DIST?=dist/publish
 DIST?=dist/publish
-DISTSUB=2018/07b
+DISTSUB=2019/04
 
 
 test: .PHONY
 test: .PHONY
 	go test -v $(shell go list ./... | grep -v /vendor/)
 	go test -v $(shell go list ./... | grep -v /vendor/)

+ 2 - 2
geodns-logs/process-stats.go

@@ -62,8 +62,8 @@ func main() {
 	)
 	)
 	prometheus.MustRegister(queries)
 	prometheus.MustRegister(queries)
 
 
-	buildInfo := prometheus.NewCounterVec(
-		prometheus.CounterOpts{
+	buildInfo := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
 			Name: "geodns_logs_build_info",
 			Name: "geodns_logs_build_info",
 			Help: "GeoDNS logs build information (in labels)",
 			Help: "GeoDNS logs build information (in labels)",
 		},
 		},

+ 1 - 1
geodns.go

@@ -41,7 +41,7 @@ import (
 )
 )
 
 
 // VERSION is the current version of GeoDNS
 // VERSION is the current version of GeoDNS
-var VERSION string = "3.0.1"
+var VERSION string = "3.0.2"
 var buildTime string
 var buildTime string
 var gitVersion string
 var gitVersion string
 
 

+ 2 - 2
server/server.go

@@ -35,8 +35,8 @@ func NewServer(si *monitor.ServerInfo) *Server {
 	)
 	)
 	prometheus.MustRegister(queries)
 	prometheus.MustRegister(queries)
 
 
-	buildInfo := prometheus.NewCounterVec(
-		prometheus.CounterOpts{
+	buildInfo := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
 			Name: "geodns_build_info",
 			Name: "geodns_build_info",
 			Help: "GeoDNS build information (in labels)",
 			Help: "GeoDNS build information (in labels)",
 		},
 		},

+ 1 - 1
vendor/github.com/davecgh/go-spew/LICENSE

@@ -2,7 +2,7 @@ ISC License
 
 
 Copyright (c) 2012-2016 Dave Collins <[email protected]>
 Copyright (c) 2012-2016 Dave Collins <[email protected]>
 
 
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
 purpose with or without fee is hereby granted, provided that the above
 copyright notice and this permission notice appear in all copies.
 copyright notice and this permission notice appear in all copies.
 
 

+ 90 - 97
vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -16,7 +16,9 @@
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // "-tags safe" is not added to the go build command line.  The "disableunsafe"
 // "-tags safe" is not added to the go build command line.  The "disableunsafe"
 // tag is deprecated and thus should not be used.
 // tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
 
 
 package spew
 package spew
 
 
@@ -34,80 +36,49 @@ const (
 	ptrSize = unsafe.Sizeof((*byte)(nil))
 	ptrSize = unsafe.Sizeof((*byte)(nil))
 )
 )
 
 
+type flag uintptr
+
 var (
 var (
-	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
-	// internal reflect.Value fields.  These values are valid before golang
-	// commit ecccf07e7f9d which changed the format.  The are also valid
-	// after commit 82f48826c6c7 which changed the format again to mirror
-	// the original format.  Code in the init function updates these offsets
-	// as necessary.
-	offsetPtr    = uintptr(ptrSize)
-	offsetScalar = uintptr(0)
-	offsetFlag   = uintptr(ptrSize * 2)
-
-	// flagKindWidth and flagKindShift indicate various bits that the
-	// reflect package uses internally to track kind information.
-	//
-	// flagRO indicates whether or not the value field of a reflect.Value is
-	// read-only.
-	//
-	// flagIndir indicates whether the value field of a reflect.Value is
-	// the actual data or a pointer to the data.
-	//
-	// These values are valid before golang commit 90a7c3c86944 which
-	// changed their positions.  Code in the init function updates these
-	// flags as necessary.
-	flagKindWidth = uintptr(5)
-	flagKindShift = uintptr(flagKindWidth - 1)
-	flagRO        = uintptr(1 << 0)
-	flagIndir     = uintptr(1 << 1)
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
 )
 )
 
 
-func init() {
-	// Older versions of reflect.Value stored small integers directly in the
-	// ptr field (which is named val in the older versions).  Versions
-	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
-	// scalar for this purpose which unfortunately came before the flag
-	// field, so the offset of the flag field is different for those
-	// versions.
-	//
-	// This code constructs a new reflect.Value from a known small integer
-	// and checks if the size of the reflect.Value struct indicates it has
-	// the scalar field. When it does, the offsets are updated accordingly.
-	vv := reflect.ValueOf(0xf00)
-	if unsafe.Sizeof(vv) == (ptrSize * 4) {
-		offsetScalar = ptrSize * 2
-		offsetFlag = ptrSize * 3
-	}
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
 
 
-	// Commit 90a7c3c86944 changed the flag positions such that the low
-	// order bits are the kind.  This code extracts the kind from the flags
-	// field and ensures it's the correct type.  When it's not, the flag
-	// order has been changed to the newer format, so the flags are updated
-	// accordingly.
-	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
-	upfv := *(*uintptr)(upf)
-	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
-	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
-		flagKindShift = 0
-		flagRO = 1 << 5
-		flagIndir = 1 << 6
-
-		// Commit adf9b30e5594 modified the flags to separate the
-		// flagRO flag into two bits which specifies whether or not the
-		// field is embedded.  This causes flagIndir to move over a bit
-		// and means that flagRO is the combination of either of the
-		// original flagRO bit and the new bit.
-		//
-		// This code detects the change by extracting what used to be
-		// the indirect bit to ensure it's set.  When it's not, the flag
-		// order has been changed to the newer format, so the flags are
-		// updated accordingly.
-		if upfv&flagIndir == 0 {
-			flagRO = 3 << 5
-			flagIndir = 1 << 7
-		}
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
 	}
 	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
 }
 }
 
 
 // unsafeReflectValue converts the passed reflect.Value into a one that bypasses
 // unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
 // This allows us to check for implementations of the Stringer and error
 // This allows us to check for implementations of the Stringer and error
 // interfaces to be used for pretty printing ordinarily unaddressable and
 // interfaces to be used for pretty printing ordinarily unaddressable and
 // inaccessible values such as unexported struct fields.
 // inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
-	indirects := 1
-	vt := v.Type()
-	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
-	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
-	if rvf&flagIndir != 0 {
-		vt = reflect.PtrTo(v.Type())
-		indirects++
-	} else if offsetScalar != 0 {
-		// The value is in the scalar field when it's not one of the
-		// reference types.
-		switch vt.Kind() {
-		case reflect.Uintptr:
-		case reflect.Chan:
-		case reflect.Func:
-		case reflect.Map:
-		case reflect.Ptr:
-		case reflect.UnsafePointer:
-		default:
-			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
-				offsetScalar)
-		}
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
 	}
 	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
 
 
-	pv := reflect.NewAt(vt, upv)
-	rv = pv
-	for i := 0; i < indirects; i++ {
-		rv = rv.Elem()
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
 	}
 	}
-	return rv
+	panic("reflect.Value read-only flag has changed semantics")
 }
 }

+ 1 - 1
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -16,7 +16,7 @@
 // when the code is running on Google App Engine, compiled by GopherJS, or
 // when the code is running on Google App Engine, compiled by GopherJS, or
 // "-tags safe" is added to the go build command line.  The "disableunsafe"
 // "-tags safe" is added to the go build command line.  The "disableunsafe"
 // tag is deprecated and thus should not be used.
 // tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
 
 
 package spew
 package spew
 
 

+ 1 - 1
vendor/github.com/davecgh/go-spew/spew/common.go

@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
 	w.Write(closeParenBytes)
 	w.Write(closeParenBytes)
 }
 }
 
 
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
 // prefix to Writer w.
 // prefix to Writer w.
 func printHexPtr(w io.Writer, p uintptr) {
 func printHexPtr(w io.Writer, p uintptr) {
 	// Null pointer.
 	// Null pointer.

+ 5 - 5
vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -35,16 +35,16 @@ var (
 
 
 	// cCharRE is a regular expression that matches a cgo char.
 	// cCharRE is a regular expression that matches a cgo char.
 	// It is used to detect character arrays to hexdump them.
 	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
 
 
 	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
 	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
 	// char.  It is used to detect unsigned character arrays to hexdump
 	// char.  It is used to detect unsigned character arrays to hexdump
 	// them.
 	// them.
-	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
 
 
 	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
 	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
 	// It is used to detect uint8_t arrays to hexdump them.
 	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
 )
 )
 
 
 // dumpState contains information about the state of a dump operation.
 // dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	// Display dereferenced value.
 	// Display dereferenced value.
 	d.w.Write(openParenBytes)
 	d.w.Write(openParenBytes)
 	switch {
 	switch {
-	case nilFound == true:
+	case nilFound:
 		d.w.Write(nilAngleBytes)
 		d.w.Write(nilAngleBytes)
 
 
-	case cycleFound == true:
+	case cycleFound:
 		d.w.Write(circularBytes)
 		d.w.Write(circularBytes)
 
 
 	default:
 	default:

+ 2 - 2
vendor/github.com/davecgh/go-spew/spew/format.go

@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
 
 
 	// Display dereferenced value.
 	// Display dereferenced value.
 	switch {
 	switch {
-	case nilFound == true:
+	case nilFound:
 		f.fs.Write(nilAngleBytes)
 		f.fs.Write(nilAngleBytes)
 
 
-	case cycleFound == true:
+	case cycleFound:
 		f.fs.Write(circularShortBytes)
 		f.fs.Write(circularShortBytes)
 
 
 	default:
 	default:

+ 9 - 4
vendor/github.com/golang/geo/r1/interval.go

@@ -59,7 +59,7 @@ func (i Interval) ContainsInterval(oi Interval) bool {
 	return i.Lo <= oi.Lo && oi.Hi <= i.Hi
 	return i.Lo <= oi.Lo && oi.Hi <= i.Hi
 }
 }
 
 
-// InteriorContains returns true iff the the interval strictly contains p.
+// InteriorContains returns true iff the interval strictly contains p.
 func (i Interval) InteriorContains(p float64) bool {
 func (i Interval) InteriorContains(p float64) bool {
 	return i.Lo < p && p < i.Hi
 	return i.Lo < p && p < i.Hi
 }
 }
@@ -138,9 +138,14 @@ func (i Interval) Union(other Interval) Interval {
 
 
 func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
 func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
 
 
-// epsilon is a small number that represents a reasonable level of noise between two
-// values that can be considered to be equal.
-const epsilon = 1e-14
+const (
+	// epsilon is a small number that represents a reasonable level of noise between two
+	// values that can be considered to be equal.
+	epsilon = 1e-15
+	// dblEpsilon is a smaller number for values that require more precision.
+	// This is the C++ DBL_EPSILON equivalent.
+	dblEpsilon = 2.220446049250313e-16
+)
 
 
 // ApproxEqual reports whether the interval can be transformed into the
 // ApproxEqual reports whether the interval can be transformed into the
 // given interval by moving each endpoint a small distance.
 // given interval by moving each endpoint a small distance.

+ 4 - 4
vendor/github.com/golang/geo/s1/angle.go

@@ -98,11 +98,11 @@ func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
 // Abs returns the absolute value of the angle.
 // Abs returns the absolute value of the angle.
 func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
 func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
 
 
-// Normalized returns an equivalent angle in [0, 2π).
+// Normalized returns an equivalent angle in (-2π, 2π].
 func (a Angle) Normalized() Angle {
 func (a Angle) Normalized() Angle {
-	rad := math.Mod(float64(a), 2*math.Pi)
-	if rad < 0 {
-		rad += 2 * math.Pi
+	rad := math.Remainder(float64(a), 2*math.Pi)
+	if rad <= -math.Pi {
+		rad = math.Pi
 	}
 	}
 	return Angle(rad)
 	return Angle(rad)
 }
 }

+ 1 - 1
vendor/github.com/golang/geo/s2/cellunion.go

@@ -405,7 +405,7 @@ func (cu *CellUnion) Contains(o CellUnion) bool {
 // Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
 // Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
 func (cu *CellUnion) Intersects(o CellUnion) bool {
 func (cu *CellUnion) Intersects(o CellUnion) bool {
 	for _, c := range *cu {
 	for _, c := range *cu {
-		if o.ContainsCellID(c) {
+		if o.IntersectsCellID(c) {
 			return true
 			return true
 		}
 		}
 	}
 	}

+ 39 - 6
vendor/github.com/golang/geo/s2/contains_point_query.go

@@ -45,7 +45,9 @@ const (
 // modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
 // modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
 // considered to contain their vertices).
 // considered to contain their vertices).
 //
 //
-// Note that if you need to do a large number of point containment
+// This type is not safe for concurrent use.
+//
+// However, note that if you need to do a large number of point containment
 // tests, it is more efficient to re-use the query rather than creating a new
 // tests, it is more efficient to re-use the query rather than creating a new
 // one each time.
 // one each time.
 type ContainsPointQuery struct {
 type ContainsPointQuery struct {
@@ -90,7 +92,7 @@ func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Poin
 	}
 	}
 
 
 	shape := q.index.Shape(clipped.shapeID)
 	shape := q.index.Shape(clipped.shapeID)
-	if !shape.HasInterior() {
+	if shape.Dimension() != 2 {
 		// Points and polylines can be ignored unless the vertex model is Closed.
 		// Points and polylines can be ignored unless the vertex model is Closed.
 		if q.model != VertexModelClosed {
 		if q.model != VertexModelClosed {
 			return false
 			return false
@@ -149,9 +151,40 @@ func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool {
 	return q.shapeContains(clipped, q.iter.Center(), p)
 	return q.shapeContains(clipped, q.iter.Center(), p)
 }
 }
 
 
+// shapeVisitorFunc is a type of function that can be called against shaped in an index.
+type shapeVisitorFunc func(shape Shape) bool
+
+// visitContainingShapes visits all shapes in the given index that contain the
+// given point p, terminating early if the given visitor function returns false,
+// in which case visitContainingShapes returns false. Each shape is
+// visited at most once.
+func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool {
+	// This function returns false only if the algorithm terminates early
+	// because the visitor function returned false.
+	if !q.iter.LocatePoint(p) {
+		return true
+	}
+
+	cell := q.iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		if q.shapeContains(clipped, q.iter.Center(), p) &&
+			!f(q.index.Shape(clipped.shapeID)) {
+			return false
+		}
+	}
+	return true
+}
+
+// ContainingShapes returns a slice of all shapes that contain the given point.
+func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape {
+	var shapes []Shape
+	q.visitContainingShapes(p, func(shape Shape) bool {
+		shapes = append(shapes, shape)
+		return true
+	})
+	return shapes
+}
+
 // TODO(roberts): Remaining methods from C++
 // TODO(roberts): Remaining methods from C++
-// func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape
-// type shapeVisitorFunc func(shape Shape) bool
-// func (q *ContainsPointQuery) VisitContainingShapes(p Point, v shapeVisitorFunc) bool
 // type edgeVisitorFunc func(shape ShapeEdge) bool
 // type edgeVisitorFunc func(shape ShapeEdge) bool
-// func (q *ContainsPointQuery) VisitIncidentEdges(p Point, v edgeVisitorFunc) bool
+// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool

+ 239 - 0
vendor/github.com/golang/geo/s2/convex_hull_query.go

@@ -0,0 +1,239 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+)
+
+// ConvexHullQuery builds the convex hull of any collection of points,
+// polylines, loops, and polygons. It returns a single convex loop.
+//
+// The convex hull is defined as the smallest convex region on the sphere that
+// contains all of your input geometry. Recall that a region is "convex" if
+// for every pair of points inside the region, the straight edge between them
+// is also inside the region. In our case, a "straight" edge is a geodesic,
+// i.e. the shortest path on the sphere between two points.
+//
+// Containment of input geometry is defined as follows:
+//
+//  - Each input loop and polygon is contained by the convex hull exactly
+//    (i.e., according to Polygon's Contains(Polygon)).
+//
+//  - Each input point is either contained by the convex hull or is a vertex
+//    of the convex hull. (Recall that S2Loops do not necessarily contain their
+//    vertices.)
+//
+//  - For each input polyline, the convex hull contains all of its vertices
+//    according to the rule for points above. (The definition of convexity
+//    then ensures that the convex hull also contains the polyline edges.)
+//
+// To use this type, call the various Add... methods to add your input geometry, and
+// then call ConvexHull. Note that ConvexHull does *not* reset the
+// state; you can continue adding geometry if desired and compute the convex
+// hull again. If you want to start from scratch, simply create a new
+// ConvexHullQuery value.
+//
+// This implement Andrew's monotone chain algorithm, which is a variant of the
+// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time
+// complexity is O(n log n), and the space required is O(n). In fact only the
+// call to "sort" takes O(n log n) time; the rest of the algorithm is linear.
+//
+// Demonstration of the algorithm and code:
+// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
+//
+// This type is not safe for concurrent use.
+type ConvexHullQuery struct {
+	bound  Rect
+	points []Point
+}
+
+// NewConvexHullQuery creates a new ConvexHullQuery.
+func NewConvexHullQuery() *ConvexHullQuery {
+	return &ConvexHullQuery{
+		bound: EmptyRect(),
+	}
+}
+
+// AddPoint adds the given point to the input geometry.
+func (q *ConvexHullQuery) AddPoint(p Point) {
+	q.bound = q.bound.AddPoint(LatLngFromPoint(p))
+	q.points = append(q.points, p)
+}
+
+// AddPolyline adds the given polyline to the input geometry.
+func (q *ConvexHullQuery) AddPolyline(p *Polyline) {
+	q.bound = q.bound.Union(p.RectBound())
+	q.points = append(q.points, (*p)...)
+}
+
+// AddLoop adds the given loop to the input geometry.
+func (q *ConvexHullQuery) AddLoop(l *Loop) {
+	q.bound = q.bound.Union(l.RectBound())
+	if l.isEmptyOrFull() {
+		return
+	}
+	q.points = append(q.points, l.vertices...)
+}
+
+// AddPolygon adds the given polygon to the input geometry.
+func (q *ConvexHullQuery) AddPolygon(p *Polygon) {
+	q.bound = q.bound.Union(p.RectBound())
+	for _, l := range p.loops {
+		// Only loops at depth 0 can contribute to the convex hull.
+		if l.depth == 0 {
+			q.AddLoop(l)
+		}
+	}
+}
+
+// CapBound returns a bounding cap for the input geometry provided.
+//
+// Note that this method does not clear the geometry; you can continue
+// adding to it and call this method again if desired.
+func (q *ConvexHullQuery) CapBound() Cap {
+	// We keep track of a rectangular bound rather than a spherical cap because
+	// it is easy to compute a tight bound for a union of rectangles, whereas it
+	// is quite difficult to compute a tight bound around a union of caps.
+	// Also, polygons and polylines implement CapBound() in terms of
+	// RectBound() for this same reason, so it is much better to keep track
+	// of a rectangular bound as we go along and convert it at the end.
+	//
+	// TODO(roberts): We could compute an optimal bound by implementing Welzl's
+	// algorithm. However we would still need to have special handling of loops
+	// and polygons, since if a loop spans more than 180 degrees in any
+	// direction (i.e., if it contains two antipodal points), then it is not
+	// enough just to bound its vertices. In this case the only convex bounding
+	// cap is FullCap(), and the only convex bounding loop is the full loop.
+	return q.bound.CapBound()
+}
+
+// ConvexHull returns a Loop representing the convex hull of the input geometry provided.
+//
+// If there is no geometry, this method returns an empty loop containing no
+// points.
+//
+// If the geometry spans more than half of the sphere, this method returns a
+// full loop containing the entire sphere.
+//
+// If the geometry contains 1 or 2 points, or a single edge, this method
+// returns a very small loop consisting of three vertices (which are a
+// superset of the input vertices).
+//
+// Note that this method does not clear the geometry; you can continue
+// adding to the query and call this method again.
+func (q *ConvexHullQuery) ConvexHull() *Loop {
+	c := q.CapBound()
+	if c.Height() >= 1 {
+		// The bounding cap is not convex. The current bounding cap
+		// implementation is not optimal, but nevertheless it is likely that the
+		// input geometry itself is not contained by any convex polygon. In any
+		// case, we need a convex bounding cap to proceed with the algorithm below
+		// (in order to construct a point "origin" that is definitely outside the
+		// convex hull).
+		return FullLoop()
+	}
+
+	// Remove duplicates. We need to do this before checking whether there are
+	// fewer than 3 points.
+	x := make(map[Point]bool)
+	r, w := 0, 0 // read/write indexes
+	for ; r < len(q.points); r++ {
+		if x[q.points[r]] {
+			continue
+		}
+		q.points[w] = q.points[r]
+		x[q.points[r]] = true
+		w++
+	}
+	q.points = q.points[:w]
+
+	// This code implements Andrew's monotone chain algorithm, which is a simple
+	// variant of the Graham scan. Rather than sorting by x-coordinate, instead
+	// we sort the points in CCW order around an origin O such that all points
+	// are guaranteed to be on one side of some geodesic through O. This
+	// ensures that as we scan through the points, each new point can only
+	// belong at the end of the chain (i.e., the chain is monotone in terms of
+	// the angle around O from the starting point).
+	origin := Point{c.Center().Ortho()}
+	sort.Slice(q.points, func(i, j int) bool {
+		return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise
+	})
+
+	// Special cases for fewer than 3 points.
+	switch len(q.points) {
+	case 0:
+		return EmptyLoop()
+	case 1:
+		return singlePointLoop(q.points[0])
+	case 2:
+		return singleEdgeLoop(q.points[0], q.points[1])
+	}
+
+	// Generate the lower and upper halves of the convex hull. Each half
+	// consists of the maximal subset of vertices such that the edge chain
+	// makes only left (CCW) turns.
+	lower := q.monotoneChain()
+
+	// reverse the points
+	for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 {
+		q.points[left], q.points[right] = q.points[right], q.points[left]
+	}
+	upper := q.monotoneChain()
+
+	// Remove the duplicate vertices and combine the chains.
+	lower = lower[:len(lower)-1]
+	upper = upper[:len(upper)-1]
+	lower = append(lower, upper...)
+
+	return LoopFromPoints(lower)
+}
+
+// monotoneChain iterates through the points, selecting the maximal subset of points
+// such that the edge chain makes only left (CCW) turns.
+func (q *ConvexHullQuery) monotoneChain() []Point {
+	var output []Point
+	for _, p := range q.points {
+		// Remove any points that would cause the chain to make a clockwise turn.
+		for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise {
+			output = output[:len(output)-1]
+		}
+		output = append(output, p)
+	}
+	return output
+}
+
+// singlePointLoop constructs a 3-vertex polygon consisting of "p" and two nearby
+// vertices. Note that ContainsPoint(p) may be false for the resulting loop.
+func singlePointLoop(p Point) *Loop {
+	const offset = 1e-15
+	d0 := p.Ortho()
+	d1 := p.Cross(d0)
+	vertices := []Point{
+		p,
+		Point{p.Add(d0.Mul(offset)).Normalize()},
+		Point{p.Add(d1.Mul(offset)).Normalize()},
+	}
+	return LoopFromPoints(vertices)
+}
+
+// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
+func singleEdgeLoop(a, b Point) *Loop {
+	vertices := []Point{a, b, Point{a.Add(b.Vector).Normalize()}}
+	loop := LoopFromPoints(vertices)
+	// The resulting loop may be clockwise, so invert it if necessary.
+	loop.Normalize()
+	return loop
+}

+ 13 - 11
vendor/github.com/golang/geo/s2/edge_crossings.go

@@ -29,7 +29,7 @@ const (
 	// radians. However, using a larger error tolerance makes the algorithm more
 	// radians. However, using a larger error tolerance makes the algorithm more
 	// efficient because it reduces the number of cases where exact arithmetic is
 	// efficient because it reduces the number of cases where exact arithmetic is
 	// needed.
 	// needed.
-	intersectionError = s1.Angle(8 * dblEpsilon)
+	intersectionError = s1.Angle(8 * dblError)
 
 
 	// intersectionMergeRadius is used to ensure that intersection points that
 	// intersectionMergeRadius is used to ensure that intersection points that
 	// are supposed to be coincident are merged back together into a single
 	// are supposed to be coincident are merged back together into a single
@@ -125,15 +125,17 @@ func VertexCrossing(a, b, c, d Point) bool {
 	// if OrderedCCW indicates that the edge AB is further CCW around the
 	// if OrderedCCW indicates that the edge AB is further CCW around the
 	// shared vertex O (either A or B) than the edge CD, starting from an
 	// shared vertex O (either A or B) than the edge CD, starting from an
 	// arbitrary fixed reference point.
 	// arbitrary fixed reference point.
+
+	// Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
 	switch {
 	switch {
-	case a == d:
-		return OrderedCCW(Point{a.Ortho()}, c, b, a)
-	case b == c:
-		return OrderedCCW(Point{b.Ortho()}, d, a, b)
 	case a == c:
 	case a == c:
-		return OrderedCCW(Point{a.Ortho()}, d, b, a)
+		return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
 	case b == d:
 	case b == d:
 		return OrderedCCW(Point{b.Ortho()}, c, a, b)
 		return OrderedCCW(Point{b.Ortho()}, c, a, b)
+	case a == d:
+		return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
+	case b == c:
+		return OrderedCCW(Point{b.Ortho()}, d, a, b)
 	}
 	}
 
 
 	return false
 	return false
@@ -251,14 +253,14 @@ func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound
 
 
 	// This calculation bounds the error from all sources: the computation of
 	// This calculation bounds the error from all sources: the computation of
 	// the normal, the subtraction of one endpoint, and the dot product itself.
 	// the normal, the subtraction of one endpoint, and the dot product itself.
-	// dblEpsilon appears because the input points are assumed to be
+	// dblError appears because the input points are assumed to be
 	// normalized in double precision.
 	// normalized in double precision.
 	//
 	//
 	// For reference, the bounds that went into this calculation are:
 	// For reference, the bounds that went into this calculation are:
-	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblEpsilon) * epsilon
+	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
 	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
 	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
 	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
 	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
-	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblEpsilon)*dist + 1.5*math.Abs(proj)) * epsilon
+	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
 	return proj, bound
 	return proj, bound
 }
 }
 
 
@@ -363,8 +365,8 @@ func intersectionExact(a0, a1, b0, b1 Point) Point {
 	xP := aNormP.Cross(bNormP)
 	xP := aNormP.Cross(bNormP)
 
 
 	// The final Normalize() call is done in double precision, which creates a
 	// The final Normalize() call is done in double precision, which creates a
-	// directional error of up to 2*dblEpsilon. (Precise conversion and Normalize()
-	// each contribute up to dblEpsilon of directional error.)
+	// directional error of up to 2*dblError. (Precise conversion and Normalize()
+	// each contribute up to dblError of directional error.)
 	x := xP.Vector()
 	x := xP.Vector()
 
 
 	if x == (r3.Vector{}) {
 	if x == (r3.Vector{}) {

+ 11 - 7
vendor/github.com/golang/geo/s2/edge_distances.go

@@ -34,7 +34,8 @@ func DistanceFromSegment(x, a, b Point) s1.Angle {
 }
 }
 
 
 // IsDistanceLess reports whether the distance from X to the edge AB is less
 // IsDistanceLess reports whether the distance from X to the edge AB is less
-// than limit. This method is faster than DistanceFromSegment(). If you want to
+// than limit. (For less than or equal to, specify limit.Successor()).
+// This method is faster than DistanceFromSegment(). If you want to
 // compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
 // compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
 // once and save the value, since this conversion is relatively expensive.
 // once and save the value, since this conversion is relatively expensive.
 func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
 func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
@@ -71,9 +72,9 @@ func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, boo
 	return maxDist, false
 	return maxDist, false
 }
 }
 
 
-// IsInteriorDistanceLess reports whether the minimum distance from X to the
-// edge AB is attained at an interior point of AB (i.e., not an endpoint), and
-// that distance is less than limit.
+// IsInteriorDistanceLess reports whether the minimum distance from X to the edge
+// AB is attained at an interior point of AB (i.e., not an endpoint), and that
+// distance is less than limit. (Specify limit.Successor() for less than or equal to).
 func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
 func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
 	_, less := UpdateMinInteriorDistance(x, a, b, limit)
 	_, less := UpdateMinInteriorDistance(x, a, b, limit)
 	return less
 	return less
@@ -193,7 +194,7 @@ func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
 	// This bound includes all source of error, assuming that the input points
 	// This bound includes all source of error, assuming that the input points
 	// are normalized. a and b are components of chord length that are
 	// are normalized. a and b are components of chord length that are
 	// perpendicular and parallel to a plane containing the edge respectively.
 	// perpendicular and parallel to a plane containing the edge respectively.
-	b := math.Min(1.0, 0.5*float64(dist)*float64(dist))
+	b := math.Min(1.0, 0.5*float64(dist))
 	a := math.Sqrt(b * (2 - b))
 	a := math.Sqrt(b * (2 - b))
 	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
 	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
 		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
 		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
@@ -262,8 +263,11 @@ func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.C
 	c2 := c.Norm2()
 	c2 := c.Norm2()
 	xDotC := x.Dot(c.Vector)
 	xDotC := x.Dot(c.Vector)
 	xDotC2 := xDotC * xDotC
 	xDotC2 := xDotC * xDotC
-	if !alwaysUpdate && xDotC2 >= c2*float64(minDist) {
-		// The closest point on the great circle AB is too far away.
+	if !alwaysUpdate && xDotC2 > c2*float64(minDist) {
+		// The closest point on the great circle AB is too far away.  We need to
+		// test this using ">" rather than ">=" because the actual minimum bound
+		// on the distance is (xDotC2 / c2), which can be rounded differently
+		// than the (more efficient) multiplicative test above.
 		return minDist, false
 		return minDist, false
 	}
 	}
 
 

+ 167 - 0
vendor/github.com/golang/geo/s2/edge_tessellator.go

@@ -0,0 +1,167 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/s1"
+)
+
+const (
+	// MinTessellationTolerance is the minimum supported tolerance (which
+	// corresponds to a distance less than 1 micrometer on the Earth's
+	// surface, but is still much larger than the expected projection and
+	// interpolation errors).
+	MinTessellationTolerance s1.Angle = 1e-13
+)
+
+// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into
+// a chain of spherical geodesic edges such that the maximum distance between
+// the original edge and the geodesic edge chain is at most the requested
+// tolerance. Similarly, it can convert a spherical geodesic edge into a chain
+// of edges in a given 2D projection such that the maximum distance between the
+// geodesic edge and the chain of projected edges is at most the requested tolerance.
+//
+// Method      | Input                  | Output
+// ------------|------------------------|-----------------------
+// Projected   | S2 geodesics           | Planar projected edges
+// Unprojected | Planar projected edges | S2 geodesics
+type EdgeTessellator struct {
+	projection   Projection
+	tolerance    s1.ChordAngle
+	wrapDistance r2.Point
+}
+
+// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance.
+func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator {
+	return &EdgeTessellator{
+		projection:   p,
+		tolerance:    s1.ChordAngleFromAngle(maxAngle(tolerance, MinTessellationTolerance)),
+		wrapDistance: p.WrapDistance(),
+	}
+}
+
+// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges
+// in the given projection and returns the corresponding vertices.
+//
+// If the given projection has one or more coordinate axes that wrap, then
+// every vertex's coordinates will be as close as possible to the previous
+// vertex's coordinates. Note that this may yield vertices whose
+// coordinates are outside the usual range. For example, tessellating the
+// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190).
+func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point {
+	pa := e.projection.Project(a)
+	if len(vertices) == 0 {
+		vertices = []r2.Point{pa}
+	} else {
+		pa = e.wrapDestination(vertices[len(vertices)-1], pa)
+	}
+
+	pb := e.wrapDestination(pa, e.projection.Project(b))
+	return e.appendProjected(pa, a, pb, b, vertices)
+}
+
+// appendProjected splits a geodesic edge AB as necessary and returns the
+// projected vertices appended to the given vertices.
+//
+// The maximum recursion depth is (math.Pi / MinTessellationTolerance) < 45
+func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []r2.Point) []r2.Point {
+	// It's impossible to robustly test whether a projected edge is close enough
+	// to a geodesic edge without knowing the details of the projection
+	// function, but the following heuristic works well for a wide range of map
+	// projections. The idea is simply to test whether the midpoint of the
+	// projected edge is close enough to the midpoint of the geodesic edge.
+	//
+	// This measures the distance between the two edges by treating them as
+	// parametric curves rather than geometric ones. The problem with
+	// measuring, say, the minimum distance from the projected midpoint to the
+	// geodesic edge is that this is a lower bound on the value we want, because
+	// the maximum separation between the two curves is generally not attained
+	// at the midpoint of the projected edge. The distance between the curve
+	// midpoints is at least an upper bound on the distance from either midpoint
+	// to opposite curve. It's not necessarily an upper bound on the maximum
+	// distance between the two curves, but it is a powerful requirement because
+	// it demands that the two curves stay parametrically close together. This
+	// turns out to be much more robust with respect for projections with
+	// singularities (e.g., the North and South poles in the rectangular and
+	// Mercator projections) because the curve parameterization speed changes
+	// rapidly near such singularities.
+	mid := Point{a.Add(b.Vector).Normalize()}
+	testMid := e.projection.Unproject(e.projection.Interpolate(0.5, pa, pb))
+
+	if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
+		return append(vertices, pb)
+	}
+
+	pmid := e.wrapDestination(pa, e.projection.Project(mid))
+	vertices = e.appendProjected(pa, a, pmid, mid, vertices)
+	return e.appendProjected(pmid, mid, pb, b, vertices)
+}
+
+// AppendUnprojected converts the planar edge AB in the given projection to a chain of
+// spherical geodesic edges and returns the vertices.
+//
+// Note that to construct a Loop, you must eliminate the duplicate first and last
+// vertex. Note also that if the given projection involves coordinate wrapping
+// (e.g. across the 180 degree meridian) then the first and last vertices may not
+// be exactly the same.
+func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point {
+	pb2 := e.wrapDestination(pa, pb)
+	a := e.projection.Unproject(pa)
+	b := e.projection.Unproject(pb)
+
+	if len(vertices) == 0 {
+		vertices = []Point{a}
+	}
+
+	// Note that coordinate wrapping can create a small amount of error. For
+	// example in the edge chain "0:-175, 0:179, 0:-177", the first edge is
+	// transformed into "0:-175, 0:-181" while the second is transformed into
+	// "0:179, 0:183". The two coordinate pairs for the middle vertex
+	// ("0:-181" and "0:179") may not yield exactly the same S2Point.
+	return e.appendUnprojected(pa, a, pb2, b, vertices)
+}
+
+// appendUnprojected interpolates a projected edge and appends the corresponding
+// points on the sphere.
+func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []Point) []Point {
+	pmid := e.projection.Interpolate(0.5, pa, pb)
+	mid := e.projection.Unproject(pmid)
+	testMid := Point{a.Add(b.Vector).Normalize()}
+
+	if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
+		return append(vertices, b)
+	}
+
+	vertices = e.appendUnprojected(pa, a, pmid, mid, vertices)
+	return e.appendUnprojected(pmid, mid, pb, b, vertices)
+}
+
+// wrapDestination returns the coordinates of the edge destination wrapped if
+// necessary to obtain the shortest edge.
+func (e *EdgeTessellator) wrapDestination(pa, pb r2.Point) r2.Point {
+	x := pb.X
+	y := pb.Y
+	// The code below ensures that pb is unmodified unless wrapping is required.
+	if e.wrapDistance.X > 0 && math.Abs(x-pa.X) > 0.5*e.wrapDistance.X {
+		x = pa.X + math.Remainder(x-pa.X, e.wrapDistance.X)
+	}
+	if e.wrapDistance.Y > 0 && math.Abs(y-pa.Y) > 0.5*e.wrapDistance.Y {
+		y = pa.Y + math.Remainder(y-pa.Y, e.wrapDistance.Y)
+	}
+	return r2.Point{x, y}
+}

+ 20 - 10
vendor/github.com/golang/geo/s2/loop.go

@@ -356,6 +356,21 @@ func (l *Loop) Intersects(o *Loop) bool {
 	return false
 	return false
 }
 }
 
 
+// Equal reports whether two loops have the same vertices in the same linear order
+// (i.e., cyclic rotations are not allowed).
+func (l *Loop) Equal(other *Loop) bool {
+	if len(l.vertices) != len(other.vertices) {
+		return false
+	}
+
+	for i, v := range l.vertices {
+		if v != other.Vertex(i) {
+			return false
+		}
+	}
+	return true
+}
+
 // BoundaryEqual reports whether the two loops have the same boundary. This is
 // BoundaryEqual reports whether the two loops have the same boundary. This is
 // true if and only if the loops have the same vertices in the same cyclic order
 // true if and only if the loops have the same vertices in the same cyclic order
 // (i.e., the vertices may be cyclically rotated). The empty and full loops are
 // (i.e., the vertices may be cyclically rotated). The empty and full loops are
@@ -451,11 +466,6 @@ func (l *Loop) ReferencePoint() ReferencePoint {
 	return OriginReferencePoint(l.originInside)
 	return OriginReferencePoint(l.originInside)
 }
 }
 
 
-// HasInterior returns true because all loops have an interior.
-func (l *Loop) HasInterior() bool {
-	return true
-}
-
 // NumEdges returns the number of edges in this shape.
 // NumEdges returns the number of edges in this shape.
 func (l *Loop) NumEdges() int {
 func (l *Loop) NumEdges() int {
 	if l.isEmptyOrFull() {
 	if l.isEmptyOrFull() {
@@ -493,8 +503,10 @@ func (l *Loop) ChainPosition(edgeID int) ChainPosition {
 	return ChainPosition{0, edgeID}
 	return ChainPosition{0, edgeID}
 }
 }
 
 
-// dimension returns the dimension of the geometry represented by this Loop.
-func (l *Loop) dimension() dimension { return polygonGeometry }
+// Dimension returns the dimension of the geometry represented by this Loop.
+func (l *Loop) Dimension() int { return 2 }
+
+func (l *Loop) privateInterface() {}
 
 
 // IsEmpty reports true if this is the special empty loop that contains no points.
 // IsEmpty reports true if this is the special empty loop that contains no points.
 func (l *Loop) IsEmpty() bool {
 func (l *Loop) IsEmpty() bool {
@@ -550,7 +562,7 @@ func (l *Loop) OrientedVertex(i int) Point {
 	if l.IsHole() {
 	if l.IsHole() {
 		j = len(l.vertices) - 1 - j
 		j = len(l.vertices) - 1 - j
 	}
 	}
-	return l.Vertex(i)
+	return l.Vertex(j)
 }
 }
 
 
 // NumVertices returns the number of vertices in this loop.
 // NumVertices returns the number of vertices in this loop.
@@ -1798,7 +1810,5 @@ func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool
 // DistanceToBoundary
 // DistanceToBoundary
 // Project
 // Project
 // ProjectToBoundary
 // ProjectToBoundary
-// Equal
-// BoundaryEqual
 // BoundaryApproxEqual
 // BoundaryApproxEqual
 // BoundaryNear
 // BoundaryNear

+ 41 - 0
vendor/github.com/golang/geo/s2/point_vector.go

@@ -0,0 +1,41 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// Shape interface enforcement
+var (
+	_ Shape = (*PointVector)(nil)
+)
+
+// PointVector is a Shape representing a set of Points. Each point
+// is represented as a degenerate point with the same starting and ending
+// vertices.
+//
+// This type is useful for adding a collection of points to a ShapeIndex.
+//
+// Its methods are on *PointVector due to implementation details of ShapeIndex.
+type PointVector []Point
+
+func (p *PointVector) NumEdges() int                     { return len(*p) }
+func (p *PointVector) Edge(i int) Edge                   { return Edge{(*p)[i], (*p)[i]} }
+func (p *PointVector) ReferencePoint() ReferencePoint    { return OriginReferencePoint(false) }
+func (p *PointVector) NumChains() int                    { return len(*p) }
+func (p *PointVector) Chain(i int) Chain                 { return Chain{i, 1} }
+func (p *PointVector) ChainEdge(i, j int) Edge           { return Edge{(*p)[i], (*p)[j]} }
+func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{e, 0} }
+func (p *PointVector) Dimension() int                    { return 0 }
+func (p *PointVector) IsEmpty() bool                     { return defaultShapeIsEmpty(p) }
+func (p *PointVector) IsFull() bool                      { return defaultShapeIsFull(p) }
+func (p *PointVector) privateInterface()                 {}

+ 158 - 56
vendor/github.com/golang/geo/s2/polygon.go

@@ -17,6 +17,7 @@ package s2
 import (
 import (
 	"fmt"
 	"fmt"
 	"io"
 	"io"
+	"math"
 )
 )
 
 
 // Polygon represents a sequence of zero or more loops; recall that the
 // Polygon represents a sequence of zero or more loops; recall that the
@@ -97,61 +98,15 @@ func PolygonFromLoops(loops []*Loop) *Polygon {
 	p.loops = loops
 	p.loops = loops
 	p.initNested()
 	p.initNested()
 	return p
 	return p
-
 }
 }
 
 
-// TODO(roberts): Implement initOriented
-/*
-// PolygonFromOrientedLoops, like PolygonFromLoops, returns a Polygon from the
-// given set of loops. It expects loops to be oriented such that the polygon
+// PolygonFromOrientedLoops returns a Polygon from the given set of loops,
+// like PolygonFromLoops. It expects loops to be oriented such that the polygon
 // interior is on the left-hand side of all loops. This implies that shells
 // interior is on the left-hand side of all loops. This implies that shells
 // and holes should have opposite orientations in the input to this method.
 // and holes should have opposite orientations in the input to this method.
 // (During initialization, loops representing holes will automatically be
 // (During initialization, loops representing holes will automatically be
 // inverted.)
 // inverted.)
 func PolygonFromOrientedLoops(loops []*Loop) *Polygon {
 func PolygonFromOrientedLoops(loops []*Loop) *Polygon {
-	panic("PolygonFromOrientedLoops not yet implemented")
-	p := &Polygon{
-		loops: loops,
-	}
-	p.initOriented()
-	return p
-}
-*/
-
-// PolygonFromCell returns a Polygon from a single loop created from the given Cell.
-func PolygonFromCell(cell Cell) *Polygon {
-	return PolygonFromLoops([]*Loop{LoopFromCell(cell)})
-}
-
-// initNested takes the set of loops in this polygon and performs the nesting
-// computations to set the proper nesting and parent/child relationships.
-func (p *Polygon) initNested() {
-	if len(p.loops) == 1 {
-		p.initOneLoop()
-		return
-	}
-
-	lm := make(loopMap)
-
-	for _, l := range p.loops {
-		lm.insertLoop(l, nil)
-	}
-	// The loops have all been added to the loopMap for ordering. Clear the
-	// loops slice because we add all the loops in-order in initLoops.
-	p.loops = nil
-
-	// Reorder the loops in depth-first traversal order.
-	p.initLoops(lm)
-	p.initLoopProperties()
-}
-
-// initOriented takes the loops in this polygon and performs the nesting
-// computations. It expects the loops to be oriented such that the polygon
-// interior is on the left-hand side of all loops. This implies that shells
-// and holes should have opposite orientations in the input to this method.
-// (During initialization, loops representing holes will automatically be
-// inverted.)
-func (p *Polygon) initOriented() {
 	// Here is the algorithm:
 	// Here is the algorithm:
 	//
 	//
 	// 1. Remember which of the given loops contain OriginPoint.
 	// 1. Remember which of the given loops contain OriginPoint.
@@ -187,7 +142,157 @@ func (p *Polygon) initOriented() {
 	//    that because we normalized all the loops initially, this step is only
 	//    that because we normalized all the loops initially, this step is only
 	//    necessary if the polygon requires at least one non-normalized loop to
 	//    necessary if the polygon requires at least one non-normalized loop to
 	//    represent it.
 	//    represent it.
-	panic("initOriented not yet implemented")
+
+	containedOrigin := make(map[*Loop]bool)
+	for _, l := range loops {
+		containedOrigin[l] = l.ContainsOrigin()
+	}
+
+	for _, l := range loops {
+		angle := l.TurningAngle()
+		if math.Abs(angle) > l.turningAngleMaxError() {
+			// Normalize the loop.
+			if angle < 0 {
+				l.Invert()
+			}
+		} else {
+			// Ensure that the loop does not contain the origin.
+			if l.ContainsOrigin() {
+				l.Invert()
+			}
+		}
+	}
+
+	p := PolygonFromLoops(loops)
+
+	if p.NumLoops() > 0 {
+		originLoop := p.Loop(0)
+		polygonContainsOrigin := false
+		for _, l := range p.Loops() {
+			if l.ContainsOrigin() {
+				polygonContainsOrigin = !polygonContainsOrigin
+
+				originLoop = l
+			}
+		}
+		if containedOrigin[originLoop] != polygonContainsOrigin {
+			p.Invert()
+		}
+	}
+
+	return p
+}
+
+// Invert inverts the polygon (replaces it by its complement).
+func (p *Polygon) Invert() {
+	// Inverting any one loop will invert the polygon.  The best loop to invert
+	// is the one whose area is largest, since this yields the smallest area
+	// after inversion. The loop with the largest area is always at depth 0.
+	// The descendants of this loop all have their depth reduced by 1, while the
+	// former siblings of this loop all have their depth increased by 1.
+
+	// The empty and full polygons are handled specially.
+	if p.IsEmpty() {
+		*p = *FullPolygon()
+		return
+	}
+	if p.IsFull() {
+		*p = Polygon{}
+		return
+	}
+
+	// Find the loop whose area is largest (i.e., whose turning angle is
+	// smallest), minimizing calls to TurningAngle(). In particular, for
+	// polygons with a single shell at level 0 there is no need to call
+	// TurningAngle() at all. (This method is relatively expensive.)
+	best := 0
+	const none = 10.0 // Flag that means "not computed yet"
+	bestAngle := none
+	for i := 1; i < p.NumLoops(); i++ {
+		if p.Loop(i).depth != 0 {
+			continue
+		}
+		// We defer computing the turning angle of loop 0 until we discover
+		// that the polygon has another top-level shell.
+		if bestAngle == none {
+			bestAngle = p.Loop(best).TurningAngle()
+		}
+		angle := p.Loop(i).TurningAngle()
+		// We break ties deterministically in order to avoid having the output
+		// depend on the input order of the loops.
+		if angle < bestAngle || (angle == bestAngle && compareLoops(p.Loop(i), p.Loop(best)) < 0) {
+			best = i
+			bestAngle = angle
+		}
+	}
+	// Build the new loops vector, starting with the inverted loop.
+	p.Loop(best).Invert()
+	newLoops := make([]*Loop, 0, p.NumLoops())
+	// Add the former siblings of this loop as descendants.
+	lastBest := p.LastDescendant(best)
+	newLoops = append(newLoops, p.Loop(best))
+	for i, l := range p.Loops() {
+		if i < best || i > lastBest {
+			l.depth++
+			newLoops = append(newLoops, l)
+		}
+	}
+	// Add the former children of this loop as siblings.
+	for i, l := range p.Loops() {
+		if i > best && i <= lastBest {
+			l.depth--
+			newLoops = append(newLoops, l)
+		}
+	}
+	p.loops = newLoops
+	p.initLoopProperties()
+}
+
+// Defines a total ordering on Loops that does not depend on the cyclic
+// order of loop vertices. This function is used to choose which loop to
+// invert in the case where several loops have exactly the same area.
+func compareLoops(a, b *Loop) int {
+	if na, nb := a.NumVertices(), b.NumVertices(); na != nb {
+		return na - nb
+	}
+	ai, aDir := a.CanonicalFirstVertex()
+	bi, bDir := b.CanonicalFirstVertex()
+	if aDir != bDir {
+		return aDir - bDir
+	}
+	for n := a.NumVertices() - 1; n >= 0; n, ai, bi = n-1, ai+aDir, bi+bDir {
+		if cmp := a.Vertex(ai).Cmp(b.Vertex(bi).Vector); cmp != 0 {
+			return cmp
+		}
+	}
+	return 0
+}
+
+// PolygonFromCell returns a Polygon from a single loop created from the given Cell.
+func PolygonFromCell(cell Cell) *Polygon {
+	return PolygonFromLoops([]*Loop{LoopFromCell(cell)})
+}
+
+// initNested takes the set of loops in this polygon and performs the nesting
+// computations to set the proper nesting and parent/child relationships.
+func (p *Polygon) initNested() {
+	if len(p.loops) == 1 {
+		p.initOneLoop()
+		return
+	}
+
+	lm := make(loopMap)
+
+	for _, l := range p.loops {
+		lm.insertLoop(l, nil)
+	}
+	// The loops have all been added to the loopMap for ordering. Clear the
+	// loops slice because we add all the loops in-order in initLoops.
+	p.loops = nil
+
+	// Reorder the loops in depth-first traversal order.
+	p.initLoops(lm)
+	p.initLoopProperties()
 }
 }
 
 
 // loopMap is a map of a loop to its immediate children with respect to nesting.
 // loopMap is a map of a loop to its immediate children with respect to nesting.
@@ -646,11 +751,6 @@ func (p *Polygon) Edge(e int) Edge {
 	return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)}
 	return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)}
 }
 }
 
 
-// HasInterior reports whether this Polygon has an interior.
-func (p *Polygon) HasInterior() bool {
-	return p.dimension() == polygonGeometry
-}
-
 // ReferencePoint returns the reference point for this polygon.
 // ReferencePoint returns the reference point for this polygon.
 func (p *Polygon) ReferencePoint() ReferencePoint {
 func (p *Polygon) ReferencePoint() ReferencePoint {
 	containsOrigin := false
 	containsOrigin := false
@@ -711,8 +811,10 @@ func (p *Polygon) ChainPosition(edgeID int) ChainPosition {
 	return ChainPosition{i, edgeID}
 	return ChainPosition{i, edgeID}
 }
 }
 
 
-// dimension returns the dimension of the geometry represented by this Polygon.
-func (p *Polygon) dimension() dimension { return polygonGeometry }
+// Dimension returns the dimension of the geometry represented by this Polygon.
+func (p *Polygon) Dimension() int { return 2 }
+
+func (p *Polygon) privateInterface() {}
 
 
 // Contains reports whether this polygon contains the other polygon.
 // Contains reports whether this polygon contains the other polygon.
 // Specifically, it reports whether all the points in the other polygon
 // Specifically, it reports whether all the points in the other polygon

+ 83 - 11
vendor/github.com/golang/geo/s2/polyline.go

@@ -164,11 +164,6 @@ func (p *Polyline) Edge(i int) Edge {
 	return Edge{(*p)[i], (*p)[i+1]}
 	return Edge{(*p)[i], (*p)[i+1]}
 }
 }
 
 
-// HasInterior returns false as Polylines are not closed.
-func (p *Polyline) HasInterior() bool {
-	return false
-}
-
 // ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
 // ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
 func (p *Polyline) ReferencePoint() ReferencePoint {
 func (p *Polyline) ReferencePoint() ReferencePoint {
 	return OriginReferencePoint(false)
 	return OriginReferencePoint(false)
@@ -194,8 +189,8 @@ func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
 	return ChainPosition{0, edgeID}
 	return ChainPosition{0, edgeID}
 }
 }
 
 
-// dimension returns the dimension of the geometry represented by this Polyline.
-func (p *Polyline) dimension() dimension { return polylineGeometry }
+// Dimension returns the dimension of the geometry represented by this Polyline.
+func (p *Polyline) Dimension() int { return 1 }
 
 
 // IsEmpty reports whether this shape contains no points.
 // IsEmpty reports whether this shape contains no points.
 func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
 func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
@@ -203,6 +198,8 @@ func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
 // IsFull reports whether this shape contains all points on the sphere.
 // IsFull reports whether this shape contains all points on the sphere.
 func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
 func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
 
 
+func (p *Polyline) privateInterface() {}
+
 // findEndVertex reports the maximal end index such that the line segment between
 // findEndVertex reports the maximal end index such that the line segment between
 // the start index and this one such that the line segment between these two
 // the start index and this one such that the line segment between these two
 // vertices passes within the given tolerance of all interior vertices, in order.
 // vertices passes within the given tolerance of all interior vertices, in order.
@@ -384,13 +381,88 @@ func (p *Polyline) decode(d decoder) {
 	}
 	}
 }
 }
 
 
+// Project returns a point on the polyline that is closest to the given point,
+// and the index of the next vertex after the projected point. The
+// value of that index is always in the range [1, len(polyline)].
+// The polyline must not be empty.
+func (p *Polyline) Project(point Point) (Point, int) {
+	if len(*p) == 1 {
+		// If there is only one vertex, it is always closest to any given point.
+		return (*p)[0], 1
+	}
+
+	// Initial value larger than any possible distance on the unit sphere.
+	minDist := 10 * s1.Radian
+	minIndex := -1
+
+	// Find the line segment in the polyline that is closest to the point given.
+	for i := 1; i < len(*p); i++ {
+		if dist := DistanceFromSegment(point, (*p)[i-1], (*p)[i]); dist < minDist {
+			minDist = dist
+			minIndex = i
+		}
+	}
+
+	// Compute the point on the segment found that is closest to the point given.
+	closest := Project(point, (*p)[minIndex-1], (*p)[minIndex])
+	if closest == (*p)[minIndex] {
+		minIndex++
+	}
+
+	return closest, minIndex
+}
+
+// IsOnRight reports whether the point given is on the right hand side of the
+// polyline, using a naive definition of "right-hand-sideness" where the point
+// is on the RHS of the polyline iff the point is on the RHS of the line segment
+// in the polyline which it is closest to.
+// The polyline must have at least 2 vertices.
+func (p *Polyline) IsOnRight(point Point) bool {
+	// If the closest point C is an interior vertex of the polyline, let B and D
+	// be the previous and next vertices. The given point P is on the right of
+	// the polyline (locally) if B, P, D are ordered CCW around vertex C.
+	closest, next := p.Project(point)
+	if closest == (*p)[next-1] && next > 1 && next < len(*p) {
+		if point == (*p)[next-1] {
+			// Polyline vertices are not on the RHS.
+			return false
+		}
+		return OrderedCCW((*p)[next-2], point, (*p)[next], (*p)[next-1])
+	}
+	// Otherwise, the closest point C is incident to exactly one polyline edge.
+	// We test the point P against that edge.
+	if next == len(*p) {
+		next--
+	}
+	return Sign(point, (*p)[next], (*p)[next-1])
+}
+
+// Validate checks whether this is a valid polyline or not.
+func (p *Polyline) Validate() error {
+	// All vertices must be unit length.
+	for i, pt := range *p {
+		if !pt.IsUnit() {
+			return fmt.Errorf("vertex %d is not unit length", i)
+		}
+	}
+
+	// Adjacent vertices must not be identical or antipodal.
+	for i := 1; i < len(*p); i++ {
+		prev, cur := (*p)[i-1], (*p)[i]
+		if prev == cur {
+			return fmt.Errorf("vertices %d and %d are identical", i-1, i)
+		}
+		if prev == (Point{cur.Mul(-1)}) {
+			return fmt.Errorf("vertices %d and %d are antipodal", i-1, i)
+		}
+	}
+
+	return nil
+}
+
 // TODO(roberts): Differences from C++.
 // TODO(roberts): Differences from C++.
-// IsValid
 // Suffix
 // Suffix
 // Interpolate/UnInterpolate
 // Interpolate/UnInterpolate
-// Project
-// IsPointOnRight
 // Intersects(Polyline)
 // Intersects(Polyline)
-// Reverse
 // ApproxEqual
 // ApproxEqual
 // NearlyCoversPolyline
 // NearlyCoversPolyline

+ 119 - 13
vendor/github.com/golang/geo/s2/predicates.go

@@ -28,14 +28,21 @@ import (
 	"math/big"
 	"math/big"
 
 
 	"github.com/golang/geo/r3"
 	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
 )
 )
 
 
 const (
 const (
+	// If any other machine architectures need to be supported, these next three
+	// values will need to be updated.
+
 	// epsilon is a small number that represents a reasonable level of noise between two
 	// epsilon is a small number that represents a reasonable level of noise between two
 	// values that can be considered to be equal.
 	// values that can be considered to be equal.
 	epsilon = 1e-15
 	epsilon = 1e-15
 	// dblEpsilon is a smaller number for values that require more precision.
 	// dblEpsilon is a smaller number for values that require more precision.
+	// This is the C++ DBL_EPSILON equivalent.
 	dblEpsilon = 2.220446049250313e-16
 	dblEpsilon = 2.220446049250313e-16
+	// dblError is the C++ value for S2 rounding_epsilon().
+	dblError = 1.110223024625156e-16
 
 
 	// maxDeterminantError is the maximum error in computing (AxB).C where all vectors
 	// maxDeterminantError is the maximum error in computing (AxB).C where all vectors
 	// are unit length. Using standard inequalities, it can be shown that
 	// are unit length. Using standard inequalities, it can be shown that
@@ -76,6 +83,9 @@ const (
 	CounterClockwise           = 1
 	CounterClockwise           = 1
 )
 )
 
 
+// newBigFloat constructs a new big.Float with maximum precision.
+func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) }
+
 // Sign returns true if the points A, B, C are strictly counterclockwise,
 // Sign returns true if the points A, B, C are strictly counterclockwise,
 // and returns false if the points are clockwise or collinear (i.e. if they are all
 // and returns false if the points are clockwise or collinear (i.e. if they are all
 // contained on some great circle).
 // contained on some great circle).
@@ -362,7 +372,7 @@ func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
 		return Direction(detSign)
 		return Direction(detSign)
 	}
 	}
 
 
-	detSign = new(big.Float).Sub(new(big.Float).Mul(c.X, a.Y), new(big.Float).Mul(c.Y, a.X)).Sign() // db.Z
+	detSign = newBigFloat().Sub(newBigFloat().Mul(c.X, a.Y), newBigFloat().Mul(c.Y, a.X)).Sign() // db.Z
 	if detSign != 0 {
 	if detSign != 0 {
 		return Direction(detSign)
 		return Direction(detSign)
 	}
 	}
@@ -375,7 +385,7 @@ func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
 		return Direction(detSign)
 		return Direction(detSign)
 	}
 	}
 
 
-	detSign = new(big.Float).Sub(new(big.Float).Mul(c.Z, a.X), new(big.Float).Mul(c.X, a.Z)).Sign() // db.Y
+	detSign = newBigFloat().Sub(newBigFloat().Mul(c.Z, a.X), newBigFloat().Mul(c.X, a.Z)).Sign() // db.Y
 	if detSign != 0 {
 	if detSign != 0 {
 		return Direction(detSign)
 		return Direction(detSign)
 	}
 	}
@@ -388,7 +398,7 @@ func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
 	// the previous tests guarantee that C == (0, 0, 0).
 	// the previous tests guarantee that C == (0, 0, 0).
 	// (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
 	// (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
 
 
-	detSign = new(big.Float).Sub(new(big.Float).Mul(a.X, b.Y), new(big.Float).Mul(a.Y, b.X)).Sign() // dc.Z
+	detSign = newBigFloat().Sub(newBigFloat().Mul(a.X, b.Y), newBigFloat().Mul(a.Y, b.X)).Sign() // dc.Z
 	if detSign != 0 {
 	if detSign != 0 {
 		return Direction(detSign)
 		return Direction(detSign)
 	}
 	}
@@ -462,7 +472,7 @@ func CompareDistances(x, a, b Point) int {
 // maximum error amount in the result. This requires X and Y be normalized.
 // maximum error amount in the result. This requires X and Y be normalized.
 func cosDistance(x, y Point) (cos, err float64) {
 func cosDistance(x, y Point) (cos, err float64) {
 	cos = x.Dot(y.Vector)
 	cos = x.Dot(y.Vector)
-	return cos, 9.5*dblEpsilon*math.Abs(cos) + 1.5*dblEpsilon
+	return cos, 9.5*dblError*math.Abs(cos) + 1.5*dblError
 }
 }
 
 
 // sin2Distance returns sin**2(XY), where XY is the angle between X and Y,
 // sin2Distance returns sin**2(XY), where XY is the angle between X and Y,
@@ -470,13 +480,13 @@ func cosDistance(x, y Point) (cos, err float64) {
 func sin2Distance(x, y Point) (sin2, err float64) {
 func sin2Distance(x, y Point) (sin2, err float64) {
 	// The (x-y).Cross(x+y) trick eliminates almost all of error due to x
 	// The (x-y).Cross(x+y) trick eliminates almost all of error due to x
 	// and y being not quite unit length. This method is extremely accurate
 	// and y being not quite unit length. This method is extremely accurate
-	// for small distances; the *relative* error in the result is O(dblEpsilon) for
-	// distances as small as dblEpsilon.
+	// for small distances; the *relative* error in the result is O(dblError) for
+	// distances as small as dblError.
 	n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
 	n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
 	sin2 = 0.25 * n.Norm2()
 	sin2 = 0.25 * n.Norm2()
-	err = ((21+4*math.Sqrt(3))*dblEpsilon*sin2 +
-		32*math.Sqrt(3)*dblEpsilon*dblEpsilon*math.Sqrt(sin2) +
-		768*dblEpsilon*dblEpsilon*dblEpsilon*dblEpsilon)
+	err = ((21+4*math.Sqrt(3))*dblError*sin2 +
+		32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) +
+		768*dblError*dblError*dblError*dblError)
 	return sin2, err
 	return sin2, err
 }
 }
 
 
@@ -533,9 +543,9 @@ func exactCompareDistances(x, a, b r3.PreciseVector) int {
 		}
 		}
 		return 1
 		return 1
 	}
 	}
-	cosAX2 := new(big.Float).Mul(cosAX, cosAX)
-	cosBX2 := new(big.Float).Mul(cosBX, cosBX)
-	cmp := new(big.Float).Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2()))
+	cosAX2 := newBigFloat().Mul(cosAX, cosAX)
+	cosBX2 := newBigFloat().Mul(cosBX, cosBX)
+	cmp := newBigFloat().Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2()))
 	return aSign * cmp.Sign()
 	return aSign * cmp.Sign()
 }
 }
 
 
@@ -570,8 +580,104 @@ func symbolicCompareDistances(x, a, b Point) int {
 	}
 	}
 }
 }
 
 
+var (
+	// ca45Degrees is a predefined ChordAngle representing (approximately) 45 degrees.
+	ca45Degrees = s1.ChordAngleFromSquaredLength(2 - math.Sqrt2)
+)
+
+// CompareDistance returns -1, 0, or +1 according to whether the distance XY is
+// respectively less than, equal to, or greater than the provided chord angle. Distances are measured
+// with respect to the positions of all points as though they are projected to lie
+// exactly on the surface of the unit sphere.
+func CompareDistance(x, y Point, r s1.ChordAngle) int {
+	// As with CompareDistances, we start by comparing dot products because
+	// the sin^2 method is only valid when the distance XY and the limit "r" are
+	// both less than 90 degrees.
+	sign := triageCompareCosDistance(x, y, float64(r))
+	if sign != 0 {
+		return sign
+	}
+
+	// Unlike with CompareDistances, it's not worth using the sin^2 method
+	// when the distance limit is near 180 degrees because the ChordAngle
+	// representation itself has a rounding error of up to 2e-8 radians for
+	// distances near 180 degrees.
+	if r < ca45Degrees {
+		sign = triageCompareSin2Distance(x, y, float64(r))
+		if sign != 0 {
+			return sign
+		}
+	}
+	return exactCompareDistance(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(y.Vector), big.NewFloat(float64(r)).SetPrec(big.MaxPrec))
+}
+
+// triageCompareCosDistance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using cos distance.
+func triageCompareCosDistance(x, y Point, r2 float64) int {
+	cosXY, cosXYError := cosDistance(x, y)
+	cosR := 1.0 - 0.5*r2
+	cosRError := 2.0 * dblError * cosR
+	diff := cosXY - cosR
+	err := cosXYError + cosRError
+	if diff > err {
+		return -1
+	}
+	if diff < -err {
+		return 1
+	}
+	return 0
+}
+
+// triageCompareSin2Distance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using sin^2 distance.
+func triageCompareSin2Distance(x, y Point, r2 float64) int {
+	// Only valid for distance limits < 90 degrees.
+	sin2XY, sin2XYError := sin2Distance(x, y)
+	sin2R := r2 * (1.0 - 0.25*r2)
+	sin2RError := 3.0 * dblError * sin2R
+	diff := sin2XY - sin2R
+	err := sin2XYError + sin2RError
+	if diff > err {
+		return 1
+	}
+	if diff < -err {
+		return -1
+	}
+	return 0
+}
+
+var (
+	bigOne  = big.NewFloat(1.0).SetPrec(big.MaxPrec)
+	bigHalf = big.NewFloat(0.5).SetPrec(big.MaxPrec)
+)
+
+// exactCompareDistance returns -1, 0, or +1 after comparing using PreciseVectors.
+func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int {
+	// This code produces the same result as though all points were reprojected
+	// to lie exactly on the surface of the unit sphere.  It is based on
+	// comparing the cosine of the angle XY (when both points are projected to
+	// lie exactly on the sphere) to the given threshold.
+	cosXY := x.Dot(y)
+	cosR := newBigFloat().Sub(bigOne, newBigFloat().Mul(bigHalf, r2))
+
+	// If the two values have different signs, we need to handle that case now
+	// before squaring them below.
+	xySign := cosXY.Sign()
+	rSign := cosR.Sign()
+	if xySign != rSign {
+		if xySign > rSign {
+			return -1
+		}
+		return 1 // If cos(XY) > cos(r), then XY < r.
+	}
+	cmp := newBigFloat().Sub(
+		newBigFloat().Mul(
+			newBigFloat().Mul(cosR, cosR), newBigFloat().Mul(x.Norm2(), y.Norm2())),
+		newBigFloat().Mul(cosXY, cosXY))
+	return xySign * cmp.Sign()
+}
+
 // TODO(roberts): Differences from C++
 // TODO(roberts): Differences from C++
-// CompareDistance
 // CompareEdgeDistance
 // CompareEdgeDistance
 // CompareEdgeDirections
 // CompareEdgeDirections
 // EdgeCircumcenterSign
 // EdgeCircumcenterSign

+ 2 - 2
vendor/github.com/golang/geo/s2/projections.go

@@ -111,8 +111,8 @@ func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point {
 // ToLatLng returns the LatLng projected from the given R2 Point.
 // ToLatLng returns the LatLng projected from the given R2 Point.
 func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng {
 func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng {
 	return LatLng{
 	return LatLng{
-		Lat: s1.Angle(p.fromRadians * pt.Y),
-		Lng: s1.Angle(p.fromRadians * math.Remainder(pt.X, p.xWrap)),
+		Lat: s1.Angle(p.toRadians * pt.Y),
+		Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)),
 	}
 	}
 }
 }
 
 

+ 47 - 3
vendor/github.com/golang/geo/s2/regioncoverer.go

@@ -32,7 +32,7 @@ import (
 //
 //
 // For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used.
 // For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used.
 // This effectively allows the branching factor of the S2 CellID hierarchy to be increased.
 // This effectively allows the branching factor of the S2 CellID hierarchy to be increased.
-// Currently the only parameter values allowed are 0/1, 2, or 3, corresponding to
+// Currently the only parameter values allowed are 1, 2, or 3, corresponding to
 // branching factors of 4, 16, and 64 respectively.
 // branching factors of 4, 16, and 64 respectively.
 //
 //
 // Note the following:
 // Note the following:
@@ -293,7 +293,7 @@ func (c *coverer) coveringInternal(region Region) {
 		// For exterior covering we cannot do this, because result has to cover the
 		// For exterior covering we cannot do this, because result has to cover the
 		// whole region, so all children have to be used.
 		// whole region, so all children have to be used.
 		// candidate.numChildren == 1 case takes care of the situation when we
 		// candidate.numChildren == 1 case takes care of the situation when we
-		// already have more then MaxCells in result (minLevel is too high).
+		// already have more than MaxCells in result (minLevel is too high).
 		// Subdividing of the candidate with one child does no harm in this case.
 		// Subdividing of the candidate with one child does no harm in this case.
 		if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells {
 		if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells {
 			for _, child := range cand.children {
 			for _, child := range cand.children {
@@ -430,4 +430,48 @@ func (c *coverer) normalizeCovering(covering *CellUnion) {
 	}
 	}
 }
 }
 
 
-// BUG(akashagrawal): The differences from the C++ version FloodFill, SimpleCovering
+// SimpleRegionCovering returns a set of cells at the given level that cover
+// the connected region and a starting point on the boundary or inside the
+// region. The cells are returned in arbitrary order.
+//
+// Note that this method is not faster than the regular Covering
+// method for most region types, such as Cap or Polygon, and in fact it
+// can be much slower when the output consists of a large number of cells.
+// Currently it can be faster at generating coverings of long narrow regions
+// such as polylines, but this may change in the future.
+func SimpleRegionCovering(region Region, start Point, level int) []CellID {
+	return FloodFillRegionCovering(region, cellIDFromPoint(start).Parent(level))
+}
+
+// FloodFillRegionCovering returns all edge-connected cells at the same level as
+// the given CellID that intersect the given region, in arbitrary order.
+func FloodFillRegionCovering(region Region, start CellID) []CellID {
+	var output []CellID
+	all := map[CellID]bool{
+		start: true,
+	}
+	frontier := []CellID{start}
+	for len(frontier) > 0 {
+		id := frontier[len(frontier)-1]
+		frontier = frontier[:len(frontier)-1]
+		if !region.IntersectsCell(CellFromCellID(id)) {
+			continue
+		}
+		output = append(output, id)
+		for _, nbr := range id.EdgeNeighbors() {
+			if !all[nbr] {
+				all[nbr] = true
+				frontier = append(frontier, nbr)
+			}
+		}
+	}
+
+	return output
+}
+
+// TODO(roberts): The differences from the C++ version
+// finish up FastCovering to match C++
+// IsCanonical
+// CanonicalizeCovering
+// containsAllChildren
+// replaceCellsWithAncestor

+ 13 - 21
vendor/github.com/golang/geo/s2/shape.go

@@ -18,15 +18,6 @@ import (
 	"sort"
 	"sort"
 )
 )
 
 
-// dimension defines the types of geometry dimensions that a Shape supports.
-type dimension int
-
-const (
-	pointGeometry dimension = iota
-	polylineGeometry
-	polygonGeometry
-)
-
 // Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
 // Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
 // allowed, and can be used to represent points.
 // allowed, and can be used to represent points.
 type Edge struct {
 type Edge struct {
@@ -157,9 +148,6 @@ type Shape interface {
 	// Edge returns the edge for the given edge index.
 	// Edge returns the edge for the given edge index.
 	Edge(i int) Edge
 	Edge(i int) Edge
 
 
-	// HasInterior reports whether this shape has an interior.
-	HasInterior() bool
-
 	// ReferencePoint returns an arbitrary reference point for the shape. (The
 	// ReferencePoint returns an arbitrary reference point for the shape. (The
 	// containment boolean value must be false for shapes that do not have an interior.)
 	// containment boolean value must be false for shapes that do not have an interior.)
 	//
 	//
@@ -202,14 +190,15 @@ type Shape interface {
 	// where pos == shape.ChainPosition(edgeID).
 	// where pos == shape.ChainPosition(edgeID).
 	ChainPosition(edgeID int) ChainPosition
 	ChainPosition(edgeID int) ChainPosition
 
 
-	// dimension returns the dimension of the geometry represented by this shape.
+	// Dimension returns the dimension of the geometry represented by this shape,
+	// either 0, 1 or 2 for point, polyline and polygon geometry respectively.
 	//
 	//
-	//  pointGeometry: Each point is represented as a degenerate edge.
+	//  0 - Point geometry. Each point is represented as a degenerate edge.
 	//
 	//
-	//  polylineGeometry:  Polyline edges may be degenerate. A shape may
+	//  1 - Polyline geometry. Polyline edges may be degenerate. A shape may
 	//      represent any number of polylines. Polylines edges may intersect.
 	//      represent any number of polylines. Polylines edges may intersect.
 	//
 	//
-	//  polygonGeometry:  Edges should be oriented such that the polygon
+	//  2 - Polygon geometry. Edges should be oriented such that the polygon
 	//      interior is always on the left. In theory the edges may be returned
 	//      interior is always on the left. In theory the edges may be returned
 	//      in any order, but typically the edges are organized as a collection
 	//      in any order, but typically the edges are organized as a collection
 	//      of edge chains where each chain represents one polygon loop.
 	//      of edge chains where each chain represents one polygon loop.
@@ -217,12 +206,12 @@ type Shape interface {
 	//      pairs consisting of an edge and its corresponding reversed edge).
 	//      pairs consisting of an edge and its corresponding reversed edge).
 	//      A polygon loop may also be full (containing all points on the
 	//      A polygon loop may also be full (containing all points on the
 	//      sphere); by convention this is represented as a chain with no edges.
 	//      sphere); by convention this is represented as a chain with no edges.
-	//      (See laxPolygon for more details.)
+	//      (See laxPolygon for details.)
 	//
 	//
-	// Note that this method allows degenerate geometry of different dimensions
+	// This method allows degenerate geometry of different dimensions
 	// to be distinguished, e.g. it allows a point to be distinguished from a
 	// to be distinguished, e.g. it allows a point to be distinguished from a
 	// polyline or polygon that has been simplified to a single point.
 	// polyline or polygon that has been simplified to a single point.
-	dimension() dimension
+	Dimension() int
 
 
 	// IsEmpty reports whether the Shape contains no points. (Note that the full
 	// IsEmpty reports whether the Shape contains no points. (Note that the full
 	// polygon is represented as a chain with zero edges.)
 	// polygon is represented as a chain with zero edges.)
@@ -230,16 +219,19 @@ type Shape interface {
 
 
 	// IsFull reports whether the Shape contains all points on the sphere.
 	// IsFull reports whether the Shape contains all points on the sphere.
 	IsFull() bool
 	IsFull() bool
+
+	// We do not support implementations of this interface outside this package.
+	privateInterface()
 }
 }
 
 
 // defaultShapeIsEmpty reports whether this shape contains no points.
 // defaultShapeIsEmpty reports whether this shape contains no points.
 func defaultShapeIsEmpty(s Shape) bool {
 func defaultShapeIsEmpty(s Shape) bool {
-	return s.NumEdges() == 0 && (!s.HasInterior() || s.NumChains() == 0)
+	return s.NumEdges() == 0 && (s.Dimension() != 2 || s.NumChains() == 0)
 }
 }
 
 
 // defaultShapeIsFull reports whether this shape contains all points on the sphere.
 // defaultShapeIsFull reports whether this shape contains all points on the sphere.
 func defaultShapeIsFull(s Shape) bool {
 func defaultShapeIsFull(s Shape) bool {
-	return s.NumEdges() == 0 && s.HasInterior() && s.NumChains() > 0
+	return s.NumEdges() == 0 && s.Dimension() == 2 && s.NumChains() > 0
 }
 }
 
 
 // A minimal check for types that should satisfy the Shape interface.
 // A minimal check for types that should satisfy the Shape interface.

+ 39 - 10
vendor/github.com/golang/geo/s2/shapeindex.go

@@ -130,6 +130,8 @@ func (s *ShapeIndexCell) numEdges() int {
 
 
 // add adds the given clipped shape to this index cell.
 // add adds the given clipped shape to this index cell.
 func (s *ShapeIndexCell) add(c *clippedShape) {
 func (s *ShapeIndexCell) add(c *clippedShape) {
+	// C++ uses a set, so it's ordered and unique. We don't currently catch
+	// the case when a duplicate value is added.
 	s.shapes = append(s.shapes, c)
 	s.shapes = append(s.shapes, c)
 }
 }
 
 
@@ -168,7 +170,7 @@ type faceEdge struct {
 	shapeID     int32    // The ID of shape that this edge belongs to
 	shapeID     int32    // The ID of shape that this edge belongs to
 	edgeID      int      // Edge ID within that shape
 	edgeID      int      // Edge ID within that shape
 	maxLevel    int      // Not desirable to subdivide this edge beyond this level
 	maxLevel    int      // Not desirable to subdivide this edge beyond this level
-	hasInterior bool     // Belongs to a shape that has an interior
+	hasInterior bool     // Belongs to a shape that has a dimension of 2
 	a, b        r2.Point // The edge endpoints, clipped to a given face
 	a, b        r2.Point // The edge endpoints, clipped to a given face
 	edge        Edge     // The original edge.
 	edge        Edge     // The original edge.
 }
 }
@@ -681,6 +683,28 @@ func (s *ShapeIndex) NumEdges() int {
 	return numEdges
 	return numEdges
 }
 }
 
 
+// NumEdgesUpTo returns the number of edges in the given index, up to the given
+// limit. If the limit is encountered, the current running total is returned,
+// which may be more than the limit.
+func (s *ShapeIndex) NumEdgesUpTo(limit int) int {
+	var numEdges int
+	// We choose to iterate over the shapes in order to match the counting
+	// up behavior in C++ and for test compatibility instead of using a
+	// more idiomatic range over the shape map.
+	for i := int32(0); i <= s.nextID; i++ {
+		s := s.Shape(i)
+		if s == nil {
+			continue
+		}
+		numEdges += s.NumEdges()
+		if numEdges >= limit {
+			break
+		}
+	}
+
+	return numEdges
+}
+
 // Shape returns the shape with the given ID, or nil if the shape has been removed from the index.
 // Shape returns the shape with the given ID, or nil if the shape has been removed from the index.
 func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] }
 func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] }
 
 
@@ -731,7 +755,7 @@ func (s *ShapeIndex) Remove(shape Shape) {
 	numEdges := shape.NumEdges()
 	numEdges := shape.NumEdges()
 	removed := &removedShape{
 	removed := &removedShape{
 		shapeID:               id,
 		shapeID:               id,
-		hasInterior:           shape.HasInterior(),
+		hasInterior:           shape.Dimension() == 2,
 		containsTrackerOrigin: shape.ReferencePoint().Contained,
 		containsTrackerOrigin: shape.ReferencePoint().Contained,
 		edges:                 make([]Edge, numEdges),
 		edges:                 make([]Edge, numEdges),
 	}
 	}
@@ -825,7 +849,7 @@ func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *t
 
 
 	faceEdge := faceEdge{
 	faceEdge := faceEdge{
 		shapeID:     shapeID,
 		shapeID:     shapeID,
-		hasInterior: shape.HasInterior(),
+		hasInterior: shape.Dimension() == 2,
 	}
 	}
 
 
 	if faceEdge.hasInterior {
 	if faceEdge.hasInterior {
@@ -1169,7 +1193,7 @@ func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *track
 		if eNext != len(edges) {
 		if eNext != len(edges) {
 			eshapeID = edges[eNext].faceEdge.shapeID
 			eshapeID = edges[eNext].faceEdge.shapeID
 		}
 		}
-		if cNextIdx != len(cshapeIDs) {
+		if cNextIdx < len(cshapeIDs) {
 			cshapeID = cshapeIDs[cNextIdx]
 			cshapeID = cshapeIDs[cNextIdx]
 		}
 		}
 		eBegin := eNext
 		eBegin := eNext
@@ -1374,7 +1398,7 @@ func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, ed
 		// line segment from the cell center to the entry vertex.
 		// line segment from the cell center to the entry vertex.
 		edge := &faceEdge{
 		edge := &faceEdge{
 			shapeID:     shapeID,
 			shapeID:     shapeID,
-			hasInterior: shape.HasInterior(),
+			hasInterior: shape.Dimension() == 2,
 		}
 		}
 
 
 		if edge.hasInterior {
 		if edge.hasInterior {
@@ -1444,7 +1468,11 @@ func (s *ShapeIndex) testAllEdges(edges []*clippedEdge, t *tracker) {
 func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
 func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
 	count := 0
 	count := 0
 	lastShapeID := int32(-1)
 	lastShapeID := int32(-1)
-	cNext := int32(0)
+
+	// next clipped shape id in the shapeIDs list.
+	clippedNext := int32(0)
+	// index of the current element in the shapeIDs list.
+	shapeIDidx := 0
 	for _, edge := range edges {
 	for _, edge := range edges {
 		if edge.faceEdge.shapeID == lastShapeID {
 		if edge.faceEdge.shapeID == lastShapeID {
 			continue
 			continue
@@ -1455,18 +1483,19 @@ func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
 
 
 		// Skip over any containing shapes up to and including this one,
 		// Skip over any containing shapes up to and including this one,
 		// updating count as appropriate.
 		// updating count as appropriate.
-		for ; cNext < int32(len(shapeIDs)); cNext++ {
-			if cNext > lastShapeID {
+		for ; shapeIDidx < len(shapeIDs); shapeIDidx++ {
+			clippedNext = shapeIDs[shapeIDidx]
+			if clippedNext > lastShapeID {
 				break
 				break
 			}
 			}
-			if cNext < lastShapeID {
+			if clippedNext < lastShapeID {
 				count++
 				count++
 			}
 			}
 		}
 		}
 	}
 	}
 
 
 	// Count any remaining containing shapes.
 	// Count any remaining containing shapes.
-	count += int(len(shapeIDs) - int(cNext))
+	count += int(len(shapeIDs)) - int(shapeIDidx)
 	return count
 	return count
 }
 }
 
 

+ 1 - 1
vendor/github.com/golang/geo/s2/shapeutil.go

@@ -209,7 +209,7 @@ func referencePointAtVertex(shape Shape, vTest Point) (ReferencePoint, bool) {
 // Polygon boundaries are treated as being semi-open (see ContainsPointQuery
 // Polygon boundaries are treated as being semi-open (see ContainsPointQuery
 // and VertexModel for other options).
 // and VertexModel for other options).
 func containsBruteForce(shape Shape, point Point) bool {
 func containsBruteForce(shape Shape, point Point) bool {
-	if !shape.HasInterior() {
+	if shape.Dimension() != 2 {
 		return false
 		return false
 	}
 	}
 
 

+ 0 - 3
vendor/github.com/golang/protobuf/LICENSE

@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
 Copyright 2010 The Go Authors.  All rights reserved.
 Copyright 2010 The Go Authors.  All rights reserved.
-https://github.com/golang/protobuf
 
 
 Redistribution and use in source and binary forms, with or without
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
 modification, are permitted provided that the following conditions are

+ 0 - 1
vendor/github.com/golang/protobuf/proto/decode.go

@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
 	if b&0x80 == 0 {
 	if b&0x80 == 0 {
 		goto done
 		goto done
 	}
 	}
-	// x -= 0x80 << 63 // Always zero.
 
 
 	return 0, errOverflow
 	return 0, errOverflow
 
 

+ 63 - 0
vendor/github.com/golang/protobuf/proto/deprecated.go

@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}

+ 0 - 18
vendor/github.com/golang/protobuf/proto/encode.go

@@ -37,27 +37,9 @@ package proto
 
 
 import (
 import (
 	"errors"
 	"errors"
-	"fmt"
 	"reflect"
 	"reflect"
 )
 )
 
 
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
-	field string
-}
-
-func (e *RequiredNotSetError) Error() string {
-	return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
 var (
 var (
 	// errRepeatedHasNil is the error returned if Marshal is called with
 	// errRepeatedHasNil is the error returned if Marshal is called with
 	// a struct with a repeated field containing a nil element.
 	// a struct with a repeated field containing a nil element.

+ 2 - 1
vendor/github.com/golang/protobuf/proto/equal.go

@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
 			return false
 			return false
 		}
 		}
 
 
-		m1, m2 := e1.value, e2.value
+		m1 := extensionAsLegacyType(e1.value)
+		m2 := extensionAsLegacyType(e2.value)
 
 
 		if m1 == nil && m2 == nil {
 		if m1 == nil && m2 == nil {
 			// Both have only encoded form.
 			// Both have only encoded form.

+ 71 - 7
vendor/github.com/golang/protobuf/proto/extensions.go

@@ -185,9 +185,25 @@ type Extension struct {
 	// extension will have only enc set. When such an extension is
 	// extension will have only enc set. When such an extension is
 	// accessed using GetExtension (or GetExtensions) desc and value
 	// accessed using GetExtension (or GetExtensions) desc and value
 	// will be set.
 	// will be set.
-	desc  *ExtensionDesc
+	desc *ExtensionDesc
+
+	// value is a concrete value for the extension field. Let the type of
+	// desc.ExtensionType be the "API type" and the type of Extension.value
+	// be the "storage type". The API type and storage type are the same except:
+	//	* For scalars (except []byte), the API type uses *T,
+	//	while the storage type uses T.
+	//	* For repeated fields, the API type uses []T, while the storage type
+	//	uses *[]T.
+	//
+	// The reason for the divergence is so that the storage type more naturally
+	// matches what is expected of when retrieving the values through the
+	// protobuf reflection APIs.
+	//
+	// The value may only be populated if desc is also populated.
 	value interface{}
 	value interface{}
-	enc   []byte
+
+	// enc is the raw bytes for the extension field.
+	enc []byte
 }
 }
 
 
 // SetRawExtension is for testing only.
 // SetRawExtension is for testing only.
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
 			// descriptors with the same field number.
 			// descriptors with the same field number.
 			return nil, errors.New("proto: descriptor conflict")
 			return nil, errors.New("proto: descriptor conflict")
 		}
 		}
-		return e.value, nil
+		return extensionAsLegacyType(e.value), nil
 	}
 	}
 
 
 	if extension.ExtensionType == nil {
 	if extension.ExtensionType == nil {
@@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
 
 
 	// Remember the decoded version and drop the encoded version.
 	// Remember the decoded version and drop the encoded version.
 	// That way it is safe to mutate what we return.
 	// That way it is safe to mutate what we return.
-	e.value = v
+	e.value = extensionAsStorageType(v)
 	e.desc = extension
 	e.desc = extension
 	e.enc = nil
 	e.enc = nil
 	emap[extension.Field] = e
 	emap[extension.Field] = e
-	return e.value, nil
+	return extensionAsLegacyType(e.value), nil
 }
 }
 
 
 // defaultExtensionValue returns the default value for extension.
 // defaultExtensionValue returns the default value for extension.
@@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
 	}
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
 	typ := reflect.TypeOf(extension.ExtensionType)
 	if typ != reflect.TypeOf(value) {
 	if typ != reflect.TypeOf(value) {
-		return errors.New("proto: bad extension value type")
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
 	}
 	}
 	// nil extension values need to be caught early, because the
 	// nil extension values need to be caught early, because the
 	// encoder can't distinguish an ErrNil due to a nil extension
 	// encoder can't distinguish an ErrNil due to a nil extension
@@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
 	}
 	}
 
 
 	extmap := epb.extensionsWrite()
 	extmap := epb.extensionsWrite()
-	extmap[extension.Field] = Extension{desc: extension, value: value}
+	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
 	return nil
 	return nil
 }
 }
 
 
@@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
 func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
 func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
 	return extensionMaps[reflect.TypeOf(pb).Elem()]
 	return extensionMaps[reflect.TypeOf(pb).Elem()]
 }
 }
+
+// extensionAsLegacyType converts an value in the storage type as the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		// Represent primitive types as a pointer to the value.
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Slice:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	}
+	return v
+}
+
+// extensionAsStorageType converts an value in the API type as the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	case reflect.Slice:
+		// Represent slice types as a pointer to the value.
+		if rv.Type().Elem().Kind() != reflect.Uint8 {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			v = rv2.Interface()
+		}
+	}
+	return v
+}

+ 72 - 28
vendor/github.com/golang/protobuf/proto/lib.go

@@ -265,7 +265,6 @@ package proto
 
 
 import (
 import (
 	"encoding/json"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"fmt"
 	"log"
 	"log"
 	"reflect"
 	"reflect"
@@ -274,34 +273,73 @@ import (
 	"sync"
 	"sync"
 )
 )
 
 
-var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
 
 
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
-	Reset()
-	String() string
-	ProtoMessage()
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
 }
 }
 
 
-// Stats records allocation details about the protocol buffer encoders
-// and decoders.  Useful for tuning the library itself.
-type Stats struct {
-	Emalloc uint64 // mallocs in encode
-	Dmalloc uint64 // mallocs in decode
-	Encode  uint64 // number of encodes
-	Decode  uint64 // number of decodes
-	Chit    uint64 // number of cache hits
-	Cmiss   uint64 // number of cache misses
-	Size    uint64 // number of sizes
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or a InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
 }
 }
 
 
-// Set to true to enable stats collection.
-const collectStats = false
+type nonFatal struct{ E error }
 
 
-var stats Stats
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
 
 
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
 
 
 // A Buffer is a buffer manager for marshaling and unmarshaling
 // A Buffer is a buffer manager for marshaling and unmarshaling
 // protocol buffers.  It may be reused between invocations to
 // protocol buffers.  It may be reused between invocations to
@@ -902,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 	return false
 }
 }
 
 
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
 
 
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
 
 
 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // This type is not intended to be used by non-generated code.
 // This type is not intended to be used by non-generated code.

+ 2 - 135
vendor/github.com/golang/protobuf/proto/message_set.go

@@ -36,13 +36,7 @@ package proto
  */
  */
 
 
 import (
 import (
-	"bytes"
-	"encoding/json"
 	"errors"
 	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"sync"
 )
 )
 
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
 	return buf[i+1:]
 	return buf[i+1:]
 }
 }
 
 
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var u marshalInfo
-		siz := u.sizeMessageSet(exts)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, exts, deterministic)
-
-	case map[int32]Extension:
-		// This is an old-style extension map.
-		// Wrap it in a new-style XXX_InternalExtensions.
-		ie := XXX_InternalExtensions{
-			p: &struct {
-				mu           sync.Mutex
-				extensionMap map[int32]Extension
-			}{
-				extensionMap: exts,
-			},
-		}
-
-		var u marshalInfo
-		siz := u.sizeMessageSet(&ie)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, &ie, deterministic)
-
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
 // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
 	var m map[int32]Extension
 	var m map[int32]Extension
 	switch exts := exts.(type) {
 	switch exts := exts.(type) {
 	case *XXX_InternalExtensions:
 	case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var mu sync.Locker
-		m, mu = exts.extensionsRead()
-		if m != nil {
-			// Keep the extensions map locked until we're done marshaling to prevent
-			// races between marshaling and unmarshaling the lazily-{en,de}coded
-			// values.
-			mu.Lock()
-			defer mu.Unlock()
-		}
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-	var b bytes.Buffer
-	b.WriteByte('{')
-
-	// Process the map in key order for deterministic output.
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-	for i, id := range ids {
-		ext := m[id]
-		msd, ok := messageSetMap[id]
-		if !ok {
-			// Unknown type; we can't render it, so skip it.
-			continue
-		}
-
-		if i > 0 && b.Len() > 1 {
-			b.WriteByte(',')
-		}
-
-		fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-		x := ext.value
-		if x == nil {
-			x = reflect.New(msd.t.Elem()).Interface()
-			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-				return nil, err
-			}
-		}
-		d, err := json.Marshal(x)
-		if err != nil {
-			return nil, err
-		}
-		b.Write(d)
-	}
-	b.WriteByte('}')
-	return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-	// Common-case fast path.
-	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-		return nil
-	}
-
-	// This is fairly tricky, and it's not clear that it is needed.
-	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-	t    reflect.Type // pointer to struct
-	name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-	messageSetMap[fieldNum] = messageSetDesc{
-		t:    reflect.TypeOf(m),
-		name: name,
-	}
-}

+ 4 - 1
vendor/github.com/golang/protobuf/proto/pointer_reflect.go

@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
 
 
 // toAddrPointer converts an interface to a pointer that points to
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
 	v := reflect.ValueOf(*i)
 	v := reflect.ValueOf(*i)
 	u := reflect.New(v.Type())
 	u := reflect.New(v.Type())
 	u.Elem().Set(v)
 	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
 	return pointer{v: u}
 	return pointer{v: u}
 }
 }
 
 

+ 10 - 5
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go

@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
 
 
 // toAddrPointer converts an interface to a pointer that points to
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
 	// Super-tricky - read or get the address of data word of interface value.
 	// Super-tricky - read or get the address of data word of interface value.
 	if isptr {
 	if isptr {
 		// The interface is of pointer type, thus it is a direct interface.
 		// The interface is of pointer type, thus it is a direct interface.
 		// The data word is the pointer data itself. We take its address.
 		// The data word is the pointer data itself. We take its address.
-		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
 	}
 	}
-	// The interface is not of pointer type. The data word is the pointer
-	// to the data.
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
 }
 }
 
 
 // valToPointer converts v to a pointer. v must be of pointer type.
 // valToPointer converts v to a pointer. v must be of pointer type.

+ 24 - 23
vendor/github.com/golang/protobuf/proto/properties.go

@@ -139,7 +139,7 @@ type Properties struct {
 	Repeated bool
 	Repeated bool
 	Packed   bool   // relevant for repeated primitives only
 	Packed   bool   // relevant for repeated primitives only
 	Enum     string // set for enum types only
 	Enum     string // set for enum types only
-	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	proto3   bool   // whether this is known to be a proto3 field
 	oneof    bool   // whether this is a oneof field
 	oneof    bool   // whether this is a oneof field
 
 
 	Default    string // default value
 	Default    string // default value
@@ -148,9 +148,9 @@ type Properties struct {
 	stype reflect.Type      // set for struct types only
 	stype reflect.Type      // set for struct types only
 	sprop *StructProperties // set for struct types only
 	sprop *StructProperties // set for struct types only
 
 
-	mtype    reflect.Type // set for map types only
-	mkeyprop *Properties  // set for map types only
-	mvalprop *Properties  // set for map types only
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
 }
 }
 
 
 // String formats the properties in the protobuf struct field tag style.
 // String formats the properties in the protobuf struct field tag style.
@@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
 
 
 	case reflect.Map:
 	case reflect.Map:
 		p.mtype = t1
 		p.mtype = t1
-		p.mkeyprop = &Properties{}
-		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.mvalprop = &Properties{}
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
 		vtype := p.mtype.Elem()
 		vtype := p.mtype.Elem()
 		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
 		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
 			// The value type is not a message (*T) or bytes ([]byte),
 			// The value type is not a message (*T) or bytes ([]byte),
 			// so we need encoders for the pointer to this type.
 			// so we need encoders for the pointer to this type.
 			vtype = reflect.PtrTo(vtype)
 			vtype = reflect.PtrTo(vtype)
 		}
 		}
-		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
 	}
 	}
 
 
 	if p.stype != nil {
 	if p.stype != nil {
@@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
 	sprop, ok := propertiesMap[t]
 	sprop, ok := propertiesMap[t]
 	propertiesMu.RUnlock()
 	propertiesMu.RUnlock()
 	if ok {
 	if ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return sprop
 		return sprop
 	}
 	}
 
 
@@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
 	return sprop
 	return sprop
 }
 }
 
 
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
 // getPropertiesLocked requires that propertiesMu is held.
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
 	if prop, ok := propertiesMap[t]; ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return prop
 		return prop
 	}
 	}
-	if collectStats {
-		stats.Cmiss++
-	}
 
 
 	prop := new(StructProperties)
 	prop := new(StructProperties)
 	// in case of recursive protos, fill this in now.
 	// in case of recursive protos, fill this in now.
@@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	// Re-order prop.order.
 	// Re-order prop.order.
 	sort.Sort(prop)
 	sort.Sort(prop)
 
 
-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
 	}
 	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-		var oots []interface{}
-		_, _, _, oots = om.XXX_OneofFuncs()
-
+	if len(oots) > 0 {
 		// Interpret oneof metadata.
 		// Interpret oneof metadata.
 		prop.OneofTypes = make(map[string]*OneofProperties)
 		prop.OneofTypes = make(map[string]*OneofProperties)
 		for _, oot := range oots {
 		for _, oot := range oots {

+ 162 - 67
vendor/github.com/golang/protobuf/proto/table_marshal.go

@@ -87,6 +87,7 @@ type marshalElemInfo struct {
 	sizer     sizer
 	sizer     sizer
 	marshaler marshaler
 	marshaler marshaler
 	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
 	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
 }
 }
 
 
 var (
 var (
@@ -231,7 +232,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
 		return b, err
 		return b, err
 	}
 	}
 
 
-	var err, errreq error
+	var err, errLater error
 	// The old marshaler encodes extensions at beginning.
 	// The old marshaler encodes extensions at beginning.
 	if u.extensions.IsValid() {
 	if u.extensions.IsValid() {
 		e := ptr.offset(u.extensions).toExtensions()
 		e := ptr.offset(u.extensions).toExtensions()
@@ -252,11 +253,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
 		}
 		}
 	}
 	}
 	for _, f := range u.fields {
 	for _, f := range u.fields {
-		if f.required && errreq == nil {
+		if f.required {
 			if ptr.offset(f.field).getPointer().isNil() {
 			if ptr.offset(f.field).getPointer().isNil() {
 				// Required field is not set.
 				// Required field is not set.
 				// We record the error but keep going, to give a complete marshaling.
 				// We record the error but keep going, to give a complete marshaling.
-				errreq = &RequiredNotSetError{f.name}
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
 				continue
 				continue
 			}
 			}
 		}
 		}
@@ -269,14 +272,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
 			if err1, ok := err.(*RequiredNotSetError); ok {
 			if err1, ok := err.(*RequiredNotSetError); ok {
 				// Required field in submessage is not set.
 				// Required field in submessage is not set.
 				// We record the error but keep going, to give a complete marshaling.
 				// We record the error but keep going, to give a complete marshaling.
-				if errreq == nil {
-					errreq = &RequiredNotSetError{f.name + "." + err1.field}
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
 				}
 				}
 				continue
 				continue
 			}
 			}
 			if err == errRepeatedHasNil {
 			if err == errRepeatedHasNil {
 				err = errors.New("proto: repeated field " + f.name + " has nil element")
 				err = errors.New("proto: repeated field " + f.name + " has nil element")
 			}
 			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
 			return b, err
 			return b, err
 		}
 		}
 	}
 	}
@@ -284,7 +294,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
 		s := *ptr.offset(u.unrecognized).toBytes()
 		s := *ptr.offset(u.unrecognized).toBytes()
 		b = append(b, s...)
 		b = append(b, s...)
 	}
 	}
-	return b, errreq
+	return b, errLater
 }
 }
 
 
 // computeMarshalInfo initializes the marshal info.
 // computeMarshalInfo initializes the marshal info.
@@ -311,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
 
 
 	// get oneof implementers
 	// get oneof implementers
 	var oneofImplementers []interface{}
 	var oneofImplementers []interface{}
-	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
 		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
 		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
 	}
 	}
 
 
 	n := t.NumField()
 	n := t.NumField()
@@ -398,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
 		panic("tag is not an integer")
 		panic("tag is not an integer")
 	}
 	}
 	wt := wiretype(tags[0])
 	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
 	sizer, marshaler := typeMarshaler(t, tags, false, false)
 	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
 	e = &marshalElemInfo{
 	e = &marshalElemInfo{
 		wiretag:   uint64(tag)<<3 | wt,
 		wiretag:   uint64(tag)<<3 | wt,
 		tagsize:   SizeVarint(uint64(tag) << 3),
 		tagsize:   SizeVarint(uint64(tag) << 3),
 		sizer:     sizer,
 		sizer:     sizer,
 		marshaler: marshaler,
 		marshaler: marshaler,
 		isptr:     t.Kind() == reflect.Ptr,
 		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
 	}
 	}
 
 
 	// update cache
 	// update cache
@@ -439,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
 
 
 func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
 func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
 	fi.field = toField(f)
 	fi.field = toField(f)
-	fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+	fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
 	fi.isPointer = true
 	fi.isPointer = true
 	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
 	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
 	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
 	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -467,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
 	}
 	}
 }
 }
 
 
-type oneofMessage interface {
-	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
 // wiretype returns the wire encoding of the type.
 // wiretype returns the wire encoding of the type.
 func wiretype(encoding string) uint64 {
 func wiretype(encoding string) uint64 {
 	switch encoding {
 	switch encoding {
@@ -530,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
 
 
 	packed := false
 	packed := false
 	proto3 := false
 	proto3 := false
+	validateUTF8 := true
 	for i := 2; i < len(tags); i++ {
 	for i := 2; i < len(tags); i++ {
 		if tags[i] == "packed" {
 		if tags[i] == "packed" {
 			packed = true
 			packed = true
@@ -538,6 +557,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
 			proto3 = true
 			proto3 = true
 		}
 		}
 	}
 	}
+	validateUTF8 = validateUTF8 && proto3
 
 
 	switch t.Kind() {
 	switch t.Kind() {
 	case reflect.Bool:
 	case reflect.Bool:
@@ -735,6 +755,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
 		}
 		}
 		return sizeFloat64Value, appendFloat64Value
 		return sizeFloat64Value, appendFloat64Value
 	case reflect.String:
 	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
 		if pointer {
 		if pointer {
 			return sizeStringPtr, appendStringPtr
 			return sizeStringPtr, appendStringPtr
 		}
 		}
@@ -1983,52 +2015,105 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
 	return b, nil
 	return b, nil
 }
 }
 func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
 func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
 	v := *ptr.toString()
 	v := *ptr.toString()
 	if !utf8.ValidString(v) {
 	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
+		invalidUTF8 = true
 	}
 	}
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, uint64(len(v)))
 	b = appendVarint(b, uint64(len(v)))
 	b = append(b, v...)
 	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
 	return b, nil
 	return b, nil
 }
 }
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
 	v := *ptr.toString()
 	v := *ptr.toString()
 	if v == "" {
 	if v == "" {
 		return b, nil
 		return b, nil
 	}
 	}
 	if !utf8.ValidString(v) {
 	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
+		invalidUTF8 = true
 	}
 	}
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, uint64(len(v)))
 	b = appendVarint(b, uint64(len(v)))
 	b = append(b, v...)
 	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
 	return b, nil
 	return b, nil
 }
 }
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
 	p := *ptr.toStringPtr()
 	p := *ptr.toStringPtr()
 	if p == nil {
 	if p == nil {
 		return b, nil
 		return b, nil
 	}
 	}
 	v := *p
 	v := *p
 	if !utf8.ValidString(v) {
 	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
+		invalidUTF8 = true
 	}
 	}
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, wiretag)
 	b = appendVarint(b, uint64(len(v)))
 	b = appendVarint(b, uint64(len(v)))
 	b = append(b, v...)
 	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
 	return b, nil
 	return b, nil
 }
 }
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
 	s := *ptr.toStringSlice()
 	s := *ptr.toStringSlice()
 	for _, v := range s {
 	for _, v := range s {
 		if !utf8.ValidString(v) {
 		if !utf8.ValidString(v) {
-			return nil, errInvalidUTF8
+			invalidUTF8 = true
 		}
 		}
 		b = appendVarint(b, wiretag)
 		b = appendVarint(b, wiretag)
 		b = appendVarint(b, uint64(len(v)))
 		b = appendVarint(b, uint64(len(v)))
 		b = append(b, v...)
 		b = append(b, v...)
 	}
 	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
 	return b, nil
 	return b, nil
 }
 }
 func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
 func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
@@ -2107,7 +2192,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
 		},
 		},
 		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
 		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
 			s := ptr.getPointerSlice()
 			s := ptr.getPointerSlice()
-			var err, errreq error
+			var err error
+			var nerr nonFatal
 			for _, v := range s {
 			for _, v := range s {
 				if v.isNil() {
 				if v.isNil() {
 					return b, errRepeatedHasNil
 					return b, errRepeatedHasNil
@@ -2115,22 +2201,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
 				b = appendVarint(b, wiretag) // start group
 				b = appendVarint(b, wiretag) // start group
 				b, err = u.marshal(b, v, deterministic)
 				b, err = u.marshal(b, v, deterministic)
 				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
 				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
-				if err != nil {
-					if _, ok := err.(*RequiredNotSetError); ok {
-						// Required field in submessage is not set.
-						// We record the error but keep going, to give a complete marshaling.
-						if errreq == nil {
-							errreq = err
-						}
-						continue
-					}
+				if !nerr.Merge(err) {
 					if err == ErrNil {
 					if err == ErrNil {
 						err = errRepeatedHasNil
 						err = errRepeatedHasNil
 					}
 					}
 					return b, err
 					return b, err
 				}
 				}
 			}
 			}
-			return b, errreq
+			return b, nerr.E
 		}
 		}
 }
 }
 
 
@@ -2174,7 +2252,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
 		},
 		},
 		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
 		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
 			s := ptr.getPointerSlice()
 			s := ptr.getPointerSlice()
-			var err, errreq error
+			var err error
+			var nerr nonFatal
 			for _, v := range s {
 			for _, v := range s {
 				if v.isNil() {
 				if v.isNil() {
 					return b, errRepeatedHasNil
 					return b, errRepeatedHasNil
@@ -2184,22 +2263,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
 				b = appendVarint(b, uint64(siz))
 				b = appendVarint(b, uint64(siz))
 				b, err = u.marshal(b, v, deterministic)
 				b, err = u.marshal(b, v, deterministic)
 
 
-				if err != nil {
-					if _, ok := err.(*RequiredNotSetError); ok {
-						// Required field in submessage is not set.
-						// We record the error but keep going, to give a complete marshaling.
-						if errreq == nil {
-							errreq = err
-						}
-						continue
-					}
+				if !nerr.Merge(err) {
 					if err == ErrNil {
 					if err == ErrNil {
 						err = errRepeatedHasNil
 						err = errRepeatedHasNil
 					}
 					}
 					return b, err
 					return b, err
 				}
 				}
 			}
 			}
-			return b, errreq
+			return b, nerr.E
 		}
 		}
 }
 }
 
 
@@ -2223,14 +2294,33 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
 	// value.
 	// value.
 	// Key cannot be pointer-typed.
 	// Key cannot be pointer-typed.
 	valIsPtr := valType.Kind() == reflect.Ptr
 	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use
+	// cached version in marshal (but not in size).
+	// If value is not message type, we don't have size cache,
+	// but it cannot be nested either. Just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
 	return func(ptr pointer, tagsize int) int {
 	return func(ptr pointer, tagsize int) int {
 			m := ptr.asPointerTo(t).Elem() // the map
 			m := ptr.asPointerTo(t).Elem() // the map
 			n := 0
 			n := 0
 			for _, k := range m.MapKeys() {
 			for _, k := range m.MapKeys() {
 				ki := k.Interface()
 				ki := k.Interface()
 				vi := m.MapIndex(k).Interface()
 				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false)             // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
+				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
 				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
 				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
 				n += siz + SizeVarint(uint64(siz)) + tagsize
 				n += siz + SizeVarint(uint64(siz)) + tagsize
 			}
 			}
@@ -2243,24 +2333,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
 			if len(keys) > 1 && deterministic {
 			if len(keys) > 1 && deterministic {
 				sort.Sort(mapKeys(keys))
 				sort.Sort(mapKeys(keys))
 			}
 			}
+
+			var nerr nonFatal
 			for _, k := range keys {
 			for _, k := range keys {
 				ki := k.Interface()
 				ki := k.Interface()
 				vi := m.MapIndex(k).Interface()
 				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false)    // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
 				b = appendVarint(b, tag)
 				b = appendVarint(b, tag)
-				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
 				b = appendVarint(b, uint64(siz))
 				b = appendVarint(b, uint64(siz))
 				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
 				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
-				if err != nil {
+				if !nerr.Merge(err) {
 					return b, err
 					return b, err
 				}
 				}
 				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
 				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
-				if err != nil && err != ErrNil { // allow nil value in map
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
 					return b, err
 					return b, err
 				}
 				}
 			}
 			}
-			return b, nil
+			return b, nerr.E
 		}
 		}
 }
 }
 
 
@@ -2316,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
 		// the last time this function was called.
 		// the last time this function was called.
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, ei.tagsize)
 		n += ei.sizer(p, ei.tagsize)
 	}
 	}
 	mu.Unlock()
 	mu.Unlock()
@@ -2333,6 +2425,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 	defer mu.Unlock()
 	defer mu.Unlock()
 
 
 	var err error
 	var err error
+	var nerr nonFatal
 
 
 	// Fast-path for common cases: zero or one extensions.
 	// Fast-path for common cases: zero or one extensions.
 	// Don't bother sorting the keys.
 	// Don't bother sorting the keys.
@@ -2350,13 +2443,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 
 
 			ei := u.getExtElemInfo(e.desc)
 			ei := u.getExtElemInfo(e.desc)
 			v := e.value
 			v := e.value
-			p := toAddrPointer(&v, ei.isptr)
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
 			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-			if err != nil {
+			if !nerr.Merge(err) {
 				return b, err
 				return b, err
 			}
 			}
 		}
 		}
-		return b, nil
+		return b, nerr.E
 	}
 	}
 
 
 	// Sort the keys to provide a deterministic encoding.
 	// Sort the keys to provide a deterministic encoding.
@@ -2381,13 +2474,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 
 
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if err != nil {
+		if !nerr.Merge(err) {
 			return b, err
 			return b, err
 		}
 		}
 	}
 	}
-	return b, nil
+	return b, nerr.E
 }
 }
 
 
 // message set format is:
 // message set format is:
@@ -2426,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
 
 
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
 		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
 	}
 	}
 	mu.Unlock()
 	mu.Unlock()
@@ -2444,6 +2537,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 	defer mu.Unlock()
 	defer mu.Unlock()
 
 
 	var err error
 	var err error
+	var nerr nonFatal
 
 
 	// Fast-path for common cases: zero or one extensions.
 	// Fast-path for common cases: zero or one extensions.
 	// Don't bother sorting the keys.
 	// Don't bother sorting the keys.
@@ -2468,14 +2562,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 
 
 			ei := u.getExtElemInfo(e.desc)
 			ei := u.getExtElemInfo(e.desc)
 			v := e.value
 			v := e.value
-			p := toAddrPointer(&v, ei.isptr)
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
 			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
 			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
-			if err != nil {
+			if !nerr.Merge(err) {
 				return b, err
 				return b, err
 			}
 			}
 			b = append(b, 1<<3|WireEndGroup)
 			b = append(b, 1<<3|WireEndGroup)
 		}
 		}
-		return b, nil
+		return b, nerr.E
 	}
 	}
 
 
 	// Sort the keys to provide a deterministic encoding.
 	// Sort the keys to provide a deterministic encoding.
@@ -2506,14 +2600,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 
 
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
 		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
 		b = append(b, 1<<3|WireEndGroup)
 		b = append(b, 1<<3|WireEndGroup)
-		if err != nil {
+		if !nerr.Merge(err) {
 			return b, err
 			return b, err
 		}
 		}
 	}
 	}
-	return b, nil
+	return b, nerr.E
 }
 }
 
 
 // sizeV1Extensions computes the size of encoded data for a V1-API extension field.
 // sizeV1Extensions computes the size of encoded data for a V1-API extension field.
@@ -2536,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
 
 
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, ei.tagsize)
 		n += ei.sizer(p, ei.tagsize)
 	}
 	}
 	return n
 	return n
@@ -2556,6 +2650,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
 	sort.Ints(keys)
 	sort.Ints(keys)
 
 
 	var err error
 	var err error
+	var nerr nonFatal
 	for _, k := range keys {
 	for _, k := range keys {
 		e := m[int32(k)]
 		e := m[int32(k)]
 		if e.value == nil || e.desc == nil {
 		if e.value == nil || e.desc == nil {
@@ -2570,13 +2665,13 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
 
 
 		ei := u.getExtElemInfo(e.desc)
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if err != nil {
+		if !nerr.Merge(err) {
 			return b, err
 			return b, err
 		}
 		}
 	}
 	}
-	return b, nil
+	return b, nerr.E
 }
 }
 
 
 // newMarshaler is the interface representing objects that can marshal themselves.
 // newMarshaler is the interface representing objects that can marshal themselves.

+ 141 - 55
vendor/github.com/golang/protobuf/proto/table_unmarshal.go

@@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
 
 
 	// if a required field, contains a single set bit at this field's index in the required field list.
 	// if a required field, contains a single set bit at this field's index in the required field list.
 	reqMask uint64
 	reqMask uint64
+
+	name string // name of the field, for error reporting
 }
 }
 
 
 var (
 var (
@@ -134,10 +136,10 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
 		u.computeUnmarshalInfo()
 		u.computeUnmarshalInfo()
 	}
 	}
 	if u.isMessageSet {
 	if u.isMessageSet {
-		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
 	}
 	}
-	var reqMask uint64            // bitmask of required fields we've seen.
-	var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
 	for len(b) > 0 {
 	for len(b) > 0 {
 		// Read tag and wire type.
 		// Read tag and wire type.
 		// Special case 1 and 2 byte varints.
 		// Special case 1 and 2 byte varints.
@@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
 			if r, ok := err.(*RequiredNotSetError); ok {
 			if r, ok := err.(*RequiredNotSetError); ok {
 				// Remember this error, but keep parsing. We need to produce
 				// Remember this error, but keep parsing. We need to produce
 				// a full parse even if a required field is missing.
 				// a full parse even if a required field is missing.
-				rnse = r
+				if errLater == nil {
+					errLater = r
+				}
 				reqMask |= f.reqMask
 				reqMask |= f.reqMask
 				continue
 				continue
 			}
 			}
 			if err != errInternalBadWireType {
 			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
 				return err
 				return err
 			}
 			}
 			// Fragments with bad wire type are treated as unknown fields.
 			// Fragments with bad wire type are treated as unknown fields.
@@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
 			emap[int32(tag)] = e
 			emap[int32(tag)] = e
 		}
 		}
 	}
 	}
-	if rnse != nil {
-		// A required field of a submessage/group is missing. Return that error.
-		return rnse
-	}
-	if reqMask != u.reqMask {
+	if reqMask != u.reqMask && errLater == nil {
 		// A required field of this message is missing.
 		// A required field of this message is missing.
 		for _, n := range u.reqFields {
 		for _, n := range u.reqFields {
 			if reqMask&1 == 0 {
 			if reqMask&1 == 0 {
-				return &RequiredNotSetError{n}
+				errLater = &RequiredNotSetError{n}
 			}
 			}
 			reqMask >>= 1
 			reqMask >>= 1
 		}
 		}
 	}
 	}
-	return nil
+	return errLater
 }
 }
 
 
 // computeUnmarshalInfo fills in u with information for use
 // computeUnmarshalInfo fills in u with information for use
@@ -351,43 +358,52 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 		}
 		}
 
 
 		// Store the info in the correct slot in the message.
 		// Store the info in the correct slot in the message.
-		u.setTag(tag, toField(&f), unmarshal, reqMask)
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
 	}
 	}
 
 
 	// Find any types associated with oneof fields.
 	// Find any types associated with oneof fields.
-	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
-	if fn.IsValid() {
-		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
-		for i := res.Len() - 1; i >= 0; i-- {
-			v := res.Index(i)                             // interface{}
-			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
-			typ := tptr.Elem()                            // Msg_X
-
-			f := typ.Field(0) // oneof implementers have one field
-			baseUnmarshal := fieldUnmarshaler(&f)
-			tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
-			tag, err := strconv.Atoi(tagstr)
-			if err != nil {
-				panic("protobuf tag field not an integer: " + tagstr)
-			}
-
-			// Find the oneof field that this struct implements.
-			// Might take O(n^2) to process all of the oneofs, but who cares.
-			for _, of := range oneofFields {
-				if tptr.Implements(of.ityp) {
-					// We have found the corresponding interface for this struct.
-					// That lets us know where this struct should be stored
-					// when we encounter it during unmarshaling.
-					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
-					u.setTag(tag, of.field, unmarshal, 0)
-				}
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
 			}
 			}
 		}
 		}
+
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
 	}
 	}
 
 
 	// Get extension ranges, if any.
 	// Get extension ranges, if any.
-	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
 	if fn.IsValid() {
 	if fn.IsValid() {
 		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
 		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
 			panic("a message with extensions, but no extensions field in " + t.Name())
 			panic("a message with extensions, but no extensions field in " + t.Name())
@@ -401,7 +417,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
 	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
 	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
 	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
 		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
 		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
-	}, 0)
+	}, 0, "")
 
 
 	// Set mask for required field check.
 	// Set mask for required field check.
 	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
 	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
@@ -413,8 +429,9 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 // tag = tag # for field
 // tag = tag # for field
 // field/unmarshal = unmarshal info for that field.
 // field/unmarshal = unmarshal info for that field.
 // reqMask = if required, bitmask for field position in required field list. 0 otherwise.
 // reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
-	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
 	n := u.typ.NumField()
 	n := u.typ.NumField()
 	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
 	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
 		for len(u.dense) <= tag {
 		for len(u.dense) <= tag {
@@ -442,11 +459,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
 	tagArray := strings.Split(tags, ",")
 	tagArray := strings.Split(tags, ",")
 	encoding := tagArray[0]
 	encoding := tagArray[0]
 	name := "unknown"
 	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
 	for _, tag := range tagArray[3:] {
 	for _, tag := range tagArray[3:] {
 		if strings.HasPrefix(tag, "name=") {
 		if strings.HasPrefix(tag, "name=") {
 			name = tag[5:]
 			name = tag[5:]
 		}
 		}
+		if tag == "proto3" {
+			proto3 = true
+		}
 	}
 	}
+	validateUTF8 = validateUTF8 && proto3
 
 
 	// Figure out packaging (pointer, slice, or both)
 	// Figure out packaging (pointer, slice, or both)
 	slice := false
 	slice := false
@@ -594,6 +617,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
 		}
 		}
 		return unmarshalBytesValue
 		return unmarshalBytesValue
 	case reflect.String:
 	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
 		if pointer {
 		if pointer {
 			return unmarshalStringPtr
 			return unmarshalStringPtr
 		}
 		}
@@ -1448,9 +1480,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
 		return nil, io.ErrUnexpectedEOF
 		return nil, io.ErrUnexpectedEOF
 	}
 	}
 	v := string(b[:x])
 	v := string(b[:x])
-	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
-	}
 	*f.toString() = v
 	*f.toString() = v
 	return b[x:], nil
 	return b[x:], nil
 }
 }
@@ -1468,9 +1497,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
 		return nil, io.ErrUnexpectedEOF
 		return nil, io.ErrUnexpectedEOF
 	}
 	}
 	v := string(b[:x])
 	v := string(b[:x])
-	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
-	}
 	*f.toStringPtr() = &v
 	*f.toStringPtr() = &v
 	return b[x:], nil
 	return b[x:], nil
 }
 }
@@ -1488,11 +1514,69 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
 		return nil, io.ErrUnexpectedEOF
 		return nil, io.ErrUnexpectedEOF
 	}
 	}
 	v := string(b[:x])
 	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
 	if !utf8.ValidString(v) {
 	if !utf8.ValidString(v) {
-		return nil, errInvalidUTF8
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
 	}
 	}
+	v := string(b[:x])
 	s := f.toStringSlice()
 	s := f.toStringSlice()
 	*s = append(*s, v)
 	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
 	return b[x:], nil
 	return b[x:], nil
 }
 }
 
 
@@ -1674,6 +1758,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
 		// Maps will be somewhat slow. Oh well.
 		// Maps will be somewhat slow. Oh well.
 
 
 		// Read key and value from data.
 		// Read key and value from data.
+		var nerr nonFatal
 		k := reflect.New(kt)
 		k := reflect.New(kt)
 		v := reflect.New(vt)
 		v := reflect.New(vt)
 		for len(b) > 0 {
 		for len(b) > 0 {
@@ -1694,7 +1779,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
 				err = errInternalBadWireType // skip unknown tag
 				err = errInternalBadWireType // skip unknown tag
 			}
 			}
 
 
-			if err == nil {
+			if nerr.Merge(err) {
 				continue
 				continue
 			}
 			}
 			if err != errInternalBadWireType {
 			if err != errInternalBadWireType {
@@ -1717,7 +1802,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
 		// Insert into map.
 		// Insert into map.
 		m.SetMapIndex(k.Elem(), v.Elem())
 		m.SetMapIndex(k.Elem(), v.Elem())
 
 
-		return r, nil
+		return r, nerr.E
 	}
 	}
 }
 }
 
 
@@ -1743,15 +1828,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
 		// Unmarshal data into holder.
 		// Unmarshal data into holder.
 		// We unmarshal into the first field of the holder object.
 		// We unmarshal into the first field of the holder object.
 		var err error
 		var err error
+		var nerr nonFatal
 		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
 		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
-		if err != nil {
+		if !nerr.Merge(err) {
 			return nil, err
 			return nil, err
 		}
 		}
 
 
 		// Write pointer to holder into target field.
 		// Write pointer to holder into target field.
 		f.asPointerTo(ityp).Elem().Set(v)
 		f.asPointerTo(ityp).Elem().Set(v)
 
 
-		return b, nil
+		return b, nerr.E
 	}
 	}
 }
 }
 
 
@@ -1864,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
 // If there is an error, it returns 0,0.
 // If there is an error, it returns 0,0.
 func decodeVarint(b []byte) (uint64, int) {
 func decodeVarint(b []byte) (uint64, int) {
 	var x, y uint64
 	var x, y uint64
-	if len(b) <= 0 {
+	if len(b) == 0 {
 		goto bad
 		goto bad
 	}
 	}
 	x = uint64(b[0])
 	x = uint64(b[0])

+ 2 - 2
vendor/github.com/golang/protobuf/proto/text.go

@@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 						return err
 						return err
 					}
 					}
 				}
 				}
-				if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
 					return err
 					return err
 				}
 				}
 				if err := w.WriteByte('\n'); err != nil {
 				if err := w.WriteByte('\n'); err != nil {
@@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 							return err
 							return err
 						}
 						}
 					}
 					}
-					if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
 						return err
 						return err
 					}
 					}
 					if err := w.WriteByte('\n'); err != nil {
 					if err := w.WriteByte('\n'); err != nil {

+ 3 - 3
vendor/github.com/golang/protobuf/proto/text_parser.go

@@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 					if err := p.consumeToken(":"); err != nil {
 					if err := p.consumeToken(":"); err != nil {
 						return err
 						return err
 					}
 					}
-					if err := p.readAny(key, props.mkeyprop); err != nil {
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
 						return err
 						return err
 					}
 					}
 					if err := p.consumeOptionalSeparator(); err != nil {
 					if err := p.consumeOptionalSeparator(); err != nil {
 						return err
 						return err
 					}
 					}
 				case "value":
 				case "value":
-					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
 						return err
 						return err
 					}
 					}
-					if err := p.readAny(val, props.mvalprop); err != nil {
+					if err := p.readAny(val, props.MapValProp); err != nil {
 						return err
 						return err
 					}
 					}
 					if err := p.consumeOptionalSeparator(); err != nil {
 					if err := p.consumeOptionalSeparator(); err != nil {

+ 9 - 0
vendor/github.com/google/uuid/CONTRIBUTORS

@@ -0,0 +1,9 @@
+Paul Borman <[email protected]>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza

+ 27 - 0
vendor/github.com/google/uuid/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 80 - 0
vendor/github.com/google/uuid/dce.go

@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the users UID for the Person
+// domain and the users GID for the Group.  The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID.  Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}

+ 12 - 0
vendor/github.com/google/uuid/doc.go

@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array.  UUIDs may be used as keys to
+// maps or compared directly.
+package uuid

+ 53 - 0
vendor/github.com/google/uuid/hash.go

@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h.  The hash should be at least 16 byte in length.  The
+// first 16 bytes of the hash are used to form the UUID.  The version of the
+// UUID will be the lower 4 bits of version.  NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:])
+	h.Write(data)
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}

+ 37 - 0
vendor/github.com/google/uuid/marshal.go

@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err == nil {
+		*uuid = id
+	}
+	return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}

+ 90 - 0
vendor/github.com/google/uuid/node.go

@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived.  The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated.  If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address.  If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
+// of id are used.  If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
+// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}

+ 12 - 0
vendor/github.com/google/uuid/node_js.go

@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This remvoves the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }

+ 33 - 0
vendor/github.com/google/uuid/node_net.go

@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned.  If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}

+ 59 - 0
vendor/github.com/google/uuid/sql.go

@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}

+ 123 - 0
vendor/github.com/google/uuid/time.go

@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed.  An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set.  The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated.  Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.  Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	oldSeq := clockSeq
+	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if oldSeq != clockSeq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid.  The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}

+ 43 - 0
vendor/github.com/google/uuid/util.go

@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}

+ 245 - 0
vendor/github.com/google/uuid/uuid.go

@@ -0,0 +1,245 @@
+// Copyright 2018 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error.  Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+	var uuid UUID
+	switch len(s) {
+	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36:
+
+	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9:
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+	case 36 + 2:
+		s = s[1:]
+
+	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+	case 32:
+		var ok bool
+		for i := range uuid {
+			uuid[i], ok = xtob(s[i*2], s[i*2+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(s[x], s[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+	var uuid UUID
+	switch len(b) {
+	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+		}
+		b = b[9:]
+	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+		b = b[1:]
+	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+		var ok bool
+		for i := 0; i < 32; i += 2 {
+			uuid[i/2], ok = xtob(b[i], b[i+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(b[x], b[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+	uuid, err := Parse(s)
+	if err != nil {
+		panic(`uuid: Parse(` + s + `): ` + err.Error())
+	}
+	return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+	err = uuid.UnmarshalBinary(b)
+	return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// , or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,  or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}

+ 44 - 0
vendor/github.com/google/uuid/version1.go

@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time.  If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically.  If the NodeID cannot
+// be set NewUUID returns nil.  If clock sequence has not been set by
+// SetClockSequence then it will be set automatically.  If GetTime fails to
+// return the current time then NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}

+ 38 - 0
vendor/github.com/google/uuid/version4.go

@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics.  New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, which
+//  means the probability is about 0.00000000006 (6 × 10−11),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}

+ 0 - 20
vendor/github.com/influxdata/influxdb/LICENSE

@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2016 Errplane Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 61
vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md

@@ -1,61 +0,0 @@
-- # List
-- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
-- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
-- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
-- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE)
-- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE)
-- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
-- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
-- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
-- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
-- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
-- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
-- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
-- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
-- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE)
-- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
-- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
-- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
-- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
-- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE)
-- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
-- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE)
-- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
-- github.com/klauspost/pgzip [MIT LICENSE](https://github.com/klauspost/pgzip/blob/master/LICENSE)
-- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE)
-- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
-- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
-- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
-- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
-- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
-- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
-- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE)
-- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE)
-- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE)
-- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
-- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
-- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
-- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
-- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt)
-- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
-- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
-- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE)
-- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE)
-- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE)
-- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE)
-- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
-- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)
-
-
-
-
-
-
-
-
-
-
-
-
-
-

+ 0 - 662
vendor/github.com/influxdata/influxdb/client/v2/client.go

@@ -1,662 +0,0 @@
-// Package client (v2) is the current official Go client for InfluxDB.
-package client // import "github.com/influxdata/influxdb/client/v2"
-
-import (
-	"bytes"
-	"crypto/tls"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"mime"
-	"net/http"
-	"net/url"
-	"path"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/influxdata/influxdb/models"
-)
-
-// HTTPConfig is the config data needed to create an HTTP Client.
-type HTTPConfig struct {
-	// Addr should be of the form "http://host:port"
-	// or "http://[ipv6-host%zone]:port".
-	Addr string
-
-	// Username is the influxdb username, optional.
-	Username string
-
-	// Password is the influxdb password, optional.
-	Password string
-
-	// UserAgent is the http User Agent, defaults to "InfluxDBClient".
-	UserAgent string
-
-	// Timeout for influxdb writes, defaults to no timeout.
-	Timeout time.Duration
-
-	// InsecureSkipVerify gets passed to the http client, if true, it will
-	// skip https certificate verification. Defaults to false.
-	InsecureSkipVerify bool
-
-	// TLSConfig allows the user to set their own TLS config for the HTTP
-	// Client. If set, this option overrides InsecureSkipVerify.
-	TLSConfig *tls.Config
-
-	// Proxy configures the Proxy function on the HTTP client.
-	Proxy func(req *http.Request) (*url.URL, error)
-}
-
-// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
-type BatchPointsConfig struct {
-	// Precision is the write precision of the points, defaults to "ns".
-	Precision string
-
-	// Database is the database to write points to.
-	Database string
-
-	// RetentionPolicy is the retention policy of the points.
-	RetentionPolicy string
-
-	// Write consistency is the number of servers required to confirm write.
-	WriteConsistency string
-}
-
-// Client is a client interface for writing & querying the database.
-type Client interface {
-	// Ping checks that status of cluster, and will always return 0 time and no
-	// error for UDP clients.
-	Ping(timeout time.Duration) (time.Duration, string, error)
-
-	// Write takes a BatchPoints object and writes all Points to InfluxDB.
-	Write(bp BatchPoints) error
-
-	// Query makes an InfluxDB Query on the database. This will fail if using
-	// the UDP client.
-	Query(q Query) (*Response, error)
-
-	// Close releases any resources a Client may be using.
-	Close() error
-}
-
-// NewHTTPClient returns a new Client from the provided config.
-// Client is safe for concurrent use by multiple goroutines.
-func NewHTTPClient(conf HTTPConfig) (Client, error) {
-	if conf.UserAgent == "" {
-		conf.UserAgent = "InfluxDBClient"
-	}
-
-	u, err := url.Parse(conf.Addr)
-	if err != nil {
-		return nil, err
-	} else if u.Scheme != "http" && u.Scheme != "https" {
-		m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
-			" must start with http:// or https://", u.Scheme)
-		return nil, errors.New(m)
-	}
-
-	tr := &http.Transport{
-		TLSClientConfig: &tls.Config{
-			InsecureSkipVerify: conf.InsecureSkipVerify,
-		},
-		Proxy: conf.Proxy,
-	}
-	if conf.TLSConfig != nil {
-		tr.TLSClientConfig = conf.TLSConfig
-	}
-	return &client{
-		url:       *u,
-		username:  conf.Username,
-		password:  conf.Password,
-		useragent: conf.UserAgent,
-		httpClient: &http.Client{
-			Timeout:   conf.Timeout,
-			Transport: tr,
-		},
-		transport: tr,
-	}, nil
-}
-
-// Ping will check to see if the server is up with an optional timeout on waiting for leader.
-// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
-func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
-	now := time.Now()
-
-	u := c.url
-	u.Path = path.Join(u.Path, "ping")
-
-	req, err := http.NewRequest("GET", u.String(), nil)
-	if err != nil {
-		return 0, "", err
-	}
-
-	req.Header.Set("User-Agent", c.useragent)
-
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	if timeout > 0 {
-		params := req.URL.Query()
-		params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
-		req.URL.RawQuery = params.Encode()
-	}
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return 0, "", err
-	}
-	defer resp.Body.Close()
-
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return 0, "", err
-	}
-
-	if resp.StatusCode != http.StatusNoContent {
-		var err = fmt.Errorf(string(body))
-		return 0, "", err
-	}
-
-	version := resp.Header.Get("X-Influxdb-Version")
-	return time.Since(now), version, nil
-}
-
-// Close releases the client's resources.
-func (c *client) Close() error {
-	c.transport.CloseIdleConnections()
-	return nil
-}
-
-// client is safe for concurrent use as the fields are all read-only
-// once the client is instantiated.
-type client struct {
-	// N.B - if url.UserInfo is accessed in future modifications to the
-	// methods on client, you will need to synchronize access to url.
-	url        url.URL
-	username   string
-	password   string
-	useragent  string
-	httpClient *http.Client
-	transport  *http.Transport
-}
-
-// BatchPoints is an interface into a batched grouping of points to write into
-// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
-// batch for each goroutine.
-type BatchPoints interface {
-	// AddPoint adds the given point to the Batch of points.
-	AddPoint(p *Point)
-	// AddPoints adds the given points to the Batch of points.
-	AddPoints(ps []*Point)
-	// Points lists the points in the Batch.
-	Points() []*Point
-
-	// Precision returns the currently set precision of this Batch.
-	Precision() string
-	// SetPrecision sets the precision of this batch.
-	SetPrecision(s string) error
-
-	// Database returns the currently set database of this Batch.
-	Database() string
-	// SetDatabase sets the database of this Batch.
-	SetDatabase(s string)
-
-	// WriteConsistency returns the currently set write consistency of this Batch.
-	WriteConsistency() string
-	// SetWriteConsistency sets the write consistency of this Batch.
-	SetWriteConsistency(s string)
-
-	// RetentionPolicy returns the currently set retention policy of this Batch.
-	RetentionPolicy() string
-	// SetRetentionPolicy sets the retention policy of this Batch.
-	SetRetentionPolicy(s string)
-}
-
-// NewBatchPoints returns a BatchPoints interface based on the given config.
-func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
-	if conf.Precision == "" {
-		conf.Precision = "ns"
-	}
-	if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
-		return nil, err
-	}
-	bp := &batchpoints{
-		database:         conf.Database,
-		precision:        conf.Precision,
-		retentionPolicy:  conf.RetentionPolicy,
-		writeConsistency: conf.WriteConsistency,
-	}
-	return bp, nil
-}
-
-type batchpoints struct {
-	points           []*Point
-	database         string
-	precision        string
-	retentionPolicy  string
-	writeConsistency string
-}
-
-func (bp *batchpoints) AddPoint(p *Point) {
-	bp.points = append(bp.points, p)
-}
-
-func (bp *batchpoints) AddPoints(ps []*Point) {
-	bp.points = append(bp.points, ps...)
-}
-
-func (bp *batchpoints) Points() []*Point {
-	return bp.points
-}
-
-func (bp *batchpoints) Precision() string {
-	return bp.precision
-}
-
-func (bp *batchpoints) Database() string {
-	return bp.database
-}
-
-func (bp *batchpoints) WriteConsistency() string {
-	return bp.writeConsistency
-}
-
-func (bp *batchpoints) RetentionPolicy() string {
-	return bp.retentionPolicy
-}
-
-func (bp *batchpoints) SetPrecision(p string) error {
-	if _, err := time.ParseDuration("1" + p); err != nil {
-		return err
-	}
-	bp.precision = p
-	return nil
-}
-
-func (bp *batchpoints) SetDatabase(db string) {
-	bp.database = db
-}
-
-func (bp *batchpoints) SetWriteConsistency(wc string) {
-	bp.writeConsistency = wc
-}
-
-func (bp *batchpoints) SetRetentionPolicy(rp string) {
-	bp.retentionPolicy = rp
-}
-
-// Point represents a single data point.
-type Point struct {
-	pt models.Point
-}
-
-// NewPoint returns a point with the given timestamp. If a timestamp is not
-// given, then data is sent to the database without a timestamp, in which case
-// the server will assign local time upon reception. NOTE: it is recommended to
-// send data with a timestamp.
-func NewPoint(
-	name string,
-	tags map[string]string,
-	fields map[string]interface{},
-	t ...time.Time,
-) (*Point, error) {
-	var T time.Time
-	if len(t) > 0 {
-		T = t[0]
-	}
-
-	pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
-	if err != nil {
-		return nil, err
-	}
-	return &Point{
-		pt: pt,
-	}, nil
-}
-
-// String returns a line-protocol string of the Point.
-func (p *Point) String() string {
-	return p.pt.String()
-}
-
-// PrecisionString returns a line-protocol string of the Point,
-// with the timestamp formatted for the given precision.
-func (p *Point) PrecisionString(precision string) string {
-	return p.pt.PrecisionString(precision)
-}
-
-// Name returns the measurement name of the point.
-func (p *Point) Name() string {
-	return string(p.pt.Name())
-}
-
-// Tags returns the tags associated with the point.
-func (p *Point) Tags() map[string]string {
-	return p.pt.Tags().Map()
-}
-
-// Time return the timestamp for the point.
-func (p *Point) Time() time.Time {
-	return p.pt.Time()
-}
-
-// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.
-func (p *Point) UnixNano() int64 {
-	return p.pt.UnixNano()
-}
-
-// Fields returns the fields for the point.
-func (p *Point) Fields() (map[string]interface{}, error) {
-	return p.pt.Fields()
-}
-
-// NewPointFrom returns a point from the provided models.Point.
-func NewPointFrom(pt models.Point) *Point {
-	return &Point{pt: pt}
-}
-
-func (c *client) Write(bp BatchPoints) error {
-	var b bytes.Buffer
-
-	for _, p := range bp.Points() {
-		if p == nil {
-			continue
-		}
-		if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
-			return err
-		}
-
-		if err := b.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-
-	u := c.url
-	u.Path = path.Join(u.Path, "write")
-
-	req, err := http.NewRequest("POST", u.String(), &b)
-	if err != nil {
-		return err
-	}
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.useragent)
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	params := req.URL.Query()
-	params.Set("db", bp.Database())
-	params.Set("rp", bp.RetentionPolicy())
-	params.Set("precision", bp.Precision())
-	params.Set("consistency", bp.WriteConsistency())
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return err
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-		var err = fmt.Errorf(string(body))
-		return err
-	}
-
-	return nil
-}
-
-// Query defines a query to send to the server.
-type Query struct {
-	Command         string
-	Database        string
-	RetentionPolicy string
-	Precision       string
-	Chunked         bool
-	ChunkSize       int
-	Parameters      map[string]interface{}
-}
-
-// NewQuery returns a query object.
-// The database and precision arguments can be empty strings if they are not needed for the query.
-func NewQuery(command, database, precision string) Query {
-	return Query{
-		Command:    command,
-		Database:   database,
-		Precision:  precision,
-		Parameters: make(map[string]interface{}),
-	}
-}
-
-// NewQueryWithRP returns a query object.
-// The database, retention policy, and precision arguments can be empty strings if they are not needed
-// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater.
-func NewQueryWithRP(command, database, retentionPolicy, precision string) Query {
-	return Query{
-		Command:         command,
-		Database:        database,
-		RetentionPolicy: retentionPolicy,
-		Precision:       precision,
-		Parameters:      make(map[string]interface{}),
-	}
-}
-
-// NewQueryWithParameters returns a query object.
-// The database and precision arguments can be empty strings if they are not needed for the query.
-// parameters is a map of the parameter names used in the command to their values.
-func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
-	return Query{
-		Command:    command,
-		Database:   database,
-		Precision:  precision,
-		Parameters: parameters,
-	}
-}
-
-// Response represents a list of statement results.
-type Response struct {
-	Results []Result
-	Err     string `json:"error,omitempty"`
-}
-
-// Error returns the first error from any statement.
-// It returns nil if no errors occurred on any statements.
-func (r *Response) Error() error {
-	if r.Err != "" {
-		return fmt.Errorf(r.Err)
-	}
-	for _, result := range r.Results {
-		if result.Err != "" {
-			return fmt.Errorf(result.Err)
-		}
-	}
-	return nil
-}
-
-// Message represents a user message.
-type Message struct {
-	Level string
-	Text  string
-}
-
-// Result represents a resultset returned from a single statement.
-type Result struct {
-	Series   []models.Row
-	Messages []*Message
-	Err      string `json:"error,omitempty"`
-}
-
-// Query sends a command to the server and returns the Response.
-func (c *client) Query(q Query) (*Response, error) {
-	u := c.url
-	u.Path = path.Join(u.Path, "query")
-
-	jsonParameters, err := json.Marshal(q.Parameters)
-
-	if err != nil {
-		return nil, err
-	}
-
-	req, err := http.NewRequest("POST", u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header.Set("Content-Type", "")
-	req.Header.Set("User-Agent", c.useragent)
-
-	if c.username != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-
-	params := req.URL.Query()
-	params.Set("q", q.Command)
-	params.Set("db", q.Database)
-	if q.RetentionPolicy != "" {
-		params.Set("rp", q.RetentionPolicy)
-	}
-	params.Set("params", string(jsonParameters))
-	if q.Chunked {
-		params.Set("chunked", "true")
-		if q.ChunkSize > 0 {
-			params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
-		}
-	}
-
-	if q.Precision != "" {
-		params.Set("epoch", q.Precision)
-	}
-	req.URL.RawQuery = params.Encode()
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	// If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb
-	// but instead some other service. If the error code is also a 500+ code, then some
-	// downstream loadbalancer/proxy/etc had an issue and we should report that.
-	if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil || len(body) == 0 {
-			return nil, fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
-		}
-
-		return nil, fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
-	}
-
-	// If we get an unexpected content type, then it is also not from influx direct and therefore
-	// we want to know what we received and what status code was returned for debugging purposes.
-	if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
-		// Read up to 1kb of the body to help identify downstream errors and limit the impact of things
-		// like downstream serving a large file
-		body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
-		if err != nil || len(body) == 0 {
-			return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
-		}
-
-		return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
-	}
-
-	var response Response
-	if q.Chunked {
-		cr := NewChunkedResponse(resp.Body)
-		for {
-			r, err := cr.NextResponse()
-			if err != nil {
-				// If we got an error while decoding the response, send that back.
-				return nil, err
-			}
-
-			if r == nil {
-				break
-			}
-
-			response.Results = append(response.Results, r.Results...)
-			if r.Err != "" {
-				response.Err = r.Err
-				break
-			}
-		}
-	} else {
-		dec := json.NewDecoder(resp.Body)
-		dec.UseNumber()
-		decErr := dec.Decode(&response)
-
-		// ignore this error if we got an invalid status code
-		if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
-			decErr = nil
-		}
-		// If we got a valid decode error, send that back
-		if decErr != nil {
-			return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
-		}
-	}
-
-	// If we don't have an error in our json response, and didn't get statusOK
-	// then send back an error
-	if resp.StatusCode != http.StatusOK && response.Error() == nil {
-		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
-	}
-	return &response, nil
-}
-
-// duplexReader reads responses and writes it to another writer while
-// satisfying the reader interface.
-type duplexReader struct {
-	r io.Reader
-	w io.Writer
-}
-
-func (r *duplexReader) Read(p []byte) (n int, err error) {
-	n, err = r.r.Read(p)
-	if err == nil {
-		r.w.Write(p[:n])
-	}
-	return n, err
-}
-
-// ChunkedResponse represents a response from the server that
-// uses chunking to stream the output.
-type ChunkedResponse struct {
-	dec    *json.Decoder
-	duplex *duplexReader
-	buf    bytes.Buffer
-}
-
-// NewChunkedResponse reads a stream and produces responses from the stream.
-func NewChunkedResponse(r io.Reader) *ChunkedResponse {
-	resp := &ChunkedResponse{}
-	resp.duplex = &duplexReader{r: r, w: &resp.buf}
-	resp.dec = json.NewDecoder(resp.duplex)
-	resp.dec.UseNumber()
-	return resp
-}
-
-// NextResponse reads the next line of the stream and returns a response.
-func (r *ChunkedResponse) NextResponse() (*Response, error) {
-	var response Response
-
-	if err := r.dec.Decode(&response); err != nil {
-		if err == io.EOF {
-			return nil, nil
-		}
-		// A decoding error happened. This probably means the server crashed
-		// and sent a last-ditch error message to us. Ensure we have read the
-		// entirety of the connection to get any remaining error text.
-		io.Copy(ioutil.Discard, r.duplex)
-		return nil, errors.New(strings.TrimSpace(r.buf.String()))
-	}
-
-	r.buf.Reset()
-	return &response, nil
-}

+ 0 - 112
vendor/github.com/influxdata/influxdb/client/v2/udp.go

@@ -1,112 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"time"
-)
-
-const (
-	// UDPPayloadSize is a reasonable default payload size for UDP packets that
-	// could be travelling over the internet.
-	UDPPayloadSize = 512
-)
-
-// UDPConfig is the config data needed to create a UDP Client.
-type UDPConfig struct {
-	// Addr should be of the form "host:port"
-	// or "[ipv6-host%zone]:port".
-	Addr string
-
-	// PayloadSize is the maximum size of a UDP client message, optional
-	// Tune this based on your network. Defaults to UDPPayloadSize.
-	PayloadSize int
-}
-
-// NewUDPClient returns a client interface for writing to an InfluxDB UDP
-// service from the given config.
-func NewUDPClient(conf UDPConfig) (Client, error) {
-	var udpAddr *net.UDPAddr
-	udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
-	if err != nil {
-		return nil, err
-	}
-
-	conn, err := net.DialUDP("udp", nil, udpAddr)
-	if err != nil {
-		return nil, err
-	}
-
-	payloadSize := conf.PayloadSize
-	if payloadSize == 0 {
-		payloadSize = UDPPayloadSize
-	}
-
-	return &udpclient{
-		conn:        conn,
-		payloadSize: payloadSize,
-	}, nil
-}
-
-// Close releases the udpclient's resources.
-func (uc *udpclient) Close() error {
-	return uc.conn.Close()
-}
-
-type udpclient struct {
-	conn        io.WriteCloser
-	payloadSize int
-}
-
-func (uc *udpclient) Write(bp BatchPoints) error {
-	var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
-	var d, _ = time.ParseDuration("1" + bp.Precision())
-
-	var delayedError error
-
-	var checkBuffer = func(n int) {
-		if len(b) > 0 && len(b)+n > uc.payloadSize {
-			if _, err := uc.conn.Write(b); err != nil {
-				delayedError = err
-			}
-			b = b[:0]
-		}
-	}
-
-	for _, p := range bp.Points() {
-		p.pt.Round(d)
-		pointSize := p.pt.StringSize() + 1 // include newline in size
-		//point := p.pt.RoundedString(d) + "\n"
-
-		checkBuffer(pointSize)
-
-		if p.Time().IsZero() || pointSize <= uc.payloadSize {
-			b = p.pt.AppendString(b)
-			b = append(b, '\n')
-			continue
-		}
-
-		points := p.pt.Split(uc.payloadSize - 1) // account for newline character
-		for _, sp := range points {
-			checkBuffer(sp.StringSize() + 1)
-			b = sp.AppendString(b)
-			b = append(b, '\n')
-		}
-	}
-
-	if len(b) > 0 {
-		if _, err := uc.conn.Write(b); err != nil {
-			return err
-		}
-	}
-	return delayedError
-}
-
-func (uc *udpclient) Query(q Query) (*Response, error) {
-	return nil, fmt.Errorf("Querying via UDP is not supported")
-}
-
-func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
-	return 0, "", nil
-}

+ 0 - 48
vendor/github.com/influxdata/influxdb/models/consistency.go

@@ -1,48 +0,0 @@
-package models
-
-import (
-	"errors"
-	"strings"
-)
-
-// ConsistencyLevel represent a required replication criteria before a write can
-// be returned as successful.
-//
-// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
-type ConsistencyLevel int
-
-const (
-	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
-	ConsistencyLevelAny ConsistencyLevel = iota
-
-	// ConsistencyLevelOne requires at least one data node acknowledged a write.
-	ConsistencyLevelOne
-
-	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
-	ConsistencyLevelQuorum
-
-	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
-	ConsistencyLevelAll
-)
-
-var (
-	// ErrInvalidConsistencyLevel is returned when parsing the string version
-	// of a consistency level.
-	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
-)
-
-// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
-func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
-	switch strings.ToLower(level) {
-	case "any":
-		return ConsistencyLevelAny, nil
-	case "one":
-		return ConsistencyLevelOne, nil
-	case "quorum":
-		return ConsistencyLevelQuorum, nil
-	case "all":
-		return ConsistencyLevelAll, nil
-	default:
-		return 0, ErrInvalidConsistencyLevel
-	}
-}

+ 0 - 32
vendor/github.com/influxdata/influxdb/models/inline_fnv.go

@@ -1,32 +0,0 @@
-package models // import "github.com/influxdata/influxdb/models"
-
-// from stdlib hash/fnv/fnv.go
-const (
-	prime64  = 1099511628211
-	offset64 = 14695981039346656037
-)
-
-// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
-// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
-type InlineFNV64a uint64
-
-// NewInlineFNV64a returns a new instance of InlineFNV64a.
-func NewInlineFNV64a() InlineFNV64a {
-	return offset64
-}
-
-// Write adds data to the running hash.
-func (s *InlineFNV64a) Write(data []byte) (int, error) {
-	hash := uint64(*s)
-	for _, c := range data {
-		hash ^= uint64(c)
-		hash *= prime64
-	}
-	*s = InlineFNV64a(hash)
-	return len(data), nil
-}
-
-// Sum64 returns the uint64 of the current resulting hash.
-func (s *InlineFNV64a) Sum64() uint64 {
-	return uint64(*s)
-}

+ 0 - 44
vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go

@@ -1,44 +0,0 @@
-package models // import "github.com/influxdata/influxdb/models"
-
-import (
-	"reflect"
-	"strconv"
-	"unsafe"
-)
-
-// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
-func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
-	s := unsafeBytesToString(b)
-	return strconv.ParseInt(s, base, bitSize)
-}
-
-// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
-func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
-	s := unsafeBytesToString(b)
-	return strconv.ParseUint(s, base, bitSize)
-}
-
-// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
-func parseFloatBytes(b []byte, bitSize int) (float64, error) {
-	s := unsafeBytesToString(b)
-	return strconv.ParseFloat(s, bitSize)
-}
-
-// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
-func parseBoolBytes(b []byte) (bool, error) {
-	return strconv.ParseBool(unsafeBytesToString(b))
-}
-
-// unsafeBytesToString converts a []byte to a string without a heap allocation.
-//
-// It is unsafe, and is intended to prepare input to short-lived functions
-// that require strings.
-func unsafeBytesToString(in []byte) string {
-	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
-	dst := reflect.StringHeader{
-		Data: src.Data,
-		Len:  src.Len,
-	}
-	s := *(*string)(unsafe.Pointer(&dst))
-	return s
-}

+ 0 - 2455
vendor/github.com/influxdata/influxdb/models/points.go

@@ -1,2455 +0,0 @@
-// Package models implements basic objects used throughout the TICK stack.
-package models // import "github.com/influxdata/influxdb/models"
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-	"unicode"
-	"unicode/utf8"
-
-	"github.com/influxdata/influxdb/pkg/escape"
-)
-
-type escapeSet struct {
-	k   [1]byte
-	esc [2]byte
-}
-
-var (
-	measurementEscapeCodes = [...]escapeSet{
-		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
-		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
-	}
-
-	tagEscapeCodes = [...]escapeSet{
-		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
-		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
-		{k: [1]byte{'='}, esc: [2]byte{'\\', '='}},
-	}
-
-	// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
-	ErrPointMustHaveAField = errors.New("point without fields is unsupported")
-
-	// ErrInvalidNumber is returned when a number is expected but not provided.
-	ErrInvalidNumber = errors.New("invalid number")
-
-	// ErrInvalidPoint is returned when a point cannot be parsed correctly.
-	ErrInvalidPoint = errors.New("point is invalid")
-)
-
-const (
-	// MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
-	MaxKeyLength = 65535
-)
-
-// enableUint64Support will enable uint64 support if set to true.
-var enableUint64Support = false
-
-// EnableUintSupport manually enables uint support for the point parser.
-// This function will be removed in the future and only exists for unit tests during the
-// transition.
-func EnableUintSupport() {
-	enableUint64Support = true
-}
-
-// Point defines the values that will be written to the database.
-type Point interface {
-	// Name return the measurement name for the point.
-	Name() []byte
-
-	// SetName updates the measurement name for the point.
-	SetName(string)
-
-	// Tags returns the tag set for the point.
-	Tags() Tags
-
-	// ForEachTag iterates over each tag invoking fn.  If fn return false, iteration stops.
-	ForEachTag(fn func(k, v []byte) bool)
-
-	// AddTag adds or replaces a tag value for a point.
-	AddTag(key, value string)
-
-	// SetTags replaces the tags for the point.
-	SetTags(tags Tags)
-
-	// HasTag returns true if the tag exists for the point.
-	HasTag(tag []byte) bool
-
-	// Fields returns the fields for the point.
-	Fields() (Fields, error)
-
-	// Time return the timestamp for the point.
-	Time() time.Time
-
-	// SetTime updates the timestamp for the point.
-	SetTime(t time.Time)
-
-	// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
-	UnixNano() int64
-
-	// HashID returns a non-cryptographic checksum of the point's key.
-	HashID() uint64
-
-	// Key returns the key (measurement joined with tags) of the point.
-	Key() []byte
-
-	// String returns a string representation of the point. If there is a
-	// timestamp associated with the point then it will be specified with the default
-	// precision of nanoseconds.
-	String() string
-
-	// MarshalBinary returns a binary representation of the point.
-	MarshalBinary() ([]byte, error)
-
-	// PrecisionString returns a string representation of the point. If there
-	// is a timestamp associated with the point then it will be specified in the
-	// given unit.
-	PrecisionString(precision string) string
-
-	// RoundedString returns a string representation of the point. If there
-	// is a timestamp associated with the point, then it will be rounded to the
-	// given duration.
-	RoundedString(d time.Duration) string
-
-	// Split will attempt to return multiple points with the same timestamp whose
-	// string representations are no longer than size. Points with a single field or
-	// a point without a timestamp may exceed the requested size.
-	Split(size int) []Point
-
-	// Round will round the timestamp of the point to the given duration.
-	Round(d time.Duration)
-
-	// StringSize returns the length of the string that would be returned by String().
-	StringSize() int
-
-	// AppendString appends the result of String() to the provided buffer and returns
-	// the result, potentially reducing string allocations.
-	AppendString(buf []byte) []byte
-
-	// FieldIterator retuns a FieldIterator that can be used to traverse the
-	// fields of a point without constructing the in-memory map.
-	FieldIterator() FieldIterator
-}
-
-// FieldType represents the type of a field.
-type FieldType int
-
-const (
-	// Integer indicates the field's type is integer.
-	Integer FieldType = iota
-
-	// Float indicates the field's type is float.
-	Float
-
-	// Boolean indicates the field's type is boolean.
-	Boolean
-
-	// String indicates the field's type is string.
-	String
-
-	// Empty is used to indicate that there is no field.
-	Empty
-
-	// Unsigned indicates the field's type is an unsigned integer.
-	Unsigned
-)
-
-// FieldIterator provides a low-allocation interface to iterate through a point's fields.
-type FieldIterator interface {
-	// Next indicates whether there any fields remaining.
-	Next() bool
-
-	// FieldKey returns the key of the current field.
-	FieldKey() []byte
-
-	// Type returns the FieldType of the current field.
-	Type() FieldType
-
-	// StringValue returns the string value of the current field.
-	StringValue() string
-
-	// IntegerValue returns the integer value of the current field.
-	IntegerValue() (int64, error)
-
-	// UnsignedValue returns the unsigned value of the current field.
-	UnsignedValue() (uint64, error)
-
-	// BooleanValue returns the boolean value of the current field.
-	BooleanValue() (bool, error)
-
-	// FloatValue returns the float value of the current field.
-	FloatValue() (float64, error)
-
-	// Reset resets the iterator to its initial state.
-	Reset()
-}
-
-// Points represents a sortable list of points by timestamp.
-type Points []Point
-
-// Len implements sort.Interface.
-func (a Points) Len() int { return len(a) }
-
-// Less implements sort.Interface.
-func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
-
-// Swap implements sort.Interface.
-func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// point is the default implementation of Point.
-type point struct {
-	time time.Time
-
-	// text encoding of measurement and tags
-	// key must always be stored sorted by tags, if the original line was not sorted,
-	// we need to resort it
-	key []byte
-
-	// text encoding of field data
-	fields []byte
-
-	// text encoding of timestamp
-	ts []byte
-
-	// cached version of parsed fields from data
-	cachedFields map[string]interface{}
-
-	// cached version of parsed name from key
-	cachedName string
-
-	// cached version of parsed tags
-	cachedTags Tags
-
-	it fieldIterator
-}
-
-// type assertions
-var (
-	_ Point         = (*point)(nil)
-	_ FieldIterator = (*point)(nil)
-)
-
-const (
-	// the number of characters for the largest possible int64 (9223372036854775807)
-	maxInt64Digits = 19
-
-	// the number of characters for the smallest possible int64 (-9223372036854775808)
-	minInt64Digits = 20
-
-	// the number of characters for the largest possible uint64 (18446744073709551615)
-	maxUint64Digits = 20
-
-	// the number of characters required for the largest float64 before a range check
-	// would occur during parsing
-	maxFloat64Digits = 25
-
-	// the number of characters required for smallest float64 before a range check occur
-	// would occur during parsing
-	minFloat64Digits = 27
-)
-
-// ParsePoints returns a slice of Points from a text representation of a point
-// with each point separated by newlines.  If any points fail to parse, a non-nil error
-// will be returned in addition to the points that parsed successfully.
-func ParsePoints(buf []byte) ([]Point, error) {
-	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
-}
-
-// ParsePointsString is identical to ParsePoints but accepts a string.
-func ParsePointsString(buf string) ([]Point, error) {
-	return ParsePoints([]byte(buf))
-}
-
-// ParseKey returns the measurement name and tags from a point.
-//
-// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
-// This can have the unintended effect preventing buf from being garbage collected.
-func ParseKey(buf []byte) (string, Tags) {
-	name, tags := ParseKeyBytes(buf)
-	return string(name), tags
-}
-
-func ParseKeyBytes(buf []byte) ([]byte, Tags) {
-	return ParseKeyBytesWithTags(buf, nil)
-}
-
-func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) {
-	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
-	// when just parsing a key
-	state, i, _ := scanMeasurement(buf, 0)
-
-	var name []byte
-	if state == tagKeyState {
-		tags = parseTags(buf, tags)
-		// scanMeasurement returns the location of the comma if there are tags, strip that off
-		name = buf[:i-1]
-	} else {
-		name = buf[:i]
-	}
-	return unescapeMeasurement(name), tags
-}
-
-func ParseTags(buf []byte) Tags {
-	return parseTags(buf, nil)
-}
-
-func ParseName(buf []byte) []byte {
-	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
-	// when just parsing a key
-	state, i, _ := scanMeasurement(buf, 0)
-	var name []byte
-	if state == tagKeyState {
-		name = buf[:i-1]
-	} else {
-		name = buf[:i]
-	}
-
-	return unescapeMeasurement(name)
-}
-
-// ParsePointsWithPrecision is similar to ParsePoints, but allows the
-// caller to provide a precision for time.
-//
-// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
-// This can have the unintended effect preventing buf from being garbage collected.
-func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
-	points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
-	var (
-		pos    int
-		block  []byte
-		failed []string
-	)
-	for pos < len(buf) {
-		pos, block = scanLine(buf, pos)
-		pos++
-
-		if len(block) == 0 {
-			continue
-		}
-
-		// lines which start with '#' are comments
-		start := skipWhitespace(block, 0)
-
-		// If line is all whitespace, just skip it
-		if start >= len(block) {
-			continue
-		}
-
-		if block[start] == '#' {
-			continue
-		}
-
-		// strip the newline if one is present
-		if block[len(block)-1] == '\n' {
-			block = block[:len(block)-1]
-		}
-
-		pt, err := parsePoint(block[start:], defaultTime, precision)
-		if err != nil {
-			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
-		} else {
-			points = append(points, pt)
-		}
-
-	}
-	if len(failed) > 0 {
-		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
-	}
-	return points, nil
-
-}
-
-func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
-	// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
-	pos, key, err := scanKey(buf, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	// measurement name is required
-	if len(key) == 0 {
-		return nil, fmt.Errorf("missing measurement")
-	}
-
-	if len(key) > MaxKeyLength {
-		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
-	}
-
-	// scan the second block is which is field1=value1[,field2=value2,...]
-	pos, fields, err := scanFields(buf, pos)
-	if err != nil {
-		return nil, err
-	}
-
-	// at least one field is required
-	if len(fields) == 0 {
-		return nil, fmt.Errorf("missing fields")
-	}
-
-	var maxKeyErr error
-	walkFields(fields, func(k, v []byte) bool {
-		if sz := seriesKeySize(key, k); sz > MaxKeyLength {
-			maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
-			return false
-		}
-		return true
-	})
-
-	if maxKeyErr != nil {
-		return nil, maxKeyErr
-	}
-
-	// scan the last block which is an optional integer timestamp
-	pos, ts, err := scanTime(buf, pos)
-	if err != nil {
-		return nil, err
-	}
-
-	pt := &point{
-		key:    key,
-		fields: fields,
-		ts:     ts,
-	}
-
-	if len(ts) == 0 {
-		pt.time = defaultTime
-		pt.SetPrecision(precision)
-	} else {
-		ts, err := parseIntBytes(ts, 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		pt.time, err = SafeCalcTime(ts, precision)
-		if err != nil {
-			return nil, err
-		}
-
-		// Determine if there are illegal non-whitespace characters after the
-		// timestamp block.
-		for pos < len(buf) {
-			if buf[pos] != ' ' {
-				return nil, ErrInvalidPoint
-			}
-			pos++
-		}
-	}
-	return pt, nil
-}
-
-// GetPrecisionMultiplier will return a multiplier for the precision specified.
-func GetPrecisionMultiplier(precision string) int64 {
-	d := time.Nanosecond
-	switch precision {
-	case "u":
-		d = time.Microsecond
-	case "ms":
-		d = time.Millisecond
-	case "s":
-		d = time.Second
-	case "m":
-		d = time.Minute
-	case "h":
-		d = time.Hour
-	}
-	return int64(d)
-}
-
-// scanKey scans buf starting at i for the measurement and tag portion of the point.
-// It returns the ending position and the byte slice of key within buf.  If there
-// are tags, they will be sorted if they are not already.
-func scanKey(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-
-	i = start
-
-	// Determines whether the tags are sort, assume they are
-	sorted := true
-
-	// indices holds the indexes within buf of the start of each tag.  For example,
-	// a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20]
-	// which indicates that the first tag starts at buf[4], seconds at buf[11], and
-	// last at buf[20]
-	indices := make([]int, 100)
-
-	// tracks how many commas we've seen so we know how many values are indices.
-	// Since indices is an arbitrarily large slice,
-	// we need to know how many values in the buffer are in use.
-	commas := 0
-
-	// First scan the Point's measurement.
-	state, i, err := scanMeasurement(buf, i)
-	if err != nil {
-		return i, buf[start:i], err
-	}
-
-	// Optionally scan tags if needed.
-	if state == tagKeyState {
-		i, commas, indices, err = scanTags(buf, i, indices)
-		if err != nil {
-			return i, buf[start:i], err
-		}
-	}
-
-	// Now we know where the key region is within buf, and the location of tags, we
-	// need to determine if duplicate tags exist and if the tags are sorted. This iterates
-	// over the list comparing each tag in the sequence with each other.
-	for j := 0; j < commas-1; j++ {
-		// get the left and right tags
-		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
-		_, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
-
-		// If left is greater than right, the tags are not sorted. We do not have to
-		// continue because the short path no longer works.
-		// If the tags are equal, then there are duplicate tags, and we should abort.
-		// If the tags are not sorted, this pass may not find duplicate tags and we
-		// need to do a more exhaustive search later.
-		if cmp := bytes.Compare(left, right); cmp > 0 {
-			sorted = false
-			break
-		} else if cmp == 0 {
-			return i, buf[start:i], fmt.Errorf("duplicate tags")
-		}
-	}
-
-	// If the tags are not sorted, then sort them.  This sort is inline and
-	// uses the tag indices we created earlier.  The actual buffer is not sorted, the
-	// indices are using the buffer for value comparison.  After the indices are sorted,
-	// the buffer is reconstructed from the sorted indices.
-	if !sorted && commas > 0 {
-		// Get the measurement name for later
-		measurement := buf[start : indices[0]-1]
-
-		// Sort the indices
-		indices := indices[:commas]
-		insertionSort(0, commas, buf, indices)
-
-		// Create a new key using the measurement and sorted indices
-		b := make([]byte, len(buf[start:i]))
-		pos := copy(b, measurement)
-		for _, i := range indices {
-			b[pos] = ','
-			pos++
-			_, v := scanToSpaceOr(buf, i, ',')
-			pos += copy(b[pos:], v)
-		}
-
-		// Check again for duplicate tags now that the tags are sorted.
-		for j := 0; j < commas-1; j++ {
-			// get the left and right tags
-			_, left := scanTo(buf[indices[j]:], 0, '=')
-			_, right := scanTo(buf[indices[j+1]:], 0, '=')
-
-			// If the tags are equal, then there are duplicate tags, and we should abort.
-			// If the tags are not sorted, this pass may not find duplicate tags and we
-			// need to do a more exhaustive search later.
-			if bytes.Equal(left, right) {
-				return i, b, fmt.Errorf("duplicate tags")
-			}
-		}
-
-		return i, b, nil
-	}
-
-	return i, buf[start:i], nil
-}
-
-// The following constants allow us to specify which state to move to
-// next, when scanning sections of a Point.
-const (
-	tagKeyState = iota
-	tagValueState
-	fieldsState
-)
-
-// scanMeasurement examines the measurement part of a Point, returning
-// the next state to move to, and the current location in the buffer.
-func scanMeasurement(buf []byte, i int) (int, int, error) {
-	// Check first byte of measurement, anything except a comma is fine.
-	// It can't be a space, since whitespace is stripped prior to this
-	// function call.
-	if i >= len(buf) || buf[i] == ',' {
-		return -1, i, fmt.Errorf("missing measurement")
-	}
-
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		if buf[i-1] == '\\' {
-			// Skip character (it's escaped).
-			continue
-		}
-
-		// Unescaped comma; move onto scanning the tags.
-		if buf[i] == ',' {
-			return tagKeyState, i + 1, nil
-		}
-
-		// Unescaped space; move onto scanning the fields.
-		if buf[i] == ' ' {
-			// cpu value=1.0
-			return fieldsState, i, nil
-		}
-	}
-}
-
-// scanTags examines all the tags in a Point, keeping track of and
-// returning the updated indices slice, number of commas and location
-// in buf where to start examining the Point fields.
-func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
-	var (
-		err    error
-		commas int
-		state  = tagKeyState
-	)
-
-	for {
-		switch state {
-		case tagKeyState:
-			// Grow our indices slice if we have too many tags.
-			if commas >= len(indices) {
-				newIndics := make([]int, cap(indices)*2)
-				copy(newIndics, indices)
-				indices = newIndics
-			}
-			indices[commas] = i
-			commas++
-
-			i, err = scanTagsKey(buf, i)
-			state = tagValueState // tag value always follows a tag key
-		case tagValueState:
-			state, i, err = scanTagsValue(buf, i)
-		case fieldsState:
-			indices[commas] = i + 1
-			return i, commas, indices, nil
-		}
-
-		if err != nil {
-			return i, commas, indices, err
-		}
-	}
-}
-
// scanTagsKey scans a single tag key, returning the position just past
// the '=' that terminates it, or an error if the key is empty or never
// followed by a value.
func scanTagsKey(buf []byte, i int) (int, error) {
	// A key must begin with at least one character that is not a
	// separator. e.g. "cpu,{'', ' ', ',', '='}" are all invalid.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		return i, fmt.Errorf("missing tag key")
	}

	// Walk the key until an unescaped '=' hands off to the tag value, or
	// an unescaped space/comma (or the end of input) makes it invalid.
	for i++; ; i++ {
		switch {
		case i >= len(buf),
			(buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\':
			// e.g. "cpu,tag{'', ' ', ','}"
			return i, fmt.Errorf("missing tag value")
		case buf[i] == '=' && buf[i-1] != '\\':
			// e.g. "cpu,tag="
			return i + 1, nil
		}
	}
}
-
-// scanTagsValue scans each character in a tag value.
-func scanTagsValue(buf []byte, i int) (int, int, error) {
-	// Tag value cannot be empty.
-	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
-		// cpu,tag={',', ' '}
-		return -1, i, fmt.Errorf("missing tag value")
-	}
-
-	// Examine each character in the tag value until we hit an unescaped
-	// comma (move onto next tag key), an unescaped space (move onto
-	// fields), or we error out.
-	for {
-		i++
-		if i >= len(buf) {
-			// cpu,tag=value
-			return -1, i, fmt.Errorf("missing fields")
-		}
-
-		// An unescaped equals sign is an invalid tag value.
-		if buf[i] == '=' && buf[i-1] != '\\' {
-			// cpu,tag={'=', 'fo=o'}
-			return -1, i, fmt.Errorf("invalid tag format")
-		}
-
-		if buf[i] == ',' && buf[i-1] != '\\' {
-			// cpu,tag=foo,
-			return tagKeyState, i + 1, nil
-		}
-
-		// cpu,tag=foo value=1.0
-		// cpu, tag=foo\= value=1.0
-		if buf[i] == ' ' && buf[i-1] != '\\' {
-			return fieldsState, i, nil
-		}
-	}
-}
-
-func insertionSort(l, r int, buf []byte, indices []int) {
-	for i := l + 1; i < r; i++ {
-		for j := i; j > l && less(buf, indices, j, j-1); j-- {
-			indices[j], indices[j-1] = indices[j-1], indices[j]
-		}
-	}
-}
-
-func less(buf []byte, indices []int, i, j int) bool {
-	// This grabs the tag names for i & j, it ignores the values
-	_, a := scanTo(buf, indices[i], '=')
-	_, b := scanTo(buf, indices[j], '=')
-	return bytes.Compare(a, b) < 0
-}
-
// scanFields scans buf, starting at i for the fields section of a point.  It returns
// the ending position and the byte slice of the fields within buf.
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start
	quoted := false

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters? (skip the backslash and the escaped byte)
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote
		// Only quote values in the field value since quotes are not significant
		// in the field key. equals > commas holds exactly when we are
		// positioned inside a value rather than a key.
		if buf[i] == '"' && equals > commas {
			quoted = !quoted
			i++
			continue
		}

		// If we see an =, ensure that there is at least one char before and after it
		if buf[i] == '=' && !quoted {
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], fmt.Errorf("missing field key")
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], fmt.Errorf("missing field key")
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], fmt.Errorf("missing field value")
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], fmt.Errorf("missing field value")
			}

			// Values starting with a digit, '.', '-', 'N' or 'n' go to the
			// numeric scanner (which also rejects NaN explicitly).
			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If next byte is not a double-quote, the value must be a boolean
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && !quoted {
			commas++
		}

		// reached end of block? (an unquoted space ends the fields section)
		if buf[i] == ' ' && !quoted {
			break
		}
		i++
	}

	if quoted {
		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
	}

	// check that all field sections had key and values (e.g. prevent "a=1,b")
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], fmt.Errorf("invalid field format")
	}

	return i, buf[start:i], nil
}
-
-// scanTime scans buf, starting at i for the time section of a point. It
-// returns the ending position and the byte slice of the timestamp within buf
-// and and error if the timestamp is not in the correct numeric format.
-func scanTime(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-	i = start
-
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		// Reached end of block or trailing whitespace?
-		if buf[i] == '\n' || buf[i] == ' ' {
-			break
-		}
-
-		// Handle negative timestamps
-		if i == start && buf[i] == '-' {
-			i++
-			continue
-		}
-
-		// Timestamps should be integers, make sure they are so we don't need
-		// to actually  parse the timestamp until needed.
-		if buf[i] < '0' || buf[i] > '9' {
-			return i, buf[start:i], fmt.Errorf("bad timestamp")
-		}
-		i++
-	}
-	return i, buf[start:i], nil
-}
-
// isNumeric reports whether b is an ASCII digit or a decimal point.
func isNumeric(b byte) bool {
	return b == '.' || ('0' <= b && b <= '9')
}
-
// scanNumber returns the end position within buf, start at i after
// scanning over buf for an integer, or float.  It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
	start := i
	var isInt, isUnsigned bool

	// Is negative number?
	if i < len(buf) && buf[i] == '-' {
		i++
		// There must be more characters now, as just '-' is illegal.
		if i == len(buf) {
			return i, ErrInvalidNumber
		}
	}

	// how many decimal points we've seen
	decimal := false

	// indicates the number is float in scientific notation
	scientific := false

	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}

		// An 'i' (integer) or 'u' (unsigned) suffix may appear at most
		// once, and never as the first character.
		if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
			isInt = true
			i++
			continue
		} else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
			isUnsigned = true
			i++
			continue
		}

		if buf[i] == '.' {
			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
			if decimal {
				return i, ErrInvalidNumber
			}
			decimal = true
		}

		// `e` is valid for floats but not as the first char
		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
			scientific = true
			i++
			continue
		}

		// + and - are only valid at this point if they follow an e (scientific notation)
		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
			i++
			continue
		}

		// NaN is an unsupported value
		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
			return i, ErrInvalidNumber
		}

		if !isNumeric(buf[i]) {
			return i, ErrInvalidNumber
		}
		i++
	}

	// Integer/unsigned suffixes may not be combined with a decimal point
	// or an exponent.
	if (isInt || isUnsigned) && (decimal || scientific) {
		return i, ErrInvalidNumber
	}

	// Count the digits actually present; a bare sign, suffix, or dot with
	// no digits is invalid.
	numericDigits := i - start
	if isInt {
		numericDigits--
	}
	if decimal {
		numericDigits--
	}
	if buf[start] == '-' {
		numericDigits--
	}

	if numericDigits == 0 {
		return i, ErrInvalidNumber
	}

	// It's more common that numbers will be within min/max range for their type but we need to prevent
	// out or range numbers from being parsed successfully.  This uses some simple heuristics to decide
	// if we should parse the number to the actual type.  It does not do it all the time because it incurs
	// extra allocations and we end up converting the type again when writing points to disk.
	if isInt {
		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
		if buf[i-1] != 'i' {
			return i, ErrInvalidNumber
		}
		// Parse the int to check bounds the number of digits could be larger than the max range
		// We subtract 1 from the index to remove the `i` from our tests
		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
			}
		}
	} else if isUnsigned {
		// Return an error if uint64 support has not been enabled.
		if !enableUint64Support {
			return i, ErrInvalidNumber
		}
		// Make sure the last char is a 'u' for unsigned
		if buf[i-1] != 'u' {
			return i, ErrInvalidNumber
		}
		// Make sure the first char is not a '-' for unsigned
		if buf[start] == '-' {
			return i, ErrInvalidNumber
		}
		// Parse the uint to check bounds the number of digits could be larger than the max range
		// We subtract 1 from the index to remove the `u` from our tests
		if len(buf[start:i-1]) >= maxUint64Digits {
			if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
				return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
			}
		}
	} else {
		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
			if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
				return i, fmt.Errorf("invalid float")
			}
		}
	}

	return i, nil
}
-
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an
// error if an invalid boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	// Guard against starting at or past the end of the buffer — the old
	// `i < len(buf) && ...` form fell through in that case and panicked
	// slicing buf[start:i] below — and require a plausible first byte.
	if i >= len(buf) || (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], fmt.Errorf("invalid boolean")
	}

	// Consume the token up to the next field separator or end of input.
	i++
	for i < len(buf) {
		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// Single char bool (t, T, f, F) is ok.
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// length must be 4 for true, True or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], fmt.Errorf("invalid boolean")
	}

	// length must be 5 for false, False or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], fmt.Errorf("invalid boolean")
	}

	// Otherwise the full word must match one of the accepted spellings.
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], fmt.Errorf("invalid boolean")
	}

	return i, buf[start:i], nil
}
-
// skipWhitespace returns the first position at or after i in buf that is
// not a space, tab, or NUL byte.
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) && (buf[i] == ' ' || buf[i] == '\t' || buf[i] == 0) {
		i++
	}
	return i
}
-
// scanLine returns the position of the next unescaped, unquoted newline
// in buf (or the end of buf), along with the line scanned.
func scanLine(buf []byte, i int) (int, []byte) {
	start := i
	quoted := false
	fields := false

	// Track '=' and ',' counts so quotes are only honored inside field
	// values; this duplicates part of scanFields.
	equals := 0
	commas := 0
	for {
		if i >= len(buf) {
			break
		}

		// Jump over escaped characters.
		if buf[i] == '\\' && i+2 < len(buf) {
			i += 2
			continue
		}

		// The first space ends the key section and starts the fields.
		if buf[i] == ' ' {
			fields = true
		}

		if fields {
			if !quoted {
				switch buf[i] {
				case '=':
					i++
					equals++
					continue
				case ',':
					i++
					commas++
					continue
				}
			}
			// A double quote toggles string mode, but only inside a
			// field value (more '=' than ',' seen so far).
			if buf[i] == '"' && equals > commas {
				i++
				quoted = !quoted
				continue
			}
		}

		if buf[i] == '\n' && !quoted {
			break
		}

		i++
	}

	return i, buf[start:i]
}
-
// scanTo returns the end position in buf and the consecutive block of
// bytes starting at i and ending just before an unescaped stop byte (or
// the end of buf).
func scanTo(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	for i < len(buf) {
		// An unescaped stop byte terminates the block.
		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
			break
		}
		i++
	}

	return i, buf[start:i]
}
-
// scanToSpaceOr returns the end position in buf and the consecutive
// block of bytes starting at i and ending just before an unescaped stop
// byte or space (or the end of buf).
func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	if i >= len(buf) {
		// Nothing left to scan.
		return i, nil
	}
	if buf[i] == stop || buf[i] == ' ' {
		return i, buf[start:i]
	}

	for {
		i++

		// Check bounds BEFORE inspecting the previous byte: the old
		// ordering ran the escape check first, so input ending in a
		// backslash advanced i past the end and panicked.
		if i >= len(buf) {
			return i, buf[start:i]
		}

		// Skip escaped characters.
		if buf[i-1] == '\\' {
			continue
		}

		// reached end of block?
		if buf[i] == stop || buf[i] == ' ' {
			return i, buf[start:i]
		}
	}
}
-
// scanTagValue returns the end position in buf and the tag value
// starting at i, terminated by an unescaped comma or the end of buf.
func scanTagValue(buf []byte, i int) (int, []byte) {
	start := i
	for i < len(buf) {
		// The i == 0 guard prevents the buf[i-1] lookup from panicking
		// when the scan starts on a leading comma; the old code indexed
		// buf[-1] in that case. The comma cannot be escaped at i == 0.
		if buf[i] == ',' && (i == 0 || buf[i-1] != '\\') {
			break
		}
		i++
	}
	// The old `if i > len(buf)` branch was unreachable (i never exceeds
	// len(buf)) and has been removed.
	return i, buf[start:i]
}
-
// scanFieldValue returns the end position in buf and the raw bytes of
// one field value starting at i, terminated by an unquoted comma or the
// end of buf.
func scanFieldValue(buf []byte, i int) (int, []byte) {
	start := i
	quoted := false
	for i < len(buf) {
		c := buf[i]
		switch {
		// Within a field value only a double-quote or a backslash may
		// be escaped.
		case c == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\'):
			i += 2
		// An unescaped double quote toggles string mode.
		case c == '"':
			quoted = !quoted
			i++
		// An unquoted comma separates this value from the next field.
		case c == ',' && !quoted:
			return i, buf[start:i]
		default:
			i++
		}
	}
	return i, buf[start:i]
}
-
-func EscapeMeasurement(in []byte) []byte {
-	for _, c := range measurementEscapeCodes {
-		if bytes.IndexByte(in, c.k[0]) != -1 {
-			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
-		}
-	}
-	return in
-}
-
// unescapeMeasurement reverses EscapeMeasurement, turning escaped
// separator characters in a measurement name back into their literal form.
func unescapeMeasurement(in []byte) []byte {
	// Fast path: no backslash means nothing can be escaped.
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	for i := range measurementEscapeCodes {
		c := &measurementEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
		}
	}
	return in
}

// escapeTag escapes any separator characters in a tag key or value,
// returning the escaped form.
func escapeTag(in []byte) []byte {
	for i := range tagEscapeCodes {
		c := &tagEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
		}
	}
	return in
}

// unescapeTag reverses escapeTag.
func unescapeTag(in []byte) []byte {
	// Fast path: no backslash means nothing can be escaped.
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	for i := range tagEscapeCodes {
		c := &tagEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
		}
	}
	return in
}
-
// escapeStringFieldReplacer replaces double quotes and backslashes
// with the same character preceded by a backslash.
// As of Go 1.7 this benchmarked better in allocations and CPU time
// compared to iterating through a string byte-by-byte and appending to a new byte slice,
// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.
var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)

// EscapeStringField returns a copy of in with any double quotes or
// backslashes escaped with a leading backslash, suitable for embedding
// in a line-protocol string field value.
func EscapeStringField(in string) string {
	return escapeStringFieldReplacer.Replace(in)
}
-
// unescapeStringField returns a copy of in with any escaped
// double-quotes or backslashes unescaped. If in contains no escapes it
// is returned as-is without allocating.
func unescapeStringField(in string) string {
	if strings.IndexByte(in, '\\') == -1 {
		return in
	}

	var b strings.Builder
	b.Grow(len(in))
	for i := 0; i < len(in); i++ {
		// `\\` becomes `\` and `\"` becomes `"`; any other backslash is
		// copied through literally.
		if in[i] == '\\' && i+1 < len(in) && (in[i+1] == '\\' || in[i+1] == '"') {
			b.WriteByte(in[i+1])
			i++
			continue
		}
		b.WriteByte(in[i])
	}
	return b.String()
}
-
// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
// an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function
// returns an error.
func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
	key, err := pointKey(name, tags, fields, t)
	if err != nil {
		return nil, err
	}

	return &point{
		key:    key,
		time:   t,
		fields: fields.MarshalBinary(),
	}, nil
}

// pointKey checks some basic requirements for valid points, and returns the
// key, along with a possible error.
func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
	// A point with no fields is never valid.
	if len(fields) == 0 {
		return nil, ErrPointMustHaveAField
	}

	// A zero time is allowed (the point has no timestamp); otherwise the
	// time must be within the supported range.
	if !t.IsZero() {
		if err := CheckTime(t); err != nil {
			return nil, err
		}
	}

	// Reject non-finite float values and empty field names up front,
	// before any key is built. float32 and float64 are checked
	// separately because the type switch binds the concrete type.
	for key, value := range fields {
		switch value := value.(type) {
		case float64:
			// Ensure the caller validates and handles invalid field values
			if math.IsInf(value, 0) {
				return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
			}
			if math.IsNaN(value) {
				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
			}
		case float32:
			// Ensure the caller validates and handles invalid field values
			if math.IsInf(float64(value), 0) {
				return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
			}
			if math.IsNaN(float64(value)) {
				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
			}
		}
		if len(key) == 0 {
			return nil, fmt.Errorf("all fields must have non-empty names")
		}
	}

	// Every series key (measurement key + field) must fit within the
	// maximum key length.
	key := MakeKey([]byte(measurement), tags)
	for field := range fields {
		sz := seriesKeySize(key, []byte(field))
		if sz > MaxKeyLength {
			return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
		}
	}

	return key, nil
}
-
// seriesKeySize returns the number of bytes the series key built from
// the given measurement key and field name will occupy.
func seriesKeySize(key, field []byte) int {
	// The separator between key and field (tsm1.fieldKeySeparator) is
	// four bytes; the constant is inlined here to avoid a circular
	// dependency on the tsm1 package.
	const sepLen = 4
	return len(key) + sepLen + len(field)
}
-
// NewPointFromBytes returns a new Point from a marshalled Point.
func NewPointFromBytes(b []byte) (Point, error) {
	p := &point{}
	if err := p.UnmarshalBinary(b); err != nil {
		return nil, err
	}

	// This does some basic validation to ensure there are fields and they
	// can be unmarshalled as well.
	iter := p.FieldIterator()
	var hasField bool
	for iter.Next() {
		// Fields with empty keys are skipped rather than rejected.
		if len(iter.FieldKey()) == 0 {
			continue
		}
		hasField = true
		// Decode each value once so a corrupt payload is caught here
		// instead of at first use.
		switch iter.Type() {
		case Float:
			_, err := iter.FloatValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
		case Integer:
			_, err := iter.IntegerValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
		case Unsigned:
			_, err := iter.UnsignedValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
		case String:
			// Skip since this won't return an error
		case Boolean:
			_, err := iter.BooleanValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
		}
	}

	if !hasField {
		return nil, ErrPointMustHaveAField
	}

	return p, nil
}

// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If
// an unsupported field value (NaN) is passed, this function panics.
func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
	pt, err := NewPoint(name, tags, fields, time)
	if err != nil {
		panic(err.Error())
	}
	return pt
}
-
// Key returns the key (measurement joined with tags) of the point.
func (p *point) Key() []byte {
	return p.key
}

// name returns the still-escaped measurement name: the portion of the
// key up to the first unescaped comma.
func (p *point) name() []byte {
	_, name := scanTo(p.key, 0, ',')
	return name
}

// Name returns the unescaped measurement name of the point.
func (p *point) Name() []byte {
	return escape.Unescape(p.name())
}

// SetName updates the measurement name for the point.
func (p *point) SetName(name string) {
	p.cachedName = ""
	p.key = MakeKey([]byte(name), p.Tags())
}

// Time returns the timestamp for the point.
func (p *point) Time() time.Time {
	return p.time
}

// SetTime updates the timestamp for the point.
func (p *point) SetTime(t time.Time) {
	p.time = t
}

// Round will round the timestamp of the point to the given duration.
func (p *point) Round(d time.Duration) {
	p.time = p.time.Round(d)
}

// Tags returns the tag set for the point, parsing the key lazily on
// first use and caching the result.
// NOTE(review): parseTags appears to return slices aliasing p.key —
// confirm before mutating the returned Tags.
func (p *point) Tags() Tags {
	if p.cachedTags != nil {
		return p.cachedTags
	}
	p.cachedTags = parseTags(p.key, nil)
	return p.cachedTags
}

// ForEachTag calls fn for each tag key/value pair in the point's key,
// stopping early if fn returns false.
func (p *point) ForEachTag(fn func(k, v []byte) bool) {
	walkTags(p.key, fn)
}
-
-func (p *point) HasTag(tag []byte) bool {
-	if len(p.key) == 0 {
-		return false
-	}
-
-	var exists bool
-	walkTags(p.key, func(key, value []byte) bool {
-		if bytes.Equal(tag, key) {
-			exists = true
-			return false
-		}
-		return true
-	})
-
-	return exists
-}
-
// walkTags calls fn for each tag key/value pair encoded in buf (a series
// key), stopping early if fn returns false. Keys and values are
// unescaped only when the key contains a backslash.
func walkTags(buf []byte, fn func(key, value []byte) bool) {
	if len(buf) == 0 {
		return
	}

	pos, name := scanTo(buf, 0, ',')

	// it's an empty key, so there are no tags
	if len(name) == 0 {
		return
	}

	// One check up front: if there is no backslash anywhere, no
	// per-tag unescaping is needed.
	hasEscape := bytes.IndexByte(buf, '\\') != -1
	i := pos + 1
	var key, value []byte
	for {
		if i >= len(buf) {
			break
		}
		i, key = scanTo(buf, i, '=')
		i, value = scanTagValue(buf, i+1)

		// Empty values are skipped. Note the deliberate lack of i++
		// here: i sits on the comma (or end), and the next scanTo
		// advances past it.
		if len(value) == 0 {
			continue
		}

		if hasEscape {
			if !fn(unescapeTag(key), unescapeTag(value)) {
				return
			}
		} else {
			if !fn(key, value) {
				return
			}
		}

		i++
	}
}

// walkFields walks each field key and value via fn.  If fn returns false, the iteration
// is stopped.  The values are the raw byte slices and not the converted types.
func walkFields(buf []byte, fn func(key, value []byte) bool) {
	var i int
	var key, val []byte
	// buf is re-sliced forward after each key/value pair is consumed.
	for len(buf) > 0 {
		i, key = scanTo(buf, 0, '=')
		buf = buf[i+1:]
		i, val = scanFieldValue(buf, 0)
		buf = buf[i:]
		if !fn(key, val) {
			break
		}

		// slice off comma
		if len(buf) > 0 {
			buf = buf[1:]
		}
	}
}
-
// parseTags parses buf into the provided destination tags, returning destination
// Tags, which may have a different length and capacity. The returned
// Tag entries reference sub-slices of buf unless unescaping allocated
// new ones.
func parseTags(buf []byte, dst Tags) Tags {
	if len(buf) == 0 {
		return nil
	}

	// The comma count is only an upper bound on the tag count, because
	// series keys may contain escaped commas.
	n := bytes.Count(buf, []byte(","))
	if cap(dst) < n {
		dst = make(Tags, n)
	} else {
		dst = dst[:n]
	}

	// Ensure existing behaviour when point has no tags and nil slice passed in.
	if dst == nil {
		dst = Tags{}
	}

	// Series keys can contain escaped commas, therefore the number of commas
	// in a series key only gives an estimation of the upper bound on the number
	// of tags.
	var i int
	walkTags(buf, func(key, value []byte) bool {
		dst[i].Key, dst[i].Value = key, value
		i++
		return true
	})
	return dst[:i]
}

// MakeKey creates a key for a set of tags.
func MakeKey(name []byte, tags Tags) []byte {
	return AppendMakeKey(nil, name, tags)
}

// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer.
func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte {
	// unescape the name and then re-escape it to avoid double escaping.
	// The key should always be stored in escaped form.
	dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...)
	dst = tags.AppendHashKey(dst)
	return dst
}
-
// SetTags replaces the tags for the point and rebuilds its key.
func (p *point) SetTags(tags Tags) {
	p.key = MakeKey(p.Name(), tags)
	p.cachedTags = tags
}

// AddTag adds or replaces a tag value for a point.
// NOTE(review): despite the doc comment, the code only appends the new
// pair and re-sorts — an existing tag with the same key is NOT replaced,
// which can leave duplicate keys in the set. Confirm whether callers
// rely on this before changing it.
func (p *point) AddTag(key, value string) {
	tags := p.Tags()
	tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
	sort.Sort(tags)
	p.cachedTags = tags
	p.key = MakeKey(p.Name(), tags)
}

// Fields returns the fields for the point, decoding the marshalled
// field bytes lazily on first use and caching the result.
func (p *point) Fields() (Fields, error) {
	if p.cachedFields != nil {
		return p.cachedFields, nil
	}
	cf, err := p.unmarshalBinary()
	if err != nil {
		return nil, err
	}
	p.cachedFields = cf
	return p.cachedFields, nil
}
-
// SetPrecision will round a time to the specified precision.
// "n" (nanoseconds) is the native resolution and needs no truncation;
// an unrecognized precision leaves the time unchanged.
func (p *point) SetPrecision(precision string) {
	switch precision {
	case "n":
	case "u":
		p.SetTime(p.Time().Truncate(time.Microsecond))
	case "ms":
		p.SetTime(p.Time().Truncate(time.Millisecond))
	case "s":
		p.SetTime(p.Time().Truncate(time.Second))
	case "m":
		p.SetTime(p.Time().Truncate(time.Minute))
	case "h":
		p.SetTime(p.Time().Truncate(time.Hour))
	}
}

// String returns the string representation of the point: key, fields,
// and (when set) the timestamp in nanoseconds.
func (p *point) String() string {
	if p.Time().IsZero() {
		return string(p.Key()) + " " + string(p.fields)
	}
	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
}

// AppendString appends the string representation of the point to buf.
func (p *point) AppendString(buf []byte) []byte {
	buf = append(buf, p.key...)
	buf = append(buf, ' ')
	buf = append(buf, p.fields...)

	if !p.time.IsZero() {
		buf = append(buf, ' ')
		buf = strconv.AppendInt(buf, p.UnixNano(), 10)
	}

	return buf
}

// StringSize returns the length of the string that would be returned by String().
func (p *point) StringSize() int {
	// key + space + fields
	size := len(p.key) + len(p.fields) + 1

	if !p.time.IsZero() {
		digits := 1 // even "0" has one digit
		t := p.UnixNano()
		if t < 0 {
			// account for negative sign, then negate
			digits++
			t = -t
		}
		for t > 9 { // already accounted for one digit
			digits++
			t /= 10
		}
		size += digits + 1 // digits and a space
	}

	return size
}
-
// MarshalBinary returns a binary representation of the point:
// [4-byte key length][key][4-byte fields length][fields][binary time].
func (p *point) MarshalBinary() ([]byte, error) {
	if len(p.fields) == 0 {
		return nil, ErrPointMustHaveAField
	}

	tb, err := p.time.MarshalBinary()
	if err != nil {
		return nil, err
	}

	// 8 bytes cover the two big-endian uint32 length prefixes.
	b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
	i := 0

	binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
	i += 4

	i += copy(b[i:], p.key)

	binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
	i += 4

	i += copy(b[i:], p.fields)

	copy(b[i:], tb)
	return b, nil
}

// UnmarshalBinary decodes a binary representation of the point into a point struct.
// Note that p.key and p.fields retain references into b (no copy is
// made), so the caller must not mutate b afterwards.
func (p *point) UnmarshalBinary(b []byte) error {
	var n int

	// Read key length.
	if len(b) < 4 {
		return io.ErrShortBuffer
	}
	n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]

	// Read key.
	if len(b) < n {
		return io.ErrShortBuffer
	}
	p.key, b = b[:n], b[n:]

	// Read fields length.
	if len(b) < 4 {
		return io.ErrShortBuffer
	}
	n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]

	// Read fields.
	if len(b) < n {
		return io.ErrShortBuffer
	}
	p.fields, b = b[:n], b[n:]

	// Read timestamp.
	return p.time.UnmarshalBinary(b)
}
-
// PrecisionString returns a string representation of the point. If there
// is a timestamp associated with the point then it will be specified in the
// given unit.
func (p *point) PrecisionString(precision string) string {
	if p.Time().IsZero() {
		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
	}
	// Divide the nanosecond timestamp down to the requested unit.
	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
		p.UnixNano()/GetPrecisionMultiplier(precision))
}

// RoundedString returns a string representation of the point. If there
// is a timestamp associated with the point, then it will be rounded to the
// given duration.
func (p *point) RoundedString(d time.Duration) string {
	if p.Time().IsZero() {
		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
	}
	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
		p.time.Round(d).UnixNano())
}
-
// unmarshalBinary decodes the point's marshalled field bytes into a
// Fields map, returning an error for any value that fails to decode.
// Fields with empty keys are skipped.
func (p *point) unmarshalBinary() (Fields, error) {
	iter := p.FieldIterator()
	// 8 is just a starting size hint for the map.
	fields := make(Fields, 8)
	for iter.Next() {
		if len(iter.FieldKey()) == 0 {
			continue
		}
		switch iter.Type() {
		case Float:
			v, err := iter.FloatValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
			fields[string(iter.FieldKey())] = v
		case Integer:
			v, err := iter.IntegerValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
			fields[string(iter.FieldKey())] = v
		case Unsigned:
			v, err := iter.UnsignedValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
			fields[string(iter.FieldKey())] = v
		case String:
			// String decoding cannot fail.
			fields[string(iter.FieldKey())] = iter.StringValue()
		case Boolean:
			v, err := iter.BooleanValue()
			if err != nil {
				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
			}
			fields[string(iter.FieldKey())] = v
		}
	}
	return fields, nil
}
-
// HashID returns a non-cryptographic checksum (FNV-1a) of the point's key.
func (p *point) HashID() uint64 {
	h := NewInlineFNV64a()
	h.Write(p.key)
	sum := h.Sum64()
	return sum
}

// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
func (p *point) UnixNano() int64 {
	return p.Time().UnixNano()
}

// Split will attempt to return multiple points with the same timestamp whose
// string representations are no longer than size. Points with a single field or
// a point without a timestamp may exceed the requested size.
// The returned sub-points share the key and time, and their fields alias
// sub-slices of p.fields.
func (p *point) Split(size int) []Point {
	if p.time.IsZero() || p.StringSize() <= size {
		return []Point{p}
	}

	// key string, timestamp string, spaces
	size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2

	var points []Point
	var start, cur int

	// Walk field key/value pairs, emitting a sub-point whenever adding
	// the next pair would exceed the budget.
	for cur < len(p.fields) {
		end, _ := scanTo(p.fields, cur, '=')
		end, _ = scanFieldValue(p.fields, end+1)

		if cur > start && end-start > size {
			points = append(points, &point{
				key:    p.key,
				time:   p.time,
				fields: p.fields[start : cur-1],
			})
			start = cur
		}

		cur = end + 1
	}

	// Remaining fields form the final sub-point.
	points = append(points, &point{
		key:    p.key,
		time:   p.time,
		fields: p.fields[start:],
	})

	return points
}
-
// Tag represents a single key/value tag pair.
type Tag struct {
	// Key is the tag key; may alias a parsed series-key buffer.
	Key []byte
	// Value is the tag value; may alias a parsed series-key buffer.
	Value []byte
}

// NewTag returns a new Tag wrapping (not copying) the given key and value.
func NewTag(key, value []byte) Tag {
	return Tag{
		Key:   key,
		Value: value,
	}
}

// Size returns the size of the key and value.
func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
-
-// Clone returns a shallow copy of Tag.
-//
-// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
-// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
-func (t Tag) Clone() Tag {
-	other := Tag{
-		Key:   make([]byte, len(t.Key)),
-		Value: make([]byte, len(t.Value)),
-	}
-
-	copy(other.Key, t.Key)
-	copy(other.Value, t.Value)
-
-	return other
-}
-
-// String returns the string reprsentation of the tag.
-func (t *Tag) String() string {
-	var buf bytes.Buffer
-	buf.WriteByte('{')
-	buf.WriteString(string(t.Key))
-	buf.WriteByte(' ')
-	buf.WriteString(string(t.Value))
-	buf.WriteByte('}')
-	return buf.String()
-}
-
-// Tags represents a sorted list of tags.
-type Tags []Tag
-
-// NewTags returns a new Tags from a map.
-func NewTags(m map[string]string) Tags {
-	if len(m) == 0 {
-		return nil
-	}
-	a := make(Tags, 0, len(m))
-	for k, v := range m {
-		a = append(a, NewTag([]byte(k), []byte(v)))
-	}
-	sort.Sort(a)
-	return a
-}
-
-// Keys returns the list of keys for a tag set.
-func (a Tags) Keys() []string {
-	if len(a) == 0 {
-		return nil
-	}
-	keys := make([]string, len(a))
-	for i, tag := range a {
-		keys[i] = string(tag.Key)
-	}
-	return keys
-}
-
-// Values returns the list of values for a tag set.
-func (a Tags) Values() []string {
-	if len(a) == 0 {
-		return nil
-	}
-	values := make([]string, len(a))
-	for i, tag := range a {
-		values[i] = string(tag.Value)
-	}
-	return values
-}
-
-// String returns the string representation of the tags.
-func (a Tags) String() string {
-	var buf bytes.Buffer
-	buf.WriteByte('[')
-	for i := range a {
-		buf.WriteString(a[i].String())
-		if i < len(a)-1 {
-			buf.WriteByte(' ')
-		}
-	}
-	buf.WriteByte(']')
-	return buf.String()
-}
-
-// Size returns the number of bytes needed to store all tags. Note, this is
-// the number of bytes needed to store all keys and values and does not account
-// for data structures or delimiters for example.
-func (a Tags) Size() int {
-	var total int
-	for i := range a {
-		total += a[i].Size()
-	}
-	return total
-}
-
-// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements
-//
-// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
-// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
-func (a Tags) Clone() Tags {
-	if len(a) == 0 {
-		return nil
-	}
-
-	others := make(Tags, len(a))
-	for i := range a {
-		others[i] = a[i].Clone()
-	}
-
-	return others
-}
-
-func (a Tags) Len() int           { return len(a) }
-func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }
-func (a Tags) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-// Equal returns true if a equals other.
-func (a Tags) Equal(other Tags) bool {
-	if len(a) != len(other) {
-		return false
-	}
-	for i := range a {
-		if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) {
-			return false
-		}
-	}
-	return true
-}
-
-// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b.
-func CompareTags(a, b Tags) int {
-	// Compare each key & value until a mismatch.
-	for i := 0; i < len(a) && i < len(b); i++ {
-		if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 {
-			return cmp
-		}
-		if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 {
-			return cmp
-		}
-	}
-
-	// If all tags are equal up to this point then return shorter tagset.
-	if len(a) < len(b) {
-		return -1
-	} else if len(a) > len(b) {
-		return 1
-	}
-
-	// All tags are equal.
-	return 0
-}
-
-// Get returns the value for a key.
-func (a Tags) Get(key []byte) []byte {
-	// OPTIMIZE: Use sort.Search if tagset is large.
-
-	for _, t := range a {
-		if bytes.Equal(t.Key, key) {
-			return t.Value
-		}
-	}
-	return nil
-}
-
-// GetString returns the string value for a string key.
-func (a Tags) GetString(key string) string {
-	return string(a.Get([]byte(key)))
-}
-
-// Set sets the value for a key.
-func (a *Tags) Set(key, value []byte) {
-	for i, t := range *a {
-		if bytes.Equal(t.Key, key) {
-			(*a)[i].Value = value
-			return
-		}
-	}
-	*a = append(*a, Tag{Key: key, Value: value})
-	sort.Sort(*a)
-}
-
-// SetString sets the string value for a string key.
-func (a *Tags) SetString(key, value string) {
-	a.Set([]byte(key), []byte(value))
-}
-
-// Delete removes a tag by key.
-func (a *Tags) Delete(key []byte) {
-	for i, t := range *a {
-		if bytes.Equal(t.Key, key) {
-			copy((*a)[i:], (*a)[i+1:])
-			(*a)[len(*a)-1] = Tag{}
-			*a = (*a)[:len(*a)-1]
-			return
-		}
-	}
-}
-
-// Map returns a map representation of the tags.
-func (a Tags) Map() map[string]string {
-	m := make(map[string]string, len(a))
-	for _, t := range a {
-		m[string(t.Key)] = string(t.Value)
-	}
-	return m
-}
-
-// Merge merges the tags combining the two. If both define a tag with the
-// same key, the merged value overwrites the old value.
-// A new map is returned.
-func (a Tags) Merge(other map[string]string) Tags {
-	merged := make(map[string]string, len(a)+len(other))
-	for _, t := range a {
-		merged[string(t.Key)] = string(t.Value)
-	}
-	for k, v := range other {
-		merged[k] = v
-	}
-	return NewTags(merged)
-}
-
-// HashKey hashes all of a tag's keys.
-func (a Tags) HashKey() []byte {
-	return a.AppendHashKey(nil)
-}
-
-func (a Tags) needsEscape() bool {
-	for i := range a {
-		t := &a[i]
-		for j := range tagEscapeCodes {
-			c := &tagEscapeCodes[j]
-			if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer.
-func (a Tags) AppendHashKey(dst []byte) []byte {
-	// Empty maps marshal to empty bytes.
-	if len(a) == 0 {
-		return dst
-	}
-
-	// Type invariant: Tags are sorted
-
-	sz := 0
-	var escaped Tags
-	if a.needsEscape() {
-		var tmp [20]Tag
-		if len(a) < len(tmp) {
-			escaped = tmp[:len(a)]
-		} else {
-			escaped = make(Tags, len(a))
-		}
-
-		for i := range a {
-			t := &a[i]
-			nt := &escaped[i]
-			nt.Key = escapeTag(t.Key)
-			nt.Value = escapeTag(t.Value)
-			sz += len(nt.Key) + len(nt.Value)
-		}
-	} else {
-		sz = a.Size()
-		escaped = a
-	}
-
-	sz += len(escaped) + (len(escaped) * 2) // separators
-
-	// Generate marshaled bytes.
-	if cap(dst)-len(dst) < sz {
-		nd := make([]byte, len(dst), len(dst)+sz)
-		copy(nd, dst)
-		dst = nd
-	}
-	buf := dst[len(dst) : len(dst)+sz]
-	idx := 0
-	for i := range escaped {
-		k := &escaped[i]
-		if len(k.Value) == 0 {
-			continue
-		}
-		buf[idx] = ','
-		idx++
-		copy(buf[idx:], k.Key)
-		idx += len(k.Key)
-		buf[idx] = '='
-		idx++
-		copy(buf[idx:], k.Value)
-		idx += len(k.Value)
-	}
-	return dst[:len(dst)+idx]
-}
-
-// CopyTags returns a shallow copy of tags.
-func CopyTags(a Tags) Tags {
-	other := make(Tags, len(a))
-	copy(other, a)
-	return other
-}
-
-// DeepCopyTags returns a deep copy of tags.
-func DeepCopyTags(a Tags) Tags {
-	// Calculate size of keys/values in bytes.
-	var n int
-	for _, t := range a {
-		n += len(t.Key) + len(t.Value)
-	}
-
-	// Build single allocation for all key/values.
-	buf := make([]byte, n)
-
-	// Copy tags to new set.
-	other := make(Tags, len(a))
-	for i, t := range a {
-		copy(buf, t.Key)
-		other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
-
-		copy(buf, t.Value)
-		other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
-	}
-
-	return other
-}
-
-// Fields represents a mapping between a Point's field names and their
-// values.
-type Fields map[string]interface{}
-
-// FieldIterator retuns a FieldIterator that can be used to traverse the
-// fields of a point without constructing the in-memory map.
-func (p *point) FieldIterator() FieldIterator {
-	p.Reset()
-	return p
-}
-
-type fieldIterator struct {
-	start, end  int
-	key, keybuf []byte
-	valueBuf    []byte
-	fieldType   FieldType
-}
-
-// Next indicates whether there any fields remaining.
-func (p *point) Next() bool {
-	p.it.start = p.it.end
-	if p.it.start >= len(p.fields) {
-		return false
-	}
-
-	p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
-	if escape.IsEscaped(p.it.key) {
-		p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
-		p.it.key = p.it.keybuf
-	}
-
-	p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
-	p.it.end++
-
-	if len(p.it.valueBuf) == 0 {
-		p.it.fieldType = Empty
-		return true
-	}
-
-	c := p.it.valueBuf[0]
-
-	if c == '"' {
-		p.it.fieldType = String
-		return true
-	}
-
-	if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
-		if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
-			p.it.fieldType = Integer
-			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
-		} else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
-			p.it.fieldType = Unsigned
-			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
-		} else {
-			p.it.fieldType = Float
-		}
-		return true
-	}
-
-	// to keep the same behavior that currently exists, default to boolean
-	p.it.fieldType = Boolean
-	return true
-}
-
-// FieldKey returns the key of the current field.
-func (p *point) FieldKey() []byte {
-	return p.it.key
-}
-
-// Type returns the FieldType of the current field.
-func (p *point) Type() FieldType {
-	return p.it.fieldType
-}
-
-// StringValue returns the string value of the current field.
-func (p *point) StringValue() string {
-	return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
-}
-
-// IntegerValue returns the integer value of the current field.
-func (p *point) IntegerValue() (int64, error) {
-	n, err := parseIntBytes(p.it.valueBuf, 10, 64)
-	if err != nil {
-		return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
-	}
-	return n, nil
-}
-
-// UnsignedValue returns the unsigned value of the current field.
-func (p *point) UnsignedValue() (uint64, error) {
-	n, err := parseUintBytes(p.it.valueBuf, 10, 64)
-	if err != nil {
-		return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
-	}
-	return n, nil
-}
-
-// BooleanValue returns the boolean value of the current field.
-func (p *point) BooleanValue() (bool, error) {
-	b, err := parseBoolBytes(p.it.valueBuf)
-	if err != nil {
-		return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
-	}
-	return b, nil
-}
-
-// FloatValue returns the float value of the current field.
-func (p *point) FloatValue() (float64, error) {
-	f, err := parseFloatBytes(p.it.valueBuf, 64)
-	if err != nil {
-		return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
-	}
-	return f, nil
-}
-
-// Reset resets the iterator to its initial state.
-func (p *point) Reset() {
-	p.it.fieldType = Empty
-	p.it.key = nil
-	p.it.valueBuf = nil
-	p.it.start = 0
-	p.it.end = 0
-}
-
-// MarshalBinary encodes all the fields to their proper type and returns the binary
-// represenation
-// NOTE: uint64 is specifically not supported due to potential overflow when we decode
-// again later to an int64
-// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted...
-func (p Fields) MarshalBinary() []byte {
-	var b []byte
-	keys := make([]string, 0, len(p))
-
-	for k := range p {
-		keys = append(keys, k)
-	}
-
-	// Not really necessary, can probably be removed.
-	sort.Strings(keys)
-
-	for i, k := range keys {
-		if i > 0 {
-			b = append(b, ',')
-		}
-		b = appendField(b, k, p[k])
-	}
-
-	return b
-}
-
-func appendField(b []byte, k string, v interface{}) []byte {
-	b = append(b, []byte(escape.String(k))...)
-	b = append(b, '=')
-
-	// check popular types first
-	switch v := v.(type) {
-	case float64:
-		b = strconv.AppendFloat(b, v, 'f', -1, 64)
-	case int64:
-		b = strconv.AppendInt(b, v, 10)
-		b = append(b, 'i')
-	case string:
-		b = append(b, '"')
-		b = append(b, []byte(EscapeStringField(v))...)
-		b = append(b, '"')
-	case bool:
-		b = strconv.AppendBool(b, v)
-	case int32:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case int16:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case int8:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case int:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case uint64:
-		b = strconv.AppendUint(b, v, 10)
-		b = append(b, 'u')
-	case uint32:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case uint16:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case uint8:
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case uint:
-		// TODO: 'uint' should be converted to writing as an unsigned integer,
-		// but we cannot since that would break backwards compatibility.
-		b = strconv.AppendInt(b, int64(v), 10)
-		b = append(b, 'i')
-	case float32:
-		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
-	case []byte:
-		b = append(b, v...)
-	case nil:
-		// skip
-	default:
-		// Can't determine the type, so convert to string
-		b = append(b, '"')
-		b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
-		b = append(b, '"')
-
-	}
-
-	return b
-}
-
-// ValidKeyToken returns true if the token used for measurement, tag key, or tag
-// value is a valid unicode string and only contains printable, non-replacement characters.
-func ValidKeyToken(s string) bool {
-	if !utf8.ValidString(s) {
-		return false
-	}
-	for _, r := range s {
-		if !unicode.IsPrint(r) || r == unicode.ReplacementChar {
-			return false
-		}
-	}
-	return true
-}
-
-// ValidKeyTokens returns true if the measurement name and all tags are valid.
-func ValidKeyTokens(name string, tags Tags) bool {
-	if !ValidKeyToken(name) {
-		return false
-	}
-	for _, tag := range tags {
-		if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) {
-			return false
-		}
-	}
-	return true
-}

+ 0 - 62
vendor/github.com/influxdata/influxdb/models/rows.go

@@ -1,62 +0,0 @@
-package models
-
-import (
-	"sort"
-)
-
-// Row represents a single row returned from the execution of a statement.
-type Row struct {
-	Name    string            `json:"name,omitempty"`
-	Tags    map[string]string `json:"tags,omitempty"`
-	Columns []string          `json:"columns,omitempty"`
-	Values  [][]interface{}   `json:"values,omitempty"`
-	Partial bool              `json:"partial,omitempty"`
-}
-
-// SameSeries returns true if r contains values for the same series as o.
-func (r *Row) SameSeries(o *Row) bool {
-	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
-}
-
-// tagsHash returns a hash of tag key/value pairs.
-func (r *Row) tagsHash() uint64 {
-	h := NewInlineFNV64a()
-	keys := r.tagsKeys()
-	for _, k := range keys {
-		h.Write([]byte(k))
-		h.Write([]byte(r.Tags[k]))
-	}
-	return h.Sum64()
-}
-
-// tagKeys returns a sorted list of tag keys.
-func (r *Row) tagsKeys() []string {
-	a := make([]string, 0, len(r.Tags))
-	for k := range r.Tags {
-		a = append(a, k)
-	}
-	sort.Strings(a)
-	return a
-}
-
-// Rows represents a collection of rows. Rows implements sort.Interface.
-type Rows []*Row
-
-// Len implements sort.Interface.
-func (p Rows) Len() int { return len(p) }
-
-// Less implements sort.Interface.
-func (p Rows) Less(i, j int) bool {
-	// Sort by name first.
-	if p[i].Name != p[j].Name {
-		return p[i].Name < p[j].Name
-	}
-
-	// Sort by tag set hash. Tags don't have a meaningful sort order so we
-	// just compute a hash and sort by that instead. This allows the tests
-	// to receive rows in a predictable order every time.
-	return p[i].tagsHash() < p[j].tagsHash()
-}
-
-// Swap implements sort.Interface.
-func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

+ 0 - 42
vendor/github.com/influxdata/influxdb/models/statistic.go

@@ -1,42 +0,0 @@
-package models
-
-// Statistic is the representation of a statistic used by the monitoring service.
-type Statistic struct {
-	Name   string                 `json:"name"`
-	Tags   map[string]string      `json:"tags"`
-	Values map[string]interface{} `json:"values"`
-}
-
-// NewStatistic returns an initialized Statistic.
-func NewStatistic(name string) Statistic {
-	return Statistic{
-		Name:   name,
-		Tags:   make(map[string]string),
-		Values: make(map[string]interface{}),
-	}
-}
-
-// StatisticTags is a map that can be merged with others without causing
-// mutations to either map.
-type StatisticTags map[string]string
-
-// Merge creates a new map containing the merged contents of tags and t.
-// If both tags and the receiver map contain the same key, the value in tags
-// is used in the resulting map.
-//
-// Merge always returns a usable map.
-func (t StatisticTags) Merge(tags map[string]string) map[string]string {
-	// Add everything in tags to the result.
-	out := make(map[string]string, len(tags))
-	for k, v := range tags {
-		out[k] = v
-	}
-
-	// Only add values from t that don't appear in tags.
-	for k, v := range t {
-		if _, ok := tags[k]; !ok {
-			out[k] = v
-		}
-	}
-	return out
-}

+ 0 - 74
vendor/github.com/influxdata/influxdb/models/time.go

@@ -1,74 +0,0 @@
-package models
-
-// Helper time methods since parsing time can easily overflow and we only support a
-// specific time range.
-
-import (
-	"fmt"
-	"math"
-	"time"
-)
-
-const (
-	// MinNanoTime is the minumum time that can be represented.
-	//
-	// 1677-09-21 00:12:43.145224194 +0000 UTC
-	//
-	// The two lowest minimum integers are used as sentinel values.  The
-	// minimum value needs to be used as a value lower than any other value for
-	// comparisons and another separate value is needed to act as a sentinel
-	// default value that is unusable by the user, but usable internally.
-	// Because these two values need to be used for a special purpose, we do
-	// not allow users to write points at these two times.
-	MinNanoTime = int64(math.MinInt64) + 2
-
-	// MaxNanoTime is the maximum time that can be represented.
-	//
-	// 2262-04-11 23:47:16.854775806 +0000 UTC
-	//
-	// The highest time represented by a nanosecond needs to be used for an
-	// exclusive range in the shard group, so the maximum time needs to be one
-	// less than the possible maximum number of nanoseconds representable by an
-	// int64 so that we don't lose a point at that one time.
-	MaxNanoTime = int64(math.MaxInt64) - 1
-)
-
-var (
-	minNanoTime = time.Unix(0, MinNanoTime).UTC()
-	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
-
-	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
-	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
-)
-
-// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
-// supported range.
-func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
-	mult := GetPrecisionMultiplier(precision)
-	if t, ok := safeSignedMult(timestamp, mult); ok {
-		tme := time.Unix(0, t).UTC()
-		return tme, CheckTime(tme)
-	}
-
-	return time.Time{}, ErrTimeOutOfRange
-}
-
-// CheckTime checks that a time is within the safe range.
-func CheckTime(t time.Time) error {
-	if t.Before(minNanoTime) || t.After(maxNanoTime) {
-		return ErrTimeOutOfRange
-	}
-	return nil
-}
-
-// Perform the multiplication and check to make sure it didn't overflow.
-func safeSignedMult(a, b int64) (int64, bool) {
-	if a == 0 || b == 0 || a == 1 || b == 1 {
-		return a * b, true
-	}
-	if a == MinNanoTime || b == MaxNanoTime {
-		return 0, false
-	}
-	c := a * b
-	return c, c/b == a
-}

+ 0 - 7
vendor/github.com/influxdata/influxdb/models/uint_support.go

@@ -1,7 +0,0 @@
-// +build uint uint64
-
-package models
-
-func init() {
-	EnableUintSupport()
-}

+ 0 - 115
vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go

@@ -1,115 +0,0 @@
-// Package escape contains utilities for escaping parts of InfluxQL
-// and InfluxDB line protocol.
-package escape // import "github.com/influxdata/influxdb/pkg/escape"
-
-import (
-	"bytes"
-	"strings"
-)
-
-// Codes is a map of bytes to be escaped.
-var Codes = map[byte][]byte{
-	',': []byte(`\,`),
-	'"': []byte(`\"`),
-	' ': []byte(`\ `),
-	'=': []byte(`\=`),
-}
-
-// Bytes escapes characters on the input slice, as defined by Codes.
-func Bytes(in []byte) []byte {
-	for b, esc := range Codes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-const escapeChars = `," =`
-
-// IsEscaped returns whether b has any escaped characters,
-// i.e. whether b seems to have been processed by Bytes.
-func IsEscaped(b []byte) bool {
-	for len(b) > 0 {
-		i := bytes.IndexByte(b, '\\')
-		if i < 0 {
-			return false
-		}
-
-		if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
-			return true
-		}
-		b = b[i+1:]
-	}
-	return false
-}
-
-// AppendUnescaped appends the unescaped version of src to dst
-// and returns the resulting slice.
-func AppendUnescaped(dst, src []byte) []byte {
-	var pos int
-	for len(src) > 0 {
-		next := bytes.IndexByte(src[pos:], '\\')
-		if next < 0 || pos+next+1 >= len(src) {
-			return append(dst, src...)
-		}
-
-		if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
-			if pos+next > 0 {
-				dst = append(dst, src[:pos+next]...)
-			}
-			src = src[pos+next+1:]
-			pos = 0
-		} else {
-			pos += next + 1
-		}
-	}
-
-	return dst
-}
-
-// Unescape returns a new slice containing the unescaped version of in.
-func Unescape(in []byte) []byte {
-	if len(in) == 0 {
-		return nil
-	}
-
-	if bytes.IndexByte(in, '\\') == -1 {
-		return in
-	}
-
-	i := 0
-	inLen := len(in)
-
-	// The output size will be no more than inLen. Preallocating the
-	// capacity of the output is faster and uses less memory than
-	// letting append() do its own (over)allocation.
-	out := make([]byte, 0, inLen)
-
-	for {
-		if i >= inLen {
-			break
-		}
-		if in[i] == '\\' && i+1 < inLen {
-			switch in[i+1] {
-			case ',':
-				out = append(out, ',')
-				i += 2
-				continue
-			case '"':
-				out = append(out, '"')
-				i += 2
-				continue
-			case ' ':
-				out = append(out, ' ')
-				i += 2
-				continue
-			case '=':
-				out = append(out, '=')
-				i += 2
-				continue
-			}
-		}
-		out = append(out, in[i])
-		i += 1
-	}
-	return out
-}

+ 0 - 21
vendor/github.com/influxdata/influxdb/pkg/escape/strings.go

@@ -1,21 +0,0 @@
-package escape
-
-import "strings"
-
-var (
-	escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
-	unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
-)
-
-// UnescapeString returns unescaped version of in.
-func UnescapeString(in string) string {
-	if strings.IndexByte(in, '\\') == -1 {
-		return in
-	}
-	return unescaper.Replace(in)
-}
-
-// String returns the escaped version of in.
-func String(in string) string {
-	return escaper.Replace(in)
-}

+ 0 - 21
vendor/github.com/kr/pretty/License

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright 2012 Keith Rarick
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 265
vendor/github.com/kr/pretty/diff.go

@@ -1,265 +0,0 @@
-package pretty
-
-import (
-	"fmt"
-	"io"
-	"reflect"
-)
-
-type sbuf []string
-
-func (p *sbuf) Printf(format string, a ...interface{}) {
-	s := fmt.Sprintf(format, a...)
-	*p = append(*p, s)
-}
-
-// Diff returns a slice where each element describes
-// a difference between a and b.
-func Diff(a, b interface{}) (desc []string) {
-	Pdiff((*sbuf)(&desc), a, b)
-	return desc
-}
-
-// wprintfer calls Fprintf on w for each Printf call
-// with a trailing newline.
-type wprintfer struct{ w io.Writer }
-
-func (p *wprintfer) Printf(format string, a ...interface{}) {
-	fmt.Fprintf(p.w, format+"\n", a...)
-}
-
-// Fdiff writes to w a description of the differences between a and b.
-func Fdiff(w io.Writer, a, b interface{}) {
-	Pdiff(&wprintfer{w}, a, b)
-}
-
-type Printfer interface {
-	Printf(format string, a ...interface{})
-}
-
-// Pdiff prints to p a description of the differences between a and b.
-// It calls Printf once for each difference, with no trailing newline.
-// The standard library log.Logger is a Printfer.
-func Pdiff(p Printfer, a, b interface{}) {
-	diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
-}
-
-type Logfer interface {
-	Logf(format string, a ...interface{})
-}
-
-// logprintfer calls Fprintf on w for each Printf call
-// with a trailing newline.
-type logprintfer struct{ l Logfer }
-
-func (p *logprintfer) Printf(format string, a ...interface{}) {
-	p.l.Logf(format, a...)
-}
-
-// Ldiff prints to l a description of the differences between a and b.
-// It calls Logf once for each difference, with no trailing newline.
-// The standard library testing.T and testing.B are Logfers.
-func Ldiff(l Logfer, a, b interface{}) {
-	Pdiff(&logprintfer{l}, a, b)
-}
-
-type diffPrinter struct {
-	w Printfer
-	l string // label
-}
-
-func (w diffPrinter) printf(f string, a ...interface{}) {
-	var l string
-	if w.l != "" {
-		l = w.l + ": "
-	}
-	w.w.Printf(l+f, a...)
-}
-
-func (w diffPrinter) diff(av, bv reflect.Value) {
-	if !av.IsValid() && bv.IsValid() {
-		w.printf("nil != %# v", formatter{v: bv, quote: true})
-		return
-	}
-	if av.IsValid() && !bv.IsValid() {
-		w.printf("%# v != nil", formatter{v: av, quote: true})
-		return
-	}
-	if !av.IsValid() && !bv.IsValid() {
-		return
-	}
-
-	at := av.Type()
-	bt := bv.Type()
-	if at != bt {
-		w.printf("%v != %v", at, bt)
-		return
-	}
-
-	switch kind := at.Kind(); kind {
-	case reflect.Bool:
-		if a, b := av.Bool(), bv.Bool(); a != b {
-			w.printf("%v != %v", a, b)
-		}
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		if a, b := av.Int(), bv.Int(); a != b {
-			w.printf("%d != %d", a, b)
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		if a, b := av.Uint(), bv.Uint(); a != b {
-			w.printf("%d != %d", a, b)
-		}
-	case reflect.Float32, reflect.Float64:
-		if a, b := av.Float(), bv.Float(); a != b {
-			w.printf("%v != %v", a, b)
-		}
-	case reflect.Complex64, reflect.Complex128:
-		if a, b := av.Complex(), bv.Complex(); a != b {
-			w.printf("%v != %v", a, b)
-		}
-	case reflect.Array:
-		n := av.Len()
-		for i := 0; i < n; i++ {
-			w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
-		}
-	case reflect.Chan, reflect.Func, reflect.UnsafePointer:
-		if a, b := av.Pointer(), bv.Pointer(); a != b {
-			w.printf("%#x != %#x", a, b)
-		}
-	case reflect.Interface:
-		w.diff(av.Elem(), bv.Elem())
-	case reflect.Map:
-		ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
-		for _, k := range ak {
-			w := w.relabel(fmt.Sprintf("[%#v]", k))
-			w.printf("%q != (missing)", av.MapIndex(k))
-		}
-		for _, k := range both {
-			w := w.relabel(fmt.Sprintf("[%#v]", k))
-			w.diff(av.MapIndex(k), bv.MapIndex(k))
-		}
-		for _, k := range bk {
-			w := w.relabel(fmt.Sprintf("[%#v]", k))
-			w.printf("(missing) != %q", bv.MapIndex(k))
-		}
-	case reflect.Ptr:
-		switch {
-		case av.IsNil() && !bv.IsNil():
-			w.printf("nil != %# v", formatter{v: bv, quote: true})
-		case !av.IsNil() && bv.IsNil():
-			w.printf("%# v != nil", formatter{v: av, quote: true})
-		case !av.IsNil() && !bv.IsNil():
-			w.diff(av.Elem(), bv.Elem())
-		}
-	case reflect.Slice:
-		lenA := av.Len()
-		lenB := bv.Len()
-		if lenA != lenB {
-			w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
-			break
-		}
-		for i := 0; i < lenA; i++ {
-			w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
-		}
-	case reflect.String:
-		if a, b := av.String(), bv.String(); a != b {
-			w.printf("%q != %q", a, b)
-		}
-	case reflect.Struct:
-		for i := 0; i < av.NumField(); i++ {
-			w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
-		}
-	default:
-		panic("unknown reflect Kind: " + kind.String())
-	}
-}
-
-func (d diffPrinter) relabel(name string) (d1 diffPrinter) {
-	d1 = d
-	if d.l != "" && name[0] != '[' {
-		d1.l += "."
-	}
-	d1.l += name
-	return d1
-}
-
-// keyEqual compares a and b for equality.
-// Both a and b must be valid map keys.
-func keyEqual(av, bv reflect.Value) bool {
-	if !av.IsValid() && !bv.IsValid() {
-		return true
-	}
-	if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
-		return false
-	}
-	switch kind := av.Kind(); kind {
-	case reflect.Bool:
-		a, b := av.Bool(), bv.Bool()
-		return a == b
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		a, b := av.Int(), bv.Int()
-		return a == b
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		a, b := av.Uint(), bv.Uint()
-		return a == b
-	case reflect.Float32, reflect.Float64:
-		a, b := av.Float(), bv.Float()
-		return a == b
-	case reflect.Complex64, reflect.Complex128:
-		a, b := av.Complex(), bv.Complex()
-		return a == b
-	case reflect.Array:
-		for i := 0; i < av.Len(); i++ {
-			if !keyEqual(av.Index(i), bv.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
-		a, b := av.Pointer(), bv.Pointer()
-		return a == b
-	case reflect.Interface:
-		return keyEqual(av.Elem(), bv.Elem())
-	case reflect.String:
-		a, b := av.String(), bv.String()
-		return a == b
-	case reflect.Struct:
-		for i := 0; i < av.NumField(); i++ {
-			if !keyEqual(av.Field(i), bv.Field(i)) {
-				return false
-			}
-		}
-		return true
-	default:
-		panic("invalid map key type " + av.Type().String())
-	}
-}
-
-func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
-	for _, av := range a {
-		inBoth := false
-		for _, bv := range b {
-			if keyEqual(av, bv) {
-				inBoth = true
-				both = append(both, av)
-				break
-			}
-		}
-		if !inBoth {
-			ak = append(ak, av)
-		}
-	}
-	for _, bv := range b {
-		inBoth := false
-		for _, av := range a {
-			if keyEqual(av, bv) {
-				inBoth = true
-				break
-			}
-		}
-		if !inBoth {
-			bk = append(bk, bv)
-		}
-	}
-	return
-}

+ 0 - 328
vendor/github.com/kr/pretty/formatter.go

@@ -1,328 +0,0 @@
-package pretty
-
-import (
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-	"text/tabwriter"
-
-	"github.com/kr/text"
-)
-
-type formatter struct {
-	v     reflect.Value
-	force bool
-	quote bool
-}
-
-// Formatter makes a wrapper, f, that will format x as go source with line
-// breaks and tabs. Object f responds to the "%v" formatting verb when both the
-// "#" and " " (space) flags are set, for example:
-//
-//     fmt.Sprintf("%# v", Formatter(x))
-//
-// If one of these two flags is not set, or any other verb is used, f will
-// format x according to the usual rules of package fmt.
-// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
-func Formatter(x interface{}) (f fmt.Formatter) {
-	return formatter{v: reflect.ValueOf(x), quote: true}
-}
-
-func (fo formatter) String() string {
-	return fmt.Sprint(fo.v.Interface()) // unwrap it
-}
-
-func (fo formatter) passThrough(f fmt.State, c rune) {
-	s := "%"
-	for i := 0; i < 128; i++ {
-		if f.Flag(i) {
-			s += string(i)
-		}
-	}
-	if w, ok := f.Width(); ok {
-		s += fmt.Sprintf("%d", w)
-	}
-	if p, ok := f.Precision(); ok {
-		s += fmt.Sprintf(".%d", p)
-	}
-	s += string(c)
-	fmt.Fprintf(f, s, fo.v.Interface())
-}
-
-func (fo formatter) Format(f fmt.State, c rune) {
-	if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
-		w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
-		p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
-		p.printValue(fo.v, true, fo.quote)
-		w.Flush()
-		return
-	}
-	fo.passThrough(f, c)
-}
-
-type printer struct {
-	io.Writer
-	tw      *tabwriter.Writer
-	visited map[visit]int
-	depth   int
-}
-
-func (p *printer) indent() *printer {
-	q := *p
-	q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
-	q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
-	return &q
-}
-
-func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
-	if showType {
-		io.WriteString(p, v.Type().String())
-		fmt.Fprintf(p, "(%#v)", x)
-	} else {
-		fmt.Fprintf(p, "%#v", x)
-	}
-}
-
-// printValue must keep track of already-printed pointer values to avoid
-// infinite recursion.
-type visit struct {
-	v   uintptr
-	typ reflect.Type
-}
-
-func (p *printer) printValue(v reflect.Value, showType, quote bool) {
-	if p.depth > 10 {
-		io.WriteString(p, "!%v(DEPTH EXCEEDED)")
-		return
-	}
-
-	switch v.Kind() {
-	case reflect.Bool:
-		p.printInline(v, v.Bool(), showType)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		p.printInline(v, v.Int(), showType)
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		p.printInline(v, v.Uint(), showType)
-	case reflect.Float32, reflect.Float64:
-		p.printInline(v, v.Float(), showType)
-	case reflect.Complex64, reflect.Complex128:
-		fmt.Fprintf(p, "%#v", v.Complex())
-	case reflect.String:
-		p.fmtString(v.String(), quote)
-	case reflect.Map:
-		t := v.Type()
-		if showType {
-			io.WriteString(p, t.String())
-		}
-		writeByte(p, '{')
-		if nonzero(v) {
-			expand := !canInline(v.Type())
-			pp := p
-			if expand {
-				writeByte(p, '\n')
-				pp = p.indent()
-			}
-			keys := v.MapKeys()
-			for i := 0; i < v.Len(); i++ {
-				showTypeInStruct := true
-				k := keys[i]
-				mv := v.MapIndex(k)
-				pp.printValue(k, false, true)
-				writeByte(pp, ':')
-				if expand {
-					writeByte(pp, '\t')
-				}
-				showTypeInStruct = t.Elem().Kind() == reflect.Interface
-				pp.printValue(mv, showTypeInStruct, true)
-				if expand {
-					io.WriteString(pp, ",\n")
-				} else if i < v.Len()-1 {
-					io.WriteString(pp, ", ")
-				}
-			}
-			if expand {
-				pp.tw.Flush()
-			}
-		}
-		writeByte(p, '}')
-	case reflect.Struct:
-		t := v.Type()
-		if v.CanAddr() {
-			addr := v.UnsafeAddr()
-			vis := visit{addr, t}
-			if vd, ok := p.visited[vis]; ok && vd < p.depth {
-				p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
-				break // don't print v again
-			}
-			p.visited[vis] = p.depth
-		}
-
-		if showType {
-			io.WriteString(p, t.String())
-		}
-		writeByte(p, '{')
-		if nonzero(v) {
-			expand := !canInline(v.Type())
-			pp := p
-			if expand {
-				writeByte(p, '\n')
-				pp = p.indent()
-			}
-			for i := 0; i < v.NumField(); i++ {
-				showTypeInStruct := true
-				if f := t.Field(i); f.Name != "" {
-					io.WriteString(pp, f.Name)
-					writeByte(pp, ':')
-					if expand {
-						writeByte(pp, '\t')
-					}
-					showTypeInStruct = labelType(f.Type)
-				}
-				pp.printValue(getField(v, i), showTypeInStruct, true)
-				if expand {
-					io.WriteString(pp, ",\n")
-				} else if i < v.NumField()-1 {
-					io.WriteString(pp, ", ")
-				}
-			}
-			if expand {
-				pp.tw.Flush()
-			}
-		}
-		writeByte(p, '}')
-	case reflect.Interface:
-		switch e := v.Elem(); {
-		case e.Kind() == reflect.Invalid:
-			io.WriteString(p, "nil")
-		case e.IsValid():
-			pp := *p
-			pp.depth++
-			pp.printValue(e, showType, true)
-		default:
-			io.WriteString(p, v.Type().String())
-			io.WriteString(p, "(nil)")
-		}
-	case reflect.Array, reflect.Slice:
-		t := v.Type()
-		if showType {
-			io.WriteString(p, t.String())
-		}
-		if v.Kind() == reflect.Slice && v.IsNil() && showType {
-			io.WriteString(p, "(nil)")
-			break
-		}
-		if v.Kind() == reflect.Slice && v.IsNil() {
-			io.WriteString(p, "nil")
-			break
-		}
-		writeByte(p, '{')
-		expand := !canInline(v.Type())
-		pp := p
-		if expand {
-			writeByte(p, '\n')
-			pp = p.indent()
-		}
-		for i := 0; i < v.Len(); i++ {
-			showTypeInSlice := t.Elem().Kind() == reflect.Interface
-			pp.printValue(v.Index(i), showTypeInSlice, true)
-			if expand {
-				io.WriteString(pp, ",\n")
-			} else if i < v.Len()-1 {
-				io.WriteString(pp, ", ")
-			}
-		}
-		if expand {
-			pp.tw.Flush()
-		}
-		writeByte(p, '}')
-	case reflect.Ptr:
-		e := v.Elem()
-		if !e.IsValid() {
-			writeByte(p, '(')
-			io.WriteString(p, v.Type().String())
-			io.WriteString(p, ")(nil)")
-		} else {
-			pp := *p
-			pp.depth++
-			writeByte(pp, '&')
-			pp.printValue(e, true, true)
-		}
-	case reflect.Chan:
-		x := v.Pointer()
-		if showType {
-			writeByte(p, '(')
-			io.WriteString(p, v.Type().String())
-			fmt.Fprintf(p, ")(%#v)", x)
-		} else {
-			fmt.Fprintf(p, "%#v", x)
-		}
-	case reflect.Func:
-		io.WriteString(p, v.Type().String())
-		io.WriteString(p, " {...}")
-	case reflect.UnsafePointer:
-		p.printInline(v, v.Pointer(), showType)
-	case reflect.Invalid:
-		io.WriteString(p, "nil")
-	}
-}
-
-func canInline(t reflect.Type) bool {
-	switch t.Kind() {
-	case reflect.Map:
-		return !canExpand(t.Elem())
-	case reflect.Struct:
-		for i := 0; i < t.NumField(); i++ {
-			if canExpand(t.Field(i).Type) {
-				return false
-			}
-		}
-		return true
-	case reflect.Interface:
-		return false
-	case reflect.Array, reflect.Slice:
-		return !canExpand(t.Elem())
-	case reflect.Ptr:
-		return false
-	case reflect.Chan, reflect.Func, reflect.UnsafePointer:
-		return false
-	}
-	return true
-}
-
-func canExpand(t reflect.Type) bool {
-	switch t.Kind() {
-	case reflect.Map, reflect.Struct,
-		reflect.Interface, reflect.Array, reflect.Slice,
-		reflect.Ptr:
-		return true
-	}
-	return false
-}
-
-func labelType(t reflect.Type) bool {
-	switch t.Kind() {
-	case reflect.Interface, reflect.Struct:
-		return true
-	}
-	return false
-}
-
-func (p *printer) fmtString(s string, quote bool) {
-	if quote {
-		s = strconv.Quote(s)
-	}
-	io.WriteString(p, s)
-}
-
-func writeByte(w io.Writer, b byte) {
-	w.Write([]byte{b})
-}
-
-func getField(v reflect.Value, i int) reflect.Value {
-	val := v.Field(i)
-	if val.Kind() == reflect.Interface && !val.IsNil() {
-		val = val.Elem()
-	}
-	return val
-}

+ 0 - 108
vendor/github.com/kr/pretty/pretty.go

@@ -1,108 +0,0 @@
-// Package pretty provides pretty-printing for Go values. This is
-// useful during debugging, to avoid wrapping long output lines in
-// the terminal.
-//
-// It provides a function, Formatter, that can be used with any
-// function that accepts a format string. It also provides
-// convenience wrappers for functions in packages fmt and log.
-package pretty
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"reflect"
-)
-
-// Errorf is a convenience wrapper for fmt.Errorf.
-//
-// Calling Errorf(f, x, y) is equivalent to
-// fmt.Errorf(f, Formatter(x), Formatter(y)).
-func Errorf(format string, a ...interface{}) error {
-	return fmt.Errorf(format, wrap(a, false)...)
-}
-
-// Fprintf is a convenience wrapper for fmt.Fprintf.
-//
-// Calling Fprintf(w, f, x, y) is equivalent to
-// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
-func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
-	return fmt.Fprintf(w, format, wrap(a, false)...)
-}
-
-// Log is a convenience wrapper for log.Printf.
-//
-// Calling Log(x, y) is equivalent to
-// log.Print(Formatter(x), Formatter(y)), but each operand is
-// formatted with "%# v".
-func Log(a ...interface{}) {
-	log.Print(wrap(a, true)...)
-}
-
-// Logf is a convenience wrapper for log.Printf.
-//
-// Calling Logf(f, x, y) is equivalent to
-// log.Printf(f, Formatter(x), Formatter(y)).
-func Logf(format string, a ...interface{}) {
-	log.Printf(format, wrap(a, false)...)
-}
-
-// Logln is a convenience wrapper for log.Printf.
-//
-// Calling Logln(x, y) is equivalent to
-// log.Println(Formatter(x), Formatter(y)), but each operand is
-// formatted with "%# v".
-func Logln(a ...interface{}) {
-	log.Println(wrap(a, true)...)
-}
-
-// Print pretty-prints its operands and writes to standard output.
-//
-// Calling Print(x, y) is equivalent to
-// fmt.Print(Formatter(x), Formatter(y)), but each operand is
-// formatted with "%# v".
-func Print(a ...interface{}) (n int, errno error) {
-	return fmt.Print(wrap(a, true)...)
-}
-
-// Printf is a convenience wrapper for fmt.Printf.
-//
-// Calling Printf(f, x, y) is equivalent to
-// fmt.Printf(f, Formatter(x), Formatter(y)).
-func Printf(format string, a ...interface{}) (n int, errno error) {
-	return fmt.Printf(format, wrap(a, false)...)
-}
-
-// Println pretty-prints its operands and writes to standard output.
-//
-// Calling Print(x, y) is equivalent to
-// fmt.Println(Formatter(x), Formatter(y)), but each operand is
-// formatted with "%# v".
-func Println(a ...interface{}) (n int, errno error) {
-	return fmt.Println(wrap(a, true)...)
-}
-
-// Sprint is a convenience wrapper for fmt.Sprintf.
-//
-// Calling Sprint(x, y) is equivalent to
-// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
-// formatted with "%# v".
-func Sprint(a ...interface{}) string {
-	return fmt.Sprint(wrap(a, true)...)
-}
-
-// Sprintf is a convenience wrapper for fmt.Sprintf.
-//
-// Calling Sprintf(f, x, y) is equivalent to
-// fmt.Sprintf(f, Formatter(x), Formatter(y)).
-func Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, wrap(a, false)...)
-}
-
-func wrap(a []interface{}, force bool) []interface{} {
-	w := make([]interface{}, len(a))
-	for i, x := range a {
-		w[i] = formatter{v: reflect.ValueOf(x), force: force}
-	}
-	return w
-}

+ 0 - 41
vendor/github.com/kr/pretty/zero.go

@@ -1,41 +0,0 @@
-package pretty
-
-import (
-	"reflect"
-)
-
-func nonzero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() != 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() != 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() != 0
-	case reflect.Complex64, reflect.Complex128:
-		return v.Complex() != complex(0, 0)
-	case reflect.String:
-		return v.String() != ""
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			if nonzero(getField(v, i)) {
-				return true
-			}
-		}
-		return false
-	case reflect.Array:
-		for i := 0; i < v.Len(); i++ {
-			if nonzero(v.Index(i)) {
-				return true
-			}
-		}
-		return false
-	case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
-		return !v.IsNil()
-	case reflect.UnsafePointer:
-		return v.Pointer() != 0
-	}
-	return true
-}

+ 0 - 19
vendor/github.com/kr/text/License

@@ -1,19 +0,0 @@
-Copyright 2012 Keith Rarick
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 3
vendor/github.com/kr/text/doc.go

@@ -1,3 +0,0 @@
-// Package text provides rudimentary functions for manipulating text in
-// paragraphs.
-package text

+ 0 - 74
vendor/github.com/kr/text/indent.go

@@ -1,74 +0,0 @@
-package text
-
-import (
-	"io"
-)
-
-// Indent inserts prefix at the beginning of each non-empty line of s. The
-// end-of-line marker is NL.
-func Indent(s, prefix string) string {
-	return string(IndentBytes([]byte(s), []byte(prefix)))
-}
-
-// IndentBytes inserts prefix at the beginning of each non-empty line of b.
-// The end-of-line marker is NL.
-func IndentBytes(b, prefix []byte) []byte {
-	var res []byte
-	bol := true
-	for _, c := range b {
-		if bol && c != '\n' {
-			res = append(res, prefix...)
-		}
-		res = append(res, c)
-		bol = c == '\n'
-	}
-	return res
-}
-
-// Writer indents each line of its input.
-type indentWriter struct {
-	w   io.Writer
-	bol bool
-	pre [][]byte
-	sel int
-	off int
-}
-
-// NewIndentWriter makes a new write filter that indents the input
-// lines. Each line is prefixed in order with the corresponding
-// element of pre. If there are more lines than elements, the last
-// element of pre is repeated for each subsequent line.
-func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
-	return &indentWriter{
-		w:   w,
-		pre: pre,
-		bol: true,
-	}
-}
-
-// The only errors returned are from the underlying indentWriter.
-func (w *indentWriter) Write(p []byte) (n int, err error) {
-	for _, c := range p {
-		if w.bol {
-			var i int
-			i, err = w.w.Write(w.pre[w.sel][w.off:])
-			w.off += i
-			if err != nil {
-				return n, err
-			}
-		}
-		_, err = w.w.Write([]byte{c})
-		if err != nil {
-			return n, err
-		}
-		n++
-		w.bol = c == '\n'
-		if w.bol {
-			w.off = 0
-			if w.sel < len(w.pre)-1 {
-				w.sel++
-			}
-		}
-	}
-	return n, nil
-}

+ 0 - 86
vendor/github.com/kr/text/wrap.go

@@ -1,86 +0,0 @@
-package text
-
-import (
-	"bytes"
-	"math"
-)
-
-var (
-	nl = []byte{'\n'}
-	sp = []byte{' '}
-)
-
-const defaultPenalty = 1e5
-
-// Wrap wraps s into a paragraph of lines of length lim, with minimal
-// raggedness.
-func Wrap(s string, lim int) string {
-	return string(WrapBytes([]byte(s), lim))
-}
-
-// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
-// raggedness.
-func WrapBytes(b []byte, lim int) []byte {
-	words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
-	var lines [][]byte
-	for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
-		lines = append(lines, bytes.Join(line, sp))
-	}
-	return bytes.Join(lines, nl)
-}
-
-// WrapWords is the low-level line-breaking algorithm, useful if you need more
-// control over the details of the text wrapping process. For most uses, either
-// Wrap or WrapBytes will be sufficient and more convenient.
-//
-// WrapWords splits a list of words into lines with minimal "raggedness",
-// treating each byte as one unit, accounting for spc units between adjacent
-// words on each line, and attempting to limit lines to lim units. Raggedness
-// is the total error over all lines, where error is the square of the
-// difference of the length of the line and lim. Too-long lines (which only
-// happen when a single word is longer than lim units) have pen penalty units
-// added to the error.
-func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
-	n := len(words)
-
-	length := make([][]int, n)
-	for i := 0; i < n; i++ {
-		length[i] = make([]int, n)
-		length[i][i] = len(words[i])
-		for j := i + 1; j < n; j++ {
-			length[i][j] = length[i][j-1] + spc + len(words[j])
-		}
-	}
-
-	nbrk := make([]int, n)
-	cost := make([]int, n)
-	for i := range cost {
-		cost[i] = math.MaxInt32
-	}
-	for i := n - 1; i >= 0; i-- {
-		if length[i][n-1] <= lim || i == n-1 {
-			cost[i] = 0
-			nbrk[i] = n
-		} else {
-			for j := i + 1; j < n; j++ {
-				d := lim - length[i][j-1]
-				c := d*d + cost[j]
-				if length[i][j-1] > lim {
-					c += pen // too-long lines get a worse penalty
-				}
-				if c < cost[i] {
-					cost[i] = c
-					nbrk[i] = j
-				}
-			}
-		}
-	}
-
-	var lines [][][]byte
-	i := 0
-	for i < n {
-		lines = append(lines, words[i:nbrk[i]])
-		i = nbrk[i]
-	}
-	return lines
-}

+ 56 - 0
vendor/github.com/miekg/dns/acceptfunc.go

@@ -0,0 +1,56 @@
+package dns
+
+// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
+// It returns a MsgAcceptAction to indicate what should happen with the message.
+type MsgAcceptFunc func(dh Header) MsgAcceptAction
+
+// DefaultMsgAcceptFunc checks the request and will reject if:
+//
+// * isn't a request (don't respond in that case).
+// * opcode isn't OpcodeQuery or OpcodeNotify
+// * Zero bit isn't zero
+// * has more than 1 question in the question section
+// * has more than 1 RR in the Answer section
+// * has more than 0 RRs in the Authority section
+// * has more than 2 RRs in the Additional section
+var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
+
+// MsgAcceptAction represents the action to be taken.
+type MsgAcceptAction int
+
+const (
+	MsgAccept MsgAcceptAction = iota // Accept the message
+	MsgReject                        // Reject the message with a RcodeFormatError
+	MsgIgnore                        // Ignore the error and send nothing back.
+)
+
+func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
+	if isResponse := dh.Bits&_QR != 0; isResponse {
+		return MsgIgnore
+	}
+
+	// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
+	opcode := int(dh.Bits>>11) & 0xF
+	if opcode != OpcodeQuery && opcode != OpcodeNotify {
+		return MsgReject
+	}
+
+	if isZero := dh.Bits&_Z != 0; isZero {
+		return MsgReject
+	}
+	if dh.Qdcount != 1 {
+		return MsgReject
+	}
+	// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
+	if dh.Ancount > 1 {
+		return MsgReject
+	}
+	// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
+	if dh.Nscount > 1 {
+		return MsgReject
+	}
+	if dh.Arcount > 2 {
+		return MsgReject
+	}
+	return MsgAccept
+}

+ 45 - 210
vendor/github.com/miekg/dns/client.go

@@ -3,15 +3,11 @@ package dns
 // A client implementation.
 // A client implementation.
 
 
 import (
 import (
-	"bytes"
 	"context"
 	"context"
 	"crypto/tls"
 	"crypto/tls"
 	"encoding/binary"
 	"encoding/binary"
-	"fmt"
 	"io"
 	"io"
-	"io/ioutil"
 	"net"
 	"net"
-	"net/http"
 	"strings"
 	"strings"
 	"time"
 	"time"
 )
 )
@@ -19,8 +15,6 @@ import (
 const (
 const (
 	dnsTimeout     time.Duration = 2 * time.Second
 	dnsTimeout     time.Duration = 2 * time.Second
 	tcpIdleTimeout time.Duration = 8 * time.Second
 	tcpIdleTimeout time.Duration = 8 * time.Second
-
-	dohMimeType = "application/dns-message"
 )
 )
 
 
 // A Conn represents a connection to a DNS server.
 // A Conn represents a connection to a DNS server.
@@ -44,7 +38,6 @@ type Client struct {
 	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
 	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
 	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
 	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
 	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
 	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
-	HTTPClient     *http.Client      // The http.Client to use for DNS-over-HTTPS
 	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
 	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
 	SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
 	SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
 	group          singleflight
 	group          singleflight
@@ -89,32 +82,22 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
 	// create a new dialer with the appropriate timeout
 	// create a new dialer with the appropriate timeout
 	var d net.Dialer
 	var d net.Dialer
 	if c.Dialer == nil {
 	if c.Dialer == nil {
-		d = net.Dialer{Timeout:c.getTimeoutForRequest(c.dialTimeout())}
+		d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
 	} else {
 	} else {
-		d = net.Dialer(*c.Dialer)
-	}
-
-	network := "udp"
-	useTLS := false
-
-	switch c.Net {
-	case "tcp-tls":
-		network = "tcp"
-		useTLS = true
-	case "tcp4-tls":
-		network = "tcp4"
-		useTLS = true
-	case "tcp6-tls":
-		network = "tcp6"
-		useTLS = true
-	default:
-		if c.Net != "" {
-			network = c.Net
-		}
+		d = *c.Dialer
 	}
 	}
 
 
+	network := c.Net
+	if network == "" {
+		network = "udp"
+	}
+
+	useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
+
 	conn = new(Conn)
 	conn = new(Conn)
 	if useTLS {
 	if useTLS {
+		network = strings.TrimSuffix(network, "-tls")
+
 		conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
 		conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
 	} else {
 	} else {
 		conn.Conn, err = d.Dial(network, address)
 		conn.Conn, err = d.Dial(network, address)
@@ -122,6 +105,7 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+
 	return conn, nil
 	return conn, nil
 }
 }
 
 
@@ -141,11 +125,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
 // attribute appropriately
 // attribute appropriately
 func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
 func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
 	if !c.SingleInflight {
 	if !c.SingleInflight {
-		if c.Net == "https" {
-			// TODO(tmthrgd): pipe timeouts into exchangeDOH
-			return c.exchangeDOH(context.TODO(), m, address)
-		}
-
 		return c.exchange(m, address)
 		return c.exchange(m, address)
 	}
 	}
 
 
@@ -158,11 +137,6 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
 		cl = cl1
 		cl = cl1
 	}
 	}
 	r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
 	r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
-		if c.Net == "https" {
-			// TODO(tmthrgd): pipe timeouts into exchangeDOH
-			return c.exchangeDOH(context.TODO(), m, address)
-		}
-
 		return c.exchange(m, address)
 		return c.exchange(m, address)
 	})
 	})
 	if r != nil && shared {
 	if r != nil && shared {
@@ -208,67 +182,6 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
 	return r, rtt, err
 	return r, rtt, err
 }
 }
 
 
-func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
-	p, err := m.Pack()
-	if err != nil {
-		return nil, 0, err
-	}
-
-	req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p))
-	if err != nil {
-		return nil, 0, err
-	}
-
-	req.Header.Set("Content-Type", dohMimeType)
-	req.Header.Set("Accept", dohMimeType)
-
-	hc := http.DefaultClient
-	if c.HTTPClient != nil {
-		hc = c.HTTPClient
-	}
-
-	if ctx != context.Background() && ctx != context.TODO() {
-		req = req.WithContext(ctx)
-	}
-
-	t := time.Now()
-
-	resp, err := hc.Do(req)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer closeHTTPBody(resp.Body)
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
-	}
-
-	if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
-		return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
-	}
-
-	p, err = ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	rtt = time.Since(t)
-
-	r = new(Msg)
-	if err := r.Unpack(p); err != nil {
-		return r, 0, err
-	}
-
-	// TODO: TSIG? Is it even supported over DoH?
-
-	return r, rtt, nil
-}
-
-func closeHTTPBody(r io.ReadCloser) error {
-	io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
-	return r.Close()
-}
-
 // ReadMsg reads a message from the connection co.
 // ReadMsg reads a message from the connection co.
 // If the received message contains a TSIG record the transaction signature
 // If the received message contains a TSIG record the transaction signature
 // is verified. This method always tries to return the message, however if an
 // is verified. This method always tries to return the message, however if an
@@ -306,18 +219,15 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
 		n   int
 		n   int
 		err error
 		err error
 	)
 	)
-
-	switch t := co.Conn.(type) {
+	switch co.Conn.(type) {
 	case *net.TCPConn, *tls.Conn:
 	case *net.TCPConn, *tls.Conn:
-		r := t.(io.Reader)
-
-		// First two bytes specify the length of the entire message.
-		l, err := tcpMsgLen(r)
-		if err != nil {
+		var length uint16
+		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
-		p = make([]byte, l)
-		n, err = tcpRead(r, p)
+
+		p = make([]byte, length)
+		n, err = io.ReadFull(co.Conn, p)
 	default:
 	default:
 		if co.UDPSize > MinMsgSize {
 		if co.UDPSize > MinMsgSize {
 			p = make([]byte, co.UDPSize)
 			p = make([]byte, co.UDPSize)
@@ -344,78 +254,27 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
 	return p, err
 	return p, err
 }
 }
 
 
-// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
-func tcpMsgLen(t io.Reader) (int, error) {
-	p := []byte{0, 0}
-	n, err := t.Read(p)
-	if err != nil {
-		return 0, err
-	}
-
-	// As seen with my local router/switch, returns 1 byte on the above read,
-	// resulting a a ShortRead. Just write it out (instead of loop) and read the
-	// other byte.
-	if n == 1 {
-		n1, err := t.Read(p[1:])
-		if err != nil {
-			return 0, err
-		}
-		n += n1
-	}
-
-	if n != 2 {
-		return 0, ErrShortRead
-	}
-	l := binary.BigEndian.Uint16(p)
-	if l == 0 {
-		return 0, ErrShortRead
-	}
-	return int(l), nil
-}
-
-// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
-func tcpRead(t io.Reader, p []byte) (int, error) {
-	n, err := t.Read(p)
-	if err != nil {
-		return n, err
-	}
-	for n < len(p) {
-		j, err := t.Read(p[n:])
-		if err != nil {
-			return n, err
-		}
-		n += j
-	}
-	return n, err
-}
-
 // Read implements the net.Conn read method.
 // Read implements the net.Conn read method.
 func (co *Conn) Read(p []byte) (n int, err error) {
 func (co *Conn) Read(p []byte) (n int, err error) {
 	if co.Conn == nil {
 	if co.Conn == nil {
 		return 0, ErrConnEmpty
 		return 0, ErrConnEmpty
 	}
 	}
-	if len(p) < 2 {
-		return 0, io.ErrShortBuffer
-	}
-	switch t := co.Conn.(type) {
-	case *net.TCPConn, *tls.Conn:
-		r := t.(io.Reader)
 
 
-		l, err := tcpMsgLen(r)
-		if err != nil {
+	switch co.Conn.(type) {
+	case *net.TCPConn, *tls.Conn:
+		var length uint16
+		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
-		if l > len(p) {
-			return int(l), io.ErrShortBuffer
+		if int(length) > len(p) {
+			return 0, io.ErrShortBuffer
 		}
 		}
-		return tcpRead(r, p[:l])
+
+		return io.ReadFull(co.Conn, p[:length])
 	}
 	}
+
 	// UDP connection
 	// UDP connection
-	n, err = co.Conn.Read(p)
-	if err != nil {
-		return n, err
-	}
-	return n, err
+	return co.Conn.Read(p)
 }
 }
 
 
 // WriteMsg sends a message through the connection co.
 // WriteMsg sends a message through the connection co.
@@ -437,33 +296,26 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	if _, err = co.Write(out); err != nil {
-		return err
-	}
-	return nil
+	_, err = co.Write(out)
+	return err
 }
 }
 
 
 // Write implements the net.Conn Write method.
 // Write implements the net.Conn Write method.
 func (co *Conn) Write(p []byte) (n int, err error) {
 func (co *Conn) Write(p []byte) (n int, err error) {
-	switch t := co.Conn.(type) {
+	switch co.Conn.(type) {
 	case *net.TCPConn, *tls.Conn:
 	case *net.TCPConn, *tls.Conn:
-		w := t.(io.Writer)
-
-		lp := len(p)
-		if lp < 2 {
-			return 0, io.ErrShortBuffer
-		}
-		if lp > MaxMsgSize {
+		if len(p) > MaxMsgSize {
 			return 0, &Error{err: "message too large"}
 			return 0, &Error{err: "message too large"}
 		}
 		}
-		l := make([]byte, 2, lp+2)
-		binary.BigEndian.PutUint16(l, uint16(lp))
-		p = append(l, p...)
-		n, err := io.Copy(w, bytes.NewReader(p))
+
+		l := make([]byte, 2)
+		binary.BigEndian.PutUint16(l, uint16(len(p)))
+
+		n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
 		return int(n), err
 		return int(n), err
 	}
 	}
-	n, err = co.Conn.Write(p)
-	return n, err
+
+	return co.Conn.Write(p)
 }
 }
 
 
 // Return the appropriate timeout for a specific request
 // Return the appropriate timeout for a specific request
@@ -506,7 +358,7 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error)
 
 
 // ExchangeConn performs a synchronous query. It sends the message m via the connection
 // ExchangeConn performs a synchronous query. It sends the message m via the connection
 // c and waits for a reply. The connection c is not closed by ExchangeConn.
 // c and waits for a reply. The connection c is not closed by ExchangeConn.
-// This function is going away, but can easily be mimicked:
+// Deprecated: This function is going away, but can easily be mimicked:
 //
 //
 //	co := &dns.Conn{Conn: c} // c is your net.Conn
 //	co := &dns.Conn{Conn: c} // c is your net.Conn
 //	co.WriteMsg(m)
 //	co.WriteMsg(m)
@@ -530,11 +382,7 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
 // DialTimeout acts like Dial but takes a timeout.
 // DialTimeout acts like Dial but takes a timeout.
 func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
 func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
 	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
 	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
-	conn, err = client.Dial(address)
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
+	return client.Dial(address)
 }
 }
 
 
 // DialWithTLS connects to the address on the named network with TLS.
 // DialWithTLS connects to the address on the named network with TLS.
@@ -543,12 +391,7 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er
 		network += "-tls"
 		network += "-tls"
 	}
 	}
 	client := Client{Net: network, TLSConfig: tlsConfig}
 	client := Client{Net: network, TLSConfig: tlsConfig}
-	conn, err = client.Dial(address)
-
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
+	return client.Dial(address)
 }
 }
 
 
 // DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
 // DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
@@ -557,30 +400,22 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
 		network += "-tls"
 		network += "-tls"
 	}
 	}
 	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
 	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
-	conn, err = client.Dial(address)
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
+	return client.Dial(address)
 }
 }
 
 
 // ExchangeContext acts like Exchange, but honors the deadline on the provided
 // ExchangeContext acts like Exchange, but honors the deadline on the provided
 // context, if present. If there is both a context deadline and a configured
 // context, if present. If there is both a context deadline and a configured
 // timeout on the client, the earliest of the two takes effect.
 // timeout on the client, the earliest of the two takes effect.
 func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
 func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
-	if !c.SingleInflight && c.Net == "https" {
-		return c.exchangeDOH(ctx, m, a)
-	}
-
 	var timeout time.Duration
 	var timeout time.Duration
 	if deadline, ok := ctx.Deadline(); !ok {
 	if deadline, ok := ctx.Deadline(); !ok {
 		timeout = 0
 		timeout = 0
 	} else {
 	} else {
-		timeout = deadline.Sub(time.Now())
+		timeout = time.Until(deadline)
 	}
 	}
 	// not passing the context to the underlying calls, as the API does not support
 	// not passing the context to the underlying calls, as the API does not support
 	// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
 	// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
-	// TODO(tmthrgd): this is a race condition
+	// TODO(tmthrgd,miekg): this is a race condition.
 	c.Dialer = &net.Dialer{Timeout: timeout}
 	c.Dialer = &net.Dialer{Timeout: timeout}
 	return c.Exchange(m, a)
 	return c.Exchange(m, a)
 }
 }

+ 2 - 6
vendor/github.com/miekg/dns/clientconfig.go

@@ -68,14 +68,10 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
 			}
 			}
 
 
 		case "search": // set search path to given servers
 		case "search": // set search path to given servers
-			c.Search = make([]string, len(f)-1)
-			for i := 0; i < len(c.Search); i++ {
-				c.Search[i] = f[i+1]
-			}
+			c.Search = append([]string(nil), f[1:]...)
 
 
 		case "options": // magic options
 		case "options": // magic options
-			for i := 1; i < len(f); i++ {
-				s := f[i]
+			for _, s := range f[1:] {
 				switch {
 				switch {
 				case len(s) >= 6 && s[:6] == "ndots:":
 				case len(s) >= 6 && s[:6] == "ndots:":
 					n, _ := strconv.Atoi(s[6:])
 					n, _ := strconv.Atoi(s[6:])

+ 0 - 198
vendor/github.com/miekg/dns/compress_generate.go

@@ -1,198 +0,0 @@
-//+build ignore
-
-// compression_generate.go is meant to run with go generate. It will use
-// go/{importer,types} to track down all the RR struct types. Then for each type
-// it will look to see if there are (compressible) names, if so it will add that
-// type to compressionLenHelperType and comressionLenSearchType which "fake" the
-// compression so that Len() is fast.
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"go/format"
-	"go/importer"
-	"go/types"
-	"log"
-	"os"
-)
-
-var packageHdr = `
-// Code generated by "go run compress_generate.go"; DO NOT EDIT.
-
-package dns
-
-`
-
-// getTypeStruct will take a type and the package scope, and return the
-// (innermost) struct if the type is considered a RR type (currently defined as
-// those structs beginning with a RR_Header, could be redefined as implementing
-// the RR interface). The bool return value indicates if embedded structs were
-// resolved.
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
-	st, ok := t.Underlying().(*types.Struct)
-	if !ok {
-		return nil, false
-	}
-	if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
-		return st, false
-	}
-	if st.Field(0).Anonymous() {
-		st, _ := getTypeStruct(st.Field(0).Type(), scope)
-		return st, true
-	}
-	return nil, false
-}
-
-func main() {
-	// Import and type-check the package
-	pkg, err := importer.Default().Import("github.com/miekg/dns")
-	fatalIfErr(err)
-	scope := pkg.Scope()
-
-	var domainTypes []string  // Types that have a domain name in them (either compressible or not).
-	var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType)
-Names:
-	for _, name := range scope.Names() {
-		o := scope.Lookup(name)
-		if o == nil || !o.Exported() {
-			continue
-		}
-		st, _ := getTypeStruct(o.Type(), scope)
-		if st == nil {
-			continue
-		}
-		if name == "PrivateRR" {
-			continue
-		}
-
-		if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
-			log.Fatalf("Constant Type%s does not exist.", o.Name())
-		}
-
-		for i := 1; i < st.NumFields(); i++ {
-			if _, ok := st.Field(i).Type().(*types.Slice); ok {
-				if st.Tag(i) == `dns:"domain-name"` {
-					domainTypes = append(domainTypes, o.Name())
-					continue Names
-				}
-				if st.Tag(i) == `dns:"cdomain-name"` {
-					cdomainTypes = append(cdomainTypes, o.Name())
-					domainTypes = append(domainTypes, o.Name())
-					continue Names
-				}
-				continue
-			}
-
-			switch {
-			case st.Tag(i) == `dns:"domain-name"`:
-				domainTypes = append(domainTypes, o.Name())
-				continue Names
-			case st.Tag(i) == `dns:"cdomain-name"`:
-				cdomainTypes = append(cdomainTypes, o.Name())
-				domainTypes = append(domainTypes, o.Name())
-				continue Names
-			}
-		}
-	}
-
-	b := &bytes.Buffer{}
-	b.WriteString(packageHdr)
-
-	// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
-
-	fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
-	fmt.Fprint(b, "currentLen := initLen\n")
-	fmt.Fprint(b, "switch x := r.(type) {\n")
-	for _, name := range domainTypes {
-		o := scope.Lookup(name)
-		st, _ := getTypeStruct(o.Type(), scope)
-
-		fmt.Fprintf(b, "case *%s:\n", name)
-		for i := 1; i < st.NumFields(); i++ {
-			out := func(s string) {
-				fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
-				fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
-			}
-
-			if _, ok := st.Field(i).Type().(*types.Slice); ok {
-				switch st.Tag(i) {
-				case `dns:"domain-name"`:
-					fallthrough
-				case `dns:"cdomain-name"`:
-					// For HIP we need to slice over the elements in this slice.
-					fmt.Fprintf(b, `for i := range x.%s {
-	currentLen -= len(x.%s[i]) + 1
-}
-`, st.Field(i).Name(), st.Field(i).Name())
-					fmt.Fprintf(b, `for i := range x.%s {
-	currentLen += compressionLenHelper(c, x.%s[i], currentLen)
-}
-`, st.Field(i).Name(), st.Field(i).Name())
-				}
-				continue
-			}
-
-			switch {
-			case st.Tag(i) == `dns:"cdomain-name"`:
-				fallthrough
-			case st.Tag(i) == `dns:"domain-name"`:
-				out(st.Field(i).Name())
-			}
-		}
-	}
-	fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
-
-	// compressionLenSearchType - search cdomain-tags types for compressible names.
-
-	fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
-	fmt.Fprint(b, "switch x := r.(type) {\n")
-	for _, name := range cdomainTypes {
-		o := scope.Lookup(name)
-		st, _ := getTypeStruct(o.Type(), scope)
-
-		fmt.Fprintf(b, "case *%s:\n", name)
-		j := 1
-		for i := 1; i < st.NumFields(); i++ {
-			out := func(s string, j int) {
-				fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
-			}
-
-			// There are no slice types with names that can be compressed.
-
-			switch {
-			case st.Tag(i) == `dns:"cdomain-name"`:
-				out(st.Field(i).Name(), j)
-				j++
-			}
-		}
-		k := "k1"
-		ok := "ok1"
-		sz := "sz1"
-		for i := 2; i < j; i++ {
-			k += fmt.Sprintf(" + k%d", i)
-			ok += fmt.Sprintf(" && ok%d", i)
-			sz += fmt.Sprintf(" + sz%d", i)
-		}
-		fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
-	}
-	fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
-
-	// gofmt
-	res, err := format.Source(b.Bytes())
-	if err != nil {
-		b.WriteTo(os.Stderr)
-		log.Fatal(err)
-	}
-
-	f, err := os.Create("zcompress.go")
-	fatalIfErr(err)
-	defer f.Close()
-	f.Write(res)
-}
-
-func fatalIfErr(err error) {
-	if err != nil {
-		log.Fatal(err)
-	}
-}

+ 106 - 16
vendor/github.com/miekg/dns/defaults.go

@@ -4,6 +4,7 @@ import (
 	"errors"
 	"errors"
 	"net"
 	"net"
 	"strconv"
 	"strconv"
+	"strings"
 )
 )
 
 
 const hexDigit = "0123456789abcdef"
 const hexDigit = "0123456789abcdef"
@@ -145,10 +146,9 @@ func (dns *Msg) IsTsig() *TSIG {
 // record in the additional section will do. It returns the OPT record
 // record in the additional section will do. It returns the OPT record
 // found or nil.
 // found or nil.
 func (dns *Msg) IsEdns0() *OPT {
 func (dns *Msg) IsEdns0() *OPT {
-	// EDNS0 is at the end of the additional section, start there.
-	// We might want to change this to *only* look at the last two
-	// records. So we see TSIG and/or OPT - this a slightly bigger
-	// change though.
+	// RFC 6891, Section 6.1.1 allows the OPT record to appear
+	// anywhere in the additional record section, but it's usually at
+	// the end so start there.
 	for i := len(dns.Extra) - 1; i >= 0; i-- {
 	for i := len(dns.Extra) - 1; i >= 0; i-- {
 		if dns.Extra[i].Header().Rrtype == TypeOPT {
 		if dns.Extra[i].Header().Rrtype == TypeOPT {
 			return dns.Extra[i].(*OPT)
 			return dns.Extra[i].(*OPT)
@@ -157,17 +157,93 @@ func (dns *Msg) IsEdns0() *OPT {
 	return nil
 	return nil
 }
 }
 
 
+// popEdns0 is like IsEdns0, but it removes the record from the message.
+func (dns *Msg) popEdns0() *OPT {
+	// RFC 6891, Section 6.1.1 allows the OPT record to appear
+	// anywhere in the additional record section, but it's usually at
+	// the end so start there.
+	for i := len(dns.Extra) - 1; i >= 0; i-- {
+		if dns.Extra[i].Header().Rrtype == TypeOPT {
+			opt := dns.Extra[i].(*OPT)
+			dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
+			return opt
+		}
+	}
+	return nil
+}
+
 // IsDomainName checks if s is a valid domain name, it returns the number of
 // IsDomainName checks if s is a valid domain name, it returns the number of
 // labels and true, when a domain name is valid.  Note that non fully qualified
 // labels and true, when a domain name is valid.  Note that non fully qualified
 // domain name is considered valid, in this case the last label is counted in
 // domain name is considered valid, in this case the last label is counted in
 // the number of labels.  When false is returned the number of labels is not
 // the number of labels.  When false is returned the number of labels is not
 // defined.  Also note that this function is extremely liberal; almost any
 // defined.  Also note that this function is extremely liberal; almost any
 // string is a valid domain name as the DNS is 8 bit protocol. It checks if each
 // string is a valid domain name as the DNS is 8 bit protocol. It checks if each
-// label fits in 63 characters, but there is no length check for the entire
-// string s. I.e.  a domain name longer than 255 characters is considered valid.
+// label fits in 63 characters and that the entire name will fit into the 255
+// octet wire format limit.
 func IsDomainName(s string) (labels int, ok bool) {
 func IsDomainName(s string) (labels int, ok bool) {
-	_, labels, err := packDomainName(s, nil, 0, nil, false)
-	return labels, err == nil
+	// XXX: The logic in this function was copied from packDomainName and
+	// should be kept in sync with that function.
+
+	const lenmsg = 256
+
+	if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
+		return 0, false
+	}
+
+	s = Fqdn(s)
+
+	// Each dot ends a segment of the name. Except for escaped dots (\.), which
+	// are normal dots.
+
+	var (
+		off    int
+		begin  int
+		wasDot bool
+	)
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '\\':
+			if off+1 > lenmsg {
+				return labels, false
+			}
+
+			// check for \DDD
+			if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
+				i += 3
+				begin += 3
+			} else {
+				i++
+				begin++
+			}
+
+			wasDot = false
+		case '.':
+			if wasDot {
+				// two dots back to back is not legal
+				return labels, false
+			}
+			wasDot = true
+
+			labelLen := i - begin
+			if labelLen >= 1<<6 { // top two bits of length must be clear
+				return labels, false
+			}
+
+			// off can already (we're in a loop) be bigger than lenmsg
+			// this happens when a name isn't fully qualified
+			off += 1 + labelLen
+			if off > lenmsg {
+				return labels, false
+			}
+
+			labels++
+			begin = i + 1
+		default:
+			wasDot = false
+		}
+	}
+
+	return labels, true
 }
 }
 
 
 // IsSubDomain checks if child is indeed a child of the parent. If child and parent
 // IsSubDomain checks if child is indeed a child of the parent. If child and parent
@@ -181,7 +257,7 @@ func IsSubDomain(parent, child string) bool {
 // The checking is performed on the binary payload.
 // The checking is performed on the binary payload.
 func IsMsg(buf []byte) error {
 func IsMsg(buf []byte) error {
 	// Header
 	// Header
-	if len(buf) < 12 {
+	if len(buf) < headerSize {
 		return errors.New("dns: bad message header")
 		return errors.New("dns: bad message header")
 	}
 	}
 	// Header: Opcode
 	// Header: Opcode
@@ -191,11 +267,18 @@ func IsMsg(buf []byte) error {
 
 
 // IsFqdn checks if a domain name is fully qualified.
 // IsFqdn checks if a domain name is fully qualified.
 func IsFqdn(s string) bool {
 func IsFqdn(s string) bool {
-	l := len(s)
-	if l == 0 {
+	s2 := strings.TrimSuffix(s, ".")
+	if s == s2 {
 		return false
 		return false
 	}
 	}
-	return s[l-1] == '.'
+
+	i := strings.LastIndexFunc(s2, func(r rune) bool {
+		return r != '\\'
+	})
+
+	// Test whether we have an even number of escape sequences before
+	// the dot or none.
+	return (len(s2)-i)%2 != 0
 }
 }
 
 
 // IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
 // IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
@@ -244,12 +327,19 @@ func ReverseAddr(addr string) (arpa string, err error) {
 	if ip == nil {
 	if ip == nil {
 		return "", &Error{err: "unrecognized address: " + addr}
 		return "", &Error{err: "unrecognized address: " + addr}
 	}
 	}
-	if ip.To4() != nil {
-		return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
-			strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
+	if v4 := ip.To4(); v4 != nil {
+		buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
+		// Add it, in reverse, to the buffer
+		for i := len(v4) - 1; i >= 0; i-- {
+			buf = strconv.AppendInt(buf, int64(v4[i]), 10)
+			buf = append(buf, '.')
+		}
+		// Append "in-addr.arpa." and return (buf already has the final .)
+		buf = append(buf, "in-addr.arpa."...)
+		return string(buf), nil
 	}
 	}
 	// Must be IPv6
 	// Must be IPv6
-	buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
+	buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
 	// Add it, in reverse, to the buffer
 	// Add it, in reverse, to the buffer
 	for i := len(ip) - 1; i >= 0; i-- {
 	for i := len(ip) - 1; i >= 0; i-- {
 		v := ip[i]
 		v := ip[i]

+ 49 - 12
vendor/github.com/miekg/dns/dns.go

@@ -34,10 +34,30 @@ type RR interface {
 
 
 	// copy returns a copy of the RR
 	// copy returns a copy of the RR
 	copy() RR
 	copy() RR
-	// len returns the length (in octets) of the uncompressed RR in wire format.
-	len() int
-	// pack packs an RR into wire format.
-	pack([]byte, int, map[string]int, bool) (int, error)
+
+	// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
+	//
+	// If compression is nil, the uncompressed size will be returned, otherwise the compressed
+	// size will be returned and domain names will be added to the map for future compression.
+	len(off int, compression map[string]struct{}) int
+
+	// pack packs the records RDATA into wire format. The header will
+	// already have been packed into msg.
+	pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
+
+	// unpack unpacks an RR from wire format.
+	//
+	// This will only be called on a new and empty RR type with only the header populated. It
+	// will only be called if the record's RDATA is non-empty.
+	unpack(msg []byte, off int) (off1 int, err error)
+
+	// parse parses an RR from zone file format.
+	//
+	// This will only be called on a new and empty RR type with only the header populated.
+	parse(c *zlexer, origin, file string) *ParseError
+
+	// isDuplicate returns whether the two RRs are duplicates.
+	isDuplicate(r2 RR) bool
 }
 }
 
 
 // RR_Header is the header all DNS resource records share.
 // RR_Header is the header all DNS resource records share.
@@ -70,28 +90,45 @@ func (h *RR_Header) String() string {
 	return s
 	return s
 }
 }
 
 
-func (h *RR_Header) len() int {
-	l := len(h.Name) + 1
+func (h *RR_Header) len(off int, compression map[string]struct{}) int {
+	l := domainNameLen(h.Name, off, compression, true)
 	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
 	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
 	return l
 	return l
 }
 }
 
 
+func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+	// RR_Header has no RDATA to pack.
+	return off, nil
+}
+
+func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
+	panic("dns: internal error: unpack should never be called on RR_Header")
+}
+
+func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError {
+	panic("dns: internal error: parse should never be called on RR_Header")
+}
+
 // ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
 // ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
 func (rr *RFC3597) ToRFC3597(r RR) error {
 func (rr *RFC3597) ToRFC3597(r RR) error {
-	buf := make([]byte, r.len()*2)
-	off, err := PackRR(r, buf, 0, nil, false)
+	buf := make([]byte, Len(r)*2)
+	headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 	buf = buf[:off]
 	buf = buf[:off]
-	if int(r.Header().Rdlength) > off {
-		return ErrBuf
+
+	*rr = RFC3597{Hdr: *r.Header()}
+	rr.Hdr.Rdlength = uint16(off - headerEnd)
+
+	if noRdata(rr.Hdr) {
+		return nil
 	}
 	}
 
 
-	rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
+	_, err = rr.unpack(buf, headerEnd)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	*rr = *rfc3597.(*RFC3597)
+
 	return nil
 	return nil
 }
 }

+ 30 - 40
vendor/github.com/miekg/dns/dnssec.go

@@ -67,9 +67,6 @@ var AlgorithmToString = map[uint8]string{
 	PRIVATEOID:       "PRIVATEOID",
 	PRIVATEOID:       "PRIVATEOID",
 }
 }
 
 
-// StringToAlgorithm is the reverse of AlgorithmToString.
-var StringToAlgorithm = reverseInt8(AlgorithmToString)
-
 // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
 // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
 var AlgorithmToHash = map[uint8]crypto.Hash{
 var AlgorithmToHash = map[uint8]crypto.Hash{
 	RSAMD5:           crypto.MD5, // Deprecated in RFC 6725
 	RSAMD5:           crypto.MD5, // Deprecated in RFC 6725
@@ -102,9 +99,6 @@ var HashToString = map[uint8]string{
 	SHA512: "SHA512",
 	SHA512: "SHA512",
 }
 }
 
 
-// StringToHash is a map of names to hash IDs.
-var StringToHash = reverseInt8(HashToString)
-
 // DNSKEY flag values.
 // DNSKEY flag values.
 const (
 const (
 	SEP    = 1
 	SEP    = 1
@@ -173,7 +167,7 @@ func (k *DNSKEY) KeyTag() uint16 {
 				keytag += int(v) << 8
 				keytag += int(v) << 8
 			}
 			}
 		}
 		}
-		keytag += (keytag >> 16) & 0xFFFF
+		keytag += keytag >> 16 & 0xFFFF
 		keytag &= 0xFFFF
 		keytag &= 0xFFFF
 	}
 	}
 	return uint16(keytag)
 	return uint16(keytag)
@@ -268,16 +262,17 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
 		return ErrKey
 		return ErrKey
 	}
 	}
 
 
+	h0 := rrset[0].Header()
 	rr.Hdr.Rrtype = TypeRRSIG
 	rr.Hdr.Rrtype = TypeRRSIG
-	rr.Hdr.Name = rrset[0].Header().Name
-	rr.Hdr.Class = rrset[0].Header().Class
+	rr.Hdr.Name = h0.Name
+	rr.Hdr.Class = h0.Class
 	if rr.OrigTtl == 0 { // If set don't override
 	if rr.OrigTtl == 0 { // If set don't override
-		rr.OrigTtl = rrset[0].Header().Ttl
+		rr.OrigTtl = h0.Ttl
 	}
 	}
-	rr.TypeCovered = rrset[0].Header().Rrtype
-	rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
+	rr.TypeCovered = h0.Rrtype
+	rr.Labels = uint8(CountLabel(h0.Name))
 
 
-	if strings.HasPrefix(rrset[0].Header().Name, "*") {
+	if strings.HasPrefix(h0.Name, "*") {
 		rr.Labels-- // wildcard, remove from label count
 		rr.Labels-- // wildcard, remove from label count
 	}
 	}
 
 
@@ -401,7 +396,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
 	if rr.Algorithm != k.Algorithm {
 	if rr.Algorithm != k.Algorithm {
 		return ErrKey
 		return ErrKey
 	}
 	}
-	if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
+	if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
 		return ErrKey
 		return ErrKey
 	}
 	}
 	if k.Protocol != 3 {
 	if k.Protocol != 3 {
@@ -411,10 +406,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
 	// IsRRset checked that we have at least one RR and that the RRs in
 	// IsRRset checked that we have at least one RR and that the RRs in
 	// the set have consistent type, class, and name. Also check that type and
 	// the set have consistent type, class, and name. Also check that type and
 	// class matches the RRSIG record.
 	// class matches the RRSIG record.
-	if rrset[0].Header().Class != rr.Hdr.Class {
-		return ErrRRset
-	}
-	if rrset[0].Header().Rrtype != rr.TypeCovered {
+	if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
 		return ErrRRset
 		return ErrRRset
 	}
 	}
 
 
@@ -512,8 +504,8 @@ func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
 	}
 	}
 	modi := (int64(rr.Inception) - utc) / year68
 	modi := (int64(rr.Inception) - utc) / year68
 	mode := (int64(rr.Expiration) - utc) / year68
 	mode := (int64(rr.Expiration) - utc) / year68
-	ti := int64(rr.Inception) + (modi * year68)
-	te := int64(rr.Expiration) + (mode * year68)
+	ti := int64(rr.Inception) + modi*year68
+	te := int64(rr.Expiration) + mode*year68
 	return ti <= utc && utc <= te
 	return ti <= utc && utc <= te
 }
 }
 
 
@@ -563,20 +555,19 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
 
 
 	pubkey := new(rsa.PublicKey)
 	pubkey := new(rsa.PublicKey)
 
 
-	expo := uint64(0)
-	for i := 0; i < int(explen); i++ {
+	var expo uint64
+	// The exponent of length explen is between keyoff and modoff.
+	for _, v := range keybuf[keyoff:modoff] {
 		expo <<= 8
 		expo <<= 8
-		expo |= uint64(keybuf[keyoff+i])
+		expo |= uint64(v)
 	}
 	}
 	if expo > 1<<31-1 {
 	if expo > 1<<31-1 {
 		// Larger exponent than supported by the crypto package.
 		// Larger exponent than supported by the crypto package.
 		return nil
 		return nil
 	}
 	}
-	pubkey.E = int(expo)
-
-	pubkey.N = big.NewInt(0)
-	pubkey.N.SetBytes(keybuf[modoff:])
 
 
+	pubkey.E = int(expo)
+	pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
 	return pubkey
 	return pubkey
 }
 }
 
 
@@ -601,10 +592,8 @@ func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
 			return nil
 			return nil
 		}
 		}
 	}
 	}
-	pubkey.X = big.NewInt(0)
-	pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
-	pubkey.Y = big.NewInt(0)
-	pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
+	pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
+	pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
 	return pubkey
 	return pubkey
 }
 }
 
 
@@ -625,10 +614,10 @@ func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
 	p, keybuf := keybuf[:size], keybuf[size:]
 	p, keybuf := keybuf[:size], keybuf[size:]
 	g, y := keybuf[:size], keybuf[size:]
 	g, y := keybuf[:size], keybuf[size:]
 	pubkey := new(dsa.PublicKey)
 	pubkey := new(dsa.PublicKey)
-	pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
-	pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
-	pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
-	pubkey.Y = big.NewInt(0).SetBytes(y)
+	pubkey.Parameters.Q = new(big.Int).SetBytes(q)
+	pubkey.Parameters.P = new(big.Int).SetBytes(p)
+	pubkey.Parameters.G = new(big.Int).SetBytes(g)
+	pubkey.Y = new(big.Int).SetBytes(y)
 	return pubkey
 	return pubkey
 }
 }
 
 
@@ -658,15 +647,16 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
 	wires := make(wireSlice, len(rrset))
 	wires := make(wireSlice, len(rrset))
 	for i, r := range rrset {
 	for i, r := range rrset {
 		r1 := r.copy()
 		r1 := r.copy()
-		r1.Header().Ttl = s.OrigTtl
-		labels := SplitDomainName(r1.Header().Name)
+		h := r1.Header()
+		h.Ttl = s.OrigTtl
+		labels := SplitDomainName(h.Name)
 		// 6.2. Canonical RR Form. (4) - wildcards
 		// 6.2. Canonical RR Form. (4) - wildcards
 		if len(labels) > int(s.Labels) {
 		if len(labels) > int(s.Labels) {
 			// Wildcard
 			// Wildcard
-			r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
+			h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
 		}
 		}
 		// RFC 4034: 6.2.  Canonical RR Form. (2) - domain name to lowercase
 		// RFC 4034: 6.2.  Canonical RR Form. (2) - domain name to lowercase
-		r1.Header().Name = strings.ToLower(r1.Header().Name)
+		h.Name = strings.ToLower(h.Name)
 		// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
 		// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
 		//   NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
 		//   NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
 		//   HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
 		//   HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
@@ -724,7 +714,7 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
 			x.Target = strings.ToLower(x.Target)
 			x.Target = strings.ToLower(x.Target)
 		}
 		}
 		// 6.2. Canonical RR Form. (5) - origTTL
 		// 6.2. Canonical RR Form. (5) - origTTL
-		wire := make([]byte, r1.len()+1) // +1 to be safe(r)
+		wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
 		off, err1 := PackRR(r1, wire, 0, nil, false)
 		off, err1 := PackRR(r1, wire, 0, nil, false)
 		if err1 != nil {
 		if err1 != nil {
 			return nil, err1
 			return nil, err1

+ 132 - 77
vendor/github.com/miekg/dns/dnssec_keyscan.go

@@ -1,7 +1,7 @@
 package dns
 package dns
 
 
 import (
 import (
-	"bytes"
+	"bufio"
 	"crypto"
 	"crypto"
 	"crypto/dsa"
 	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/ecdsa"
@@ -109,21 +109,16 @@ func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
 			}
 			}
 			switch k {
 			switch k {
 			case "modulus":
 			case "modulus":
-				p.PublicKey.N = big.NewInt(0)
-				p.PublicKey.N.SetBytes(v1)
+				p.PublicKey.N = new(big.Int).SetBytes(v1)
 			case "publicexponent":
 			case "publicexponent":
-				i := big.NewInt(0)
-				i.SetBytes(v1)
+				i := new(big.Int).SetBytes(v1)
 				p.PublicKey.E = int(i.Int64()) // int64 should be large enough
 				p.PublicKey.E = int(i.Int64()) // int64 should be large enough
 			case "privateexponent":
 			case "privateexponent":
-				p.D = big.NewInt(0)
-				p.D.SetBytes(v1)
+				p.D = new(big.Int).SetBytes(v1)
 			case "prime1":
 			case "prime1":
-				p.Primes[0] = big.NewInt(0)
-				p.Primes[0].SetBytes(v1)
+				p.Primes[0] = new(big.Int).SetBytes(v1)
 			case "prime2":
 			case "prime2":
-				p.Primes[1] = big.NewInt(0)
-				p.Primes[1].SetBytes(v1)
+				p.Primes[1] = new(big.Int).SetBytes(v1)
 			}
 			}
 		case "exponent1", "exponent2", "coefficient":
 		case "exponent1", "exponent2", "coefficient":
 			// not used in Go (yet)
 			// not used in Go (yet)
@@ -136,7 +131,7 @@ func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
 
 
 func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
 func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
 	p := new(dsa.PrivateKey)
 	p := new(dsa.PrivateKey)
-	p.X = big.NewInt(0)
+	p.X = new(big.Int)
 	for k, v := range m {
 	for k, v := range m {
 		switch k {
 		switch k {
 		case "private_value(x)":
 		case "private_value(x)":
@@ -154,7 +149,7 @@ func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
 
 
 func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
 func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
 	p := new(ecdsa.PrivateKey)
 	p := new(ecdsa.PrivateKey)
-	p.D = big.NewInt(0)
+	p.D = new(big.Int)
 	// TODO: validate that the required flags are present
 	// TODO: validate that the required flags are present
 	for k, v := range m {
 	for k, v := range m {
 		switch k {
 		switch k {
@@ -181,22 +176,10 @@ func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
 			if err != nil {
 			if err != nil {
 				return nil, err
 				return nil, err
 			}
 			}
-			if len(p1) != 32 {
+			if len(p1) != ed25519.SeedSize {
 				return nil, ErrPrivKey
 				return nil, ErrPrivKey
 			}
 			}
-			// RFC 8080 and Golang's x/crypto/ed25519 differ as to how the
-			// private keys are represented. RFC 8080 specifies that private
-			// keys be stored solely as the seed value (p1 above) while the
-			// ed25519 package represents them as the seed value concatenated
-			// to the public key, which is derived from the seed value.
-			//
-			// ed25519.GenerateKey reads exactly 32 bytes from the passed in
-			// io.Reader and uses them as the seed. It also derives the
-			// public key and produces a compatible private key.
-			_, p, err = ed25519.GenerateKey(bytes.NewReader(p1))
-			if err != nil {
-				return nil, err
-			}
+			p = ed25519.NewKeyFromSeed(p1)
 		case "created", "publish", "activate":
 		case "created", "publish", "activate":
 			/* not used in Go (yet) */
 			/* not used in Go (yet) */
 		}
 		}
@@ -207,23 +190,12 @@ func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
 // parseKey reads a private key from r. It returns a map[string]string,
 // parseKey reads a private key from r. It returns a map[string]string,
 // with the key-value pairs, or an error when the file is not correct.
 // with the key-value pairs, or an error when the file is not correct.
 func parseKey(r io.Reader, file string) (map[string]string, error) {
 func parseKey(r io.Reader, file string) (map[string]string, error) {
-	s, cancel := scanInit(r)
 	m := make(map[string]string)
 	m := make(map[string]string)
-	c := make(chan lex)
-	k := ""
-	defer func() {
-		cancel()
-		// zlexer can send up to two tokens, the next one and possibly 1 remainders.
-		// Do a non-blocking read.
-		_, ok := <-c
-		_, ok = <-c
-		if !ok {
-			// too bad
-		}
-	}()
-	// Start the lexer
-	go klexer(s, c)
-	for l := range c {
+	var k string
+
+	c := newKLexer(r)
+
+	for l, ok := c.Next(); ok; l, ok = c.Next() {
 		// It should alternate
 		// It should alternate
 		switch l.value {
 		switch l.value {
 		case zKey:
 		case zKey:
@@ -232,41 +204,111 @@ func parseKey(r io.Reader, file string) (map[string]string, error) {
 			if k == "" {
 			if k == "" {
 				return nil, &ParseError{file, "no private key seen", l}
 				return nil, &ParseError{file, "no private key seen", l}
 			}
 			}
-			//println("Setting", strings.ToLower(k), "to", l.token, "b")
+
 			m[strings.ToLower(k)] = l.token
 			m[strings.ToLower(k)] = l.token
 			k = ""
 			k = ""
 		}
 		}
 	}
 	}
+
+	// Surface any read errors from r.
+	if err := c.Err(); err != nil {
+		return nil, &ParseError{file: file, err: err.Error()}
+	}
+
 	return m, nil
 	return m, nil
 }
 }
 
 
-// klexer scans the sourcefile and returns tokens on the channel c.
-func klexer(s *scan, c chan lex) {
-	var l lex
-	str := "" // Hold the current read text
-	commt := false
-	key := true
-	x, err := s.tokenText()
-	defer close(c)
-	for err == nil {
-		l.column = s.position.Column
-		l.line = s.position.Line
+type klexer struct {
+	br io.ByteReader
+
+	readErr error
+
+	line   int
+	column int
+
+	key bool
+
+	eol bool // end-of-line
+}
+
+func newKLexer(r io.Reader) *klexer {
+	br, ok := r.(io.ByteReader)
+	if !ok {
+		br = bufio.NewReaderSize(r, 1024)
+	}
+
+	return &klexer{
+		br: br,
+
+		line: 1,
+
+		key: true,
+	}
+}
+
+func (kl *klexer) Err() error {
+	if kl.readErr == io.EOF {
+		return nil
+	}
+
+	return kl.readErr
+}
+
+// readByte returns the next byte from the input
+func (kl *klexer) readByte() (byte, bool) {
+	if kl.readErr != nil {
+		return 0, false
+	}
+
+	c, err := kl.br.ReadByte()
+	if err != nil {
+		kl.readErr = err
+		return 0, false
+	}
+
+	// delay the newline handling until the next token is delivered,
+	// fixes off-by-one errors when reporting a parse error.
+	if kl.eol {
+		kl.line++
+		kl.column = 0
+		kl.eol = false
+	}
+
+	if c == '\n' {
+		kl.eol = true
+	} else {
+		kl.column++
+	}
+
+	return c, true
+}
+
+func (kl *klexer) Next() (lex, bool) {
+	var (
+		l lex
+
+		str strings.Builder
+
+		commt bool
+	)
+
+	for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
+		l.line, l.column = kl.line, kl.column
+
 		switch x {
 		switch x {
 		case ':':
 		case ':':
-			if commt {
+			if commt || !kl.key {
 				break
 				break
 			}
 			}
-			l.token = str
-			if key {
-				l.value = zKey
-				c <- l
-				// Next token is a space, eat it
-				s.tokenText()
-				key = false
-				str = ""
-			} else {
-				l.value = zValue
-			}
+
+			kl.key = false
+
+			// Next token is a space, eat it
+			kl.readByte()
+
+			l.value = zKey
+			l.token = str.String()
+			return l, true
 		case ';':
 		case ';':
 			commt = true
 			commt = true
 		case '\n':
 		case '\n':
@@ -274,24 +316,37 @@ func klexer(s *scan, c chan lex) {
 				// Reset a comment
 				// Reset a comment
 				commt = false
 				commt = false
 			}
 			}
+
+			if kl.key && str.Len() == 0 {
+				// ignore empty lines
+				break
+			}
+
+			kl.key = true
+
 			l.value = zValue
 			l.value = zValue
-			l.token = str
-			c <- l
-			str = ""
-			commt = false
-			key = true
+			l.token = str.String()
+			return l, true
 		default:
 		default:
 			if commt {
 			if commt {
 				break
 				break
 			}
 			}
-			str += string(x)
+
+			str.WriteByte(x)
 		}
 		}
-		x, err = s.tokenText()
 	}
 	}
-	if len(str) > 0 {
+
+	if kl.readErr != nil && kl.readErr != io.EOF {
+		// Don't return any tokens after a read error occurs.
+		return lex{value: zEOF}, false
+	}
+
+	if str.Len() > 0 {
 		// Send remainder
 		// Send remainder
-		l.token = str
 		l.value = zValue
 		l.value = zValue
-		c <- l
+		l.token = str.String()
+		return l, true
 	}
 	}
+
+	return lex{value: zEOF}, false
 }
 }

+ 8 - 7
vendor/github.com/miekg/dns/dnssec_privkey.go

@@ -13,6 +13,8 @@ import (
 
 
 const format = "Private-key-format: v1.3\n"
 const format = "Private-key-format: v1.3\n"
 
 
+var bigIntOne = big.NewInt(1)
+
 // PrivateKeyString converts a PrivateKey to a string. This string has the same
 // PrivateKeyString converts a PrivateKey to a string. This string has the same
 // format as the private-key-file of BIND9 (Private-key-format: v1.3).
 // format as the private-key-file of BIND9 (Private-key-format: v1.3).
 // It needs some info from the key (the algorithm), so its a method of the DNSKEY
 // It needs some info from the key (the algorithm), so its a method of the DNSKEY
@@ -31,12 +33,11 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
 		prime2 := toBase64(p.Primes[1].Bytes())
 		prime2 := toBase64(p.Primes[1].Bytes())
 		// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
 		// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
 		// and from: http://code.google.com/p/go/issues/detail?id=987
 		// and from: http://code.google.com/p/go/issues/detail?id=987
-		one := big.NewInt(1)
-		p1 := big.NewInt(0).Sub(p.Primes[0], one)
-		q1 := big.NewInt(0).Sub(p.Primes[1], one)
-		exp1 := big.NewInt(0).Mod(p.D, p1)
-		exp2 := big.NewInt(0).Mod(p.D, q1)
-		coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
+		p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
+		q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
+		exp1 := new(big.Int).Mod(p.D, p1)
+		exp2 := new(big.Int).Mod(p.D, q1)
+		coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
 
 
 		exponent1 := toBase64(exp1.Bytes())
 		exponent1 := toBase64(exp1.Bytes())
 		exponent2 := toBase64(exp2.Bytes())
 		exponent2 := toBase64(exp2.Bytes())
@@ -82,7 +83,7 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
 			"Public_value(y): " + pub + "\n"
 			"Public_value(y): " + pub + "\n"
 
 
 	case ed25519.PrivateKey:
 	case ed25519.PrivateKey:
-		private := toBase64(p[:32])
+		private := toBase64(p.Seed())
 		return format +
 		return format +
 			"Algorithm: " + algorithm + "\n" +
 			"Algorithm: " + algorithm + "\n" +
 			"PrivateKey: " + private + "\n"
 			"PrivateKey: " + private + "\n"

+ 52 - 55
vendor/github.com/miekg/dns/doc.go

@@ -1,20 +1,20 @@
 /*
 /*
 Package dns implements a full featured interface to the Domain Name System.
 Package dns implements a full featured interface to the Domain Name System.
-Server- and client-side programming is supported.
-The package allows complete control over what is sent out to the DNS. The package
-API follows the less-is-more principle, by presenting a small, clean interface.
+Both server- and client-side programming is supported. The package allows
+complete control over what is sent out to the DNS. The API follows the
+less-is-more principle, by presenting a small, clean interface.
 
 
-The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
+It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
 TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
 TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
-Note that domain names MUST be fully qualified, before sending them, unqualified
+
+Note that domain names MUST be fully qualified before sending them, unqualified
 names in a message will result in a packing failure.
 names in a message will result in a packing failure.
 
 
-Resource records are native types. They are not stored in wire format.
-Basic usage pattern for creating a new resource record:
+Resource records are native types. They are not stored in wire format. Basic
+usage pattern for creating a new resource record:
 
 
      r := new(dns.MX)
      r := new(dns.MX)
-     r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
-     Class: dns.ClassINET, Ttl: 3600}
+     r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
      r.Preference = 10
      r.Preference = 10
      r.Mx = "mx.miek.nl."
      r.Mx = "mx.miek.nl."
 
 
@@ -30,8 +30,8 @@ Or even:
 
 
      mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
      mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
 
 
-In the DNS messages are exchanged, these messages contain resource
-records (sets). Use pattern for creating a message:
+In the DNS messages are exchanged, these messages contain resource records
+(sets). Use pattern for creating a message:
 
 
      m := new(dns.Msg)
      m := new(dns.Msg)
      m.SetQuestion("miek.nl.", dns.TypeMX)
      m.SetQuestion("miek.nl.", dns.TypeMX)
@@ -40,8 +40,8 @@ Or when not certain if the domain name is fully qualified:
 
 
 	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
 	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
 
 
-The message m is now a message with the question section set to ask
-the MX records for the miek.nl. zone.
+The message m is now a message with the question section set to ask the MX
+records for the miek.nl. zone.
 
 
 The following is slightly more verbose, but more flexible:
 The following is slightly more verbose, but more flexible:
 
 
@@ -51,9 +51,8 @@ The following is slightly more verbose, but more flexible:
      m1.Question = make([]dns.Question, 1)
      m1.Question = make([]dns.Question, 1)
      m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
      m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
 
 
-After creating a message it can be sent.
-Basic use pattern for synchronous querying the DNS at a
-server configured on 127.0.0.1 and port 53:
+After creating a message it can be sent. Basic use pattern for synchronous
+querying the DNS at a server configured on 127.0.0.1 and port 53:
 
 
      c := new(dns.Client)
      c := new(dns.Client)
      in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
      in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
@@ -99,25 +98,24 @@ the Answer section:
 
 
 Domain Name and TXT Character String Representations
 Domain Name and TXT Character String Representations
 
 
-Both domain names and TXT character strings are converted to presentation
-form both when unpacked and when converted to strings.
+Both domain names and TXT character strings are converted to presentation form
+both when unpacked and when converted to strings.
 
 
 For TXT character strings, tabs, carriage returns and line feeds will be
 For TXT character strings, tabs, carriage returns and line feeds will be
-converted to \t, \r and \n respectively. Back slashes and quotations marks
-will be escaped. Bytes below 32 and above 127 will be converted to \DDD
-form.
+converted to \t, \r and \n respectively. Back slashes and quotations marks will
+be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
 
 
-For domain names, in addition to the above rules brackets, periods,
-spaces, semicolons and the at symbol are escaped.
+For domain names, in addition to the above rules brackets, periods, spaces,
+semicolons and the at symbol are escaped.
 
 
 DNSSEC
 DNSSEC
 
 
-DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
-uses public key cryptography to sign resource records. The
-public keys are stored in DNSKEY records and the signatures in RRSIG records.
+DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
+public key cryptography to sign resource records. The public keys are stored in
+DNSKEY records and the signatures in RRSIG records.
 
 
-Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
-to a request.
+Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
+bit to a request.
 
 
      m := new(dns.Msg)
      m := new(dns.Msg)
      m.SetEdns0(4096, true)
      m.SetEdns0(4096, true)
@@ -126,9 +124,9 @@ Signature generation, signature verification and key generation are all supporte
 
 
 DYNAMIC UPDATES
 DYNAMIC UPDATES
 
 
-Dynamic updates reuses the DNS message format, but renames three of
-the sections. Question is Zone, Answer is Prerequisite, Authority is
-Update, only the Additional is not renamed. See RFC 2136 for the gory details.
+Dynamic updates reuses the DNS message format, but renames three of the
+sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
+the Additional is not renamed. See RFC 2136 for the gory details.
 
 
 You can set a rather complex set of rules for the existence of absence of
 You can set a rather complex set of rules for the existence of absence of
 certain resource records or names in a zone to specify if resource records
 certain resource records or names in a zone to specify if resource records
@@ -145,10 +143,9 @@ DNS function shows which functions exist to specify the prerequisites.
   NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
   NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
   zone     rrset    rr       RRset exists (value dep)   dns.Used
   zone     rrset    rr       RRset exists (value dep)   dns.Used
 
 
-The prerequisite section can also be left empty.
-If you have decided on the prerequisites you can tell what RRs should
-be added or deleted. The next table shows the options you have and
-what functions to call.
+The prerequisite section can also be left empty. If you have decided on the
+prerequisites you can tell what RRs should be added or deleted. The next table
+shows the options you have and what functions to call.
 
 
  3.4.2.6 - Table Of Metavalues Used In Update Section
  3.4.2.6 - Table Of Metavalues Used In Update Section
 
 
@@ -181,10 +178,10 @@ changes to the RRset after calling SetTsig() the signature will be incorrect.
 	...
 	...
 	// When sending the TSIG RR is calculated and filled in before sending
 	// When sending the TSIG RR is calculated and filled in before sending
 
 
-When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with
-TSIG, this is the basic use pattern. In this example we request an AXFR for
-miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
-and using the server 176.58.119.54:
+When requesting an zone transfer (almost all TSIG usage is when requesting zone
+transfers), with TSIG, this is the basic use pattern. In this example we
+request an AXFR for miek.nl. with TSIG key named "axfr." and secret
+"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
 
 
 	t := new(dns.Transfer)
 	t := new(dns.Transfer)
 	m := new(dns.Msg)
 	m := new(dns.Msg)
@@ -194,8 +191,8 @@ and using the server 176.58.119.54:
 	c, err := t.In(m, "176.58.119.54:53")
 	c, err := t.In(m, "176.58.119.54:53")
 	for r := range c { ... }
 	for r := range c { ... }
 
 
-You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
-If something is not correct an error is returned.
+You can now read the records from the transfer as they come in. Each envelope
+is checked with TSIG. If something is not correct an error is returned.
 
 
 Basic use pattern validating and replying to a message that has TSIG set.
 Basic use pattern validating and replying to a message that has TSIG set.
 
 
@@ -220,29 +217,30 @@ Basic use pattern validating and replying to a message that has TSIG set.
 
 
 PRIVATE RRS
 PRIVATE RRS
 
 
-RFC 6895 sets aside a range of type codes for private use. This range
-is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
+RFC 6895 sets aside a range of type codes for private use. This range is 65,280
+- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
 can be used, before requesting an official type code from IANA.
 can be used, before requesting an official type code from IANA.
 
 
-see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
+See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
 information.
 information.
 
 
 EDNS0
 EDNS0
 
 
-EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
-by RFC 6891. It defines an new RR type, the OPT RR, which is then completely
+EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
+RFC 6891. It defines an new RR type, the OPT RR, which is then completely
 abused.
 abused.
+
 Basic use pattern for creating an (empty) OPT RR:
 Basic use pattern for creating an (empty) OPT RR:
 
 
 	o := new(dns.OPT)
 	o := new(dns.OPT)
 	o.Hdr.Name = "." // MUST be the root zone, per definition.
 	o.Hdr.Name = "." // MUST be the root zone, per definition.
 	o.Hdr.Rrtype = dns.TypeOPT
 	o.Hdr.Rrtype = dns.TypeOPT
 
 
-The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891)
-interfaces. Currently only a few have been standardized: EDNS0_NSID
-(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
-that these options may be combined in an OPT RR.
-Basic use pattern for a server to check if (and which) options are set:
+The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces.
+Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
+EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options
+may be combined in an OPT RR. Basic use pattern for a server to check if (and
+which) options are set:
 
 
 	// o is a dns.OPT
 	// o is a dns.OPT
 	for _, s := range o.Option {
 	for _, s := range o.Option {
@@ -262,10 +260,9 @@ From RFC 2931:
     ... protection for glue records, DNS requests, protection for message headers
     ... protection for glue records, DNS requests, protection for message headers
     on requests and responses, and protection of the overall integrity of a response.
     on requests and responses, and protection of the overall integrity of a response.
 
 
-It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
-secret approach in TSIG.
-Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
-RSASHA512.
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of
+the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
+ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
 
 
 Signing subsequent messages in multi-message sessions is not implemented.
 Signing subsequent messages in multi-message sessions is not implemented.
 */
 */

+ 20 - 7
vendor/github.com/miekg/dns/duplicate.go

@@ -7,19 +7,32 @@ package dns
 // is so, otherwise false.
 // is so, otherwise false.
 // It's is a protocol violation to have identical RRs in a message.
 // It's is a protocol violation to have identical RRs in a message.
 func IsDuplicate(r1, r2 RR) bool {
 func IsDuplicate(r1, r2 RR) bool {
-	if r1.Header().Class != r2.Header().Class {
+	// Check whether the record header is identical.
+	if !r1.Header().isDuplicate(r2.Header()) {
 		return false
 		return false
 	}
 	}
-	if r1.Header().Rrtype != r2.Header().Rrtype {
+
+	// Check whether the RDATA is identical.
+	return r1.isDuplicate(r2)
+}
+
+func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
+	r2, ok := _r2.(*RR_Header)
+	if !ok {
+		return false
+	}
+	if r1.Class != r2.Class {
+		return false
+	}
+	if r1.Rrtype != r2.Rrtype {
 		return false
 		return false
 	}
 	}
-	if !isDulicateName(r1.Header().Name, r2.Header().Name) {
+	if !isDuplicateName(r1.Name, r2.Name) {
 		return false
 		return false
 	}
 	}
 	// ignore TTL
 	// ignore TTL
-
-	return isDuplicateRdata(r1, r2)
+	return true
 }
 }
 
 
-// isDulicateName checks if the domain names s1 and s2 are equal.
-func isDulicateName(s1, s2 string) bool { return equal(s1, s2) }
+// isDuplicateName checks if the domain names s1 and s2 are equal.
+func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }

+ 10 - 24
vendor/github.com/miekg/dns/duplicate_generate.go

@@ -57,10 +57,7 @@ func main() {
 			continue
 			continue
 		}
 		}
 
 
-		if name == "PrivateRR" || name == "RFC3597" {
-			continue
-		}
-		if name == "OPT" || name == "ANY" || name == "IXFR" || name == "AXFR" {
+		if name == "PrivateRR" || name == "OPT" {
 			continue
 			continue
 		}
 		}
 
 
@@ -70,22 +67,6 @@ func main() {
 	b := &bytes.Buffer{}
 	b := &bytes.Buffer{}
 	b.WriteString(packageHdr)
 	b.WriteString(packageHdr)
 
 
-	// Generate the giant switch that calls the correct function for each type.
-	fmt.Fprint(b, "// isDuplicateRdata calls the rdata specific functions\n")
-	fmt.Fprint(b, "func isDuplicateRdata(r1, r2 RR) bool {\n")
-	fmt.Fprint(b, "switch r1.Header().Rrtype {\n")
-
-	for _, name := range namedTypes {
-
-		o := scope.Lookup(name)
-		_, isEmbedded := getTypeStruct(o.Type(), scope)
-		if isEmbedded {
-			continue
-		}
-		fmt.Fprintf(b, "case Type%s:\nreturn isDuplicate%s(r1.(*%s), r2.(*%s))\n", name, name, name, name)
-	}
-	fmt.Fprintf(b, "}\nreturn false\n}\n")
-
 	// Generate the duplicate check for each type.
 	// Generate the duplicate check for each type.
 	fmt.Fprint(b, "// isDuplicate() functions\n\n")
 	fmt.Fprint(b, "// isDuplicate() functions\n\n")
 	for _, name := range namedTypes {
 	for _, name := range namedTypes {
@@ -95,7 +76,10 @@ func main() {
 		if isEmbedded {
 		if isEmbedded {
 			continue
 			continue
 		}
 		}
-		fmt.Fprintf(b, "func isDuplicate%s(r1, r2 *%s) bool {\n", name, name)
+		fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name)
+		fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name)
+		fmt.Fprint(b, "if !ok { return false }\n")
+		fmt.Fprint(b, "_ = r2\n")
 		for i := 1; i < st.NumFields(); i++ {
 		for i := 1; i < st.NumFields(); i++ {
 			field := st.Field(i).Name()
 			field := st.Field(i).Name()
 			o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
 			o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
@@ -103,12 +87,12 @@ func main() {
 
 
 			// For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are
 			// For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are
 			// *indirectly* defined as a slice in the net package).
 			// *indirectly* defined as a slice in the net package).
-			if _, ok := st.Field(i).Type().(*types.Slice); ok || st.Tag(i) == `dns:"a"` || st.Tag(i) == `dns:"aaaa"` {
+			if _, ok := st.Field(i).Type().(*types.Slice); ok {
 				o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
 				o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
 
 
 				if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
 				if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
 					o3(`for i := 0; i < len(r1.%s); i++ {
 					o3(`for i := 0; i < len(r1.%s); i++ {
-						if !isDulicateName(r1.%s[i], r2.%s[i]) {
+						if !isDuplicateName(r1.%s[i], r2.%s[i]) {
 							return false
 							return false
 						}
 						}
 					}`)
 					}`)
@@ -128,8 +112,10 @@ func main() {
 			switch st.Tag(i) {
 			switch st.Tag(i) {
 			case `dns:"-"`:
 			case `dns:"-"`:
 				// ignored
 				// ignored
+			case `dns:"a"`, `dns:"aaaa"`:
+				o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}")
 			case `dns:"cdomain-name"`, `dns:"domain-name"`:
 			case `dns:"cdomain-name"`, `dns:"domain-name"`:
-				o2("if !isDulicateName(r1.%s, r2.%s) {\nreturn false\n}")
+				o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}")
 			default:
 			default:
 				o2("if r1.%s != r2.%s {\nreturn false\n}")
 				o2("if r1.%s != r2.%s {\nreturn false\n}")
 			}
 			}

+ 68 - 39
vendor/github.com/miekg/dns/edns.go

@@ -78,39 +78,44 @@ func (rr *OPT) String() string {
 	return s
 	return s
 }
 }
 
 
-func (rr *OPT) len() int {
-	l := rr.Hdr.len()
-	for i := 0; i < len(rr.Option); i++ {
+func (rr *OPT) len(off int, compression map[string]struct{}) int {
+	l := rr.Hdr.len(off, compression)
+	for _, o := range rr.Option {
 		l += 4 // Account for 2-byte option code and 2-byte option length.
 		l += 4 // Account for 2-byte option code and 2-byte option length.
-		lo, _ := rr.Option[i].pack()
+		lo, _ := o.pack()
 		l += len(lo)
 		l += len(lo)
 	}
 	}
 	return l
 	return l
 }
 }
 
 
+func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError {
+	panic("dns: internal error: parse should never be called on OPT")
+}
+
+func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
+
 // return the old value -> delete SetVersion?
 // return the old value -> delete SetVersion?
 
 
 // Version returns the EDNS version used. Only zero is defined.
 // Version returns the EDNS version used. Only zero is defined.
 func (rr *OPT) Version() uint8 {
 func (rr *OPT) Version() uint8 {
-	return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
+	return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
 }
 }
 
 
 // SetVersion sets the version of EDNS. This is usually zero.
 // SetVersion sets the version of EDNS. This is usually zero.
 func (rr *OPT) SetVersion(v uint8) {
 func (rr *OPT) SetVersion(v uint8) {
-	rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
+	rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
 }
 }
 
 
 // ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
 // ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
 func (rr *OPT) ExtendedRcode() int {
 func (rr *OPT) ExtendedRcode() int {
-	return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
+	return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
 }
 }
 
 
 // SetExtendedRcode sets the EDNS extended RCODE field.
 // SetExtendedRcode sets the EDNS extended RCODE field.
-func (rr *OPT) SetExtendedRcode(v uint8) {
-	if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
-		return
-	}
-	rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
+//
+// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0.
+func (rr *OPT) SetExtendedRcode(v uint16) {
+	rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
 }
 }
 
 
 // UDPSize returns the UDP buffer size.
 // UDPSize returns the UDP buffer size.
@@ -154,6 +159,8 @@ type EDNS0 interface {
 	unpack([]byte) error
 	unpack([]byte) error
 	// String returns the string representation of the option.
 	// String returns the string representation of the option.
 	String() string
 	String() string
+	// copy returns a deep-copy of the option.
+	copy() EDNS0
 }
 }
 
 
 // EDNS0_NSID option is used to retrieve a nameserver
 // EDNS0_NSID option is used to retrieve a nameserver
@@ -184,7 +191,8 @@ func (e *EDNS0_NSID) pack() ([]byte, error) {
 // Option implements the EDNS0 interface.
 // Option implements the EDNS0 interface.
 func (e *EDNS0_NSID) Option() uint16        { return EDNS0NSID } // Option returns the option code.
 func (e *EDNS0_NSID) Option() uint16        { return EDNS0NSID } // Option returns the option code.
 func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
 func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
-func (e *EDNS0_NSID) String() string        { return string(e.Nsid) }
+func (e *EDNS0_NSID) String() string        { return e.Nsid }
+func (e *EDNS0_NSID) copy() EDNS0           { return &EDNS0_NSID{e.Code, e.Nsid} }
 
 
 // EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
 // EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
 // an idea of where the client lives. See RFC 7871. It can then give back a different
 // an idea of where the client lives. See RFC 7871. It can then give back a different
@@ -274,22 +282,16 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
 		if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
 		if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
 			return errors.New("dns: bad netmask")
 			return errors.New("dns: bad netmask")
 		}
 		}
-		addr := make([]byte, net.IPv4len)
-		for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
-			addr[i] = b[4+i]
-		}
-		e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
+		addr := make(net.IP, net.IPv4len)
+		copy(addr, b[4:])
+		e.Address = addr.To16()
 	case 2:
 	case 2:
 		if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
 		if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
 			return errors.New("dns: bad netmask")
 			return errors.New("dns: bad netmask")
 		}
 		}
-		addr := make([]byte, net.IPv6len)
-		for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
-			addr[i] = b[4+i]
-		}
-		e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
-			addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
-			addr[11], addr[12], addr[13], addr[14], addr[15]}
+		addr := make(net.IP, net.IPv6len)
+		copy(addr, b[4:])
+		e.Address = addr
 	default:
 	default:
 		return errors.New("dns: bad address family")
 		return errors.New("dns: bad address family")
 	}
 	}
@@ -308,6 +310,16 @@ func (e *EDNS0_SUBNET) String() (s string) {
 	return
 	return
 }
 }
 
 
+func (e *EDNS0_SUBNET) copy() EDNS0 {
+	return &EDNS0_SUBNET{
+		e.Code,
+		e.Family,
+		e.SourceNetmask,
+		e.SourceScope,
+		e.Address,
+	}
+}
+
 // The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
 // The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
 //
 //
 //	o := new(dns.OPT)
 //	o := new(dns.OPT)
@@ -343,6 +355,7 @@ func (e *EDNS0_COOKIE) pack() ([]byte, error) {
 func (e *EDNS0_COOKIE) Option() uint16        { return EDNS0COOKIE }
 func (e *EDNS0_COOKIE) Option() uint16        { return EDNS0COOKIE }
 func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
 func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
 func (e *EDNS0_COOKIE) String() string        { return e.Cookie }
 func (e *EDNS0_COOKIE) String() string        { return e.Cookie }
+func (e *EDNS0_COOKIE) copy() EDNS0           { return &EDNS0_COOKIE{e.Code, e.Cookie} }
 
 
 // The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
 // The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
 // an expiration on an update RR. This is helpful for clients that cannot clean
 // an expiration on an update RR. This is helpful for clients that cannot clean
@@ -364,6 +377,7 @@ type EDNS0_UL struct {
 // Option implements the EDNS0 interface.
 // Option implements the EDNS0 interface.
 func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
 func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
 func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
 func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
+func (e *EDNS0_UL) copy() EDNS0    { return &EDNS0_UL{e.Code, e.Lease} }
 
 
 // Copied: http://golang.org/src/pkg/net/dnsmsg.go
 // Copied: http://golang.org/src/pkg/net/dnsmsg.go
 func (e *EDNS0_UL) pack() ([]byte, error) {
 func (e *EDNS0_UL) pack() ([]byte, error) {
@@ -418,10 +432,13 @@ func (e *EDNS0_LLQ) unpack(b []byte) error {
 
 
 func (e *EDNS0_LLQ) String() string {
 func (e *EDNS0_LLQ) String() string {
 	s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
 	s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
-		" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
+		" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
 		" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
 		" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
 	return s
 	return s
 }
 }
+func (e *EDNS0_LLQ) copy() EDNS0 {
+	return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
+}
 
 
 // EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
 // EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
 type EDNS0_DAU struct {
 type EDNS0_DAU struct {
@@ -436,15 +453,16 @@ func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
 
 
 func (e *EDNS0_DAU) String() string {
 func (e *EDNS0_DAU) String() string {
 	s := ""
 	s := ""
-	for i := 0; i < len(e.AlgCode); i++ {
-		if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
+	for _, alg := range e.AlgCode {
+		if a, ok := AlgorithmToString[alg]; ok {
 			s += " " + a
 			s += " " + a
 		} else {
 		} else {
-			s += " " + strconv.Itoa(int(e.AlgCode[i]))
+			s += " " + strconv.Itoa(int(alg))
 		}
 		}
 	}
 	}
 	return s
 	return s
 }
 }
+func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
 
 
 // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
 // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
 type EDNS0_DHU struct {
 type EDNS0_DHU struct {
@@ -459,15 +477,16 @@ func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
 
 
 func (e *EDNS0_DHU) String() string {
 func (e *EDNS0_DHU) String() string {
 	s := ""
 	s := ""
-	for i := 0; i < len(e.AlgCode); i++ {
-		if a, ok := HashToString[e.AlgCode[i]]; ok {
+	for _, alg := range e.AlgCode {
+		if a, ok := HashToString[alg]; ok {
 			s += " " + a
 			s += " " + a
 		} else {
 		} else {
-			s += " " + strconv.Itoa(int(e.AlgCode[i]))
+			s += " " + strconv.Itoa(int(alg))
 		}
 		}
 	}
 	}
 	return s
 	return s
 }
 }
+func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
 
 
 // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
 // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
 type EDNS0_N3U struct {
 type EDNS0_N3U struct {
@@ -483,15 +502,16 @@ func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
 func (e *EDNS0_N3U) String() string {
 func (e *EDNS0_N3U) String() string {
 	// Re-use the hash map
 	// Re-use the hash map
 	s := ""
 	s := ""
-	for i := 0; i < len(e.AlgCode); i++ {
-		if a, ok := HashToString[e.AlgCode[i]]; ok {
+	for _, alg := range e.AlgCode {
+		if a, ok := HashToString[alg]; ok {
 			s += " " + a
 			s += " " + a
 		} else {
 		} else {
-			s += " " + strconv.Itoa(int(e.AlgCode[i]))
+			s += " " + strconv.Itoa(int(alg))
 		}
 		}
 	}
 	}
 	return s
 	return s
 }
 }
+func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
 
 
 // EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314.
 // EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314.
 type EDNS0_EXPIRE struct {
 type EDNS0_EXPIRE struct {
@@ -502,13 +522,11 @@ type EDNS0_EXPIRE struct {
 // Option implements the EDNS0 interface.
 // Option implements the EDNS0 interface.
 func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
 func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
 func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
 func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
+func (e *EDNS0_EXPIRE) copy() EDNS0    { return &EDNS0_EXPIRE{e.Code, e.Expire} }
 
 
 func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
 func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
 	b := make([]byte, 4)
 	b := make([]byte, 4)
-	b[0] = byte(e.Expire >> 24)
-	b[1] = byte(e.Expire >> 16)
-	b[2] = byte(e.Expire >> 8)
-	b[3] = byte(e.Expire)
+	binary.BigEndian.PutUint32(b, e.Expire)
 	return b, nil
 	return b, nil
 }
 }
 
 
@@ -543,6 +561,11 @@ func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
 func (e *EDNS0_LOCAL) String() string {
 func (e *EDNS0_LOCAL) String() string {
 	return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
 	return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
 }
 }
+func (e *EDNS0_LOCAL) copy() EDNS0 {
+	b := make([]byte, len(e.Data))
+	copy(b, e.Data)
+	return &EDNS0_LOCAL{e.Code, b}
+}
 
 
 func (e *EDNS0_LOCAL) pack() ([]byte, error) {
 func (e *EDNS0_LOCAL) pack() ([]byte, error) {
 	b := make([]byte, len(e.Data))
 	b := make([]byte, len(e.Data))
@@ -615,6 +638,7 @@ func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
 	}
 	}
 	return
 	return
 }
 }
+func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
 
 
 // EDNS0_PADDING option is used to add padding to a request/response. The default
 // EDNS0_PADDING option is used to add padding to a request/response. The default
 // value of padding SHOULD be 0x0 but other values MAY be used, for instance if
 // value of padding SHOULD be 0x0 but other values MAY be used, for instance if
@@ -628,3 +652,8 @@ func (e *EDNS0_PADDING) Option() uint16        { return EDNS0PADDING }
 func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
 func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
 func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
 func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
 func (e *EDNS0_PADDING) String() string        { return fmt.Sprintf("%0X", e.Padding) }
 func (e *EDNS0_PADDING) String() string        { return fmt.Sprintf("%0X", e.Padding) }
+func (e *EDNS0_PADDING) copy() EDNS0 {
+	b := make([]byte, len(e.Padding))
+	copy(b, e.Padding)
+	return &EDNS0_PADDING{b}
+}

+ 7 - 1
vendor/github.com/miekg/dns/format.go

@@ -20,7 +20,7 @@ func Field(r RR, i int) string {
 		return ""
 		return ""
 	}
 	}
 	d := reflect.ValueOf(r).Elem().Field(i)
 	d := reflect.ValueOf(r).Elem().Field(i)
-	switch k := d.Kind(); k {
+	switch d.Kind() {
 	case reflect.String:
 	case reflect.String:
 		return d.String()
 		return d.String()
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -31,6 +31,9 @@ func Field(r RR, i int) string {
 		switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
 		switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
 		case `dns:"a"`:
 		case `dns:"a"`:
 			// TODO(miek): Hmm store this as 16 bytes
 			// TODO(miek): Hmm store this as 16 bytes
+			if d.Len() < net.IPv4len {
+				return ""
+			}
 			if d.Len() < net.IPv6len {
 			if d.Len() < net.IPv6len {
 				return net.IPv4(byte(d.Index(0).Uint()),
 				return net.IPv4(byte(d.Index(0).Uint()),
 					byte(d.Index(1).Uint()),
 					byte(d.Index(1).Uint()),
@@ -42,6 +45,9 @@ func Field(r RR, i int) string {
 				byte(d.Index(14).Uint()),
 				byte(d.Index(14).Uint()),
 				byte(d.Index(15).Uint())).String()
 				byte(d.Index(15).Uint())).String()
 		case `dns:"aaaa"`:
 		case `dns:"aaaa"`:
+			if d.Len() < net.IPv6len {
+				return ""
+			}
 			return net.IP{
 			return net.IP{
 				byte(d.Index(0).Uint()),
 				byte(d.Index(0).Uint()),
 				byte(d.Index(1).Uint()),
 				byte(d.Index(1).Uint()),

+ 186 - 113
vendor/github.com/miekg/dns/generate.go

@@ -2,8 +2,8 @@ package dns
 
 
 import (
 import (
 	"bytes"
 	"bytes"
-	"errors"
 	"fmt"
 	"fmt"
+	"io"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 )
 )
@@ -18,152 +18,225 @@ import (
 // * rhs (rdata)
 // * rhs (rdata)
 // But we are lazy here, only the range is parsed *all* occurrences
 // But we are lazy here, only the range is parsed *all* occurrences
 // of $ after that are interpreted.
 // of $ after that are interpreted.
-// Any error are returned as a string value, the empty string signals
-// "no error".
-func generate(l lex, c chan lex, t chan *Token, o string) string {
+func (zp *ZoneParser) generate(l lex) (RR, bool) {
+	token := l.token
 	step := 1
 	step := 1
-	if i := strings.IndexAny(l.token, "/"); i != -1 {
-		if i+1 == len(l.token) {
-			return "bad step in $GENERATE range"
+	if i := strings.IndexByte(token, '/'); i >= 0 {
+		if i+1 == len(token) {
+			return zp.setParseError("bad step in $GENERATE range", l)
 		}
 		}
-		if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
-			if s < 0 {
-				return "bad step in $GENERATE range"
-			}
-			step = s
-		} else {
-			return "bad step in $GENERATE range"
+
+		s, err := strconv.Atoi(token[i+1:])
+		if err != nil || s <= 0 {
+			return zp.setParseError("bad step in $GENERATE range", l)
 		}
 		}
-		l.token = l.token[:i]
+
+		step = s
+		token = token[:i]
 	}
 	}
-	sx := strings.SplitN(l.token, "-", 2)
+
+	sx := strings.SplitN(token, "-", 2)
 	if len(sx) != 2 {
 	if len(sx) != 2 {
-		return "bad start-stop in $GENERATE range"
+		return zp.setParseError("bad start-stop in $GENERATE range", l)
 	}
 	}
+
 	start, err := strconv.Atoi(sx[0])
 	start, err := strconv.Atoi(sx[0])
 	if err != nil {
 	if err != nil {
-		return "bad start in $GENERATE range"
+		return zp.setParseError("bad start in $GENERATE range", l)
 	}
 	}
+
 	end, err := strconv.Atoi(sx[1])
 	end, err := strconv.Atoi(sx[1])
 	if err != nil {
 	if err != nil {
-		return "bad stop in $GENERATE range"
+		return zp.setParseError("bad stop in $GENERATE range", l)
 	}
 	}
 	if end < 0 || start < 0 || end < start {
 	if end < 0 || start < 0 || end < start {
-		return "bad range in $GENERATE range"
+		return zp.setParseError("bad range in $GENERATE range", l)
 	}
 	}
 
 
-	<-c // _BLANK
+	zp.c.Next() // _BLANK
+
 	// Create a complete new string, which we then parse again.
 	// Create a complete new string, which we then parse again.
-	s := ""
-BuildRR:
-	l = <-c
-	if l.value != zNewline && l.value != zEOF {
+	var s string
+	for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
+		if l.err {
+			return zp.setParseError("bad data in $GENERATE directive", l)
+		}
+		if l.value == zNewline {
+			break
+		}
+
 		s += l.token
 		s += l.token
-		goto BuildRR
-	}
-	for i := start; i <= end; i += step {
-		var (
-			escape bool
-			dom    bytes.Buffer
-			mod    string
-			err    error
-			offset int
-		)
-
-		for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
-			switch s[j] {
-			case '\\':
-				if escape {
-					dom.WriteByte('\\')
-					escape = false
-					continue
-				}
-				escape = true
-			case '$':
-				mod = "%d"
-				offset = 0
-				if escape {
-					dom.WriteByte('$')
-					escape = false
-					continue
-				}
-				escape = false
-				if j+1 >= len(s) { // End of the string
-					dom.WriteString(fmt.Sprintf(mod, i+offset))
-					continue
-				} else {
-					if s[j+1] == '$' {
-						dom.WriteByte('$')
-						j++
-						continue
-					}
-				}
-				// Search for { and }
-				if s[j+1] == '{' { // Modifier block
-					sep := strings.Index(s[j+2:], "}")
-					if sep == -1 {
-						return "bad modifier in $GENERATE"
-					}
-					mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
-					if err != nil {
-						return err.Error()
-					}
-					j += 2 + sep // Jump to it
-				}
-				dom.WriteString(fmt.Sprintf(mod, i+offset))
-			default:
-				if escape { // Pretty useless here
-					escape = false
-					continue
-				}
-				dom.WriteByte(s[j])
+	}
+
+	r := &generateReader{
+		s: s,
+
+		cur:   start,
+		start: start,
+		end:   end,
+		step:  step,
+
+		file: zp.file,
+		lex:  &l,
+	}
+	zp.sub = NewZoneParser(r, zp.origin, zp.file)
+	zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
+	zp.sub.SetDefaultTTL(defaultTtl)
+	return zp.subNext()
+}
+
+type generateReader struct {
+	s  string
+	si int
+
+	cur   int
+	start int
+	end   int
+	step  int
+
+	mod bytes.Buffer
+
+	escape bool
+
+	eof bool
+
+	file string
+	lex  *lex
+}
+
+func (r *generateReader) parseError(msg string, end int) *ParseError {
+	r.eof = true // Make errors sticky.
+
+	l := *r.lex
+	l.token = r.s[r.si-1 : end]
+	l.column += r.si // l.column starts one zBLANK before r.s
+
+	return &ParseError{r.file, msg, l}
+}
+
+func (r *generateReader) Read(p []byte) (int, error) {
+	// NewZLexer, through NewZoneParser, should use ReadByte and
+	// not end up here.
+
+	panic("not implemented")
+}
+
+func (r *generateReader) ReadByte() (byte, error) {
+	if r.eof {
+		return 0, io.EOF
+	}
+	if r.mod.Len() > 0 {
+		return r.mod.ReadByte()
+	}
+
+	if r.si >= len(r.s) {
+		r.si = 0
+		r.cur += r.step
+
+		r.eof = r.cur > r.end || r.cur < 0
+		return '\n', nil
+	}
+
+	si := r.si
+	r.si++
+
+	switch r.s[si] {
+	case '\\':
+		if r.escape {
+			r.escape = false
+			return '\\', nil
+		}
+
+		r.escape = true
+		return r.ReadByte()
+	case '$':
+		if r.escape {
+			r.escape = false
+			return '$', nil
+		}
+
+		mod := "%d"
+
+		if si >= len(r.s)-1 {
+			// End of the string
+			fmt.Fprintf(&r.mod, mod, r.cur)
+			return r.mod.ReadByte()
+		}
+
+		if r.s[si+1] == '$' {
+			r.si++
+			return '$', nil
+		}
+
+		var offset int
+
+		// Search for { and }
+		if r.s[si+1] == '{' {
+			// Modifier block
+			sep := strings.Index(r.s[si+2:], "}")
+			if sep < 0 {
+				return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
+			}
+
+			var errMsg string
+			mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
+			if errMsg != "" {
+				return 0, r.parseError(errMsg, si+3+sep)
+			}
+			if r.start+offset < 0 || r.end+offset > 1<<31-1 {
+				return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
 			}
 			}
+
+			r.si += 2 + sep // Jump to it
 		}
 		}
-		// Re-parse the RR and send it on the current channel t
-		rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
-		if err != nil {
-			return err.Error()
+
+		fmt.Fprintf(&r.mod, mod, r.cur+offset)
+		return r.mod.ReadByte()
+	default:
+		if r.escape { // Pretty useless here
+			r.escape = false
+			return r.ReadByte()
 		}
 		}
-		t <- &Token{RR: rx}
-		// Its more efficient to first built the rrlist and then parse it in
-		// one go! But is this a problem?
+
+		return r.s[si], nil
 	}
 	}
-	return ""
 }
 }
 
 
 // Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
 // Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
-func modToPrintf(s string) (string, int, error) {
-	xs := strings.Split(s, ",")
-
+func modToPrintf(s string) (string, int, string) {
 	// Modifier is { offset [ ,width [ ,base ] ] } - provide default
 	// Modifier is { offset [ ,width [ ,base ] ] } - provide default
 	// values for optional width and type, if necessary.
 	// values for optional width and type, if necessary.
-	switch len(xs) {
+	var offStr, widthStr, base string
+	switch xs := strings.Split(s, ","); len(xs) {
 	case 1:
 	case 1:
-		xs = append(xs, "0", "d")
+		offStr, widthStr, base = xs[0], "0", "d"
 	case 2:
 	case 2:
-		xs = append(xs, "d")
+		offStr, widthStr, base = xs[0], xs[1], "d"
 	case 3:
 	case 3:
+		offStr, widthStr, base = xs[0], xs[1], xs[2]
 	default:
 	default:
-		return "", 0, errors.New("bad modifier in $GENERATE")
+		return "", 0, "bad modifier in $GENERATE"
 	}
 	}
 
 
-	// xs[0] is offset, xs[1] is width, xs[2] is base
-	if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
-		return "", 0, errors.New("bad base in $GENERATE")
+	switch base {
+	case "o", "d", "x", "X":
+	default:
+		return "", 0, "bad base in $GENERATE"
 	}
 	}
-	offset, err := strconv.Atoi(xs[0])
-	if err != nil || offset > 255 {
-		return "", 0, errors.New("bad offset in $GENERATE")
+
+	offset, err := strconv.Atoi(offStr)
+	if err != nil {
+		return "", 0, "bad offset in $GENERATE"
 	}
 	}
-	width, err := strconv.Atoi(xs[1])
-	if err != nil || width > 255 {
-		return "", offset, errors.New("bad width in $GENERATE")
+
+	width, err := strconv.Atoi(widthStr)
+	if err != nil || width < 0 || width > 255 {
+		return "", 0, "bad width in $GENERATE"
 	}
 	}
-	switch {
-	case width < 0:
-		return "", offset, errors.New("bad width in $GENERATE")
-	case width == 0:
-		return "%" + xs[1] + xs[2], offset, nil
+
+	if width == 0 {
+		return "%" + base, offset, ""
 	}
 	}
-	return "%0" + xs[1] + xs[2], offset, nil
+
+	return "%0" + widthStr + base, offset, ""
 }
 }

+ 5 - 8
vendor/github.com/miekg/dns/labels.go

@@ -16,7 +16,7 @@ func SplitDomainName(s string) (labels []string) {
 	fqdnEnd := 0 // offset of the final '.' or the length of the name
 	fqdnEnd := 0 // offset of the final '.' or the length of the name
 	idx := Split(s)
 	idx := Split(s)
 	begin := 0
 	begin := 0
-	if s[len(s)-1] == '.' {
+	if IsFqdn(s) {
 		fqdnEnd = len(s) - 1
 		fqdnEnd = len(s) - 1
 	} else {
 	} else {
 		fqdnEnd = len(s)
 		fqdnEnd = len(s)
@@ -28,16 +28,13 @@ func SplitDomainName(s string) (labels []string) {
 	case 1:
 	case 1:
 		// no-op
 		// no-op
 	default:
 	default:
-		end := 0
-		for i := 1; i < len(idx); i++ {
-			end = idx[i]
+		for _, end := range idx[1:] {
 			labels = append(labels, s[begin:end-1])
 			labels = append(labels, s[begin:end-1])
 			begin = end
 			begin = end
 		}
 		}
 	}
 	}
 
 
-	labels = append(labels, s[begin:fqdnEnd])
-	return labels
+	return append(labels, s[begin:fqdnEnd])
 }
 }
 
 
 // CompareDomainName compares the names s1 and s2 and
 // CompareDomainName compares the names s1 and s2 and
@@ -178,10 +175,10 @@ func equal(a, b string) bool {
 		ai := a[i]
 		ai := a[i]
 		bi := b[i]
 		bi := b[i]
 		if ai >= 'A' && ai <= 'Z' {
 		if ai >= 'A' && ai <= 'Z' {
-			ai |= ('a' - 'A')
+			ai |= 'a' - 'A'
 		}
 		}
 		if bi >= 'A' && bi <= 'Z' {
 		if bi >= 'A' && bi <= 'Z' {
-			bi |= ('a' - 'A')
+			bi |= 'a' - 'A'
 		}
 		}
 		if ai != bi {
 		if ai != bi {
 			return false
 			return false

+ 44 - 0
vendor/github.com/miekg/dns/listen_go111.go

@@ -0,0 +1,44 @@
+// +build go1.11
+// +build aix darwin dragonfly freebsd linux netbsd openbsd
+
+package dns
+
+import (
+	"context"
+	"net"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+const supportsReusePort = true
+
+func reuseportControl(network, address string, c syscall.RawConn) error {
+	var opErr error
+	err := c.Control(func(fd uintptr) {
+		opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+	})
+	if err != nil {
+		return err
+	}
+
+	return opErr
+}
+
+func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
+	var lc net.ListenConfig
+	if reuseport {
+		lc.Control = reuseportControl
+	}
+
+	return lc.Listen(context.Background(), network, addr)
+}
+
+func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
+	var lc net.ListenConfig
+	if reuseport {
+		lc.Control = reuseportControl
+	}
+
+	return lc.ListenPacket(context.Background(), network, addr)
+}

Some files were not shown because too many files changed in this diff