
Update dependencies and CHANGES

Ask Bjørn Hansen, 5 years ago
commit 9f7a8ae84d
100 changed files with 4742 additions and 2297 deletions
  1. CHANGES.md (+3 -1)
  2. Gopkg.lock (+113 -35)
  3. vendor/github.com/golang/geo/r1/interval.go (+13 -0)
  4. vendor/github.com/golang/geo/r3/precisevector.go (+1 -1)
  5. vendor/github.com/golang/geo/s1/angle.go (+2 -4)
  6. vendor/github.com/golang/geo/s1/chordangle.go (+5 -0)
  7. vendor/github.com/golang/geo/s1/interval.go (+126 -12)
  8. vendor/github.com/golang/geo/s2/cellid.go (+24 -3)
  9. vendor/github.com/golang/geo/s2/cellunion.go (+1 -1)
  10. vendor/github.com/golang/geo/s2/convex_hull_query.go (+3 -3)
  11. vendor/github.com/golang/geo/s2/crossing_edge_query.go (+4 -5)
  12. vendor/github.com/golang/geo/s2/distance_target.go (+149 -0)
  13. vendor/github.com/golang/geo/s2/edge_distances.go (+30 -4)
  14. vendor/github.com/golang/geo/s2/edge_query.go (+512 -0)
  15. vendor/github.com/golang/geo/s2/edge_tessellator.go (+4 -4)
  16. vendor/github.com/golang/geo/s2/loop.go (+4 -2)
  17. vendor/github.com/golang/geo/s2/max_distance_targets.go (+304 -0)
  18. vendor/github.com/golang/geo/s2/min_distance_targets.go (+360 -0)
  19. vendor/github.com/golang/geo/s2/paddedcell.go (+1 -1)
  20. vendor/github.com/golang/geo/s2/point.go (+6 -1)
  21. vendor/github.com/golang/geo/s2/point_vector.go (+2 -1)
  22. vendor/github.com/golang/geo/s2/polygon.go (+4 -12)
  23. vendor/github.com/golang/geo/s2/polyline.go (+102 -4)
  24. vendor/github.com/golang/geo/s2/predicates.go (+5 -5)
  25. vendor/github.com/golang/geo/s2/query_options.go (+196 -0)
  26. vendor/github.com/golang/geo/s2/rect.go (+171 -1)
  27. vendor/github.com/golang/geo/s2/regioncoverer.go (+1 -1)
  28. vendor/github.com/golang/geo/s2/shape.go (+21 -0)
  29. vendor/github.com/golang/geo/s2/shapeindex.go (+6 -19)
  30. vendor/github.com/golang/protobuf/proto/properties.go (+2 -3)
  31. vendor/github.com/miekg/dns/LICENSE (+2 -4)
  32. vendor/github.com/miekg/dns/acceptfunc.go (+13 -8)
  33. vendor/github.com/miekg/dns/client.go (+36 -42)
  34. vendor/github.com/miekg/dns/dns.go (+2 -2)
  35. vendor/github.com/miekg/dns/dnssec.go (+5 -2)
  36. vendor/github.com/miekg/dns/dnssec_keygen.go (+4 -42)
  37. vendor/github.com/miekg/dns/dnssec_keyscan.go (+2 -32)
  38. vendor/github.com/miekg/dns/doc.go (+4 -5)
  39. vendor/github.com/miekg/dns/duplicate.go (+1 -1)
  40. vendor/github.com/miekg/dns/edns.go (+20 -8)
  41. vendor/github.com/miekg/dns/fuzz.go (+10 -1)
  42. vendor/github.com/miekg/dns/generate.go (+7 -2)
  43. vendor/github.com/miekg/dns/labels.go (+42 -18)
  44. vendor/github.com/miekg/dns/msg.go (+10 -42)
  45. vendor/github.com/miekg/dns/msg_helpers.go (+50 -8)
  46. vendor/github.com/miekg/dns/msg_truncate.go (+11 -6)
  47. vendor/github.com/miekg/dns/privaterr.go (+10 -28)
  48. vendor/github.com/miekg/dns/scan.go (+95 -24)
  49. vendor/github.com/miekg/dns/scan_rr.go (+143 -301)
  50. vendor/github.com/miekg/dns/serve_mux.go (+2 -26)
  51. vendor/github.com/miekg/dns/server.go (+11 -5)
  52. vendor/github.com/miekg/dns/tsig.go (+2 -2)
  53. vendor/github.com/miekg/dns/types.go (+42 -44)
  54. vendor/github.com/miekg/dns/types_generate.go (+1 -3)
  55. vendor/github.com/miekg/dns/version.go (+1 -1)
  56. vendor/github.com/miekg/dns/xfr.go (+12 -6)
  57. vendor/github.com/miekg/dns/ztypes.go (+8 -8)
  58. vendor/github.com/oschwald/geoip2-golang/reader.go (+87 -4)
  59. vendor/github.com/oschwald/maxminddb-golang/decoder.go (+85 -95)
  60. vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go (+1 -1)
  61. vendor/github.com/oschwald/maxminddb-golang/node.go (+42 -0)
  62. vendor/github.com/oschwald/maxminddb-golang/reader.go (+111 -79)
  63. vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go (+1 -1)
  64. vendor/github.com/oschwald/maxminddb-golang/reader_other.go (+1 -1)
  65. vendor/github.com/oschwald/maxminddb-golang/traverse.go (+23 -34)
  66. vendor/github.com/oschwald/maxminddb-golang/verifier.go (+7 -2)
  67. vendor/github.com/prometheus/client_golang/prometheus/build_info.go (+29 -0)
  68. vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go (+22 -0)
  69. vendor/github.com/prometheus/client_golang/prometheus/collector.go (+1 -1)
  70. vendor/github.com/prometheus/client_golang/prometheus/doc.go (+3 -4)
  71. vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (+108 -13)
  72. vendor/github.com/prometheus/client_golang/prometheus/histogram.go (+45 -73)
  73. vendor/github.com/prometheus/client_golang/prometheus/http.go (+0 -504)
  74. vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (+4 -57)
  75. vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go (+65 -0)
  76. vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go (+112 -0)
  77. vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go (+159 -1)
  78. vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go (+0 -181)
  79. vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (+43 -5)
  80. vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (+122 -0)
  81. vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go (+0 -144)
  82. vendor/github.com/prometheus/client_golang/prometheus/registry.go (+12 -4)
  83. vendor/github.com/prometheus/client_golang/prometheus/summary.go (+131 -21)
  84. vendor/github.com/prometheus/client_golang/prometheus/wrap.go (+21 -0)
  85. vendor/github.com/prometheus/client_model/ruby/LICENSE (+0 -201)
  86. vendor/github.com/prometheus/common/expfmt/text_create.go (+8 -10)
  87. vendor/github.com/prometheus/common/expfmt/text_parse.go (+10 -3)
  88. vendor/github.com/prometheus/common/model/time.go (+7 -1)
  89. vendor/github.com/prometheus/procfs/arp.go (+85 -0)
  90. vendor/github.com/prometheus/procfs/buddyinfo.go (+3 -13)
  91. vendor/github.com/prometheus/procfs/cpuinfo.go (+167 -0)
  92. vendor/github.com/prometheus/procfs/crypto.go (+131 -0)
  93. vendor/github.com/prometheus/procfs/fs.go (+17 -20)
  94. vendor/github.com/prometheus/procfs/internal/fs/fs.go (+55 -0)
  95. vendor/github.com/prometheus/procfs/internal/util/parse.go (+88 -0)
  96. vendor/github.com/prometheus/procfs/internal/util/readfile.go (+38 -0)
  97. vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (+48 -0)
  98. vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go (+26 -0)
  99. vendor/github.com/prometheus/procfs/internal/util/valueparser.go (+91 -0)
  100. vendor/github.com/prometheus/procfs/ipvs.go (+12 -30)

+ 3 - 1
CHANGES.md

@@ -1,9 +1,11 @@
 # GeoDNS Changelog
 
-## Master
+## 3.0.2 December 2019
 
 * Better test errors when geoip2 files aren't found
 * Require Go 1.13 or later (just for build script for now)
+* Add geodns-logs to Docker image
+* Fix targeting tests (GeoIP data changed)
 * Update dependencies
 
 ## 3.0.1 April 2019

+ 113 - 35
Gopkg.lock

@@ -3,204 +3,282 @@
 
 
 [[projects]]
   branch = "master"
+  digest = "1:ad61071aaffb5343a2b2eaa9df42308409a233edea13bffc8c1837a6da208738"
   name = "github.com/abh/errorutil"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
 
 [[projects]]
-  branch = "master"
+  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
-  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+  pruneopts = "NUT"
+  revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
+  version = "v1.0.1"
 
 [[projects]]
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
+  pruneopts = "NUT"
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"
 
 [[projects]]
+  digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
   name = "github.com/fsnotify/fsnotify"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
   version = "v1.4.7"
 
 [[projects]]
   branch = "master"
+  digest = "1:9cf4b2d3c07de2ed0229e6b06b785e5e181df5aeb7a458cb75d75c45e8a7f088"
   name = "github.com/golang/geo"
   packages = [
     "r1",
     "r2",
     "r3",
     "s1",
-    "s2"
+    "s2",
   ]
-  revision = "476085157cff9aaeef4d4f124649436542d4114a"
+  pruneopts = "NUT"
+  revision = "5b978397cfecc7280e598e9ac5854e9534b0918b"
 
 [[projects]]
+  digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
-  revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
-  version = "v1.3.1"
+  pruneopts = "NUT"
+  revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
+  version = "v1.3.2"
 
 [[projects]]
+  digest = "1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53"
   name = "github.com/google/uuid"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
   version = "v1.1.1"
 
 [[projects]]
   branch = "master"
+  digest = "1:1ebcd0aef6d4512ef3e56932606daec583ea39f2b756111f64d50179ef032130"
   name = "github.com/hpcloud/tail"
   packages = [
     ".",
     "ratelimiter",
     "util",
     "watch",
-    "winfile"
+    "winfile",
   ]
+  pruneopts = "NUT"
   revision = "7d02b9cfe313d6d68d4a184d56d490b5a8ba4163"
   source = "https://github.com/abh/tail.git"
 
 [[projects]]
+  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
+  pruneopts = "NUT"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"
 
 [[projects]]
+  branch = "master"
+  digest = "1:f0fbceaa3347c12bb6ff0a2cdbf01cfe31ec460ec3f1dfcf39b588b62c3bfc2a"
   name = "github.com/miekg/dns"
   packages = ["."]
-  revision = "73601d4aed9d844322611759d7f3619110b7c88e"
-  version = "v1.1.8"
+  pruneopts = "NUT"
+  revision = "eda228adcff6f7a80ddaf1d265209a10a4f51ca9"
 
 [[projects]]
   branch = "master"
+  digest = "1:8458ad87b512968637510fc6caea25f8b734dfda7c35fd5ce00300ecd16ccf46"
   name = "github.com/oschwald/geoip2-golang"
   packages = ["."]
-  revision = "42d566f218c8e6131d26c31d07a294c4c7eecfd8"
+  pruneopts = "NUT"
+  revision = "ae8b169eb05b73c205edf569a6e4b0dc42c139fb"
 
 [[projects]]
+  digest = "1:e2b7b40b4c3edf6115a86b3e905b47f596c64d017fbc1d7fe2e294f78da998ce"
   name = "github.com/oschwald/maxminddb-golang"
   packages = ["."]
-  revision = "c5bec84d1963260297932a1b7a1753c8420717a7"
-  version = "v1.3.0"
+  pruneopts = "NUT"
+  revision = "6a033e62c03b7dab4c37f7c9eb2ebb3b10e8f13a"
+  version = "v1.6.0"
 
 [[projects]]
+  digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
   name = "github.com/pborman/uuid"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
   version = "v1.2"
 
 [[projects]]
+  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
+  pruneopts = "NUT"
   revision = "792786c7400a136282c1664665ae0a8db921c6c2"
   version = "v1.0.0"
 
 [[projects]]
+  digest = "1:097cc61836050f45cbb712ae3bb45d66fba464c16b8fac09907fa3c1f753eff6"
   name = "github.com/prometheus/client_golang"
   packages = [
     "prometheus",
     "prometheus/internal",
-    "prometheus/promhttp"
+    "prometheus/promhttp",
   ]
-  revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
-  version = "v0.9.2"
+  pruneopts = "NUT"
+  revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
+  version = "v1.1.0"
 
 [[projects]]
-  branch = "master"
+  digest = "1:982be0b5396e16a663697899ce69cc7b1e71ddcae4153af157578d4dc9bc3f88"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
-  revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
+  pruneopts = "NUT"
+  revision = "d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e"
+  version = "v0.1.0"
 
 [[projects]]
+  digest = "1:98278956c7c550efc75a027e528aa51743f06fd0e33613d7ed224432a11e5ecf"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
-    "model"
+    "model",
   ]
-  revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
-  version = "v0.2.0"
+  pruneopts = "NUT"
+  revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
+  version = "v0.7.0"
 
 [[projects]]
-  branch = "master"
+  digest = "1:a4b063a766f50cdadc1fce12053e3bde8599f1f8b7621d7e293cd2fc86543a44"
   name = "github.com/prometheus/procfs"
-  packages = ["."]
-  revision = "ea9eea63887261e4d8ed8315f4078e88d540c725"
+  packages = [
+    ".",
+    "internal/fs",
+    "internal/util",
+  ]
+  pruneopts = "NUT"
+  revision = "6d489fc7f1d9cd890a250f3ea3431b1744b9623f"
+  version = "v0.0.8"
 
 [[projects]]
   branch = "master"
+  digest = "1:9b39ab945cd31eb35df36b7037e1c05c0bdd7d9e3bed36a92f3328ba7bdac093"
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
-    "require"
+    "require",
   ]
-  revision = "34c6fa2dc70986bccbbffcc6130f6920a924b075"
+  pruneopts = "NUT"
+  revision = "858f37ff9bc48070cde7f2c2895dbe0db1ad9326"
 
 [[projects]]
   branch = "master"
+  digest = "1:cd7e85fc3687e062714febdee3e8efeb00a413a2a620d28908fd0258261d2353"
   name = "golang.org/x/crypto"
   packages = [
     "ed25519",
-    "ed25519/internal/edwards25519"
+    "ed25519/internal/edwards25519",
   ]
-  revision = "38d8ce5564a5b71b2e3a00553993f1b9a7ae852f"
+  pruneopts = "NUT"
+  revision = "53104e6ec876ad4e22ad27cce588b01392043c1b"
 
 [[projects]]
   branch = "master"
+  digest = "1:7182ef5a2af56ca8c788b291e7f9926b85c354eb0a93bc5a57ce19c99e42d74f"
   name = "golang.org/x/net"
   packages = [
     "bpf",
     "internal/iana",
     "internal/socket",
     "ipv4",
-    "ipv6"
+    "ipv6",
   ]
-  revision = "eb5bcb51f2a31c7d5141d810b70815c05d9c9146"
+  pruneopts = "NUT"
+  revision = "c0dbc17a35534bf2e581d7a942408dc936316da4"
 
 [[projects]]
   branch = "master"
+  digest = "1:bf56d1df618e1d7e2b4e3340df554f9c6b2013a9e109a94b818dcfac400cf17c"
   name = "golang.org/x/sys"
   packages = [
     "unix",
-    "windows"
+    "windows",
   ]
-  revision = "4b34438f7a67ee5f45cc6132e2bad873a20324e9"
+  pruneopts = "NUT"
+  revision = "c709ea063b76879dc9915358f55d4d77c16ab6d5"
 
 [[projects]]
+  digest = "1:aebe32d1af029f012a9c605ff95051df68b8f744f5d1f4f1ac65162a03708938"
   name = "gopkg.in/gcfg.v1"
   packages = [
     ".",
     "scanner",
     "token",
-    "types"
+    "types",
   ]
+  pruneopts = "NUT"
   revision = "61b2c08bc8f6068f7c5ca684372f9a6cb1c45ebe"
   version = "v1.2.3"
 
 [[projects]]
+  branch = "v2.0"
+  digest = "1:2fcc810e3b608ac6164b3bfc9b0f6b29ec9a02572b78808fddf7fed34b564e69"
   name = "gopkg.in/natefinch/lumberjack.v2"
   packages = ["."]
-  revision = "7d6a1875575e09256dc552b4c0e450dcd02bd10e"
-  version = "v2.0.0"
+  pruneopts = "NUT"
+  revision = "94d9e492cc53c413571e9b40c0b39cee643ee516"
 
 [[projects]]
   branch = "v1"
+  digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1"
   name = "gopkg.in/tomb.v1"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
 
 [[projects]]
+  digest = "1:b233ad4ec87ac916e7bf5e678e98a2cb9e8b52f6de6ad3e11834fc7a71b8e3bf"
   name = "gopkg.in/warnings.v0"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
   version = "v0.1.2"
 
+[[projects]]
+  digest = "1:1532269ea4c7a7fcb29639a46318dd00c0f99c2f308a2a2860acf5b5354b8acc"
+  name = "gopkg.in/yaml.v2"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
+  version = "v2.2.7"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "05c26536c9dbe29c5d47910fece572eab1118cf3f2c3b700802211e0e9a7afcd"
+  input-imports = [
+    "github.com/abh/errorutil",
+    "github.com/fsnotify/fsnotify",
+    "github.com/golang/geo/s2",
+    "github.com/hpcloud/tail",
+    "github.com/miekg/dns",
+    "github.com/oschwald/geoip2-golang",
+    "github.com/pborman/uuid",
+    "github.com/prometheus/client_golang/prometheus",
+    "github.com/prometheus/client_golang/prometheus/promhttp",
+    "github.com/stretchr/testify/assert",
+    "github.com/stretchr/testify/require",
+    "gopkg.in/gcfg.v1",
+    "gopkg.in/natefinch/lumberjack.v2",
+  ]
   solver-name = "gps-cdcl"
   solver-version = 1

+ 13 - 0
vendor/github.com/golang/geo/r1/interval.go

@@ -162,3 +162,16 @@ func (i Interval) ApproxEqual(other Interval) bool {
 	return math.Abs(other.Lo-i.Lo) <= epsilon &&
 		math.Abs(other.Hi-i.Hi) <= epsilon
 }
+
+// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two
+// intervals x and y, this distance is defined as
+//     h(x, y) = max_{p in x} min_{q in y} d(p, q).
+func (i Interval) DirectedHausdorffDistance(other Interval) float64 {
+	if i.IsEmpty() {
+		return 0
+	}
+	if other.IsEmpty() {
+		return math.Inf(1)
+	}
+	return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo))
+}
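
The new method implements the formula in its comment directly: h(x, y) = max(0, max(x.Hi - y.Hi, y.Lo - x.Lo)). A minimal sketch of how it behaves, assuming a vendored golang/geo at this revision (the interval values are made up):

package main

import (
	"fmt"

	"github.com/golang/geo/r1"
)

func main() {
	x := r1.Interval{Lo: 1, Hi: 5}
	y := r1.Interval{Lo: 2, Hi: 4}
	// The point 5 in x is 1 away from the nearest point of y.
	fmt.Println(x.DirectedHausdorffDistance(y)) // 1
	// y is contained in x, so every point of y is at distance 0.
	fmt.Println(y.DirectedHausdorffDistance(x)) // 0
}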

+ 1 - 1
vendor/github.com/golang/geo/r3/precisevector.go

@@ -64,7 +64,7 @@ func precMul(a, b *big.Float) *big.Float {
 // PreciseVector represents a point in ℝ³ using high-precision values.
 // Note that this is NOT a complete implementation because there are some
 // operations that Vector supports that are not feasible with arbitrary precision
-// math. (e.g., methods that need divison like Normalize, or methods needing a
+// math. (e.g., methods that need division like Normalize, or methods needing a
 // square root operation such as Norm)
 type PreciseVector struct {
 	X, Y, Z *big.Float

+ 2 - 4
vendor/github.com/golang/geo/s1/angle.go

@@ -22,7 +22,7 @@ import (
 // Angle represents a 1D angle. The internal representation is a double precision
 // value in radians, so conversion to and from radians is exact.
 // Conversions between E5, E6, E7, and Degrees are not always
-// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(310000000).
+// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000).
 //
 // The following conversions between degrees and radians are exact:
 //
@@ -98,7 +98,7 @@ func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
 // Abs returns the absolute value of the angle.
 func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
 
-// Normalized returns an equivalent angle in (-2π, 2π].
+// Normalized returns an equivalent angle in (-π, π].
 func (a Angle) Normalized() Angle {
 	rad := math.Remainder(float64(a), 2*math.Pi)
 	if rad <= -math.Pi {
@@ -113,5 +113,3 @@ func (a Angle) String() string {
 
 
 // BUG(dsymonds): The major differences from the C++ version are:
 //   - no unsigned E5/E6/E7 methods
-//   - no S2Point or S2LatLng constructors
-//   - no comparison or arithmetic operators
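
The corrected range (-π, π] is what math.Remainder actually yields; a quick sketch under the same vendoring assumption (sample angles chosen for illustration):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	// 3π/2 lies outside (-π, π]; Normalized maps it to -π/2.
	fmt.Println(s1.Angle(3 * math.Pi / 2).Normalized().Radians()) // -1.5707963...
	// -π is normalized to the canonical representation π.
	fmt.Println(s1.Angle(-math.Pi).Normalized().Radians()) // 3.1415926...
}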

+ 5 - 0
vendor/github.com/golang/geo/s1/chordangle.go

@@ -243,3 +243,8 @@ func (c ChordAngle) Cos() float64 {
 func (c ChordAngle) Tan() float64 {
 	return c.Sin() / c.Cos()
 }
+
+// TODO(roberts): Differences from C++:
+//   Helpers to/from E5/E6/E7
+//   Helpers to/from degrees and radians directly.
+//   FastUpperBoundFrom(angle Angle)

+ 126 - 12
vendor/github.com/golang/geo/s1/interval.go

@@ -19,14 +19,25 @@ import (
 	"strconv"
 	"strconv"
 )
 )
 
 
-// Interval represents a closed interval on a unit circle.
-// Zero-length intervals (where Lo == Hi) represent single points.
-// If Lo > Hi then the interval is "inverted".
-// The point at (-1, 0) on the unit circle has two valid representations,
-// [π,π] and [-π,-π]. We normalize the latter to the former in IntervalFromEndpoints.
-// There are two special intervals that take advantage of that:
-//   - the full interval, [-π,π], and
-//   - the empty interval, [π,-π].
+// An Interval represents a closed interval on a unit circle (also known
+// as a 1-dimensional sphere). It is capable of representing the empty
+// interval (containing no points), the full interval (containing all
+// points), and zero-length intervals (containing a single point).
+//
+// Points are represented by the angle they make with the positive x-axis in
+// the range [-π, π]. An interval is represented by its lower and upper
+// bounds (both inclusive, since the interval is closed). The lower bound may
+// be greater than the upper bound, in which case the interval is "inverted"
+// (i.e. it passes through the point (-1, 0)).
+//
+// The point (-1, 0) has two valid representations, π and -π. The
+// normalized representation of this point is π, so that endpoints
+// of normal intervals are in the range (-π, π]. We normalize the latter to
+// the former in IntervalFromEndpoints. However, we take advantage of the point
+// -π to construct two special intervals:
+//   The full interval is [-π, π]
+//   The empty interval is [π, -π].
+//
 // Treat the exported fields as read-only.
 // Treat the exported fields as read-only.
 type Interval struct {
 	Lo, Hi float64
@@ -271,7 +282,7 @@ func (i Interval) Intersection(oi Interval) Interval {
 }
 
 // AddPoint returns the interval expanded by the minimum amount necessary such
-// that it contains the given point "p" (an angle in the range [-Pi, Pi]).
+// that it contains the given point "p" (an angle in the range [-π, π]).
 func (i Interval) AddPoint(p float64) Interval {
 	if math.Abs(p) > math.Pi {
 		return i
@@ -338,11 +349,114 @@ func (i Interval) Expanded(margin float64) Interval {
 	return result
 }
 
+// ApproxEqual reports whether this interval can be transformed into the given
+// interval by moving each endpoint by at most ε, without the
+// endpoints crossing (which would invert the interval). Empty and full
+// intervals are considered to start at an arbitrary point on the unit circle,
+// so any interval with (length <= 2*ε) matches the empty interval, and
+// any interval with (length >= 2*π - 2*ε) matches the full interval.
+func (i Interval) ApproxEqual(other Interval) bool {
+	// Full and empty intervals require special cases because the endpoints
+	// are considered to be positioned arbitrarily.
+	if i.IsEmpty() {
+		return other.Length() <= 2*epsilon
+	}
+	if other.IsEmpty() {
+		return i.Length() <= 2*epsilon
+	}
+	if i.IsFull() {
+		return other.Length() >= 2*(math.Pi-epsilon)
+	}
+	if other.IsFull() {
+		return i.Length() >= 2*(math.Pi-epsilon)
+	}
+
+	// The purpose of the last test below is to verify that moving the endpoints
+	// does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20].
+	return (math.Abs(math.Remainder(other.Lo-i.Lo, 2*math.Pi)) <= epsilon &&
+		math.Abs(math.Remainder(other.Hi-i.Hi, 2*math.Pi)) <= epsilon &&
+		math.Abs(i.Length()-other.Length()) <= 2*epsilon)
+
+}
+
 func (i Interval) String() string {
 	// like "[%.7f, %.7f]"
 	return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]"
 }
 
-// BUG(dsymonds): The major differences from the C++ version are:
-//   - no validity checking on construction, etc. (not a bug?)
-//   - a few operations
+// Complement returns the complement of the interior of the interval. An interval and
+// its complement have the same boundary but do not share any interior
+// values. The complement operator is not a bijection, since the complement
+// of a singleton interval (containing a single value) is the same as the
+// complement of an empty interval.
+func (i Interval) Complement() Interval {
+	if i.Lo == i.Hi {
+		// Singleton. The interval just contains a single point.
+		return FullInterval()
+	}
+	// Handles empty and full.
+	return Interval{i.Hi, i.Lo}
+}
+
+// ComplementCenter returns the midpoint of the complement of the interval. For full and empty
+// intervals, the result is arbitrary. For a singleton interval (containing a
+// single point), the result is its antipodal point on S1.
+func (i Interval) ComplementCenter() float64 {
+	if i.Lo != i.Hi {
+		return i.Complement().Center()
+	}
+	// Singleton. The interval just contains a single point.
+	if i.Hi <= 0 {
+		return i.Hi + math.Pi
+	}
+	return i.Hi - math.Pi
+}
+
+// DirectedHausdorffDistance returns the Hausdorff distance to the given interval.
+// For two intervals i and y, this distance is defined by
+//     h(i, y) = max_{p in i} min_{q in y} d(p, q),
+// where d(.,.) is measured along S1.
+func (i Interval) DirectedHausdorffDistance(y Interval) Angle {
+	if y.ContainsInterval(i) {
+		return 0 // This includes the case i is empty.
+	}
+	if y.IsEmpty() {
+		return Angle(math.Pi) // maximum possible distance on s1.
+	}
+	yComplementCenter := y.ComplementCenter()
+	if i.Contains(yComplementCenter) {
+		return Angle(positiveDistance(y.Hi, yComplementCenter))
+	}
+
+	// The Hausdorff distance is realized by either two i.Hi endpoints or two
+	// i.Lo endpoints, whichever is farther apart.
+	hiHi := 0.0
+	if IntervalFromEndpoints(y.Hi, yComplementCenter).Contains(i.Hi) {
+		hiHi = positiveDistance(y.Hi, i.Hi)
+	}
+
+	loLo := 0.0
+	if IntervalFromEndpoints(yComplementCenter, y.Lo).Contains(i.Lo) {
+		loLo = positiveDistance(i.Lo, y.Lo)
+	}
+
+	return Angle(math.Max(hiHi, loLo))
+}
+
+// Project returns the closest point in the interval to the given point p.
+// The interval must be non-empty.
+func (i Interval) Project(p float64) float64 {
+	if p == -math.Pi {
+		p = math.Pi
+	}
+	if i.fastContains(p) {
+		return p
+	}
+	// Compute distance from p to each endpoint.
+	dlo := positiveDistance(p, i.Lo)
+	dhi := positiveDistance(i.Hi, p)
+	if dlo < dhi {
+		return i.Lo
+	}
+	return i.Hi
+}
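
A short sketch of the new s1.Interval helpers above, again assuming this vendored revision (the interval is arbitrary):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	i := s1.IntervalFromEndpoints(0, math.Pi/2)
	// The complement is the inverted interval [π/2, 0], covering the
	// rest of the circle.
	fmt.Println(i.Complement()) // [1.5707963, 0.0000000]
	// π is outside i; going around the circle it is π/2 from i.Hi but
	// π from i.Lo, so it projects onto i.Hi.
	fmt.Println(i.Project(math.Pi)) // 1.5707963267948966
}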

+ 24 - 3
vendor/github.com/golang/geo/s2/cellid.go

@@ -341,6 +341,27 @@ func (ci CellID) String() string {
 	return b.String()
 }
 
+// cellIDFromString returns a CellID from a string in the form "1/3210".
+func cellIDFromString(s string) CellID {
+	level := len(s) - 2
+	if level < 0 || level > maxLevel {
+		return CellID(0)
+	}
+	face := int(s[0] - '0')
+	if face < 0 || face > 5 || s[1] != '/' {
+		return CellID(0)
+	}
+	id := CellIDFromFace(face)
+	for i := 2; i < len(s); i++ {
+		childPos := s[i] - '0'
+		if childPos < 0 || childPos > 3 {
+			return CellID(0)
+		}
+		id = id.Children()[childPos]
+	}
+	return id
+}
+
 // Point returns the center of the s2 cell on the sphere as a Point.
 // The maximum directional error in Point (compared to the exact
 // mathematical result) is 1.5 * dblEpsilon radians, and the maximum length
@@ -460,7 +481,7 @@ func (ci CellID) encode(e *encoder) {
 	e.writeUint64(uint64(ci))
 }
 
-// Decode encodes the CellID.
+// Decode decodes the CellID.
 func (ci *CellID) Decode(r io.Reader) error {
 	d := &decoder{r: asByteReader(r)}
 	ci.decode(d)
@@ -555,8 +576,8 @@ func cellIDFromFaceIJ(f, i, j int) CellID {
 	// Hilbert curve orientation respectively.
 	for k := 7; k >= 0; k-- {
 		mask := (1 << lookupBits) - 1
-		bits += int((i>>uint(k*lookupBits))&mask) << (lookupBits + 2)
-		bits += int((j>>uint(k*lookupBits))&mask) << 2
+		bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2)
+		bits += ((j >> uint(k*lookupBits)) & mask) << 2
 		bits = lookupPos[bits]
 		n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits)
 		bits &= (swapMask | invertMask)
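
cellIDFromString is unexported, but the walk it performs is easy to mirror with the exported API; a hypothetical standalone equivalent (parseCellID is not part of the s2 package):

package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

// parseCellID mirrors the walk in cellIDFromString above: "1/3210" is
// face 1, then child positions 3, 2, 1, 0 down successive levels.
func parseCellID(s string) s2.CellID {
	if len(s) < 2 || len(s)-2 > 30 || s[0] < '0' || s[0] > '5' || s[1] != '/' {
		return s2.CellID(0)
	}
	id := s2.CellIDFromFace(int(s[0] - '0'))
	for _, c := range s[2:] {
		if c < '0' || c > '3' {
			return s2.CellID(0)
		}
		id = id.Children()[c-'0']
	}
	return id
}

func main() {
	id := parseCellID("1/3210")
	fmt.Println(id.Face(), id.Level()) // 1 4
}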

+ 1 - 1
vendor/github.com/golang/geo/s2/cellunion.go

@@ -379,7 +379,7 @@ func areSiblings(a, b, c, d CellID) bool {
 	// mask that blocks out the two bits that encode the child position of
 	// "id" with respect to its parent, then check that the other three
 	// children all agree with "mask".
-	mask := uint64(d.lsb() << 1)
+	mask := d.lsb() << 1
 	mask = ^(mask + (mask << 1))
 	idMasked := (uint64(d) & mask)
 	return ((uint64(a)&mask) == idMasked &&

+ 3 - 3
vendor/github.com/golang/geo/s2/convex_hull_query.go

@@ -223,15 +223,15 @@ func singlePointLoop(p Point) *Loop {
 	d1 := p.Cross(d0)
 	vertices := []Point{
 		p,
-		Point{p.Add(d0.Mul(offset)).Normalize()},
-		Point{p.Add(d1.Mul(offset)).Normalize()},
+		{p.Add(d0.Mul(offset)).Normalize()},
+		{p.Add(d1.Mul(offset)).Normalize()},
 	}
 	return LoopFromPoints(vertices)
 }
 
 // singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
 func singleEdgeLoop(a, b Point) *Loop {
-	vertices := []Point{a, b, Point{a.Add(b.Vector).Normalize()}}
+	vertices := []Point{a, b, {a.Add(b.Vector).Normalize()}}
 	loop := LoopFromPoints(vertices)
 	// The resulting loop may be clockwise, so invert it if necessary.
 	loop.Normalize()

+ 4 - 5
vendor/github.com/golang/geo/s2/crossing_edge_query.go

@@ -152,14 +152,13 @@ func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
 
 
 	for _, cell := range c.cells {
 		if cell == nil {
+			continue
 		}
 		clipped := cell.findByShapeID(shapeID)
 		if clipped == nil {
 			continue
 		}
-		for _, j := range clipped.edges {
-			edges = append(edges, j)
-		}
+		edges = append(edges, clipped.edges...)
 	}
 
 	if len(c.cells) > 1 {
 	if len(c.cells) > 1 {
@@ -190,7 +189,7 @@ func uniqueInts(in []int) []int {
 // CAVEAT: This method may return shapes that have an empty set of candidate edges.
 // However the return value is non-empty only if at least one shape has a candidate edge.
 func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
-	edgeMap := make(EdgeMap, 0)
+	edgeMap := make(EdgeMap)
 
 	// If there are only a few edges then it's faster to use brute force. We
 	// only bother with this optimization when there is a single shape.
@@ -232,7 +231,7 @@ func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
 }
 
 // getCells returns the set of ShapeIndexCells that might contain edges intersecting
-// the edge AB in the given cell root. This method is used primarly by loop and shapeutil.
+// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
 func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
 	aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
 	if ok {

+ 149 - 0
vendor/github.com/golang/geo/s2/distance_target.go

@@ -0,0 +1,149 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"github.com/golang/geo/s1"
+)
+
+// The distance interface represents a set of common methods used by algorithms
+// that compute distances between various S2 types.
+type distance interface {
+	// chordAngle returns this type as a ChordAngle.
+	chordAngle() s1.ChordAngle
+
+	// fromChordAngle is used to type convert a ChordAngle to this type.
+	// This is to work around needing to be clever in parts of the code
+	// where a distanceTarget interface method expects distances, but the
+	// user only supplies a ChordAngle, and we need to dynamically cast it
+	// to an appropriate distance interface types.
+	fromChordAngle(o s1.ChordAngle) distance
+
+	// zero returns a zero distance.
+	zero() distance
+	// negative returns a value smaller than any valid value.
+	negative() distance
+	// infinity returns a value larger than any valid value.
+	infinity() distance
+
+	// less is similar to the Less method in Sort. To get minimum values,
+	// this would be a less than type operation. For maximum, this would
+	// be a greater than type operation.
+	less(other distance) bool
+
+	// sub subtracts the other value from this one and returns the new value.
+	// This is done as a method and not simple mathematical operation to
+	// allow closest and furthest to implement this in opposite ways.
+	sub(other distance) distance
+
+	// chordAngleBound reports the upper bound on a ChordAngle corresponding
+	// to this distance. For example, if distance measures WGS84 ellipsoid
+	// distance then the corresponding angle needs to be 0.56% larger.
+	chordAngleBound() s1.ChordAngle
+
+	// updateDistance may update the value this distance represents
+	// based on the given input. The updated value and a boolean reporting
+	// if the value was changed are returned.
+	updateDistance(other distance) (distance, bool)
+}
+
+// distanceTarget is an interface that represents a geometric type to which distances
+// are measured.
+//
+// For example, there are implementations that measure distances to a Point,
+// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry
+// stored in ShapeIndex.
+//
+// The distanceTarget types are provided for the benefit of types that measure
+// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery,
+// ClosestPointQuery, and ClosestCellQuery, etc.
+type distanceTarget interface {
+	// capBound returns a Cap that bounds the set of points whose distance to the
+	// target is distance.zero().
+	capBound() Cap
+
+	// updateDistanceToPoint updates the distance if the distance to
+	// the point P is within the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToPoint(p Point, dist distance) (distance, bool)
+
+	// updateDistanceToEdge updates the distance if the distance to
+	// the edge E is within than the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToEdge(e Edge, dist distance) (distance, bool)
+
+	// updateDistanceToCell updates the distance if the distance to the cell C
+	// (including its interior) is within the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToCell(c Cell, dist distance) (distance, bool)
+
+	// setMaxError potentially updates the value of MaxError, and reports if
+	// the specific type supports altering it. Whenever one of the
+	// updateDistanceTo... methods above returns true, the returned distance
+	// is allowed to be up to maxError larger than the true minimum distance.
+	// In other words, it gives this target object permission to terminate its
+	// distance calculation as soon as it has determined that (1) the minimum
+	// distance is less than minDist and (2) the best possible further
+	// improvement is less than maxError.
+	//
+	// If the target takes advantage of maxError to optimize its distance
+	// calculation, this method must return true. (Most target types will
+	// default to return false.)
+	setMaxError(maxErr s1.ChordAngle) bool
+
+	// maxBruteForceIndexSize reports the maximum number of indexed objects for
+	// which it is faster to compute the distance by brute force (e.g., by testing
+	// every edge) rather than by using an index.
+	//
+	// The following method is provided as a convenience for types that compute
+	// distances to a collection of indexed geometry, such as ClosestEdgeQuery
+	// and ClosestPointQuery.
+	//
+	// Types that do not support this should return a -1.
+	maxBruteForceIndexSize() int
+
+	// distance returns an instance of the underlying distance type this
+	// target uses. This is to work around the use of Templates in the C++.
+	distance() distance
+
+	// visitContainingShapes finds all polygons in the given index that
+	// completely contain a connected component of the target geometry. (For
+	// example, if the target consists of 10 points, this method finds
+	// polygons that contain any of those 10 points.) For each such polygon,
+	// the visit function is called with the Shape of the polygon along with
+	// a point of the target geometry that is contained by that polygon.
+	//
+	// Optionally, any polygon that intersects the target geometry may also be
+	// returned.  In other words, this method returns all polygons that
+	// contain any connected component of the target, along with an arbitrary
+	// subset of the polygons that intersect the target.
+	//
+	// For example, suppose that the index contains two abutting polygons
+	// A and B. If the target consists of two points "a" contained by A and
+	// "b" contained by B, then both A and B are returned. But if the target
+	// consists of the edge "ab", then any subset of {A, B} could be returned
+	// (because both polygons intersect the target but neither one contains
+	// the edge "ab").
+	//
+	// If the visit function returns false, this method terminates early and
+	// returns false as well. Otherwise returns true.
+	//
+	// NOTE(roberts): This method exists only for the purpose of implementing
+	// edgeQuery IncludeInteriors efficiently.
+	visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool
+}
+
+// shapePointVisitorFunc defines a type of function the visitContainingShapes can call.
+type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool

+ 30 - 4
vendor/github.com/golang/geo/s2/edge_distances.go

@@ -241,10 +241,33 @@ func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.C
 	// interior case then both of these angles must be acute.
 	//
 	// We check this by computing the squared edge lengths of the planar
-	// triangle ABX, and testing acuteness using the law of cosines:
+	// triangle ABX, and testing whether angles XAB and XBA are both acute using
+	// the law of cosines:
 	//
-	//   max(XA^2, XB^2) < min(XA^2, XB^2) + AB^2
-	if math.Max(xa2, xb2) >= math.Min(xa2, xb2)+(a.Sub(b.Vector)).Norm2() {
+	//            | XA^2 - XB^2 | < AB^2      (*)
+	//
+	// This test must be done conservatively (taking numerical errors into
+	// account) since otherwise we might miss a situation where the true minimum
+	// distance is achieved by a point on the edge interior.
+	//
+	// There are two sources of error in the expression above (*).  The first is
+	// that points are not normalized exactly; they are only guaranteed to be
+	// within 2 * dblEpsilon of unit length.  Under the assumption that the two
+	// sides of (*) are nearly equal, the total error due to normalization errors
+	// can be shown to be at most
+	//
+	//        2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	//
+	// The other source of error is rounding of results in the calculation of (*).
+	// Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon,
+	// plus an additional relative error of 0.5 * dblEpsilon in the final
+	// subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 +
+	// AB^2) for convenience.  This yields a final error bound of
+	//
+	//        4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	ab2 := a.Sub(b.Vector).Norm2()
+	maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon)
+	if math.Abs(xa2-xb2) >= ab2+maxError {
 		return minDist, false
 	}
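
The bound derived above is plain arithmetic, so it can be sanity-checked numerically; a sketch with made-up squared lengths (dblEpsilon is 2^-52, as in the package):

package main

import (
	"fmt"
	"math"
)

func main() {
	const dblEpsilon = 2.220446049250313e-16 // 2^-52
	// Made-up squared lengths XA^2, XB^2, AB^2.
	xa2, xb2, ab2 := 0.25, 0.09, 0.09
	maxError := 4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon
	// The conservative rejection test from the hunk above: when
	// |XA^2 - XB^2| >= AB^2 (plus error slack), the closest point
	// cannot lie in the edge interior.
	fmt.Println(math.Abs(xa2-xb2) >= ab2+maxError) // true
}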
 
 
@@ -274,8 +297,11 @@ func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.C
 	// Otherwise we do the exact, more expensive test for the interior case.
 	// This test is very likely to succeed because of the conservative planar
 	// test we did initially.
+	//
+	// TODO(roberts): Ensure that the errors in test are accurately reflected in the
+	// minUpdateInteriorDistanceMaxError.
 	cx := c.Cross(x.Vector)
-	if a.Dot(cx) >= 0 || b.Dot(cx) <= 0 {
+	if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 {
 		return minDist, false
 	}
 

+ 512 - 0
vendor/github.com/golang/geo/s2/edge_query.go

@@ -0,0 +1,512 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/s1"
+)
+
+// EdgeQueryOptions holds the options for controlling how EdgeQuery operates.
+//
+// Options can be chained together builder-style:
+//
+//	opts = NewClosestEdgeQueryOptions().
+//		MaxResults(1).
+//		DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
+//		MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
+//	query = NewClosestEdgeQuery(index, opts)
+//
+//  or set individually:
+//
+//	opts = NewClosestEdgeQueryOptions()
+//	opts.IncludeInteriors(true)
+//
+// or just inline:
+//
+//	query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3))
+//
+// If you pass a nil as the options you get the default values for the options.
+type EdgeQueryOptions struct {
+	common *queryOptions
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is exactly
+// equal to the limit are not returned. To include values that are equal,
+// specify the limit as the next largest representable distance, i.e. limit.Successor().
+func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.DistanceLimit(limit)
+	return e
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions {
+	e.common = e.common.IncludeInteriors(x)
+	return e
+}
+
+// UseBruteForce sets or disables the use of brute force in a query.
+func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions {
+	e.common = e.common.UseBruteForce(x)
+	return e
+}
+
+// MaxError specifies that edges up to dist farther away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than MaxDistance will always be returned.
+func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.MaxError(dist)
+	return e
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions {
+	e.common = e.common.MaxResults(n)
+	return e
+}
+
+// NewClosestEdgeQueryOptions returns a set of edge query options suitable
+// for performing closest edge queries.
+func NewClosestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(minDistance(0)),
+	}
+}
+
+// NewFurthestEdgeQueryOptions returns a set of edge query options suitable
+// for performing furthest edge queries.
+func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(maxDistance(0)),
+	}
+}
+
+// EdgeQueryResult represents an edge that meets the target criteria for the
+// query. Note the following special cases:
+//
+//  - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
+//    Such results may be returned when the option IncludeInteriors is true.
+//
+//  - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
+//    satisfies the requested query options.
+type EdgeQueryResult struct {
+	distance distance
+	shapeID  int32
+	edgeID   int32
+}
+
+// Distance reports the distance to the edge in this result that satisfied
+// the query's parameters.
+func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() }
+
+// ShapeID reports the ID of the Shape this result is for.
+func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID }
+
+// EdgeID reports the ID of the edge in the results Shape.
+func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID }
+
+// newEdgeQueryResult returns a result instance with default values.
+func newEdgeQueryResult(target distanceTarget) EdgeQueryResult {
+	return EdgeQueryResult{
+		distance: target.distance().infinity(),
+		shapeID:  -1,
+		edgeID:   -1,
+	}
+}
+
+// IsInterior reports if this result represents the interior of a Shape.
+func (e EdgeQueryResult) IsInterior() bool {
+	return e.shapeID >= 0 && e.edgeID < 0
+}
+
+// IsEmpty reports if this has no edge that satisfies the given edge query options.
+// This result is only returned in one special case, namely when FindEdge() does
+// not find any suitable edges.
+func (e EdgeQueryResult) IsEmpty() bool {
+	return e.shapeID < 0
+}
+
+// Less reports if this result is less than the other, first by distance,
+// then by (shapeID, edgeID). This is used for sorting.
+func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
+	if e.distance.less(other.distance) {
+		return true
+	}
+	if other.distance.less(e.distance) {
+		return false
+	}
+	if e.shapeID < other.shapeID {
+		return true
+	}
+	if other.shapeID < e.shapeID {
+		return false
+	}
+	return e.edgeID < other.edgeID
+}
+
+// EdgeQuery is used to find the edge(s) between two geometries that match a
+// given set of options. It is flexible enough so that it can be adapted to
+// compute maximum distances and even potentially Hausdorff distances.
+//
+// By using the appropriate options, this type can answer questions such as:
+//
+//  - Find the minimum distance between two geometries A and B.
+//  - Find all edges of geometry A that are within a distance D of geometry B.
+//  - Find the k edges of geometry A that are closest to a given point P.
+//
+// You can also specify whether polygons should include their interiors (i.e.,
+// if a point is contained by a polygon, should the distance be zero or should
+// it be measured to the polygon boundary?)
+//
+// The input geometries may consist of any number of points, polylines, and
+// polygons (collectively referred to as "shapes"). Shapes do not need to be
+// disjoint; they may overlap or intersect arbitrarily. The implementation is
+// designed to be fast for both simple and complex geometries.
+type EdgeQuery struct {
+	index  *ShapeIndex
+	opts   *queryOptions
+	target distanceTarget
+
+	// True if opts.maxError must be subtracted from ShapeIndex cell distances
+	// in order to ensure that such distances are measured conservatively. This
+	// is true only if the target takes advantage of maxError in order to
+	// return faster results, and 0 < maxError < distanceLimit.
+	useConservativeCellDistance bool
+
+	// The decision about whether to use the brute force algorithm is based on
+	// counting the total number of edges in the index. However if the index
+	// contains a large number of shapes, this in itself might take too long.
+	// So instead we only count edges up to (maxBruteForceIndexSize() + 1)
+	// for the current target type (stored as indexNumEdgesLimit).
+	indexNumEdges      int
+	indexNumEdgesLimit int
+
+	// The distance beyond which we can safely ignore further candidate edges.
+	// (Candidates that are exactly at the limit are ignored; this is more
+	// efficient for UpdateMinDistance and should not affect clients since
+	// distance measurements have a small amount of error anyway.)
+	//
+	// Initially this is the same as the maximum distance specified by the user,
+	// but it can also be updated by the algorithm (see maybeAddResult).
+	distanceLimit distance
+
+	// The current set of results of the query.
+	results []EdgeQueryResult
+
+	// This field is true when duplicates must be avoided explicitly. This
+	// is achieved by maintaining a separate set keyed by (shapeID, edgeID)
+	// only, and checking whether each edge is in that set before computing the
+	// distance to it.
+	avoidDuplicates bool
+
+	// testedEdges tracks the set of shape and edges that have already been tested.
+	testedEdges map[ShapeEdgeID]uint32
+}
+
+// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the
+// closest edge(s) to a given Point, Edge, Cell, or geometry collection.
+//
+// You can find either the k closest edges, or all edges within a given
+// radius, or both (i.e., the k closest edges up to a given maximum radius).
+// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in
+// the options.
+//
+// By default *all* edges are returned, so you should always specify either
+// MaxResults or DistanceLimit options or both.
+//
+// Note that by default, distances are measured to the boundary and interior
+// of polygons. For example, if a point is inside a polygon then its distance
+// is zero. To change this behavior, set the IncludeInteriors option to false.
+//
+// If you only need to test whether the distance is above or below a given
+// threshold (e.g., 10 km), you can use the IsDistanceLess() method.  This is
+// much faster than actually calculating the distance with FindEdge,
+// since the implementation can stop as soon as it can prove that the minimum
+// distance is either above or below the threshold.
+func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+	if opts == nil {
+		opts = NewClosestEdgeQueryOptions()
+	}
+	return &EdgeQuery{
+		testedEdges: make(map[ShapeEdgeID]uint32),
+		index:       index,
+		opts:        opts.common,
+	}
+}
+
+// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the
+// furthest edge(s) from a given Point, Edge, Cell, or geometry collection.
+//
+// The furthest edge is defined as the one which maximizes the
+// distance from any point on that edge to any point on the target geometry.
+//
+// Similar to the example in NewClosestEdgeQuery, to find the 5 furthest edges
+// from a given Point:
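+// (An illustrative sketch; index and point are assumed to exist, and
+// MaxResults is the assumed option setter on EdgeQueryOptions.)
+//
+//	opts := NewFurthestEdgeQueryOptions().MaxResults(5)
+//	query := NewFurthestEdgeQuery(index, opts)
+//	target := NewMaxDistanceToPointTarget(point)
+//	results := query.FindEdges(target)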
+func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+	if opts == nil {
+		opts = NewFurthestEdgeQueryOptions()
+	}
+	return &EdgeQuery{
+		testedEdges: make(map[ShapeEdgeID]uint32),
+		index:       index,
+		opts:        opts.common,
+	}
+}
+
+// FindEdges returns the edges for the given target that satisfy the current options.
+//
+// Note that if opts.IncludeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects
+// the indexed polygon with the given ShapeID.
+func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult {
+	return e.findEdges(target, e.opts)
+}
+
+// Distance reports the distance to the target. If the index or target is empty,
+// returns the target distance type's infinity() sentinel.
+//
+// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the
+// distance against a threshold value, since it is often much faster.
+func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
+	return e.findEdge(target, e.opts).Distance()
+}
+
+// IsDistanceLess reports if the distance to target is less than the given limit.
+//
+// This method is usually much faster than Distance(), since it is much
+// less work to determine whether the minimum distance is above or below a
+// threshold than it is to calculate the actual minimum distance.
+//
+// If you wish to check if the distance is less than or equal to the limit, use:
+//
+//	query.IsDistanceLess(target, limit.Successor())
+//
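+// For example, to test whether any indexed edge lies within one degree of a
+// point pt (an illustrative sketch; index and pt are assumed to exist):
+//
+//	query := NewClosestEdgeQuery(index, nil)
+//	target := NewMinDistanceToPointTarget(pt)
+//	near := query.IsDistanceLess(target, s1.ChordAngleFromAngle(s1.Degree))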
+func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
+	opts := e.opts
+	opts = opts.MaxResults(1).
+		DistanceLimit(limit).
+		MaxError(s1.StraightChordAngle)
+	return !e.findEdge(target, opts).IsEmpty()
+}
+
+// IsDistanceGreater reports if the distance to target is greater than limit.
+//
+// This method is usually much faster than Distance, since it is much
+// less work to determine whether the maximum distance is above or below a
+// threshold than it is to calculate the actual maximum distance.
+// If you wish to check if the distance is greater than or equal to the limit, use:
+//
+//	query.IsDistanceGreater(target, limit.Predecessor())
+//
+func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
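+	// For furthest-edge queries the target's distance type is maxDistance,
+	// whose less() comparison is inverted (see max_distance_targets.go), so
+	// running IsDistanceLess in that inverted space answers whether the true
+	// distance is greater than the limit.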
+	return e.IsDistanceLess(target, limit)
+}
+
+// IsConservativeDistanceLessOrEqual reports if the distance to target is less
+// or equal to the limit, where the limit has been expanded by the maximum error
+// for the distance calculation.
+//
+// For example, suppose that we want to test whether two geometries might
+// intersect each other after they are snapped together using Builder
+// (using the IdentitySnapFunction with a given "snap radius").  Since
+// Builder uses exact distance predicates (s2predicates), we need to
+// measure the distance between the two geometries conservatively.  If the
+// distance is definitely greater than "snap radius", then the geometries
+// are guaranteed to not intersect after snapping.
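+//
+// For example (an illustrative sketch; query, target, and snapRadius are
+// assumed to exist):
+//
+//	mightIntersect := query.IsConservativeDistanceLessOrEqual(target, snapRadius)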
+func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+	return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit)))
+}
+
+// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is greater
+// than or equal to the given limit with some small tolerance.
+func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+	return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit)))
+}
+
+// findEdges returns the closest edges to the given target that satisfy the given options.
+//
+// Note that if opts.includeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects the
+// indexed polygon with the given shapeID.
+func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult {
+	e.findEdgesInternal(target, opts)
+	// TODO(roberts): Revisit this if there is a heap or other sorted and
+	// uniquing datastructure we can use instead of just a slice.
+	e.results = sortAndUniqueResults(e.results)
+	if len(e.results) > e.opts.maxResults {
+		e.results = e.results[:e.opts.maxResults]
+	}
+	return e.results
+}
+
+func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult {
+	if len(results) <= 1 {
+		return results
+	}
+	sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
+	j := 0
+	for i := 1; i < len(results); i++ {
+		if results[j] == results[i] {
+			continue
+		}
+		j++
+		results[j] = results[i]
+	}
+	return results[:j+1]
+}
+
+// findEdge is a convenience method that returns exactly one edge, and if no
+// edges satisfy the given search criteria, then a default Result is returned.
+//
+// This is primarily a convenience used by a number of methods on the
+// distance targets and within EdgeQuery itself.
+func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult {
+	opts.MaxResults(1)
+	e.findEdges(target, opts)
+	if len(e.results) > 0 {
+		return e.results[0]
+	}
+
+	return newEdgeQueryResult(target)
+}
+
+// findEdgesInternal does the actual work of finding the edges that match the given options.
+func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) {
+	e.target = target
+	e.opts = opts
+
+	e.testedEdges = make(map[ShapeEdgeID]uint32)
+	e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit)
+	e.results = make([]EdgeQueryResult, 0)
+
+	if e.distanceLimit == target.distance().zero() {
+		return
+	}
+
+	if opts.includeInteriors {
+		shapeIDs := map[int32]struct{}{}
+		e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool {
+			shapeIDs[e.index.idForShape(containingShape)] = struct{}{}
+			return len(shapeIDs) < opts.maxResults
+		})
+		for shapeID := range shapeIDs {
+			e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1})
+		}
+
+		if e.distanceLimit == target.distance().zero() {
+			return
+		}
+	}
+
+	// If maxError > 0 and the target takes advantage of this, then we may
+	// need to adjust the distance estimates to ShapeIndex cells to ensure
+	// that they are always a lower bound on the true distance. For example,
+	// suppose max_distance == 100, maxError == 30, and we compute the distance
+	// to the target from some cell C0 as d(C0) == 80. Then because the target
+	// takes advantage of maxError, the true distance could be as low as 50.
+	// In order not to miss edges contained by such cells, we need to subtract
+	// maxError from the distance estimates. This behavior is controlled by
+	// the useConservativeCellDistance flag.
+	//
+	// However there is one important case where this adjustment is not
+	// necessary, namely when distanceLimit < maxError. This is because
+	// maxError only affects the algorithm once at least maxResults edges
+	// have been found that satisfy the given distance limit. At that point,
+	// maxError is subtracted from distanceLimit in order to ensure that
+	// any further matches are closer by at least that amount. But when
+	// distanceLimit < maxError, this reduces the distance limit to 0,
+	// i.e. all remaining candidate cells and edges can safely be discarded.
+	// (This is how IsDistanceLess() and friends are implemented.)
+	targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() &&
+		e.target.setMaxError(opts.maxError)
+
+	// Note that we can't compare maxError and distanceLimit directly
+	// because one is a Delta and one is a Distance. Instead we subtract them.
+	e.useConservativeCellDistance = targetUsesMaxError &&
+		(e.distanceLimit == target.distance().infinity() ||
+			target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError))))
+
+	// Use the brute force algorithm if the index is small enough. To avoid
+	// spending too much time counting edges when there are many shapes, we stop
+	// counting once there are too many edges. We may need to recount the edges
+	// if we later see a target with a larger brute force edge threshold.
+	minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1
+	if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit {
+		e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges)
+		e.indexNumEdgesLimit = minOptimizedEdges
+	}
+
+	if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges {
+		// The brute force algorithm already considers each edge exactly once.
+		e.avoidDuplicates = false
+		e.findEdgesBruteForce()
+	} else {
+		// If the target takes advantage of maxError then we need to avoid
+		// duplicate edges explicitly. (Otherwise it happens automatically.)
+		e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1
+
+		// TODO(roberts): Uncomment when optimized is completed.
+		e.findEdgesBruteForce()
+		//e.findEdgesOptimized()
+	}
+}
+
+func (e *EdgeQuery) addResult(r EdgeQueryResult) {
+	e.results = append(e.results, r)
+	if e.opts.maxResults == 1 {
+		// Optimization for the common case where only the closest edge is wanted.
+		e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError))
+	}
+	// TODO(roberts): Add the other if/else cases when a different data structure
+	// is used for the results.
+}
+
+func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) {
+	if e.avoidDuplicates {
+		// Skip edges that have already been tested, and record this one so
+		// that it is not measured again.
+		key := ShapeEdgeID{e.index.idForShape(shape), edgeID}
+		if _, ok := e.testedEdges[key]; ok {
+			return
+		}
+		e.testedEdges[key] = 1
+	}
+	edge := shape.Edge(int(edgeID))
+	dist := e.distanceLimit
+
+	if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok {
+		e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID})
+	}
+}
+
+func (e *EdgeQuery) findEdgesBruteForce() {
+	// Range over all shapes in the index. Does order matter here? If so,
+	// switch to for i = 0 .. n?
+	for _, shape := range e.index.shapes {
+		// TODO(roberts): can this happen if we are only ranging over current entries?
+		if shape == nil {
+			continue
+		}
+		for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ {
+			e.maybeAddResult(shape, edgeID)
+		}
+	}
+}
+
+// TODO(roberts): Remaining pieces
+// Add clear/reset/re-init method to empty out the state of the query.
+// findEdgesOptimized and related methods.
+// GetEdge
+// Project

+ 4 - 4
vendor/github.com/golang/geo/s2/edge_tessellator.go

@@ -36,10 +36,10 @@ const (
 // of edges in a given 2D projection such that the maximum distance between the
 // geodesic edge and the chain of projected edges is at most the requested tolerance.
 //
-// Method      | Input                  | Output
-// ------------|------------------------|-----------------------
-// Projected   | S2 geodesics           | Planar projected edges
-// Unprojected | Planar projected edges | S2 geodesics
+//   Method      | Input                  | Output
+//   ------------|------------------------|-----------------------
+//   Projected   | S2 geodesics           | Planar projected edges
+//   Unprojected | Planar projected edges | S2 geodesics
 type EdgeTessellator struct {
 	projection   Projection
 	tolerance    s1.ChordAngle

+ 4 - 2
vendor/github.com/golang/geo/s2/loop.go

@@ -506,6 +506,8 @@ func (l *Loop) ChainPosition(edgeID int) ChainPosition {
 // Dimension returns the dimension of the geometry represented by this Loop.
 func (l *Loop) Dimension() int { return 2 }
 
+func (l *Loop) typeTag() typeTag { return typeTagNone }
+
 func (l *Loop) privateInterface() {}
 
 // IsEmpty reports true if this is the special empty loop that contains no points.
@@ -876,7 +878,7 @@ func (l *Loop) Invert() {
 	}
 
 	// originInside must be set correctly before building the ShapeIndex.
-	l.originInside = l.originInside != true
+	l.originInside = !l.originInside
 	if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 {
 		// The complement of this loop contains both poles.
 		l.bound = FullRect()
@@ -1092,7 +1094,7 @@ func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point {
 // the loop. The return value is between 0 and 4*pi. (Note that the return
 // value is not affected by whether this loop is a "hole" or a "shell".)
 func (l *Loop) Area() float64 {
-	// It is suprisingly difficult to compute the area of a loop robustly. The
+	// It is surprisingly difficult to compute the area of a loop robustly. The
 	// main issues are (1) whether degenerate loops are considered to be CCW or
 	// not (i.e., whether their area is close to 0 or 4*pi), and (2) computing
 	// the areas of small loops with good relative accuracy.

+ 304 - 0
vendor/github.com/golang/geo/s2/max_distance_targets.go

@@ -0,0 +1,304 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// maxDistance implements the distance interface as the supplementary distance
+// (Pi - x), so that the algorithms that find the closest results can be
+// reused, unchanged, to find the furthest results.
+type maxDistance s1.ChordAngle
+
+func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
+func (m maxDistance) zero() distance            { return maxDistance(s1.StraightChordAngle) }
+func (m maxDistance) negative() distance        { return maxDistance(s1.InfChordAngle()) }
+func (m maxDistance) infinity() distance        { return maxDistance(s1.NegativeChordAngle) }
+func (m maxDistance) less(other distance) bool  { return m.chordAngle() > other.chordAngle() }
+func (m maxDistance) sub(other distance) distance {
+	return maxDistance(m.chordAngle() + other.chordAngle())
+}
+func (m maxDistance) chordAngleBound() s1.ChordAngle {
+	return s1.StraightChordAngle - m.chordAngle()
+}
+func (m maxDistance) updateDistance(dist distance) (distance, bool) {
+	if dist.less(m) {
+		m = maxDistance(dist.chordAngle())
+		return m, true
+	}
+	return m, false
+}
+
+func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance {
+	return maxDistance(o)
+}
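+
+// To illustrate the inversion (a hypothetical in-package example, not part
+// of the API): under maxDistance ordering a larger chord angle is "less",
+// so the closest-edge machinery finds furthest edges unchanged.
+//
+//	a := maxDistance(s1.ChordAngleFromAngle(170 * s1.Degree))
+//	b := maxDistance(s1.ChordAngleFromAngle(10 * s1.Degree))
+//	_ = a.less(b) // true: 170 degrees sorts before 10 degrees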
+
+// MaxDistanceToPointTarget is used for computing the maximum distance to a Point.
+type MaxDistanceToPointTarget struct {
+	point Point
+	dist  distance
+}
+
+// NewMaxDistanceToPointTarget returns a new target for the given Point.
+func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget {
+	m := maxDistance(0)
+	return &MaxDistanceToPointTarget{point: point, dist: m}
+}
+
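+// capBound returns a Cap that bounds the antipode of the target. (This is
+// the set of points whose maxDistance to the target is maxDistance.zero().)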
+func (m *MaxDistanceToPointTarget) capBound() Cap {
+	return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, s1.ChordAngle(0))
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point)))
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(maxDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(cell.MaxDistance(m.point)))
+}
+
+func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// For furthest points, we visit the polygons whose interior contains
+	// the antipode of the target point. These are the polygons whose
+	// distance to the target is maxDistance.zero().
+	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+	return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool {
+		return v(shape, m.point)
+	})
+}
+
+func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int           { return 300 }
+func (m *MaxDistanceToPointTarget) distance() distance                    { return m.dist }
+
+// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge.
+type MaxDistanceToEdgeTarget struct {
+	e    Edge
+	dist distance
+}
+
+// NewMaxDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget {
+	m := maxDistance(0)
+	return &MaxDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the antipode of the target. (This
+// is the set of points whose maxDistance to the target is maxDistance.zero)
+func (m *MaxDistanceToEdgeTarget) capBound() Cap {
+	// The following computes a radius equal to half the edge length in an
+	// efficient and numerically stable way.
+	d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
+	r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
+	return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(maxDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(maxDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1)))
+}
+
+func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// We only need to test one edge point. That is because the method *must*
+	// visit a polygon if it fully contains the target, and *is allowed* to
+	// visit a polygon if it intersects the target. If the tested vertex is not
+	// contained, we know the full edge is not contained; if the tested vertex is
+	// contained, then the edge either is fully contained (must be visited) or it
+	// intersects (is allowed to be visited). We visit the center of the edge so
+	// that edge AB gives identical results to BA.
+	target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
+	return target.visitContainingShapes(index, v)
+}
+
+func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int           { return 110 }
+func (m *MaxDistanceToEdgeTarget) distance() distance                    { return m.dist }
+
+// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell.
+type MaxDistanceToCellTarget struct {
+	cell Cell
+	dist distance
+}
+
+// NewMaxDistanceToCellTarget returns a new target for the given Cell.
+func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget {
+	m := maxDistance(0)
+	return &MaxDistanceToCellTarget{cell: cell, dist: m}
+}
+
+func (m *MaxDistanceToCellTarget) capBound() Cap {
+	c := m.cell.CapBound()
+	return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius())
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(m.cell.MaxDistance(p)))
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1)))
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell)))
+}
+
+func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// We only need to check one point here - cell center is simplest.
+	// See comment at MaxDistanceToEdgeTarget's visitContainingShapes.
+	target := NewMaxDistanceToPointTarget(m.cell.Center())
+	return target.visitContainingShapes(index, v)
+}
+
+func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int           { return 100 }
+func (m *MaxDistanceToCellTarget) distance() distance                    { return m.dist }
+
+// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex.
+type MaxDistanceToShapeIndexTarget struct {
+	index *ShapeIndex
+	query *EdgeQuery
+	dist  distance
+}
+
+// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
+func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget {
+	m := maxDistance(0)
+	return &MaxDistanceToShapeIndexTarget{
+		index: index,
+		dist:  m,
+		query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()),
+	}
+}
+
+// capBound returns a Cap that bounds the antipode of the target. This
+// is the set of points whose maxDistance to the target is maxDistance.zero()
+func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
+	// TODO(roberts): Depends on ShapeIndexRegion
+	// c := makeShapeIndexRegion(m.index).CapBound()
+	// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
+	panic("not implemented yet")
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMaxDistanceToPointTarget(p)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMaxDistanceToEdgeTarget(edge)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMaxDistanceToCellTarget(cell)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+// visitContainingShapes returns the polygons containing the antipodal
+// reflection of *any* connected component for target types consisting of
+// multiple connected components. It is sufficient to test containment of
+// one vertex per connected component, since this allows us to also return
+// any polygon whose boundary has distance.zero() to the target.
+func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// It is sufficient to find the set of chain starts in the target index
+	// (i.e., one vertex per connected component of edges) that are contained by
+	// the query index, except for one special case to handle full polygons.
+	//
+	// TODO(roberts): Do this by merge-joining the two ShapeIndexes and share
+	// the code with BooleanOperation.
+	for _, shape := range m.index.shapes {
+		numChains := shape.NumChains()
+		// Shapes that don't have any edges require a special case (below).
+		testedPoint := false
+		for c := 0; c < numChains; c++ {
+			chain := shape.Chain(c)
+			if chain.Length == 0 {
+				continue
+			}
+			testedPoint = true
+			target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+		if !testedPoint {
+			// Special case to handle full polygons.
+			ref := shape.ReferencePoint()
+			if !ref.Contained {
+				continue
+			}
+			target := NewMaxDistanceToPointTarget(ref.Point)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+	m.query.opts.maxError = maxErr
+	return true
+}
+func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 70 }
+func (m *MaxDistanceToShapeIndexTarget) distance() distance          { return m.dist }
+func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool)  { m.query.opts.includeInteriors = b }
+func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool)     { m.query.opts.useBruteForce = b }
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget

+ 360 - 0
vendor/github.com/golang/geo/s2/min_distance_targets.go

@@ -0,0 +1,360 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// minDistance implements distance interface to find closest distance types.
+type minDistance s1.ChordAngle
+
+func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
+func (m minDistance) zero() distance            { return minDistance(0) }
+func (m minDistance) negative() distance        { return minDistance(s1.NegativeChordAngle) }
+func (m minDistance) infinity() distance        { return minDistance(s1.InfChordAngle()) }
+func (m minDistance) less(other distance) bool  { return m.chordAngle() < other.chordAngle() }
+func (m minDistance) sub(other distance) distance {
+	return minDistance(m.chordAngle() - other.chordAngle())
+}
+func (m minDistance) chordAngleBound() s1.ChordAngle {
+	return m.chordAngle().Expanded(m.chordAngle().MaxAngleError())
+}
+
+// updateDistance updates its own value if the other value is less() than it is,
+// and reports if it updated.
+func (m minDistance) updateDistance(dist distance) (distance, bool) {
+	if dist.less(m) {
+		m = minDistance(dist.chordAngle())
+		return m, true
+	}
+	return m, false
+}
+
+func (m minDistance) fromChordAngle(o s1.ChordAngle) distance {
+	return minDistance(o)
+}
+
+// MinDistanceToPointTarget is a type for computing the minimum distance to a Point.
+type MinDistanceToPointTarget struct {
+	point Point
+	dist  distance
+}
+
+// NewMinDistanceToPointTarget returns a new target for the given Point.
+func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget {
+	m := minDistance(0)
+	return &MinDistanceToPointTarget{point: point, dist: m}
+}
+
+func (m *MinDistanceToPointTarget) capBound() Cap {
+	return CapFromCenterChordAngle(m.point, s1.ChordAngle(0))
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(minDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// For closest points, we visit the polygons whose interior contains the
+	// target point itself. These are the polygons whose distance to the
+	// target is minDistance.zero().
+	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+	return q.visitContainingShapes(m.point, func(shape Shape) bool {
+		return v(shape, m.point)
+	})
+}
+
+func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int           { return 120 }
+func (m *MinDistanceToPointTarget) distance() distance                    { return m.dist }
+
+// ----------------------------------------------------------
+
+// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge.
+type MinDistanceToEdgeTarget struct {
+	e    Edge
+	dist distance
+}
+
+// NewMinDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget {
+	m := minDistance(0)
+	return &MinDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the target edge. (This is the set of
+// points whose minDistance to the target is minDistance.zero().)
+func (m *MinDistanceToEdgeTarget) capBound() Cap {
+	// The following computes a radius equal to half the edge length in an
+	// efficient and numerically stable way.
+	d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
+	r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
+	return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(minDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(minDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1)))
+}
+
+func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// We test the center of the edge in order to ensure that edge targets AB
+	// and BA yield identical results (which is not guaranteed by the API but
+	// users might expect).  Other options would be to test both endpoints, or
+	// return different results for AB and BA in some cases.
+	target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
+	return target.visitContainingShapes(index, v)
+}
+
+func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int           { return 60 }
+func (m *MinDistanceToEdgeTarget) distance() distance                    { return m.dist }
+
+// ----------------------------------------------------------
+
+// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell.
+type MinDistanceToCellTarget struct {
+	cell Cell
+	dist distance
+}
+
+// NewMinDistanceToCellTarget returns a new target for the given Cell.
+func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget {
+	m := minDistance(0)
+	return &MinDistanceToCellTarget{cell: cell, dist: m}
+}
+
+func (m *MinDistanceToCellTarget) capBound() Cap {
+	return m.cell.CapBound()
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	return dist.updateDistance(minDistance(m.cell.Distance(p)))
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1)))
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell)))
+}
+
+func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// The simplest approach is simply to return the polygons that contain the
+	// cell center.  Alternatively, if the index cell is smaller than the target
+	// cell then we could return all polygons that are present in the
+	// shapeIndexCell, but since the index is built conservatively this may
+	// include some polygons that don't quite intersect the cell.  So we would
+	// either need to recheck for intersection more accurately, or weaken the
+	// VisitContainingShapes contract so that it only guarantees approximate
+	// intersection, neither of which seems like a good tradeoff.
+	target := NewMinDistanceToPointTarget(m.cell.Center())
+	return target.visitContainingShapes(index, v)
+}
+func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int           { return 30 }
+func (m *MinDistanceToCellTarget) distance() distance                    { return m.dist }
+
+// ----------------------------------------------------------
+
+/*
+// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion.
+type MinDistanceToCellUnionTarget struct {
+	cu    CellUnion
+	query *ClosestCellQuery
+	dist  distance
+}
+
+// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion.
+func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget {
+	m := minDistance(0)
+	return &MinDistanceToCellUnionTarget{cu: cu, dist: m}
+}
+
+func (m *MinDistanceToCellUnionTarget) capBound() Cap {
+	return m.cu.CapBound()
+}
+
+func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	m.query.opts.DistanceLimit = dist.chordAngle()
+	target := NewMinDistanceToPointTarget(p)
+	r := m.query.findEdge(target)
+	if r.ShapeID < 0 {
+		return dist, false
+	}
+	return minDistance(r.Distance), true
+}
+
+func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// We test the center of the edge in order to ensure that edge targets AB
+	// and BA yield identical results (which is not guaranteed by the API but
+	// users might expect).  Other options would be to test both endpoints, or
+	// return different results for AB and BA in some cases.
+	target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
+	return target.visitContainingShapes(index, v)
+}
+func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool {
+	m.query.opts.MaxError = maxErr
+	return true
+}
+func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int           { return 30 }
+func (m *MinDistanceToCellUnionTarget) distance() distance                    { return m.dist }
+*/
+
+// ----------------------------------------------------------
+
+// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex.
+type MinDistanceToShapeIndexTarget struct {
+	index *ShapeIndex
+	query *EdgeQuery
+	dist  distance
+}
+
+// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
+func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget {
+	m := minDistance(0)
+	return &MinDistanceToShapeIndexTarget{
+		index: index,
+		dist:  m,
+		query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()),
+	}
+}
+
+func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
+	// TODO(roberts): Depends on ShapeIndexRegion existing.
+	// c := makeS2ShapeIndexRegion(m.index).CapBound()
+	// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
+	panic("not implemented yet")
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToPointTarget(p)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToEdgeTarget(edge)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToCellTarget(cell)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+// For target types consisting of multiple connected components (such as this
+// one), this method should return the polygons containing *any* connected
+// component. (It is sufficient to test containment of one vertex per
+// connected component, since this allows us to also return any polygon whose
+// boundary has distance.zero() to the target.)
+func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// It is sufficient to find the set of chain starts in the target index
+	// (i.e., one vertex per connected component of edges) that are contained by
+	// the query index, except for one special case to handle full polygons.
+	//
+	// TODO(roberts): Do this by merge-joining the two ShapeIndexes.
+	for _, shape := range m.index.shapes {
+		numChains := shape.NumChains()
+		// Shapes that don't have any edges require a special case (below).
+		testedPoint := false
+		for c := 0; c < numChains; c++ {
+			chain := shape.Chain(c)
+			if chain.Length == 0 {
+				continue
+			}
+			testedPoint = true
+			target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+		if !testedPoint {
+			// Special case to handle full polygons.
+			ref := shape.ReferencePoint()
+			if !ref.Contained {
+				continue
+			}
+			target := NewMinDistanceToPointTarget(ref.Point)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+	m.query.opts.maxError = maxErr
+	return true
+}
+func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 }
+func (m *MinDistanceToShapeIndexTarget) distance() distance          { return m.dist }
+func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool)  { m.query.opts.includeInteriors = b }
+func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool)     { m.query.opts.useBruteForce = b }
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget

+ 1 - 1
vendor/github.com/golang/geo/s2/paddedcell.go

@@ -243,7 +243,7 @@ func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
 	// if both pairs of endpoints are equal we choose maxLevel; if they differ
 	// only at bit 0, we choose (maxLevel - 1), and so on.
 	levelMSB := uint64(((iXor | jXor) << 1) + 1)
-	level := maxLevel - int(findMSBSetNonZero64(levelMSB))
+	level := maxLevel - findMSBSetNonZero64(levelMSB)
 	if level <= p.level {
 		return p.id
 	}

+ 6 - 1
vendor/github.com/golang/geo/s2/point.go

@@ -128,7 +128,12 @@ func (p Point) Distance(b Point) s1.Angle {
 
 // ApproxEqual reports whether the two points are similar enough to be equal.
 func (p Point) ApproxEqual(other Point) bool {
-	return p.Vector.Angle(other.Vector) <= s1.Angle(epsilon)
+	return p.approxEqual(other, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether the two points are within the given epsilon.
+func (p Point) approxEqual(other Point, eps s1.Angle) bool {
+	return p.Vector.Angle(other.Vector) <= eps
 }
 
 // ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance

+ 2 - 1
vendor/github.com/golang/geo/s2/point_vector.go

@@ -20,7 +20,7 @@ var (
 )
 
 // PointVector is a Shape representing a set of Points. Each point
-// is represented as a degenerate point with the same starting and ending
+// is represented as a degenerate edge with the same starting and ending
 // vertices.
 //
 // This type is useful for adding a collection of points to an ShapeIndex.
@@ -38,4 +38,5 @@ func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{
 func (p *PointVector) Dimension() int                    { return 0 }
 func (p *PointVector) IsEmpty() bool                     { return defaultShapeIsEmpty(p) }
 func (p *PointVector) IsFull() bool                      { return defaultShapeIsFull(p) }
+func (p *PointVector) typeTag() typeTag                  { return typeTagPointVector }
 func (p *PointVector) privateInterface()                 {}

+ 4 - 12
vendor/github.com/golang/geo/s2/polygon.go

@@ -384,7 +384,7 @@ func (p *Polygon) initOneLoop() {
 // initLoopProperties sets the properties for polygons with multiple loops.
 func (p *Polygon) initLoopProperties() {
 	// the loops depths are set by initNested/initOriented prior to this.
-
+	p.bound = EmptyRect()
 	p.hasHoles = false
 	for _, l := range p.loops {
 		if l.IsHole() {
@@ -814,6 +814,8 @@ func (p *Polygon) ChainPosition(edgeID int) ChainPosition {
 // Dimension returns the dimension of the geometry represented by this Polygon.
 func (p *Polygon) Dimension() int { return 2 }
 
+func (p *Polygon) typeTag() typeTag { return typeTagPolygon }
+
 func (p *Polygon) privateInterface() {}
 
 // Contains reports whether this polygon contains the other polygon.
@@ -1177,18 +1179,8 @@ func (p *Polygon) decodeCompressed(d *decoder) {
 	for i := range p.loops {
 		p.loops[i] = new(Loop)
 		p.loops[i].decodeCompressed(d, snapLevel)
-		// TODO(roberts): Update this bound.Union call when initLoopProperties is implemented.
-		p.bound = p.bound.Union(p.loops[i].bound)
-		p.numVertices += len(p.loops[i].vertices)
-	}
-	if d.err != nil {
-		return
 	}
-	if p.numVertices == 0 {
-		p.bound = EmptyRect()
-	}
-	p.subregionBound = ExpandForSubregions(p.bound)
-	p.initEdgesAndIndex()
+	p.initLoopProperties()
 }
 
 // TODO(roberts): Differences from C++

+ 102 - 4
vendor/github.com/golang/geo/s2/polyline.go

@@ -87,6 +87,25 @@ func (p *Polyline) Equal(b *Polyline) bool {
 	return true
 }
 
+// ApproxEqual reports whether two polylines have the same number of vertices,
+// and corresponding vertex pairs are separated by no more than the standard margin.
+func (p *Polyline) ApproxEqual(o *Polyline) bool {
+	return p.approxEqual(o, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether two polylines are equal within the given margin.
+func (p *Polyline) approxEqual(o *Polyline, maxError s1.Angle) bool {
+	if len(*p) != len(*o) {
+		return false
+	}
+	for offset, val := range *p {
+		if !val.approxEqual((*o)[offset], maxError) {
+			return false
+		}
+	}
+	return true
+}
+
 // CapBound returns the bounding Cap for this Polyline.
 func (p *Polyline) CapBound() Cap {
 	return p.RectBound().CapBound()
@@ -198,6 +217,8 @@ func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
 // IsFull reports whether this shape contains all points on the sphere.
 func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
 
+func (p *Polyline) typeTag() typeTag { return typeTagPolyline }
+
 func (p *Polyline) privateInterface() {}
 
 // findEndVertex reports the maximal end index such that the line segment between
@@ -460,9 +481,86 @@ func (p *Polyline) Validate() error {
 	return nil
 }
 
+// Intersects reports whether this polyline intersects the given polyline. If
+// the polylines share a vertex they are considered to be intersecting. When a
+// polyline endpoint is the only intersection with the other polyline, the
+// function may return true or false arbitrarily.
+//
+// The running time is quadratic in the number of vertices.
+func (p *Polyline) Intersects(o *Polyline) bool {
+	if len(*p) == 0 || len(*o) == 0 {
+		return false
+	}
+
+	if !p.RectBound().Intersects(o.RectBound()) {
+		return false
+	}
+
+	// TODO(roberts): Use ShapeIndex here.
+	for i := 1; i < len(*p); i++ {
+		crosser := NewChainEdgeCrosser((*p)[i-1], (*p)[i], (*o)[0])
+		for j := 1; j < len(*o); j++ {
+			if crosser.ChainCrossingSign((*o)[j]) != DoNotCross {
+				return true
+			}
+		}
+	}
+	return false
+}
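+
+// For example (an illustrative sketch; two short polylines crossing at the
+// point (1, 0, 0)):
+//
+//	a := Polyline{PointFromCoords(1, -0.1, 0), PointFromCoords(1, 0.1, 0)}
+//	b := Polyline{PointFromCoords(1, 0, -0.1), PointFromCoords(1, 0, 0.1)}
+//	crosses := a.Intersects(&b) // true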
+
+// Interpolate returns the point whose distance from vertex 0 along the polyline is
+// the given fraction of the polyline's total length, and the index of
+// the next vertex after the interpolated point P. Fractions less than zero
+// or greater than one are clamped. The return value is unit length. The cost of
+// this function is currently linear in the number of vertices.
+//
+// This method allows the caller to easily construct a given suffix of the
+// polyline by concatenating P with the polyline vertices starting at that next
+// vertex. Note that P is guaranteed to be different than the point at the next
+// vertex, so this will never result in a duplicate vertex.
+//
+// The polyline must not be empty. Note that if fraction >= 1.0, then the next
+// vertex will be set to len(p) (indicating that no vertices from the polyline
+// need to be appended). The value of the next vertex is always between 1 and
+// len(p).
+//
+// This method can also be used to construct a prefix of the polyline, by
+// taking the polyline vertices up to next vertex-1 and appending the
+// returned point P if it is different from the last vertex (since in this
+// case there is no guarantee of distinctness).
+func (p *Polyline) Interpolate(fraction float64) (Point, int) {
+	// We intentionally let the (fraction >= 1) case fall through, since
+	// we need to handle it in the loop below in any case because of
+	// possible roundoff errors.
+	if fraction <= 0 {
+		return (*p)[0], 1
+	}
+	target := s1.Angle(fraction) * p.Length()
+
+	for i := 1; i < len(*p); i++ {
+		length := (*p)[i-1].Distance((*p)[i])
+		if target < length {
+			// This interpolates with respect to arc length rather than
+			// straight-line distance, and produces a unit-length result.
+			result := InterpolateAtDistance(target, (*p)[i-1], (*p)[i])
+
+			// It is possible that (result == vertex(i)) due to rounding errors.
+			if result == (*p)[i] {
+				return result, i + 1
+			}
+			return result, i
+		}
+		target -= length
+	}
+
+	return (*p)[len(*p)-1], len(*p)
+}
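+
+// For example (an illustrative sketch; a single-edge polyline spanning 90
+// degrees):
+//
+//	line := Polyline{PointFromCoords(1, 0, 0), PointFromCoords(0, 1, 0)}
+//	mid, next := line.Interpolate(0.5) // midpoint of the edge; next == 1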
+
 // TODO(roberts): Differences from C++.
-// Suffix
-// Interpolate/UnInterpolate
-// Intersects(Polyline)
-// ApproxEqual
+// UnInterpolate
 // NearlyCoversPolyline
+// InitToSnapped
+// InitToSimplified
+// IsValid
+// SnapLevel
+// encode/decode compressed

+ 5 - 5
vendor/github.com/golang/geo/s2/predicates.go

@@ -79,8 +79,8 @@ type Direction int
 // These are the three options for the direction of a set of points.
 const (
 	Clockwise        Direction = -1
 	Clockwise        Direction = -1
-	Indeterminate              = 0
-	CounterClockwise           = 1
+	Indeterminate    Direction = 0
+	CounterClockwise Direction = 1
 )
 )
 
 
 // newBigFloat constructs a new big.Float with maximum precision.
 // newBigFloat constructs a new big.Float with maximum precision.
@@ -228,7 +228,7 @@ func expensiveSign(a, b, c Point) Direction {
 	// the three points are truly collinear (e.g., three points on the equator).
 	detSign := stableSign(a, b, c)
 	if detSign != Indeterminate {
-		return Direction(detSign)
+		return detSign
 	}
 
 	// Otherwise fall back to exact arithmetic and symbolic permutations.
@@ -240,7 +240,7 @@ func expensiveSign(a, b, c Point) Direction {
 func exactSign(a, b, c Point, perturb bool) Direction {
 func exactSign(a, b, c Point, perturb bool) Direction {
 	// Sort the three points in lexicographic order, keeping track of the sign
 	// Sort the three points in lexicographic order, keeping track of the sign
 	// of the permutation. (Each exchange inverts the sign of the determinant.)
 	// of the permutation. (Each exchange inverts the sign of the determinant.)
-	permSign := Direction(CounterClockwise)
+	permSign := CounterClockwise
 	pa := &a
 	pa := &a
 	pb := &b
 	pb := &b
 	pc := &c
 	pc := &c
@@ -275,7 +275,7 @@ func exactSign(a, b, c Point, perturb bool) Direction {
 		// sign of the determinant.
 		detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc)
 	}
-	return permSign * Direction(detSign)
+	return permSign * detSign
 }
 
 // symbolicallyPerturbedSign reports the sign of the determinant of three points

+ 196 - 0
vendor/github.com/golang/geo/s2/query_options.go

@@ -0,0 +1,196 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+const maxQueryResults = math.MaxInt32
+
+// queryOptions represents the set of all configurable parameters used by all of
+// the Query types. Most of these fields have non-zero defaults, so initialization
+// is handled within each Query type. All of the exported methods accept user
+// supplied sets of options to set or adjust as necessary.
+//
+// Several of the defaults depend on the distance interface type being used
+// (e.g. minDistance, maxDistance, etc.)
+//
+// If a user sets an option value that a given query type doesn't use, it is ignored.
+type queryOptions struct {
+	// maxResults specifies that at most MaxResults edges should be returned.
+	// This must be at least 1.
+	//
+	// The default value is to return all results.
+	maxResults int
+
+	// distanceLimit specifies that only edges whose distance to the target is
+	// within this distance should be returned.
+	//
+	// Note that edges whose distance is exactly equal to this are
+	// not returned. In most cases this doesn't matter (since distances are
+	// not computed exactly in the first place), but if such edges are needed
+	// then you can retrieve them by specifying the distance as the next
+	// largest representable distance. i.e. distanceLimit.Successor().
+	//
+	// The default value is the infinity value, such that all results will be
+	// returned.
+	distanceLimit s1.ChordAngle
+
+	// maxError specifies that edges up to MaxError further away than the true
+	// closest edges may be substituted in the result set, as long as such
+	// edges satisfy all the remaining search criteria (such as DistanceLimit).
+	// This option only has an effect if MaxResults is also specified;
+	// otherwise all edges closer than DistanceLimit will always be returned.
+	//
+	// Note that this does not affect how the distance between edges is
+	// computed; it simply gives the algorithm permission to stop the search
+	// early as soon as the best possible improvement drops below MaxError.
+	//
+	// This can be used to implement distance predicates efficiently. For
+	// example, to determine whether the minimum distance is less than D, set
+	// MaxResults == 1 and DistanceLimit == MaxError == D. This causes
+	// the algorithm to terminate as soon as it finds any edge whose distance
+	// is less than D, rather than continuing to search for an edge that is
+	// even closer.
+	//
+	// The default value is zero.
+	maxError s1.ChordAngle
+
+	// includeInteriors specifies that polygon interiors should be included
+	// when measuring distances. In other words, polygons that contain the target
+	// should have a distance of zero. (For targets consisting of multiple connected
+	// components, the distance is zero if any component is contained.) This
+	// is indicated in the results by returning a (ShapeID, EdgeID) pair
+	// with EdgeID == -1, i.e. this value denotes the polygon's interior.
+	//
+	// Note that for efficiency, any polygon that intersects the target may or
+	// may not have an EdgeID == -1 result. Such results are optional
+	// because in that case the distance to the polygon is already zero.
+	//
+	// The default value is true.
+	includeInteriors bool
+
+	// useBruteForce specifies that distances should be computed by examining every edge
+	// rather than using the ShapeIndex.
+	//
+	// TODO(roberts): When optimized is implemented, update the default to false.
+	// The default value is true.
+	useBruteForce bool
+
+	// region specifies that results must intersect the given Region.
+	//
+	// Note that if you want to set the region to a disc around a target
+	// point, it is faster to use a PointTarget with distanceLimit set
+	// instead. You can also set a distance limit and additionally require
+	// that results lie within a given rectangle.
+	//
+	// The default is nil (no region limits).
+	region Region
+}
+
+// UseBruteForce sets or disables the use of brute force in a query.
+func (q *queryOptions) UseBruteForce(x bool) *queryOptions {
+	q.useBruteForce = x
+	return q
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (q *queryOptions) IncludeInteriors(x bool) *queryOptions {
+	q.includeInteriors = x
+	return q
+}
+
+// MaxError specifies that edges up to dist further away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than MaxDistance will always be returned.
+func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions {
+	q.maxError = x
+	return q
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (q *queryOptions) MaxResults(x int) *queryOptions {
+	// TODO(roberts): What should be done if the value is <= 0?
+	q.maxResults = int(x)
+	return q
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is exactly
+// equal to the limit are not returned.
+//
+// To include values that are equal, specify the limit with the next largest
+// representable distance such as limit.Successor(), or set the option with
+// Furthest/ClosestInclusiveDistanceLimit.
+func (q *queryOptions) DistanceLimit(x s1.ChordAngle) *queryOptions {
+	q.distanceLimit = x
+	return q
+}
+
+// ClosestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) ClosestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Successor()
+	return q
+}
+
+// FurthestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) FurthestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Predecessor()
+	return q
+}
+
+// ClosestConservativeDistanceLimit sets the distance limit such that it
+// also incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is less than or equal to limit will be returned
+// (along with some edges whose true distance is slightly greater).
+//
+// Algorithms that need to do exact distance comparisons can use this
+// option to find a set of candidate edges that can then be filtered
+// further (e.g., using CompareDistance).
+func (q *queryOptions) ClosestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Expanded(minUpdateDistanceMaxError(limit))
+	return q
+}
+
+// FurthestConservativeDistanceLimit sets the distance limit such that it
+// also incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is greater than or equal to limit will be returned
+// (along with some edges whose true distance is slightly less).
+func (q *queryOptions) FurthestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Expanded(-minUpdateDistanceMaxError(limit))
+	return q
+}
+
+// newQueryOptions returns a set of options using the given distance type
+// with the proper default values.
+func newQueryOptions(d distance) *queryOptions {
+	return &queryOptions{
+		maxResults:       maxQueryResults,
+		distanceLimit:    d.infinity().chordAngle(),
+		maxError:         0,
+		includeInteriors: true,
+		useBruteForce:    false,
+		region:           nil,
+	}
+}
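
The setters above chain, which is what makes the distance-predicate trick from the maxError comment convenient. A minimal sketch of such a chain inside package s2 (minDistance is the internal distance implementation assumed from the companion min_distance_targets.go file added in this commit; usage is illustrative, not part of this hunk):

	// Hypothetical: report whether any edge lies closer than d by stopping
	// the search at the first match.
	d := s1.ChordAngleFromAngle(s1.Degree / 2)
	opts := newQueryOptions(minDistance(0)).
		MaxResults(1).
		DistanceLimit(d).
		MaxError(d)
	_ = opts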

+ 171 - 1
vendor/github.com/golang/geo/s2/rect.go

@@ -220,7 +220,7 @@ func (r Rect) Intersects(other Rect) bool {
 	return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng)
 }
 
-// CapBound returns a cap that countains Rect.
+// CapBound returns a cap that contains Rect.
 func (r Rect) CapBound() Cap {
 	// We consider two possible bounding caps, one whose axis passes
 	// through the center of the lat-long rectangle and one whose axis
@@ -461,5 +461,175 @@ func (r *Rect) decode(d *decoder) {
 	return
 }
 
+// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere)
+// from a given point to the rectangle (both its boundary and its interior).
+// If r is empty, the result is meaningless.
+// The latlng must be valid.
+func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle {
+	if r.Lng.Contains(float64(ll.Lng)) {
+		return maxAngle(0, ll.Lat-s1.Angle(r.Lat.Hi), s1.Angle(r.Lat.Lo)-ll.Lat)
+	}
+
+	i := s1.IntervalFromEndpoints(r.Lng.Hi, r.Lng.ComplementCenter())
+	rectLng := r.Lng.Lo
+	if i.Contains(float64(ll.Lng)) {
+		rectLng = r.Lng.Hi
+	}
+
+	lo := LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
+	hi := LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
+	return DistanceFromSegment(PointFromLatLng(ll), PointFromLatLng(lo), PointFromLatLng(hi))
+}
+
+// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the
+// surface of the sphere) to the given Rect. The directed Hausdorff
+// distance from rectangle A to rectangle B is given by
+//     h(A, B) = max_{p in A} min_{q in B} d(p, q).
+func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle {
+	if r.IsEmpty() {
+		return 0 * s1.Radian
+	}
+	if other.IsEmpty() {
+		return math.Pi * s1.Radian
+	}
+
+	lng := r.Lng.DirectedHausdorffDistance(other.Lng)
+	return directedHausdorffDistance(lng, r.Lat, other.Lat)
+}
+
+// HausdorffDistance returns the undirected Hausdorff distance (measured along the
+// surface of the sphere) to the given Rect.
+// The Hausdorff distance between rectangle A and rectangle B is given by
+//     H(A, B) = max{h(A, B), h(B, A)}.
+func (r Rect) HausdorffDistance(other Rect) s1.Angle {
+	return maxAngle(r.DirectedHausdorffDistance(other),
+		other.DirectedHausdorffDistance(r))
+}
+
+// directedHausdorffDistance returns the directed Hausdorff distance
+// from one longitudinal edge spanning latitude range 'a' to the other
+// longitudinal edge spanning latitude range 'b', with their longitudinal
+// difference given by 'lngDiff'.
+func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle {
+	// By symmetry, we can assume a's longitude is 0 and b's longitude is
+	// lngDiff. Call b's two endpoints bLo and bHi. Let H be the hemisphere
+	// containing a and delimited by the longitude line of b. The Voronoi diagram
+	// of b on H has three edges (portions of great circles) all orthogonal to b
+	// and meeting at bLo cross bHi.
+	// E1: (bLo, bLo cross bHi)
+	// E2: (bHi, bLo cross bHi)
+	// E3: (-bMid, bLo cross bHi), where bMid is the midpoint of b
+	//
+	// They subdivide H into three Voronoi regions. Depending on how longitude 0
+	// (which contains edge a) intersects these regions, we distinguish two cases:
+	// Case 1: it intersects three regions. This occurs when lngDiff <= π/2.
+	// Case 2: it intersects only two regions. This occurs when lngDiff > π/2.
+	//
+	// In the first case, the directed Hausdorff distance to edge b can only be
+	// realized by the following points on a:
+	// A1: two endpoints of a.
+	// A2: intersection of a with the equator, if b also intersects the equator.
+	//
+	// In the second case, the directed Hausdorff distance to edge b can only be
+	// realized by the following points on a:
+	// B1: two endpoints of a.
+	// B2: intersection of a with E3
+	// B3: farthest point from bLo to the interior of D, and farthest point from
+	//     bHi to the interior of U, if any, where D (resp. U) is the portion
+	//     of edge a below (resp. above) the intersection point from B2.
+
+	if lngDiff < 0 {
+		panic("impossible: negative lngDiff")
+	}
+	if lngDiff > math.Pi {
+		panic("impossible: lngDiff > Pi")
+	}
+
+	if lngDiff == 0 {
+		return s1.Angle(a.DirectedHausdorffDistance(b))
+	}
+
+	// Assumed longitude of b.
+	bLng := lngDiff
+	// Two endpoints of b.
+	bLo := PointFromLatLng(LatLng{s1.Angle(b.Lo), bLng})
+	bHi := PointFromLatLng(LatLng{s1.Angle(b.Hi), bLng})
+
+	// Cases A1 and B1.
+	aLo := PointFromLatLng(LatLng{s1.Angle(a.Lo), 0})
+	aHi := PointFromLatLng(LatLng{s1.Angle(a.Hi), 0})
+	maxDistance := maxAngle(
+		DistanceFromSegment(aLo, bLo, bHi),
+		DistanceFromSegment(aHi, bLo, bHi))
+
+	if lngDiff <= math.Pi/2 {
+		// Case A2.
+		if a.Contains(0) && b.Contains(0) {
+			maxDistance = maxAngle(maxDistance, lngDiff)
+		}
+		return maxDistance
+	}
+
+	// Case B2.
+	p := bisectorIntersection(b, bLng)
+	pLat := LatLngFromPoint(p).Lat
+	if a.Contains(float64(pLat)) {
+		maxDistance = maxAngle(maxDistance, p.Angle(bLo.Vector))
+	}
+
+	// Case B3.
+	if pLat > s1.Angle(a.Lo) {
+		intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo)
+		if ok {
+			maxDistance = maxAngle(maxDistance, intDist)
+		}
+	}
+	if pLat < s1.Angle(a.Hi) {
+		intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi)
+		if ok {
+			maxDistance = maxAngle(maxDistance, intDist)
+		}
+	}
+
+	return maxDistance
+}
+
+// interiorMaxDistance returns the max distance from a point b to the segment spanning latitude range
+// aLat on longitude 0 if the max occurs in the interior of aLat. Otherwise, returns (0, false).
+func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) {
+	// Longitude 0 is in the y=0 plane. b.X >= 0 implies that the maximum
+	// does not occur in the interior of aLat.
+	if aLat.IsEmpty() || b.X >= 0 {
+		return 0, false
+	}
+
+	// Project b to the y=0 plane. The antipodal of the normalized projection is
+	// the point at which the maximum distance from b occurs, if it is contained
+	// in aLat.
+	intersectionPoint := PointFromCoords(-b.X, 0, -b.Z)
+	if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) {
+		return 0, false
+	}
+	return b.Angle(intersectionPoint.Vector), true
+}
+
+// bisectorIntersection returns the intersection of longitude 0 with the bisector of an edge
+// on longitude 'lng' and spanning latitude range 'lat'.
+func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point {
+	lng = s1.Angle(math.Abs(float64(lng)))
+	latCenter := s1.Angle(lat.Center())
+
+	// A vector orthogonal to the bisector of the given longitudinal edge.
+	orthoBisector := LatLng{latCenter - math.Pi/2, lng}
+	if latCenter < 0 {
+		orthoBisector = LatLng{-latCenter - math.Pi/2, lng - math.Pi}
+	}
+
+	// A vector orthogonal to longitude 0.
+	orthoLng := Point{r3.Vector{0, -1, 0}}
+
+	return orthoLng.PointCross(PointFromLatLng(orthoBisector))
+}
+
 // BUG: The major differences from the C++ version are:
 //   - GetCentroid, Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)
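
A hedged sketch of the new Rect distance helpers in use (coordinates are illustrative; all functions are from this hunk or the existing s2 API):

	a := RectFromLatLng(LatLngFromDegrees(0, 0)).AddPoint(LatLngFromDegrees(10, 10))
	b := RectFromLatLng(LatLngFromDegrees(20, 20)).AddPoint(LatLngFromDegrees(30, 30))
	dist := a.DistanceToLatLng(LatLngFromDegrees(15, 15)) // point to rectangle
	h := a.HausdorffDistance(b)                           // max of both directed distances
	_, _ = dist, h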

+ 1 - 1
vendor/github.com/golang/geo/s2/regioncoverer.go

@@ -94,7 +94,7 @@ type candidate struct {
 	terminal    bool         // Cell should not be expanded further.
 	numChildren int          // Number of children that intersect the region.
 	children    []*candidate // Actual size may be 0, 4, 16, or 64 elements.
-	priority    int          // Priority of the candiate.
+	priority    int          // Priority of the candidate.
 }
 
 type priorityQueue []*candidate

+ 21 - 0
vendor/github.com/golang/geo/s2/shape.go

@@ -114,6 +114,23 @@ func OriginReferencePoint(contained bool) ReferencePoint {
 	return ReferencePoint{Point: OriginPoint(), Contained: contained}
 }
 
+// typeTag is a 32-bit tag that can be used to identify the type of an encoded
+// Shape. All encodable types have a non-zero type tag. The tags associated
+// with the standard encodable Shape types are listed below.
+type typeTag uint32
+
+const (
+	// Indicates that a given Shape type cannot be encoded.
+	typeTagNone        typeTag = 0
+	typeTagPolygon     typeTag = 1
+	typeTagPolyline    typeTag = 2
+	typeTagPointVector typeTag = 3
+	typeTagLaxPolyline typeTag = 4
+	typeTagLaxPolygon  typeTag = 5
+
+	// The minimum allowable tag for future user-defined Shape types.
+	typeTagMinUser typeTag = 8192
+)
+
 // Shape represents polygonal geometry in a flexible way. It is organized as a
 // collection of edges that optionally defines an interior. All geometry
 // represented by a given Shape must have the same dimension, which means that
@@ -220,6 +237,10 @@ type Shape interface {
 	// IsFull reports whether the Shape contains all points on the sphere.
 	IsFull() bool
 
+	// typeTag returns a value that can be used to identify the type of an
+	// encoded Shape.
+	typeTag() typeTag
+
 	// We do not support implementations of this interface outside this package.
 	privateInterface()
 }

+ 6 - 19
vendor/github.com/golang/geo/s2/shapeindex.go

@@ -16,6 +16,7 @@ package s2
 
 import (
 	"math"
+	"sort"
 	"sync"
 	"sync"
 	"sync/atomic"
 	"sync/atomic"
 
 
@@ -303,18 +304,9 @@ func (s *ShapeIndexIterator) refresh() {
 // seek positions the iterator at the first cell whose ID >= target, or at the
 // end of the index if no such cell exists.
 func (s *ShapeIndexIterator) seek(target CellID) {
-	s.position = 0
-	// In C++, this relies on the lower_bound method of the underlying btree_map.
-	// TODO(roberts): Convert this to a binary search since the list of cells is ordered.
-	for k, v := range s.index.cells {
-		// We've passed the cell that is after us, so we are done.
-		if v >= target {
-			s.position = k
-			break
-		}
-		// Otherwise, advance the position.
-		s.position++
-	}
+	s.position = sort.Search(len(s.index.cells), func(i int) bool {
+		return s.index.cells[i] >= target
+	})
 	s.refresh()
 }
 
@@ -894,7 +886,6 @@ func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) {
 			allEdges[face] = append(allEdges[face], fe)
 		}
 	}
-	return
 }
 
 // updateFaceEdges adds or removes the various edges from the index.
@@ -1188,7 +1179,7 @@ func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *track
 		var clipped *clippedShape
 		// advance to next value base + i
 		eshapeID := int32(s.Len())
-		cshapeID := int32(eshapeID) // Sentinels
+		cshapeID := eshapeID // Sentinels
 
 		if eNext != len(edges) {
 			eshapeID = edges[eNext].faceEdge.shapeID
@@ -1495,7 +1486,7 @@ func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
 	}
 
 	// Count any remaining containing shapes.
-	count += int(len(shapeIDs)) - int(shapeIDidx)
+	count += len(shapeIDs) - shapeIDidx
 	return count
 }
 
@@ -1514,7 +1505,3 @@ func maxLevelForEdge(edge Edge) int {
 func (s *ShapeIndex) removeShapeInternal(removed *removedShape, allEdges [][]faceEdge, t *tracker) {
 	// TODO(roberts): finish the implementation of this.
 }
-
-// TODO(roberts): Differences from C++.
-// ShapeContainsPoint
-// FindContainingShapes

+ 2 - 3
vendor/github.com/golang/protobuf/proto/properties.go

@@ -38,7 +38,6 @@ package proto
 import (
 	"fmt"
 	"log"
-	"os"
 	"reflect"
 	"reflect"
 	"sort"
 	"sort"
 	"strconv"
 	"strconv"
@@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) {
 	// "bytes,49,opt,name=foo,def=hello!"
 	// "bytes,49,opt,name=foo,def=hello!"
 	fields := strings.Split(s, ",") // breaks def=, but handled below.
 	fields := strings.Split(s, ",") // breaks def=, but handled below.
 	if len(fields) < 2 {
 	if len(fields) < 2 {
-		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		log.Printf("proto: tag has too few fields: %q", s)
 		return
 	}
 
@@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) {
 		p.WireType = WireBytes
 		// no numeric converter for non-numeric types
 	default:
-		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		log.Printf("proto: tag has unknown wire type: %q", s)
 		return
 	}
 

+ 2 - 4
vendor/github.com/miekg/dns/LICENSE

@@ -1,7 +1,3 @@
-Extensions of the original work are copyright (c) 2011 Miek Gieben
-
-As this is fork of the official Go code the same license applies:
-
 Copyright (c) 2009 The Go Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -30,3 +26,5 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+As this is fork of the official Go code the same license applies.
+Extensions of the original work are copyright (c) 2011 Miek Gieben

+ 13 - 8
vendor/github.com/miekg/dns/acceptfunc.go

@@ -6,22 +6,30 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
 
 // DefaultMsgAcceptFunc checks the request and will reject if:
 //
-// * isn't a request (don't respond in that case).
+// * isn't a request (don't respond in that case)
+//
 // * opcode isn't OpcodeQuery or OpcodeNotify
+//
 // * Zero bit isn't zero
+//
 // * has more than 1 question in the question section
+//
 // * has more than 1 RR in the Answer section
+//
 // * has more than 0 RRs in the Authority section
+//
 // * has more than 2 RRs in the Additional section
+//
 var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
 
 // MsgAcceptAction represents the action to be taken.
 type MsgAcceptAction int
 
 const (
-	MsgAccept MsgAcceptAction = iota // Accept the message
-	MsgReject                        // Reject the message with a RcodeFormatError
-	MsgIgnore                        // Ignore the error and send nothing back.
+	MsgAccept               MsgAcceptAction = iota // Accept the message
+	MsgReject                                      // Reject the message with a RcodeFormatError
+	MsgIgnore                                      // Ignore the error and send nothing back.
+	MsgRejectNotImplemented                        // Reject the message with a RcodeNotImplemented
 )
 
 func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
@@ -32,12 +40,9 @@ func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
 	// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
 	opcode := int(dh.Bits>>11) & 0xF
 	if opcode != OpcodeQuery && opcode != OpcodeNotify {
-		return MsgReject
+		return MsgRejectNotImplemented
 	}
 
-	if isZero := dh.Bits&_Z != 0; isZero {
-		return MsgReject
-	}
 	if dh.Qdcount != 1 {
 		return MsgReject
 	}
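
Because DefaultMsgAcceptFunc is a variable, servers can swap in their own policy around the new MsgRejectNotImplemented action. A hedged sketch (the bit masks and the policy are illustrative, not the library's default):

	dns.DefaultMsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
		if isResponse := dh.Bits&(1<<15) != 0; isResponse {
			return dns.MsgIgnore // never answer a response
		}
		opcode := int(dh.Bits>>11) & 0xF
		if opcode != dns.OpcodeQuery && opcode != dns.OpcodeNotify {
			return dns.MsgRejectNotImplemented
		}
		return dns.MsgAccept
	}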

+ 36 - 42
vendor/github.com/miekg/dns/client.go

@@ -6,6 +6,7 @@ import (
 	"context"
 	"context"
 	"crypto/tls"
 	"crypto/tls"
 	"encoding/binary"
 	"encoding/binary"
+	"fmt"
 	"io"
 	"io"
 	"net"
 	"net"
 	"strings"
 	"strings"
@@ -128,20 +129,15 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
 		return c.exchange(m, address)
 	}
 
-	t := "nop"
-	if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
-		t = t1
-	}
-	cl := "nop"
-	if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
-		cl = cl1
-	}
-	r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
+	q := m.Question[0]
+	key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
+	r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
 		return c.exchange(m, address)
 	})
 	if r != nil && shared {
 		r = r.Copy()
 	}
+
 	return r, rtt, err
 }
 
@@ -219,22 +215,22 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
 		n   int
 		err error
 	)
-	switch co.Conn.(type) {
-	case *net.TCPConn, *tls.Conn:
-		var length uint16
-		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
-			return nil, err
-		}
 
-		p = make([]byte, length)
-		n, err = io.ReadFull(co.Conn, p)
-	default:
+	if _, ok := co.Conn.(net.PacketConn); ok {
 		if co.UDPSize > MinMsgSize {
 			p = make([]byte, co.UDPSize)
 		} else {
 			p = make([]byte, MinMsgSize)
 		}
 		n, err = co.Read(p)
+	} else {
+		var length uint16
+		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
+			return nil, err
+		}
+
+		p = make([]byte, length)
+		n, err = io.ReadFull(co.Conn, p)
 	}
 
 	if err != nil {
@@ -260,21 +256,20 @@ func (co *Conn) Read(p []byte) (n int, err error) {
 		return 0, ErrConnEmpty
 	}
 
-	switch co.Conn.(type) {
-	case *net.TCPConn, *tls.Conn:
-		var length uint16
-		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
-			return 0, err
-		}
-		if int(length) > len(p) {
-			return 0, io.ErrShortBuffer
-		}
+	if _, ok := co.Conn.(net.PacketConn); ok {
+		// UDP connection
+		return co.Conn.Read(p)
+	}
 
-		return io.ReadFull(co.Conn, p[:length])
+	var length uint16
+	if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
+		return 0, err
+	}
+	if int(length) > len(p) {
+		return 0, io.ErrShortBuffer
 	}
 
-	// UDP connection
-	return co.Conn.Read(p)
+	return io.ReadFull(co.Conn, p[:length])
 }
 
 // WriteMsg sends a message through the connection co.
@@ -301,21 +296,20 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
 }
 
 // Write implements the net.Conn Write method.
-func (co *Conn) Write(p []byte) (n int, err error) {
-	switch co.Conn.(type) {
-	case *net.TCPConn, *tls.Conn:
-		if len(p) > MaxMsgSize {
-			return 0, &Error{err: "message too large"}
-		}
-
-		l := make([]byte, 2)
-		binary.BigEndian.PutUint16(l, uint16(len(p)))
+func (co *Conn) Write(p []byte) (int, error) {
+	if len(p) > MaxMsgSize {
+		return 0, &Error{err: "message too large"}
+	}
 
-		n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
-		return int(n), err
+	if _, ok := co.Conn.(net.PacketConn); ok {
+		return co.Conn.Write(p)
 	}
 
-	return co.Conn.Write(p)
+	l := make([]byte, 2)
+	binary.BigEndian.PutUint16(l, uint16(len(p)))
+
+	n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
+	return int(n), err
 }
 
 // Return the appropriate timeout for a specific request

+ 2 - 2
vendor/github.com/miekg/dns/dns.go

@@ -54,7 +54,7 @@ type RR interface {
 	// parse parses an RR from zone file format.
 	//
 	// This will only be called on a new and empty RR type with only the header populated.
-	parse(c *zlexer, origin, file string) *ParseError
+	parse(c *zlexer, origin string) *ParseError
 
 	// isDuplicate returns whether the two RRs are duplicates.
 	isDuplicate(r2 RR) bool
@@ -105,7 +105,7 @@ func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
 	panic("dns: internal error: unpack should never be called on RR_Header")
 	panic("dns: internal error: unpack should never be called on RR_Header")
 }
 }
 
 
-func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError {
+func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
 	panic("dns: internal error: parse should never be called on RR_Header")
 	panic("dns: internal error: parse should never be called on RR_Header")
 }
 }
 
 

+ 5 - 2
vendor/github.com/miekg/dns/dnssec.go

@@ -141,8 +141,8 @@ func (k *DNSKEY) KeyTag() uint16 {
 	switch k.Algorithm {
 	case RSAMD5:
 		// Look at the bottom two bytes of the modulus, which is the last
-		// item in the pubkey. We could do this faster by looking directly
-		// at the base64 values. But I'm lazy.
+		// item in the pubkey.
+		// This algorithm has been deprecated, but keep this key-tag calculation.
 		modulus, _ := fromBase64([]byte(k.PublicKey))
 		if len(modulus) > 1 {
 			x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
@@ -318,6 +318,9 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
 		}
 
 		rr.Signature = toBase64(signature)
+	case RSAMD5, DSA, DSANSEC3SHA1:
+		// See RFC 6944.
+		return ErrAlg
 	default:
 		h := hash.New()
 		h.Write(signdata)

+ 4 - 42
vendor/github.com/miekg/dns/dnssec_keygen.go

@@ -2,7 +2,6 @@ package dns
 
 import (
 	"crypto"
-	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/elliptic"
 	"crypto/rand"
 	"crypto/rand"
@@ -20,11 +19,9 @@ import (
 // bits should be set to the size of the algorithm.
 func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
 	switch k.Algorithm {
-	case DSA, DSANSEC3SHA1:
-		if bits != 1024 {
-			return nil, ErrKeySize
-		}
-	case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
+	case RSAMD5, DSA, DSANSEC3SHA1:
+		return nil, ErrAlg
+	case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
 		if bits < 512 || bits > 4096 {
 			return nil, ErrKeySize
 		}
@@ -47,20 +44,7 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
 	}
 
 	switch k.Algorithm {
-	case DSA, DSANSEC3SHA1:
-		params := new(dsa.Parameters)
-		if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
-			return nil, err
-		}
-		priv := new(dsa.PrivateKey)
-		priv.PublicKey.Parameters = *params
-		err := dsa.GenerateKey(priv, rand.Reader)
-		if err != nil {
-			return nil, err
-		}
-		k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
-		return priv, nil
-	case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
+	case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
 		priv, err := rsa.GenerateKey(rand.Reader, bits)
 		if err != nil {
 			return nil, err
@@ -120,16 +104,6 @@ func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
 	return true
 }
 
-// Set the public key for DSA
-func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
-	if _Q == nil || _P == nil || _G == nil || _Y == nil {
-		return false
-	}
-	buf := dsaToBuf(_Q, _P, _G, _Y)
-	k.PublicKey = toBase64(buf)
-	return true
-}
-
 // Set the public key for Ed25519
 func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
 	if _K == nil {
@@ -164,15 +138,3 @@ func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
 	buf = append(buf, intToBytes(_Y, intlen)...)
 	return buf
 }
-
-// Set the public key for X and Y for Curve. The two
-// values are just concatenated.
-func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
-	t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
-	buf := []byte{byte(t)}
-	buf = append(buf, intToBytes(_Q, 20)...)
-	buf = append(buf, intToBytes(_P, 64+t*8)...)
-	buf = append(buf, intToBytes(_G, 64+t*8)...)
-	buf = append(buf, intToBytes(_Y, 64+t*8)...)
-	return buf
-}

+ 2 - 32
vendor/github.com/miekg/dns/dnssec_keyscan.go

@@ -3,7 +3,6 @@ package dns
 import (
 	"bufio"
 	"crypto"
-	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/ecdsa"
 	"crypto/rsa"
 	"crypto/rsa"
 	"io"
 	"io"
@@ -44,19 +43,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
 		return nil, ErrPrivKey
 	}
 	switch uint8(algo) {
-	case DSA:
-		priv, err := readPrivateKeyDSA(m)
-		if err != nil {
-			return nil, err
-		}
-		pub := k.publicKeyDSA()
-		if pub == nil {
-			return nil, ErrKey
-		}
-		priv.PublicKey = *pub
-		return priv, nil
-	case RSAMD5:
-		fallthrough
+	case RSAMD5, DSA, DSANSEC3SHA1:
+		return nil, ErrAlg
 	case RSASHA1:
 		fallthrough
 	case RSASHA1NSEC3SHA1:
@@ -129,24 +117,6 @@ func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
 	return p, nil
 }
 
-func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
-	p := new(dsa.PrivateKey)
-	p.X = new(big.Int)
-	for k, v := range m {
-		switch k {
-		case "private_value(x)":
-			v1, err := fromBase64([]byte(v))
-			if err != nil {
-				return nil, err
-			}
-			p.X.SetBytes(v1)
-		case "created", "publish", "activate":
-			/* not used in Go (yet) */
-		}
-	}
-	return p, nil
-}
-
 func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
 	p := new(ecdsa.PrivateKey)
 	p.D = new(big.Int)

+ 4 - 5
vendor/github.com/miekg/dns/doc.go

@@ -83,7 +83,7 @@ with:
 
 	in, err := dns.Exchange(m1, "127.0.0.1:53")
 
-When this functions returns you will get dns message. A dns message consists
+When this function returns you will get a DNS message. A DNS message consists
 out of four sections.
 The question section: in.Question, the answer section: in.Answer,
 the authority section: in.Ns and the additional section: in.Extra.
@@ -221,7 +221,7 @@ RFC 6895 sets aside a range of type codes for private use. This range is 65,280
 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
 can be used, before requesting an official type code from IANA.
 
-See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
+See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
 information.
 
 EDNS0
@@ -238,9 +238,8 @@ Basic use pattern for creating an (empty) OPT RR:
 
 The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces.
 Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
-EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options
-may be combined in an OPT RR. Basic use pattern for a server to check if (and
-which) options are set:
+EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR.
+Basic use pattern for a server to check if (and which) options are set:
 
 	// o is a dns.OPT
 	for _, s := range o.Option {

+ 1 - 1
vendor/github.com/miekg/dns/duplicate.go

@@ -5,7 +5,7 @@ package dns
 // IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
 // So this means the header data is equal *and* the RDATA is the same. Return true
 // if so, otherwise false.
-// It's is a protocol violation to have identical RRs in a message.
+// It's a protocol violation to have identical RRs in a message.
 func IsDuplicate(r1, r2 RR) bool {
 	// Check whether the record header is identical.
 	if !r1.Header().isDuplicate(r2.Header()) {

+ 20 - 8
vendor/github.com/miekg/dns/edns.go

@@ -88,7 +88,7 @@ func (rr *OPT) len(off int, compression map[string]struct{}) int {
 	return l
 }
 
-func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError {
+func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
 	panic("dns: internal error: parse should never be called on OPT")
 	panic("dns: internal error: parse should never be called on OPT")
 }
 }
 
 
@@ -360,7 +360,7 @@ func (e *EDNS0_COOKIE) copy() EDNS0           { return &EDNS0_COOKIE{e.Code, e.C
 // The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
 // an expiration on an update RR. This is helpful for clients that cannot clean
 // up after themselves. This is a draft RFC and more information can be found at
-// http://files.dns-sd.org/draft-sekar-dns-ul.txt
+// https://tools.ietf.org/html/draft-sekar-dns-ul-02
 //
 //	o := new(dns.OPT)
 //	o.Hdr.Name = "."
@@ -370,24 +370,36 @@ func (e *EDNS0_COOKIE) copy() EDNS0           { return &EDNS0_COOKIE{e.Code, e.C
 //	e.Lease = 120 // in seconds
 //	o.Option = append(o.Option, e)
 type EDNS0_UL struct {
-	Code  uint16 // Always EDNS0UL
-	Lease uint32
+	Code     uint16 // Always EDNS0UL
+	Lease    uint32
+	KeyLease uint32
 }
 
 // Option implements the EDNS0 interface.
 func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
-func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
-func (e *EDNS0_UL) copy() EDNS0    { return &EDNS0_UL{e.Code, e.Lease} }
+func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) }
+func (e *EDNS0_UL) copy() EDNS0    { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} }
 
 // Copied: http://golang.org/src/pkg/net/dnsmsg.go
 func (e *EDNS0_UL) pack() ([]byte, error) {
-	b := make([]byte, 4)
+	var b []byte
+	if e.KeyLease == 0 {
+		b = make([]byte, 4)
+	} else {
+		b = make([]byte, 8)
+		binary.BigEndian.PutUint32(b[4:], e.KeyLease)
+	}
 	binary.BigEndian.PutUint32(b, e.Lease)
 	return b, nil
 }
 
 func (e *EDNS0_UL) unpack(b []byte) error {
-	if len(b) < 4 {
+	switch len(b) {
+	case 4:
+		e.KeyLease = 0
+	case 8:
+		e.KeyLease = binary.BigEndian.Uint32(b[4:])
+	default:
 		return ErrBuf
 	}
 	e.Lease = binary.BigEndian.Uint32(b)
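
A hedged sketch of attaching the extended lease option to a message (values illustrative; KeyLease is the field added in this hunk):

	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	e := &dns.EDNS0_UL{Code: dns.EDNS0UL, Lease: 120, KeyLease: 3600}
	o.Option = append(o.Option, e)
	// With KeyLease set, e packs to 8 octets; with KeyLease == 0 it stays at 4.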

+ 10 - 1
vendor/github.com/miekg/dns/fuzz.go

@@ -2,6 +2,8 @@
 
 package dns
 
+import "strings"
+
 func Fuzz(data []byte) int {
 	msg := new(Msg)
 
@@ -16,7 +18,14 @@ func Fuzz(data []byte) int {
 }
 
 func FuzzNewRR(data []byte) int {
-	if _, err := NewRR(string(data)); err != nil {
+	str := string(data)
+	// Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer
+	// at avoiding them.
+	// See GH#1025 for context.
+	if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
+		return -1
+	}
+	if _, err := NewRR(str); err != nil {
 		return 0
 	}
 	return 1

+ 7 - 2
vendor/github.com/miekg/dns/generate.go

@@ -49,11 +49,15 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
 	if err != nil {
 		return zp.setParseError("bad stop in $GENERATE range", l)
 	}
-	if end < 0 || start < 0 || end < start {
+	if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
 		return zp.setParseError("bad range in $GENERATE range", l)
 		return zp.setParseError("bad range in $GENERATE range", l)
 	}
 	}
 
 
-	zp.c.Next() // _BLANK
+	// _BLANK
+	l, ok := zp.c.Next()
+	if !ok || l.value != zBlank {
+		return zp.setParseError("garbage after $GENERATE range", l)
+	}
 
 	// Create a complete new string, which we then parse again.
 	var s string
@@ -81,6 +85,7 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
 	}
 	zp.sub = NewZoneParser(r, zp.origin, zp.file)
 	zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
+	zp.sub.generateDisallowed = true
 	zp.sub.SetDefaultTTL(defaultTtl)
 	return zp.subNext()
 }
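
A hedged sketch of driving a $GENERATE directive through the public parser (the zone content is illustrative; nested $GENERATE and over-wide ranges now fail as shown above):

	zp := dns.NewZoneParser(strings.NewReader("$GENERATE 1-3 host-$ A 10.0.0.$\n"), "example.org.", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host-1.example.org. ... IN A 10.0.0.1, and so on
	}
	if err := zp.Err(); err != nil {
		// a range of more than 65535 steps is rejected with a parse error
	}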

+ 42 - 18
vendor/github.com/miekg/dns/labels.go

@@ -126,20 +126,23 @@ func Split(s string) []int {
 // The bool end is true when the end of the string has been reached.
 // Also see PrevLabel.
 func NextLabel(s string, offset int) (i int, end bool) {
-	quote := false
+	if s == "" {
+		return 0, true
+	}
 	for i = offset; i < len(s)-1; i++ {
-		switch s[i] {
-		case '\\':
-			quote = !quote
-		default:
-			quote = false
-		case '.':
-			if quote {
-				quote = !quote
-				continue
-			}
-			return i + 1, false
+		if s[i] != '.' {
+			continue
+		}
+		j := i - 1
+		for j >= 0 && s[j] == '\\' {
+			j--
+		}
+
+		if (j-i)%2 == 0 {
+			continue
 		}
+
+		return i + 1, false
 	}
 	return i + 1, true
 }
@@ -149,17 +152,38 @@ func NextLabel(s string, offset int) (i int, end bool) {
 // The bool start is true when the start of the string has been overshot.
 // Also see NextLabel.
 func PrevLabel(s string, n int) (i int, start bool) {
+	if s == "" {
+		return 0, true
+	}
 	if n == 0 {
 		return len(s), false
 	}
-	lab := Split(s)
-	if lab == nil {
-		return 0, true
+
+	l := len(s) - 1
+	if s[l] == '.' {
+		l--
 	}
-	if n > len(lab) {
-		return 0, true
+
+	for ; l >= 0 && n > 0; l-- {
+		if s[l] != '.' {
+			continue
+		}
+		j := l - 1
+		for j >= 0 && s[j] == '\\' {
+			j--
+		}
+
+		if (j-l)%2 == 0 {
+			continue
+		}
+
+		n--
+		if n == 0 {
+			return l + 1, false
+		}
 	}
-	return lab[len(lab)-n], false
+
+	return 0, n > 1
 }
 
 // equal compares a and b while ignoring case. It returns true when equal otherwise false.
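
A hedged sketch of the two rewritten helpers on a concrete name (indices worked out by hand; escaped dots are exactly the case the backslash counting handles):

	i, end := dns.NextLabel("www.example.org.", 0)
	// i == 4: the label starting at "example", end == false
	j, start := dns.PrevLabel("www.example.org.", 1)
	// j == 12: the label starting at "org", start == false
	_, _, _, _ = i, end, j, start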

+ 10 - 42
vendor/github.com/miekg/dns/msg.go

@@ -11,14 +11,12 @@ package dns
 //go:generate go run msg_generate.go
 
 import (
-	crand "crypto/rand"
+	"crypto/rand"
 	"encoding/binary"
 	"encoding/binary"
 	"fmt"
 	"fmt"
 	"math/big"
 	"math/big"
-	"math/rand"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
-	"sync"
 )
 
 const (
@@ -73,53 +71,23 @@ var (
 	ErrTime          error = &Error{err: "bad time"}      // ErrTime indicates a timing error in TSIG authentication.
 )
 
-// Id by default, returns a 16 bits random number to be used as a
-// message id. The random provided should be good enough. This being a
-// variable the function can be reassigned to a custom function.
-// For instance, to make it return a static value:
+// Id by default returns a 16-bit random number to be used as a message id. The
+// number is drawn from a cryptographically secure random number generator.
+// This being a variable the function can be reassigned to a custom function.
+// For instance, to make it return a static value for testing:
 //
 //	dns.Id = func() uint16 { return 3 }
 var Id = id
 
-var (
-	idLock sync.Mutex
-	idRand *rand.Rand
-)
-
 // id returns a 16 bits random number to be used as a
 // message id. The random provided should be good enough.
 func id() uint16 {
-	idLock.Lock()
-
-	if idRand == nil {
-		// This (partially) works around
-		// https://github.com/golang/go/issues/11833 by only
-		// seeding idRand upon the first call to id.
-
-		var seed int64
-		var buf [8]byte
-
-		if _, err := crand.Read(buf[:]); err == nil {
-			seed = int64(binary.LittleEndian.Uint64(buf[:]))
-		} else {
-			seed = rand.Int63()
-		}
-
-		idRand = rand.New(rand.NewSource(seed))
+	var output uint16
+	err := binary.Read(rand.Reader, binary.BigEndian, &output)
+	if err != nil {
+		panic("dns: reading random id failed: " + err.Error())
 	}
-
-	// The call to idRand.Uint32 must be within the
-	// mutex lock because *rand.Rand is not safe for
-	// concurrent use.
-	//
-	// There is no added performance overhead to calling
-	// idRand.Uint32 inside a mutex lock over just
-	// calling rand.Uint32 as the global math/rand rng
-	// is internally protected by a sync.Mutex.
-	id := uint16(idRand.Uint32())
-
-	idLock.Unlock()
-	return id
+	return output
 }
 
 // MsgHdr is a manually-unpacked version of (id, bits).

+ 50 - 8
vendor/github.com/miekg/dns/msg_helpers.go

@@ -265,24 +265,36 @@ func unpackString(msg []byte, off int) (string, int, error) {
 		return "", off, &Error{err: "overflow unpacking txt"}
 		return "", off, &Error{err: "overflow unpacking txt"}
 	}
 	}
 	l := int(msg[off])
 	l := int(msg[off])
-	if off+l+1 > len(msg) {
+	off++
+	if off+l > len(msg) {
 		return "", off, &Error{err: "overflow unpacking txt"}
 		return "", off, &Error{err: "overflow unpacking txt"}
 	}
 	}
 	var s strings.Builder
 	var s strings.Builder
-	s.Grow(l)
-	for _, b := range msg[off+1 : off+1+l] {
+	consumed := 0
+	for i, b := range msg[off : off+l] {
 		switch {
 		case b == '"' || b == '\\':
+			if consumed == 0 {
+				s.Grow(l * 2)
+			}
+			s.Write(msg[off+consumed : off+i])
 			s.WriteByte('\\')
 			s.WriteByte(b)
+			consumed = i + 1
 		case b < ' ' || b > '~': // unprintable
 		case b < ' ' || b > '~': // unprintable
+			if consumed == 0 {
+				s.Grow(l * 2)
+			}
+			s.Write(msg[off+consumed : off+i])
 			s.WriteString(escapeByte(b))
-		default:
-			s.WriteByte(b)
+			consumed = i + 1
 		}
 	}
-	off += 1 + l
-	return s.String(), off, nil
+	if consumed == 0 { // no escaping needed
+		return string(msg[off : off+l]), off + l, nil
+	}
+	s.Write(msg[off+consumed : off+l])
+	return s.String(), off + l, nil
 }
 
 func packString(s string, msg []byte, off int) (int, error) {
@@ -433,6 +445,13 @@ Option:
 		}
 		edns = append(edns, e)
 		off += int(optlen)
+	case EDNS0EXPIRE:
+		e := new(EDNS0_EXPIRE)
+		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+			return nil, len(msg), err
+		}
+		edns = append(edns, e)
+		off += int(optlen)
 	case EDNS0UL:
 		e := new(EDNS0_UL)
 		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
@@ -495,7 +514,7 @@ Option:
 func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
 	for _, el := range options {
 		b, err := el.pack()
-		if err != nil || off+3 > len(msg) {
+		if err != nil || off+4 > len(msg) {
 			return len(msg), &Error{err: "overflow packing opt"}
 			return len(msg), &Error{err: "overflow packing opt"}
 		}
 		}
 		binary.BigEndian.PutUint16(msg[off:], el.Option())      // Option code
 		binary.BigEndian.PutUint16(msg[off:], el.Option())      // Option code
@@ -587,6 +606,29 @@ func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
 	return nsec, off, nil
 }
 
+// typeBitMapLen is a helper function which computes the "maximum" length of
+// the NSEC Type BitMap field.
+func typeBitMapLen(bitmap []uint16) int {
+	var l int
+	var lastwindow, lastlength uint16
+	for _, t := range bitmap {
+		window := t / 256
+		length := (t-window*256)/8 + 1
+		if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
+			l += int(lastlength) + 2
+			lastlength = 0
+		}
+		if window < lastwindow || length < lastlength {
+			// packDataNsec would return Error{err: "nsec bits out of order"} here, but
+			// when computing the length, we want to be liberal.
+			continue
+		}
+		lastwindow, lastlength = window, length
+	}
+	l += int(lastlength) + 2
+	return l
+}
+
 func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
 func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
 	if len(bitmap) == 0 {
 	if len(bitmap) == 0 {
 		return off, nil
 		return off, nil

+ 11 - 6
vendor/github.com/miekg/dns/msg_truncate.go

@@ -4,12 +4,17 @@ package dns
 // size by removing records that exceed the requested size.
 // size by removing records that exceed the requested size.
 //
 // It will first check if the reply fits without compression and then with
+// compression. If it won't fit with compression, Truncate then walks the
 // record adding as many records as possible without exceeding the
 // record adding as many records as possible without exceeding the
 // requested buffer size.
 //
-// message. This indicates to that the client should retry over TCP.
+// The TC bit will be set if any records were excluded from the message.
+// This indicates to that the client should retry over TCP.
+//
+// According to RFC 2181, the TC bit should only be set if not all of the
+// "required" RRs can be included in the response. Unfortunately, we have
+// no way of knowing which RRs are required so we set the TC bit if any RR
+// had to be omitted from the response.
 //
 //
 // The appropriate buffer size can be retrieved from the requests OPT
 // record, if present, and is transport specific otherwise. dns.MinMsgSize
 		l, numExtra = truncateLoop(dns.Extra, size, l, compression)
 		l, numExtra = truncateLoop(dns.Extra, size, l, compression)
 	}
 
-	// of the answer RRs can be included in the response.
-	dns.Truncated = len(dns.Answer) > numAnswer
+	// See the function documentation for when we set this.
+	dns.Truncated = len(dns.Answer) > numAnswer ||
+		len(dns.Ns) > numNS || len(dns.Extra) > numExtra
 
 
 	dns.Answer = dns.Answer[:numAnswer]
 	dns.Answer = dns.Answer[:numAnswer]
 	dns.Ns = dns.Ns[:numNS]
 	dns.Ns = dns.Ns[:numNS]

+ 10 - 28
vendor/github.com/miekg/dns/privaterr.go

@@ -1,9 +1,6 @@
 package dns
 package dns
 
-	"fmt"
-	"strings"
-)
+import "strings"
 
 
 // PrivateRdata is an interface used for implementing "Private Use" RR types, see
 // PrivateRdata is an interface used for implementing "Private Use" RR types, see
 // RFC 6895. This allows one to experiment with new RR types, without requesting an
 // RFC 6895. This allows one to experiment with new RR types, without requesting an
@@ -18,7 +15,7 @@ type PrivateRdata interface {
 	// Unpack is used when unpacking a private RR from a buffer.
 	// TODO(miek): diff. signature than Pack, see edns0.go for instance.
 	Unpack([]byte) (int, error)
-	// Copy copies the Rdata.
+	// Copy copies the Rdata into the PrivateRdata argument.
 	Copy(PrivateRdata) error
 	// Len returns the length in octets of the Rdata.
 	Len() int
@@ -29,22 +26,8 @@ type PrivateRdata interface {
 type PrivateRR struct {
 	Hdr  RR_Header
 	Data PrivateRdata
-}
-
-func mkPrivateRR(rrtype uint16) *PrivateRR {
-	// Panics if RR is not an instance of PrivateRR.
-	rrfunc, ok := TypeToRR[rrtype]
-	if !ok {
-		panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
-	}
-
-	anyrr := rrfunc()
-	rr, ok := anyrr.(*PrivateRR)
-	if !ok {
-		panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
-	}
 
-	return rr
+	generator func() PrivateRdata // for copy
 }
 
 // Header returns the RR header of r.
@@ -61,13 +44,12 @@ func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
 
 func (r *PrivateRR) copy() RR {
 	// make new RR like this:
-	rr := mkPrivateRR(r.Hdr.Rrtype)
-	rr.Hdr = r.Hdr
+	rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
 
 
-	err := r.Data.Copy(rr.Data)
-	if err != nil {
-		panic("dns: got value that could not be used to copy Private rdata")
+	if err := r.Data.Copy(rr.Data); err != nil {
+		panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
 	}
+
 	return rr
 }
 
@@ -86,7 +68,7 @@ func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
 	return off, err
 }
 
-func (r *PrivateRR) parse(c *zlexer, origin, file string) *ParseError {
+func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
 	var l lex
 	text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
 Fetch:
@@ -103,7 +85,7 @@ Fetch:
 
 	err := r.Data.Parse(text)
 	if err != nil {
-		return &ParseError{file, err.Error(), l}
+		return &ParseError{"", err.Error(), l}
 	}
 
 	return nil
@@ -116,7 +98,7 @@ func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
 func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
 	rtypestr = strings.ToUpper(rtypestr)
 
-	TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
+	TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
 	TypeToString[rtype] = rtypestr
 	StringToType[rtypestr] = rtype
 }
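
A hedged sketch of registering a private type against the reworked, generator-based PrivateRR (the TXTDATA type and the code 0xFF00 are illustrative, drawn from the RFC 6895 private range):

	type TXTDATA struct{ Note string }

	func (d *TXTDATA) String() string           { return d.Note }
	func (d *TXTDATA) Parse(txt []string) error { d.Note = strings.Join(txt, " "); return nil }
	func (d *TXTDATA) Pack(buf []byte) (int, error)   { return copy(buf, d.Note), nil }
	func (d *TXTDATA) Unpack(buf []byte) (int, error) { d.Note = string(buf); return len(buf), nil }
	func (d *TXTDATA) Len() int { return len(d.Note) }

	func (d *TXTDATA) Copy(dest dns.PrivateRdata) error {
		dd, ok := dest.(*TXTDATA)
		if !ok {
			return errors.New("dns: bad destination type")
		}
		dd.Note = d.Note
		return nil
	}

	func init() {
		// The generator is retained on each PrivateRR, so copy() no longer
		// needs the TypeToRR lookup that the removed mkPrivateRR performed.
		dns.PrivateHandle("TXTDATA", 0xFF00, func() dns.PrivateRdata { return new(TXTDATA) })
	}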

+ 95 - 24
vendor/github.com/miekg/dns/scan.go

@@ -134,7 +134,7 @@ func ReadRR(r io.Reader, file string) (RR, error) {
 }
 
 // ParseZone reads a RFC 1035 style zonefile from r. It returns
-// *Tokens on the returned channel, each consisting of either a
+// Tokens on the returned channel, each consisting of either a
 // parsed RR and optional comment or a nil RR and an error. The
 // channel is closed by ParseZone when the end of r is reached.
 //
@@ -143,7 +143,8 @@ func ReadRR(r io.Reader, file string) (RR, error) {
 // origin, as if the file would start with an $ORIGIN directive.
 //
 // The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
-// supported.
+// supported. Note that $GENERATE's range support up to a maximum of
+// of 65535 steps.
 //
 //
 // Basic usage pattern when reading from a string (z) containing the
 // Basic usage pattern when reading from a string (z) containing the
 // zone data:
 // zone data:
@@ -203,6 +204,7 @@ func parseZone(r io.Reader, origin, file string, t chan *Token) {
 //
 // The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
 // supported. Although $INCLUDE is disabled by default.
+// Note that $GENERATE's range supports a maximum of 65535 steps.
 //
 // Basic usage pattern when reading from a string (z) containing the
 // zone data:
@@ -246,6 +248,7 @@ type ZoneParser struct {
 	includeDepth uint8

 	includeAllowed bool
+	generateDisallowed bool
 }

 // NewZoneParser returns an RFC 1035 style zonefile parser that reads
@@ -503,9 +506,8 @@ func (zp *ZoneParser) Next() (RR, bool) {
 				return zp.setParseError("expecting $TTL value, not this...", l)
 				return zp.setParseError("expecting $TTL value, not this...", l)
 			}
 			}
 
 
-			if e := slurpRemainder(zp.c, zp.file); e != nil {
-				zp.parseErr = e
-				return nil, false
+			if err := slurpRemainder(zp.c); err != nil {
+				return zp.setParseError(err.err, err.lex)
 			}
 			}
 
 
 			ttl, ok := stringToTTL(l.token)
 			ttl, ok := stringToTTL(l.token)
@@ -527,9 +529,8 @@ func (zp *ZoneParser) Next() (RR, bool) {
 				return zp.setParseError("expecting $ORIGIN value, not this...", l)
 				return zp.setParseError("expecting $ORIGIN value, not this...", l)
 			}
 			}
 
 
-			if e := slurpRemainder(zp.c, zp.file); e != nil {
-				zp.parseErr = e
-				return nil, false
+			if err := slurpRemainder(zp.c); err != nil {
+				return zp.setParseError(err.err, err.lex)
 			}
 			}
 
 
 			name, ok := toAbsoluteName(l.token, zp.origin)
 			name, ok := toAbsoluteName(l.token, zp.origin)
@@ -547,6 +548,9 @@ func (zp *ZoneParser) Next() (RR, bool) {

 			st = zExpectDirGenerate
 		case zExpectDirGenerate:
+			if zp.generateDisallowed {
+				return zp.setParseError("nested $GENERATE directive not allowed", l)
+			}
 			if l.value != zString {
 				return zp.setParseError("expecting $GENERATE value, not this...", l)
 			}
@@ -650,19 +654,44 @@ func (zp *ZoneParser) Next() (RR, bool) {

 			st = zExpectRdata
 		case zExpectRdata:
-			r, e := setRR(*h, zp.c, zp.origin, zp.file)
-			if e != nil {
-				// If e.lex is nil than we have encounter a unknown RR type
-				// in that case we substitute our current lex token
-				if e.lex.token == "" && e.lex.value == 0 {
-					e.lex = l // Uh, dirty
+			var rr RR
+			if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
+				rr = newFn()
+				*rr.Header() = *h
+			} else {
+				rr = &RFC3597{Hdr: *h}
+			}
+
+			_, isPrivate := rr.(*PrivateRR)
+			if !isPrivate && zp.c.Peek().token == "" {
+				// This is a dynamic update rr.
+
+				// TODO(tmthrgd): Previously slurpRemainder was only called
+				// for certain RR types, which may have been important.
+				if err := slurpRemainder(zp.c); err != nil {
+					return zp.setParseError(err.err, err.lex)
+				}
+
+				return rr, true
+			} else if l.value == zNewline {
+				return zp.setParseError("unexpected newline", l)
+			}
+
+			if err := rr.parse(zp.c, zp.origin); err != nil {
+				// err is a concrete *ParseError without the file field set.
+				// The setParseError call below will construct a new
+				// *ParseError with file set to zp.file.

+				// If err.lex is nil then we have encountered an unknown RR
+				// type; in that case we substitute our current lex token.
+				if err.lex == (lex{}) {
+					return zp.setParseError(err.err, l)
 				}

-				zp.parseErr = e
-				return nil, false
+				return zp.setParseError(err.err, err.lex)
 			}

-			return r, true
+			return rr, true
 		}
 	}

@@ -671,6 +700,18 @@ func (zp *ZoneParser) Next() (RR, bool) {
 	return nil, false
 }

+// canParseAsRR returns true if the record type can be parsed as a
+// concrete RR. It blacklists certain record types that must be parsed
+// according to RFC 3597 because they lack a presentation format.
+func canParseAsRR(rrtype uint16) bool {
+	switch rrtype {
+	case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
+		return false
+	default:
+		return true
+	}
+}
+
 type zlexer struct {
 	br io.ByteReader

@@ -682,7 +723,8 @@ type zlexer struct {
 	comBuf  string
 	comment string

-	l lex
+	l       lex
+	cachedL *lex

 	brace  int
 	quote  bool
@@ -748,13 +790,37 @@ func (zl *zlexer) readByte() (byte, bool) {
 	return c, true
 }

+func (zl *zlexer) Peek() lex {
+	if zl.nextL {
+		return zl.l
+	}
+
+	l, ok := zl.Next()
+	if !ok {
+		return l
+	}
+
+	if zl.nextL {
+		// Cache l. Next returns zl.cachedL then zl.l.
+		zl.cachedL = &l
+	} else {
+		// In this case l == zl.l, so we just tell Next to return zl.l.
+		zl.nextL = true
+	}
+
+	return l
+}
+
 func (zl *zlexer) Next() (lex, bool) {
 	l := &zl.l
-	if zl.nextL {
+	switch {
+	case zl.cachedL != nil:
+		l, zl.cachedL = zl.cachedL, nil
+		return *l, true
+	case zl.nextL:
 		zl.nextL = false
 		return *l, true
-	}
-	if l.err {
+	case l.err:
 		// Parsing errors should be sticky.
 		return lex{value: zEOF}, false
 	}
@@ -908,6 +974,11 @@ func (zl *zlexer) Next() (lex, bool) {
 				// was inside braces and we delayed adding it until now.
 				com[comi] = ' ' // convert newline to space
 				comi++
+				if comi >= len(com) {
+					l.token = "comment length insufficient for parsing"
+					l.err = true
+					return *l, true
+				}
 			}

 			com[comi] = ';'
@@ -1302,18 +1373,18 @@ func locCheckEast(token string, longitude uint32) (uint32, bool) {
 }

 // "Eat" the rest of the "line"
-func slurpRemainder(c *zlexer, f string) *ParseError {
+func slurpRemainder(c *zlexer) *ParseError {
 	l, _ := c.Next()
 	switch l.value {
 	case zBlank:
 		l, _ = c.Next()
 		if l.value != zNewline && l.value != zEOF {
-			return &ParseError{f, "garbage after rdata", l}
+			return &ParseError{"", "garbage after rdata", l}
 		}
 	case zNewline:
 	case zEOF:
 	default:
-		return &ParseError{f, "garbage after rdata", l}
+		return &ParseError{"", "garbage after rdata", l}
 	}
 	return nil
 }

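The parser changes above are invisible to callers; the documented ZoneParser loop still applies. A short sketch (the zone literal is hypothetical), which also exercises the new Peek-driven detection of dynamic update RRs that carry no rdata:

	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr)
	}
	if err := zp.Err(); err != nil {
		log.Fatal(err) // a *ParseError; the parser fills in the file name
	}
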
File diff suppressed because it is too large
+ 143 - 301
vendor/github.com/miekg/dns/scan_rr.go


+ 2 - 26
vendor/github.com/miekg/dns/serve_mux.go

@@ -36,33 +36,9 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
 		return nil
 	}

-	var handler Handler
-
-	// TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
-	// lands in a go release, replace the following with strings.ToLower.
-	var sb strings.Builder
-	for i := 0; i < len(q); i++ {
-		c := q[i]
-		if !(c >= 'A' && c <= 'Z') {
-			continue
-		}
-
-		sb.Grow(len(q))
-		sb.WriteString(q[:i])
-
-		for ; i < len(q); i++ {
-			c := q[i]
-			if c >= 'A' && c <= 'Z' {
-				c += 'a' - 'A'
-			}
-
-			sb.WriteByte(c)
-		}
-
-		q = sb.String()
-		break
-	}
+	q = strings.ToLower(q)

+	var handler Handler
 	for off, end := 0, false; !end; off, end = NextLabel(q, off) {
 		if h, ok := mux.z[q[off:]]; ok {
 			if t != TypeDS {

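With the hand-rolled lowercasing gone, match leans on strings.ToLower, which gained an ASCII fast path (presumably the Go CL the removed TODO was waiting on) and so still avoids allocating when the name is already lower case. Matching stays case-insensitive either way; a sketch with a hypothetical handler:

	mux := dns.NewServeMux()
	mux.HandleFunc("example.org.", handleExample) // handleExample is hypothetical
	// Queries for EXAMPLE.ORG. and example.org. now reach the same handler.
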
+ 11 - 5
vendor/github.com/miekg/dns/server.go

@@ -560,26 +560,32 @@ func (srv *Server) serveDNS(m []byte, w *response) {
 	req := new(Msg)
 	req.setHdr(dh)

-	switch srv.MsgAcceptFunc(dh) {
+	switch action := srv.MsgAcceptFunc(dh); action {
 	case MsgAccept:
 		if req.unpack(dh, m, off) == nil {
 			break
 		}

 		fallthrough
-	case MsgReject:
+	case MsgReject, MsgRejectNotImplemented:
+		opcode := req.Opcode
 		req.SetRcodeFormatError(req)
+		req.Zero = false
+		if action == MsgRejectNotImplemented {
+			req.Opcode = opcode
+			req.Rcode = RcodeNotImplemented
+		}
+
 		// Are we allowed to delete any OPT records here?
 		req.Ns, req.Answer, req.Extra = nil, nil, nil

 		w.WriteMsg(req)
-
+		fallthrough
+	case MsgIgnore:
 		if w.udp != nil && cap(m) == srv.UDPSize {
 			srv.udpPool.Put(m[:srv.UDPSize])
 		}

-		return
-	case MsgIgnore:
 		return
 	}


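The new MsgRejectNotImplemented action answers with NOTIMP while preserving the request's opcode, where plain MsgReject answers FORMERR. A hedged sketch of a custom accept function using it; the shift-and-mask opcode extraction mirrors what the package's default accept function does:

	srv := &dns.Server{Addr: ":8053", Net: "udp"}
	srv.MsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
		if opcode := int(dh.Bits>>11) & 0xF; opcode != dns.OpcodeQuery {
			return dns.MsgRejectNotImplemented // reply NOTIMP, keep the opcode
		}
		return dns.MsgAccept
	}
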
+ 2 - 2
vendor/github.com/miekg/dns/tsig.go

@@ -40,7 +40,7 @@ type TSIG struct {
 // TSIG has no official presentation format, but this will suffice.

 func (rr *TSIG) String() string {
-	s := "\n;; TSIG PSEUDOSECTION:\n"
+	s := "\n;; TSIG PSEUDOSECTION:\n; " // add another semi-colon to signify TSIG does not have a presentation format
 	s += rr.Hdr.String() +
 		" " + rr.Algorithm +
 		" " + tsigTimeToString(rr.TimeSigned) +

@@ -54,7 +54,7 @@ func (rr *TSIG) String() string {
 	return s
 }

-func (rr *TSIG) parse(c *zlexer, origin, file string) *ParseError {
+func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
 	panic("dns: internal error: parse should never be called on TSIG")
 }
 
 

+ 42 - 44
vendor/github.com/miekg/dns/types.go

@@ -238,7 +238,7 @@ type ANY struct {

 func (rr *ANY) String() string { return rr.Hdr.String() }

-func (rr *ANY) parse(c *zlexer, origin, file string) *ParseError {
+func (rr *ANY) parse(c *zlexer, origin string) *ParseError {
 	panic("dns: internal error: parse should never be called on ANY")
 }

@@ -253,7 +253,7 @@ func (rr *NULL) String() string {
 	return ";" + rr.Hdr.String() + rr.Data
 }

-func (rr *NULL) parse(c *zlexer, origin, file string) *ParseError {
+func (rr *NULL) parse(c *zlexer, origin string) *ParseError {
 	panic("dns: internal error: parse should never be called on NULL")
 }

@@ -438,25 +438,54 @@ func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }

 func sprintName(s string) string {
 	var dst strings.Builder
-	dst.Grow(len(s))
+
 	for i := 0; i < len(s); {
 		if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
-			dst.WriteString(s[i : i+2])
+			if dst.Len() != 0 {
+				dst.WriteString(s[i : i+2])
+			}
 			i += 2
 			continue
 		}

 		b, n := nextByte(s, i)
-		switch {
-		case n == 0:
-			i++ // dangling back slash
-		case b == '.':
-			dst.WriteByte('.')
+		if n == 0 {
+			i++
+			continue
+		}
+		if b == '.' {
+			if dst.Len() != 0 {
+				dst.WriteByte('.')
+			}
+			i += n
+			continue
+		}
+		switch b {
+		case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape
+			if dst.Len() == 0 {
+				dst.Grow(len(s) * 2)
+				dst.WriteString(s[:i])
+			}
+			dst.WriteByte('\\')
+			dst.WriteByte(b)
 		default:
-			writeDomainNameByte(&dst, b)
+			if ' ' <= b && b <= '~' {
+				if dst.Len() != 0 {
+					dst.WriteByte(b)
+				}
+			} else {
+				if dst.Len() == 0 {
+					dst.Grow(len(s) * 2)
+					dst.WriteString(s[:i])
+				}
+				dst.WriteString(escapeByte(b))
+			}
 		}
 		i += n
 	}
+	if dst.Len() == 0 {
+		return s
+	}
 	return dst.String()
 }

@@ -510,16 +539,6 @@ func sprintTxt(txt []string) string {
 	return out.String()
 }

-func writeDomainNameByte(s *strings.Builder, b byte) {
-	switch b {
-	case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
-		s.WriteByte('\\')
-		s.WriteByte(b)
-	default:
-		writeTXTStringByte(s, b)
-	}
-}
-
 func writeTXTStringByte(s *strings.Builder, b byte) {
 	switch {
 	case b == '"' || b == '\\':
@@ -854,14 +873,7 @@ func (rr *NSEC) String() string {
 func (rr *NSEC) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
 	l += domainNameLen(rr.NextDomain, off+l, compression, false)
-	lastwindow := uint32(2 ^ 32 + 1)
-	for _, t := range rr.TypeBitMap {
-		window := t / 256
-		if uint32(window) != lastwindow {
-			l += 1 + 32
-		}
-		lastwindow = uint32(window)
-	}
+	l += typeBitMapLen(rr.TypeBitMap)
 	return l
 }

@@ -1020,14 +1032,7 @@ func (rr *NSEC3) String() string {
 func (rr *NSEC3) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
 	l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
-	lastwindow := uint32(2 ^ 32 + 1)
-	for _, t := range rr.TypeBitMap {
-		window := t / 256
-		if uint32(window) != lastwindow {
-			l += 1 + 32
-		}
-		lastwindow = uint32(window)
-	}
+	l += typeBitMapLen(rr.TypeBitMap)
 	return l
 }

@@ -1344,14 +1349,7 @@ func (rr *CSYNC) String() string {
 func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
 	l += 4 + 2
-	lastwindow := uint32(2 ^ 32 + 1)
-	for _, t := range rr.TypeBitMap {
-		window := t / 256
-		if uint32(window) != lastwindow {
-			l += 1 + 32
-		}
-		lastwindow = uint32(window)
-	}
+	l += typeBitMapLen(rr.TypeBitMap)
 	return l
 }


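All three len methods now defer to a shared typeBitMapLen helper that this view does not show. Two things worth noting: the removed sentinel uint32(2 ^ 32 + 1) was a bitwise XOR in Go (its value is just 35, not 2^32 + 1), so it could in principle have collided with a genuine first window; and the 1 + 32 per-window cost is a worst-case bound. A plausible shape for the consolidated helper, offered as a sketch rather than the actual definition:

	func typeBitMapLen(bitmap []uint16) int {
		var l int
		lastwindow := int32(-1) // impossible sentinel; real windows are 0..255
		for _, t := range bitmap {
			if window := int32(t / 256); window != lastwindow {
				l += 1 + 32 // window number octet plus worst-case 32-octet bitmap
				lastwindow = window
			}
		}
		return l
	}
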
+ 1 - 3
vendor/github.com/miekg/dns/types_generate.go

@@ -189,10 +189,8 @@ func main() {
 				o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
 				o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
 			case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
 			case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
 				o("l += len(rr.%s)/2\n")
 				o("l += len(rr.%s)/2\n")
-			case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
-				fallthrough
 			case st.Tag(i) == `dns:"hex"`:
 			case st.Tag(i) == `dns:"hex"`:
-				o("l += len(rr.%s)/2 + 1\n")
+				o("l += len(rr.%s)/2\n")
 			case st.Tag(i) == `dns:"any"`:
 			case st.Tag(i) == `dns:"any"`:
 				o("l += len(rr.%s)\n")
 				o("l += len(rr.%s)\n")
 			case st.Tag(i) == `dns:"a"`:
 			case st.Tag(i) == `dns:"a"`:

+ 1 - 1
vendor/github.com/miekg/dns/version.go

@@ -3,7 +3,7 @@ package dns
 import "fmt"
 import "fmt"
 
 
 // Version is current version of this library.
 // Version is current version of this library.
-var Version = V{1, 1, 8}
+var Version = V{1, 1, 26}
 
 
 // V holds the version of this library.
 // V holds the version of this library.
 type V struct {
 type V struct {

+ 12 - 6
vendor/github.com/miekg/dns/xfr.go

@@ -182,14 +182,17 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
 //
 //	ch := make(chan *dns.Envelope)
 //	tr := new(dns.Transfer)
-//	go tr.Out(w, r, ch)
+//	var wg sync.WaitGroup
+//	go func() {
+//		tr.Out(w, r, ch)
+//		wg.Done()
+//	}()
 //	ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
 //	close(ch)
-//	w.Hijack()
-//	// w.Close() // Client closes connection
+//	wg.Wait() // wait until everything is written out
+//	w.Close() // close connection
 //
-// The server is responsible for sending the correct sequence of RRs through the
-// channel ch.
+// The server is responsible for sending the correct sequence of RRs through the channel ch.
 func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
 	for x := range ch {
 		r := new(Msg)
@@ -198,11 +201,14 @@ func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
 		r.Authoritative = true
 		// assume it fits TODO(miek): fix
 		r.Answer = append(r.Answer, x.RR...)
+		if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil {
+			r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix())
+		}
 		if err := w.WriteMsg(r); err != nil {
 			return err
 		}
+		w.TsigTimersOnly(true)
 	}
-	w.TsigTimersOnly(true)
 	return nil
 }
 
 

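The rewritten doc comment's pattern, fleshed out (w, r, soa and rr1..rr3 are the comment's own placeholders; note the wg.Add(1), which the comment leaves implicit but sync.WaitGroup requires):

	ch := make(chan *dns.Envelope)
	tr := new(dns.Transfer)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		tr.Out(w, r, ch)
	}()
	ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
	close(ch)
	wg.Wait() // wait until everything is written out
	w.Close() // close connection
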
+ 8 - 8
vendor/github.com/miekg/dns/ztypes.go

@@ -312,12 +312,12 @@ func (rr *DS) len(off int, compression map[string]struct{}) int {
 	l += 2 // KeyTag
 	l++    // Algorithm
 	l++    // DigestType
-	l += len(rr.Digest)/2 + 1
+	l += len(rr.Digest) / 2
 	return l
 }
 func (rr *EID) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
-	l += len(rr.Endpoint)/2 + 1
+	l += len(rr.Endpoint) / 2
 	return l
 }
 func (rr *EUI48) len(off int, compression map[string]struct{}) int {
@@ -452,7 +452,7 @@ func (rr *NID) len(off int, compression map[string]struct{}) int {
 }
 func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
-	l += len(rr.Locator)/2 + 1
+	l += len(rr.Locator) / 2
 	return l
 }
 func (rr *NINFO) len(off int, compression map[string]struct{}) int {
@@ -505,7 +505,7 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
 }
 func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
-	l += len(rr.Rdata)/2 + 1
+	l += len(rr.Rdata) / 2
 	return l
 }
 func (rr *RKEY) len(off int, compression map[string]struct{}) int {
@@ -546,7 +546,7 @@ func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
 	l++ // Usage
 	l++ // Selector
 	l++ // MatchingType
-	l += len(rr.Certificate)/2 + 1
+	l += len(rr.Certificate) / 2
 	return l
 }
 func (rr *SOA) len(off int, compression map[string]struct{}) int {
@@ -579,7 +579,7 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
 	l++ // Algorithm
 	l++ // Type
-	l += len(rr.FingerPrint)/2 + 1
+	l += len(rr.FingerPrint) / 2
 	return l
 }
 func (rr *TA) len(off int, compression map[string]struct{}) int {
@@ -587,7 +587,7 @@ func (rr *TA) len(off int, compression map[string]struct{}) int {
 	l += 2 // KeyTag
 	l++    // Algorithm
 	l++    // DigestType
-	l += len(rr.Digest)/2 + 1
+	l += len(rr.Digest) / 2
 	return l
 }
 func (rr *TALINK) len(off int, compression map[string]struct{}) int {
@@ -614,7 +614,7 @@ func (rr *TLSA) len(off int, compression map[string]struct{}) int {
 	l++ // Usage
 	l++ // Selector
 	l++ // MatchingType
-	l += len(rr.Certificate)/2 + 1
+	l += len(rr.Certificate) / 2
 	return l
 }
 func (rr *TSIG) len(off int, compression map[string]struct{}) int {

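All of these fields hold hex strings, and a hex string packs to half its character count on the wire, which is what the dropped "+ 1" was silently overcounting. A quick arithmetic check:

	digest := "ABCDEF"       // 6 hex characters
	old := len(digest)/2 + 1 // 4: one octet too many
	now := len(digest) / 2   // 3: matches the packed RDATA size
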
+ 87 - 4
vendor/github.com/oschwald/geoip2-golang/reader.go

@@ -14,6 +14,71 @@ import (
 	"github.com/oschwald/maxminddb-golang"
 	"github.com/oschwald/maxminddb-golang"
 )
 )
 
 
+// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise
+// database.
+type Enterprise struct {
+	City struct {
+		Confidence uint8             `maxminddb:"confidence"`
+		GeoNameID  uint              `maxminddb:"geoname_id"`
+		Names      map[string]string `maxminddb:"names"`
+	} `maxminddb:"city"`
+	Continent struct {
+		Code      string            `maxminddb:"code"`
+		GeoNameID uint              `maxminddb:"geoname_id"`
+		Names     map[string]string `maxminddb:"names"`
+	} `maxminddb:"continent"`
+	Country struct {
+		GeoNameID         uint              `maxminddb:"geoname_id"`
+		IsoCode           string            `maxminddb:"iso_code"`
+		Names             map[string]string `maxminddb:"names"`
+		Confidence        uint8             `maxminddb:"confidence"`
+		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
+	} `maxminddb:"country"`
+	Location struct {
+		AccuracyRadius uint16  `maxminddb:"accuracy_radius"`
+		Latitude       float64 `maxminddb:"latitude"`
+		Longitude      float64 `maxminddb:"longitude"`
+		MetroCode      uint    `maxminddb:"metro_code"`
+		TimeZone       string  `maxminddb:"time_zone"`
+	} `maxminddb:"location"`
+	Postal struct {
+		Code       string `maxminddb:"code"`
+		Confidence uint8  `maxminddb:"confidence"`
+	} `maxminddb:"postal"`
+	RegisteredCountry struct {
+		GeoNameID         uint              `maxminddb:"geoname_id"`
+		IsoCode           string            `maxminddb:"iso_code"`
+		Names             map[string]string `maxminddb:"names"`
+		Confidence        uint8             `maxminddb:"confidence"`
+		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
+	} `maxminddb:"registered_country"`
+	RepresentedCountry struct {
+		GeoNameID         uint              `maxminddb:"geoname_id"`
+		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
+		IsoCode           string            `maxminddb:"iso_code"`
+		Names             map[string]string `maxminddb:"names"`
+		Type              string            `maxminddb:"type"`
+	} `maxminddb:"represented_country"`
+	Subdivisions []struct {
+		Confidence uint8             `maxminddb:"confidence"`
+		GeoNameID  uint              `maxminddb:"geoname_id"`
+		IsoCode    string            `maxminddb:"iso_code"`
+		Names      map[string]string `maxminddb:"names"`
+	} `maxminddb:"subdivisions"`
+	Traits struct {
+		AutonomousSystemNumber       uint   `maxminddb:"autonomous_system_number"`
+		AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
+		ConnectionType               string `maxminddb:"connection_type"`
+		Domain                       string `maxminddb:"domain"`
+		IsAnonymousProxy             bool   `maxminddb:"is_anonymous_proxy"`
+		IsLegitimateProxy            bool   `maxminddb:"is_legitimate_proxy"`
+		IsSatelliteProvider          bool   `maxminddb:"is_satellite_provider"`
+		ISP                          string `maxminddb:"isp"`
+		Organization                 string `maxminddb:"organization"`
+		UserType                     string `maxminddb:"user_type"`
+	} `maxminddb:"traits"`
+}
+
 // The City struct corresponds to the data in the GeoIP2/GeoLite2 City
 // databases.
 type City struct {
@@ -210,7 +275,11 @@ func getDBType(reader *maxminddb.Reader) (databaseType, error) {
 	case "GeoLite2-ASN":
 	case "GeoLite2-ASN":
 		return isASN, nil
 		return isASN, nil
 	// We allow City lookups on Country for back compat
 	// We allow City lookups on Country for back compat
-	case "GeoLite2-City",
+	case "DBIP-City-Lite",
+		"DBIP-City",
+		"DBIP-Country-Lite",
+		"DBIP-Country",
+		"GeoLite2-City",
 		"GeoIP2-City",
 		"GeoIP2-City",
 		"GeoIP2-City-Africa",
 		"GeoIP2-City-Africa",
 		"GeoIP2-City-Asia-Pacific",
 		"GeoIP2-City-Asia-Pacific",
@@ -225,15 +294,29 @@ func getDBType(reader *maxminddb.Reader) (databaseType, error) {
 		return isConnectionType, nil
 	case "GeoIP2-Domain":
 		return isDomain, nil
-	case "GeoIP2-Enterprise":
+	case "DBIP-Location-ISP (compat=Enterprise)",
+		"GeoIP2-Enterprise":
 		return isEnterprise | isCity | isCountry, nil
-	case "GeoIP2-ISP", "GeoIP2-Precision-ISP":
-		return isISP, nil
+	case "GeoIP2-ISP",
+		"GeoIP2-Precision-ISP":
+		return isISP | isASN, nil
 	default:
 		return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType}
 	}
 }

+// Enterprise takes an IP address as a net.IP struct and returns an Enterprise
+// struct and/or an error. This is intended to be used with the GeoIP2
+// Enterprise database.
+func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) {
+	if isEnterprise&r.databaseType == 0 {
+		return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType}
+	}
+	var enterprise Enterprise
+	err := r.mmdbReader.Lookup(ipAddress, &enterprise)
+	return &enterprise, err
+}
+
 // City takes an IP address as a net.IP struct and returns a City struct
 // and/or an error. Although this can be used with other databases, this
 // method generally should be used with the GeoIP2 or GeoLite2 City databases.

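A minimal sketch of the new method (database path hypothetical):

	db, err := geoip2.Open("GeoIP2-Enterprise.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	record, err := db.Enterprise(net.ParseIP("81.2.69.142"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.IsoCode, record.City.Confidence)
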
+ 85 - 95
vendor/github.com/oschwald/maxminddb-golang/decoder.go

@@ -27,8 +27,10 @@ const (
 	_Uint64
 	_Uint128
 	_Slice
-	_Container
-	_Marker
+	// We don't use the next two. They are placeholders. See the spec
+	// for more details.
+	_Container // nolint: deadcode, varcheck
+	_Marker    // nolint: deadcode, varcheck
 	_Bool
 	_Float32
 )
@@ -159,10 +161,8 @@ func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (u
 	if size > 1 {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
 	}
-	value, newOffset, err := d.decodeBool(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeBool(size, offset)
+
 	switch result.Kind() {
 	case reflect.Bool:
 		result.SetBool(value)
@@ -207,10 +207,8 @@ func (d *decoder) indirect(result reflect.Value) reflect.Value {
 var sliceType = reflect.TypeOf([]byte{})

 func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
-	value, newOffset, err := d.decodeBytes(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeBytes(size, offset)
+
 	switch result.Kind() {
 	case reflect.Slice:
 		if result.Type() == sliceType {
@@ -230,10 +228,7 @@ func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value)
 	if size != 4 {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
 	}
-	value, newOffset, err := d.decodeFloat32(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeFloat32(size, offset)

 	switch result.Kind() {
 	case reflect.Float32, reflect.Float64:
@@ -253,10 +248,8 @@ func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value)
 	if size != 8 {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
 	}
-	value, newOffset, err := d.decodeFloat64(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeFloat64(size, offset)
+
 	switch result.Kind() {
 	case reflect.Float32, reflect.Float64:
 		if result.OverflowFloat(value) {
@@ -277,10 +270,7 @@ func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (
 	if size > 4 {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
 	}
-	value, newOffset, err := d.decodeInt(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeInt(size, offset)

 	switch result.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -360,11 +350,8 @@ func (d *decoder) unmarshalSlice(
 }

 func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
-	value, newOffset, err := d.decodeString(size, offset)
+	value, newOffset := d.decodeString(size, offset)

-	if err != nil {
-		return 0, err
-	}
 	switch result.Kind() {
 	case reflect.String:
 		result.SetString(value)
@@ -384,10 +371,7 @@ func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, ui
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
 	}
 	}
 
 
-	value, newOffset, err := d.decodeUint(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeUint(size, offset)
 
 
 	switch result.Kind() {
 	switch result.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -416,10 +400,7 @@ func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value)
 	if size > 16 {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
 	}
-	value, newOffset, err := d.decodeUint128(size, offset)
-	if err != nil {
-		return 0, err
-	}
+	value, newOffset := d.decodeUint128(size, offset)

 	switch result.Kind() {
 	case reflect.Struct:
@@ -436,36 +417,36 @@ func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value)
 	return newOffset, newUnmarshalTypeError(value, result.Type())
 }

-func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
-	return size != 0, offset, nil
+func (d *decoder) decodeBool(size uint, offset uint) (bool, uint) {
+	return size != 0, offset
 }

-func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
+func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint) {
 	newOffset := offset + size
 	bytes := make([]byte, size)
 	copy(bytes, d.buffer[offset:newOffset])
-	return bytes, newOffset, nil
+	return bytes, newOffset
 }

-func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
+func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint) {
 	newOffset := offset + size
 	bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
-	return math.Float64frombits(bits), newOffset, nil
+	return math.Float64frombits(bits), newOffset
 }

-func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
+func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint) {
 	newOffset := offset + size
 	bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
-	return math.Float32frombits(bits), newOffset, nil
+	return math.Float32frombits(bits), newOffset
 }

-func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
+func (d *decoder) decodeInt(size uint, offset uint) (int, uint) {
 	newOffset := offset + size
 	var val int32
 	for _, b := range d.buffer[offset:newOffset] {
 		val = (val << 8) | int32(b)
 	}
-	return int(val), newOffset, nil
+	return int(val), newOffset
 }

 func (d *decoder) decodeMap(
@@ -475,9 +456,14 @@ func (d *decoder) decodeMap(
 	depth int,
 ) (uint, error) {
 	if result.IsNil() {
-		result.Set(reflect.MakeMap(result.Type()))
+		result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
 	}

+	mapType := result.Type()
+	keyValue := reflect.New(mapType.Key()).Elem()
+	elemType := mapType.Elem()
+	elemKind := elemType.Kind()
+	var elemValue reflect.Value
 	for i := uint(0); i < size; i++ {
 		var key []byte
 		var err error
@@ -487,12 +473,17 @@ func (d *decoder) decodeMap(
 			return 0, err
 		}

-		value := reflect.New(result.Type().Elem())
-		offset, err = d.decode(offset, value, depth)
+		if !elemValue.IsValid() || elemKind == reflect.Interface {
+			elemValue = reflect.New(elemType).Elem()
+		}
+
+		offset, err = d.decode(offset, elemValue, depth)
 		if err != nil {
 			return 0, err
 		}
-		result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())
+
+		keyValue.SetString(string(key))
+		result.SetMapIndex(keyValue, elemValue)
 	}
 	return offset, nil
 }
@@ -511,7 +502,7 @@ func (d *decoder) decodePointer(
 	if pointerSize == 4 {
 		prefix = 0
 	} else {
-		prefix = uint(size & 0x7)
+		prefix = size & 0x7
 	}
 	unpacked := uintFromBytes(prefix, pointerBytes)

@@ -549,57 +540,18 @@ func (d *decoder) decodeSlice(
 	return offset, nil
 }

-func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
+func (d *decoder) decodeString(size uint, offset uint) (string, uint) {
 	newOffset := offset + size
-	return string(d.buffer[offset:newOffset]), newOffset, nil
+	return string(d.buffer[offset:newOffset]), newOffset
 }

-type fieldsType struct {
-	namedFields     map[string]int
-	anonymousFields []int
-}
-
-var (
-	fieldMap   = map[reflect.Type]*fieldsType{}
-	fieldMapMu sync.RWMutex
-)
-
 func (d *decoder) decodeStruct(
 	size uint,
 	offset uint,
 	result reflect.Value,
 	depth int,
 ) (uint, error) {
-	resultType := result.Type()
-
-	fieldMapMu.RLock()
-	fields, ok := fieldMap[resultType]
-	fieldMapMu.RUnlock()
-	if !ok {
-		numFields := resultType.NumField()
-		namedFields := make(map[string]int, numFields)
-		var anonymous []int
-		for i := 0; i < numFields; i++ {
-			field := resultType.Field(i)
-
-			fieldName := field.Name
-			if tag := field.Tag.Get("maxminddb"); tag != "" {
-				if tag == "-" {
-					continue
-				}
-				fieldName = tag
-			}
-			if field.Anonymous {
-				anonymous = append(anonymous, i)
-				continue
-			}
-			namedFields[fieldName] = i
-		}
-		fieldMapMu.Lock()
-		fields = &fieldsType{namedFields, anonymous}
-		fieldMap[resultType] = fields
-		fieldMapMu.Unlock()
-	}
+	fields := cachedFields(result)

 	// This fills in embedded structs
 	for _, i := range fields.anonymousFields {
@@ -638,7 +590,45 @@ func (d *decoder) decodeStruct(
 	return offset, nil
 }

-func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
+type fieldsType struct {
+	namedFields     map[string]int
+	anonymousFields []int
+}
+
+var fieldsMap sync.Map
+
+func cachedFields(result reflect.Value) *fieldsType {
+	resultType := result.Type()
+
+	if fields, ok := fieldsMap.Load(resultType); ok {
+		return fields.(*fieldsType)
+	}
+	numFields := resultType.NumField()
+	namedFields := make(map[string]int, numFields)
+	var anonymous []int
+	for i := 0; i < numFields; i++ {
+		field := resultType.Field(i)
+
+		fieldName := field.Name
+		if tag := field.Tag.Get("maxminddb"); tag != "" {
+			if tag == "-" {
+				continue
+			}
+			fieldName = tag
+		}
+		if field.Anonymous {
+			anonymous = append(anonymous, i)
+			continue
+		}
+		namedFields[fieldName] = i
+	}
+	fields := &fieldsType{namedFields, anonymous}
+	fieldsMap.Store(resultType, fields)
+
+	return fields
+}
+
+func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint) {
 	newOffset := offset + size
 	bytes := d.buffer[offset:newOffset]

@@ -646,15 +636,15 @@ func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
 	for _, b := range bytes {
 		val = (val << 8) | uint64(b)
 	}
-	return val, newOffset, nil
+	return val, newOffset
 }

-func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error) {
+func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint) {
 	newOffset := offset + size
 	val := new(big.Int)
 	val.SetBytes(d.buffer[offset:newOffset])

-	return val, newOffset, nil
+	return val, newOffset
 }

 func uintFromBytes(prefix uint, uintBytes []byte) uint {

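The decodeMap rewrite works because reflect's SetMapIndex copies its value argument into the map, so one scratch reflect.Value can be decoded into over and over (except for interface elements, which get a fresh value each iteration to avoid aliasing). The same pattern in isolation:

	m := reflect.MakeMapWithSize(reflect.TypeOf(map[string]int{}), 2)
	scratch := reflect.New(reflect.TypeOf(0)).Elem()
	for i, k := range []string{"a", "b"} {
		scratch.SetInt(int64(i))
		m.SetMapIndex(reflect.ValueOf(k), scratch) // value is copied here
	}
	fmt.Println(m.Interface()) // map[a:0 b:1]
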
+ 1 - 1
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go

@@ -1,4 +1,4 @@
-// +build !windows,!appengine
+// +build !windows,!appengine,!plan9

 package maxminddb

 
 

+ 42 - 0
vendor/github.com/oschwald/maxminddb-golang/node.go

@@ -0,0 +1,42 @@
+package maxminddb
+
+type nodeReader interface {
+	readLeft(uint) uint
+	readRight(uint) uint
+}
+
+type nodeReader24 struct {
+	buffer []byte
+}
+
+func (n nodeReader24) readLeft(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader24) readRight(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber+3]) << 16) | (uint(n.buffer[nodeNumber+4]) << 8) | uint(n.buffer[nodeNumber+5])
+}
+
+type nodeReader28 struct {
+	buffer []byte
+}
+
+func (n nodeReader28) readLeft(nodeNumber uint) uint {
+	return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) | (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader28) readRight(nodeNumber uint) uint {
+	return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) | (uint(n.buffer[nodeNumber+4]) << 16) | (uint(n.buffer[nodeNumber+5]) << 8) | uint(n.buffer[nodeNumber+6])
+}
+
+type nodeReader32 struct {
+	buffer []byte
+}
+
+func (n nodeReader32) readLeft(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber]) << 24) | (uint(n.buffer[nodeNumber+1]) << 16) | (uint(n.buffer[nodeNumber+2]) << 8) | uint(n.buffer[nodeNumber+3])
+}
+
+func (n nodeReader32) readRight(nodeNumber uint) uint {
+	return (uint(n.buffer[nodeNumber+4]) << 24) | (uint(n.buffer[nodeNumber+5]) << 16) | (uint(n.buffer[nodeNumber+6]) << 8) | uint(n.buffer[nodeNumber+7])
+}

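A small in-package sanity check of the 28-bit packing these readers assume: bytes 0-2 carry the low 24 bits of the left record, byte 3 is split between the two records' high nibbles, and bytes 4-6 carry the low 24 bits of the right record:

	buf := []byte{0xBC, 0xDE, 0xF1, 0xA2, 0x34, 0x56, 0x78}
	n := nodeReader28{buffer: buf}
	fmt.Printf("%07X %07X\n", n.readLeft(0), n.readRight(0)) // ABCDEF1 2345678
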
+ 111 - 79
vendor/github.com/oschwald/maxminddb-golang/reader.go

@@ -20,16 +20,22 @@ var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")

 // Reader holds the data corresponding to the MaxMind DB file. Its only public
 // field is Metadata, which contains the metadata from the MaxMind DB file.
+//
+// All of the methods on Reader are thread-safe. The struct may be safely
+// shared across goroutines.
 type Reader struct {
-	hasMappedFile bool
-	buffer        []byte
-	decoder       decoder
-	Metadata      Metadata
-	ipv4Start     uint
+	hasMappedFile     bool
+	buffer            []byte
+	nodeReader        nodeReader
+	decoder           decoder
+	Metadata          Metadata
+	ipv4Start         uint
+	ipv4StartBitDepth int
+	nodeOffsetMult    uint
 }

 // Metadata holds the metadata decoded from the MaxMind DB file. In particular
-// in has the format version, the build time as Unix epoch time, the database
+// it has the format version, the build time as Unix epoch time, the database
 // type and description, the IP version supported, and a slice of the natural
 // languages included.
 type Metadata struct {
@@ -74,65 +80,123 @@ func FromBytes(buffer []byte) (*Reader, error) {
 		buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
 	}

+	nodeBuffer := buffer[:searchTreeSize]
+	var nodeReader nodeReader
+	switch metadata.RecordSize {
+	case 24:
+		nodeReader = nodeReader24{buffer: nodeBuffer}
+	case 28:
+		nodeReader = nodeReader28{buffer: nodeBuffer}
+	case 32:
+		nodeReader = nodeReader32{buffer: nodeBuffer}
+	default:
+		return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
+	}
+
 	reader := &Reader{
-		buffer:    buffer,
-		decoder:   d,
-		Metadata:  metadata,
-		ipv4Start: 0,
+		buffer:         buffer,
+		nodeReader:     nodeReader,
+		decoder:        d,
+		Metadata:       metadata,
+		ipv4Start:      0,
+		nodeOffsetMult: metadata.RecordSize / 4,
 	}

-	reader.ipv4Start, err = reader.startNode()
+	reader.setIPv4Start()

 	return reader, err
 }

-func (r *Reader) startNode() (uint, error) {
+func (r *Reader) setIPv4Start() {
 	if r.Metadata.IPVersion != 6 {
-		return 0, nil
+		return
 	}

 	nodeCount := r.Metadata.NodeCount

 	node := uint(0)
-	var err error
-	for i := 0; i < 96 && node < nodeCount; i++ {
-		node, err = r.readNode(node, 0)
-		if err != nil {
-			return 0, err
-		}
+	i := 0
+	for ; i < 96 && node < nodeCount; i++ {
+		node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
 	}
-	return node, err
+	r.ipv4Start = node
+	r.ipv4StartBitDepth = i
 }

-// Lookup takes an IP address as a net.IP structure and a pointer to the
-// result value to Decode into.
-func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
+// Lookup retrieves the database record for ip and stores it in the value
+// pointed to by result. If result is nil or not a pointer, an error is
+// returned. If the data in the database record cannot be stored in result
+// because of type differences, an UnmarshalTypeError is returned. If the
+// database is invalid or otherwise cannot be read, an InvalidDatabaseError
+// is returned.
+func (r *Reader) Lookup(ip net.IP, result interface{}) error {
 	if r.buffer == nil {
 		return errors.New("cannot call Lookup on a closed database")
 	}
-	pointer, err := r.lookupPointer(ipAddress)
+	pointer, _, _, err := r.lookupPointer(ip)
 	if pointer == 0 || err != nil {
 		return err
 	}
 	return r.retrieveData(pointer, result)
 }

+// LookupNetwork retrieves the database record for ip and stores it in the
+// value pointed to by result. The network returned is the network associated
+// with the data record in the database. The ok return value indicates whether
+// the database contained a record for the ip.
+//
+// If result is nil or not a pointer, an error is returned. If the data in the
+// database record cannot be stored in result because of type differences, an
+// UnmarshalTypeError is returned. If the database is invalid or otherwise
+// cannot be read, an InvalidDatabaseError is returned.
+func (r *Reader) LookupNetwork(ip net.IP, result interface{}) (network *net.IPNet, ok bool, err error) {
+	if r.buffer == nil {
+		return nil, false, errors.New("cannot call Lookup on a closed database")
+	}
+	pointer, prefixLength, ip, err := r.lookupPointer(ip)
+
+	network = r.cidr(ip, prefixLength)
+	if pointer == 0 || err != nil {
+		return network, false, err
+	}
+
+	return network, true, r.retrieveData(pointer, result)
+}
+
 // LookupOffset maps an argument net.IP to a corresponding record offset in the
 // database. NotFound is returned if no such record is found, and a record may
 // otherwise be extracted by passing the returned offset to Decode. LookupOffset
 // is an advanced API, which exists to provide clients with a means to cache
 // previously-decoded records.
-func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
+func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
 	if r.buffer == nil {
 		return 0, errors.New("cannot call LookupOffset on a closed database")
 	}
-	pointer, err := r.lookupPointer(ipAddress)
+	pointer, _, _, err := r.lookupPointer(ip)
 	if pointer == 0 || err != nil {
 		return NotFound, err
 	}
 	return r.resolveDataPointer(pointer)
 }

+func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
+	// This is necessary as the node that the IPv4 start is at may
+	// be at a bit depth that is less that 96, i.e., ipv4Start points
+	// to a leaf node. For instance, if a record was inserted at ::/8,
+	// the ipv4Start would point directly at the leaf node for the
+	// record and would have a bit depth of 8. This would not happen
+	// with databases currently distributed by MaxMind as all of them
+	// have an IPv4 subtree that is greater than a single node.
+	if r.Metadata.IPVersion == 6 &&
+		len(ip) == net.IPv4len &&
+		r.ipv4StartBitDepth != 96 {
+		return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
+	}
+
+	mask := net.CIDRMask(prefixLength, len(ip)*8)
+	return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
+}
+
 // Decode the record at |offset| into |result|. The result value pointed to
 // must be a data value that corresponds to a record in the database. This may
 // include a struct representation of the data, a map capable of holding the
@@ -162,29 +226,24 @@ func (r *Reader) decode(offset uintptr, result interface{}) error {
 		return errors.New("result param must be a pointer")
 	}

-	_, err := r.decoder.decode(uint(offset), reflect.ValueOf(result), 0)
+	_, err := r.decoder.decode(uint(offset), rv, 0)
 	return err
 }

-func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
-	if ipAddress == nil {
-		return 0, errors.New("ipAddress passed to Lookup cannot be nil")
+func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
+	if ip == nil {
+		return 0, 0, ip, errors.New("IP passed to Lookup cannot be nil")
 	}

-	ipV4Address := ipAddress.To4()
+	ipV4Address := ip.To4()
 	if ipV4Address != nil {
-		ipAddress = ipV4Address
+		ip = ipV4Address
 	}
-	if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
-		return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
+	if len(ip) == 16 && r.Metadata.IPVersion == 4 {
+		return 0, 0, ip, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ip.String())
 	}

-	return r.findAddressInTree(ipAddress)
-}
-
-func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {
-
-	bitCount := uint(len(ipAddress) * 8)
+	bitCount := uint(len(ip) * 8)

 	var node uint
 	if bitCount == 32 {
@@ -193,52 +252,25 @@ func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {
 
 
 	nodeCount := r.Metadata.NodeCount

-	for i := uint(0); i < bitCount && node < nodeCount; i++ {
-		bit := uint(1) & (uint(ipAddress[i>>3]) >> (7 - (i % 8)))
+	i := uint(0)
+	for ; i < bitCount && node < nodeCount; i++ {
+		bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))

-		var err error
-		node, err = r.readNode(node, bit)
-		if err != nil {
-			return 0, err
+		offset := node * r.nodeOffsetMult
+		if bit == 0 {
+			node = r.nodeReader.readLeft(offset)
+		} else {
+			node = r.nodeReader.readRight(offset)
 		}
 	}
 	if node == nodeCount {
 		// Record is empty
-		return 0, nil
+		return 0, int(i), ip, nil
 	} else if node > nodeCount {
-		return node, nil
+		return node, int(i), ip, nil
 	}

-	return 0, newInvalidDatabaseError("invalid node in search tree")
-}
-
-func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
-	RecordSize := r.Metadata.RecordSize
-
-	baseOffset := nodeNumber * RecordSize / 4
-
-	var nodeBytes []byte
-	var prefix uint
-	switch RecordSize {
-	case 24:
-		offset := baseOffset + index*3
-		nodeBytes = r.buffer[offset : offset+3]
-	case 28:
-		prefix = uint(r.buffer[baseOffset+3])
-		if index != 0 {
-			prefix &= 0x0F
-		} else {
-			prefix = (0xF0 & prefix) >> 4
-		}
-		offset := baseOffset + index*4
-		nodeBytes = r.buffer[offset : offset+3]
-	case 32:
-		offset := baseOffset + index*4
-		nodeBytes = r.buffer[offset : offset+4]
-	default:
-		return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
-	}
-	return uintFromBytes(prefix, nodeBytes), nil
+	return 0, int(i), ip, newInvalidDatabaseError("invalid node in search tree")
 }

 func (r *Reader) retrieveData(pointer uint, result interface{}) error {
@@ -252,7 +284,7 @@ func (r *Reader) retrieveData(pointer uint, result interface{}) error {
 func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
 	var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)

-	if resolved > uintptr(len(r.buffer)) {
+	if resolved >= uintptr(len(r.buffer)) {
 		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
 	}
 	return resolved, nil

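A sketch of the new LookupNetwork call (path and struct shape hypothetical):

	db, err := maxminddb.Open("GeoLite2-City.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var record struct {
		Country struct {
			ISOCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}
	network, ok, err := db.LookupNetwork(net.ParseIP("81.2.69.142"), &record)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(network, ok, record.Country.ISOCode)
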
+ 1 - 1
vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go

@@ -1,4 +1,4 @@
-// +build appengine
+// +build appengine plan9
 
 
 package maxminddb
 
 

+ 1 - 1
vendor/github.com/oschwald/maxminddb-golang/reader_other.go

@@ -1,4 +1,4 @@
-// +build !appengine
+// +build !appengine,!plan9
 
 
 package maxminddb
 
 

+ 23 - 34
vendor/github.com/oschwald/maxminddb-golang/traverse.go

@@ -21,7 +21,7 @@ type Networks struct {
 // the database.
 //
 // Please note that a MaxMind DB may map IPv4 networks into several locations
-// in in an IPv6 database. This iterator will iterate over all of these
+// in an IPv6 database. This iterator will iterate over all of these
 // locations separately.
 func (r *Reader) Networks() *Networks {
 	s := 4
@@ -46,42 +46,31 @@ func (n *Networks) Next() bool {
 		node := n.nodes[len(n.nodes)-1]
 		n.nodes = n.nodes[:len(n.nodes)-1]

-		for {
-			if node.pointer < n.reader.Metadata.NodeCount {
-				ipRight := make(net.IP, len(node.ip))
-				copy(ipRight, node.ip)
-				if len(ipRight) <= int(node.bit>>3) {
-					n.err = newInvalidDatabaseError(
-						"invalid search tree at %v/%v", ipRight, node.bit)
-					return false
-				}
-				ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
-
-				rightPointer, err := n.reader.readNode(node.pointer, 1)
-				if err != nil {
-					n.err = err
-					return false
-				}
-
-				node.bit++
-				n.nodes = append(n.nodes, netNode{
-					pointer: rightPointer,
-					ip:      ipRight,
-					bit:     node.bit,
-				})
-
-				node.pointer, err = n.reader.readNode(node.pointer, 0)
-				if err != nil {
-					n.err = err
-					return false
-				}
-
-			} else if node.pointer > n.reader.Metadata.NodeCount {
+		for node.pointer != n.reader.Metadata.NodeCount {
+			if node.pointer > n.reader.Metadata.NodeCount {
 				n.lastNode = node
 				return true
-			} else {
-				break
 			}
+			ipRight := make(net.IP, len(node.ip))
+			copy(ipRight, node.ip)
+			if len(ipRight) <= int(node.bit>>3) {
+				n.err = newInvalidDatabaseError(
+					"invalid search tree at %v/%v", ipRight, node.bit)
+				return false
+			}
+			ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
+
+			offset := node.pointer * n.reader.nodeOffsetMult
+			rightPointer := n.reader.nodeReader.readRight(offset)
+
+			node.bit++
+			n.nodes = append(n.nodes, netNode{
+				pointer: rightPointer,
+				ip:      ipRight,
+				bit:     node.bit,
+			})
+
+			node.pointer = n.reader.nodeReader.readLeft(offset)
 		}
 	}
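
The reworked loop above drives the same public iterator as before. For context, a minimal usage sketch, assuming the package's exported Open/Networks/Next/Network/Err API and a GeoLite2 database file (the path is a placeholder):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/oschwald/maxminddb-golang"
    )

    func main() {
    	db, err := maxminddb.Open("GeoLite2-Country.mmdb")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	n := db.Networks()
    	for n.Next() {
    		var record struct {
    			Country struct {
    				ISOCode string `maxminddb:"iso_code"`
    			} `maxminddb:"country"`
    		}
    		subnet, err := n.Network(&record) // decodes the record, returns the *net.IPNet
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Println(subnet, record.Country.ISOCode)
    	}
    	if err := n.Err(); err != nil {
    		log.Fatal(err)
    	}
    }
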
 
 

+ 7 - 2
vendor/github.com/oschwald/maxminddb-golang/verifier.go

@@ -1,6 +1,9 @@
 package maxminddb

-import "reflect"
+import (
+	"reflect"
+	"runtime"
+)
 
 type verifier struct {
 	reader *Reader
@@ -15,7 +18,9 @@ func (r *Reader) Verify() error {
 		return err
 	}

-	return v.verifyDatabase()
+	err := v.verifyDatabase()
+	runtime.KeepAlive(v.reader)
+	return err
 }
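
The runtime.KeepAlive added above pins the Reader until verification has finished reading its underlying buffer. A minimal sketch of the general pattern; the finalizer here is an assumption for illustration, standing in for whatever would otherwise release a buffer (such as an mmap'd region the GC cannot see into):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    type resource struct{ buf []byte }

    func sum(r *resource) int {
    	total := 0
    	for _, b := range r.buf {
    		total += int(b)
    	}
    	// Without this, r may become unreachable (and its finalizer may run)
    	// as soon as the compiler sees r's last use, even mid-computation.
    	runtime.KeepAlive(r)
    	return total
    }

    func main() {
    	r := &resource{buf: []byte{1, 2, 3}}
    	runtime.SetFinalizer(r, func(r *resource) { r.buf = nil }) // illustrative release
    	fmt.Println(sum(r))
    }
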
 
 
 func (v *verifier) verifyMetadata() error {

+ 29 - 0
vendor/github.com/prometheus/client_golang/prometheus/build_info.go

@@ -0,0 +1,29 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+package prometheus
+
+import "runtime/debug"
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
+func readBuildInfo() (path, version, sum string) {
+	path, version, sum = "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	return
+}

+ 22 - 0
vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go

@@ -0,0 +1,22 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.12
+
+package prometheus
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
+// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
+func readBuildInfo() (path, version, sum string) {
+	return "unknown", "unknown", "unknown"
+}

+ 1 - 1
vendor/github.com/prometheus/client_golang/prometheus/collector.go

@@ -79,7 +79,7 @@ type Collector interface {
 // of the Describe method. If a Collector sometimes collects no metrics at all
 // (for example vectors like CounterVec, GaugeVec, etc., which only collect
 // metrics after a metric with a fully specified label set has been accessed),
-// it might even get registered as an unchecked Collecter (cf. the Register
+// it might even get registered as an unchecked Collector (cf. the Register
 // method of the Registerer interface). Hence, only use this shortcut
 // implementation of Describe if you are certain to fulfill the contract.
 //

+ 3 - 4
vendor/github.com/prometheus/client_golang/prometheus/doc.go

@@ -122,13 +122,13 @@
 // the Collect method. The Describe method has to return separate Desc
 // instances, representative of the “throw-away” metrics to be created later.
 // NewDesc comes in handy to create those Desc instances. Alternatively, you
-// could return no Desc at all, which will marke the Collector “unchecked”.  No
-// checks are porformed at registration time, but metric consistency will still
+// could return no Desc at all, which will mark the Collector “unchecked”.  No
+// checks are performed at registration time, but metric consistency will still
 // be ensured at scrape time, i.e. any inconsistencies will lead to scrape
 // errors. Thus, with unchecked Collectors, the responsibility to not collect
 // metrics that lead to inconsistencies in the total scrape result lies with the
 // implementer of the Collector. While this is not a desirable state, it is
-// sometimes necessary. The typical use case is a situatios where the exact
+// sometimes necessary. The typical use case is a situation where the exact
 // metrics to be returned by a Collector cannot be predicted at registration
 // time, but the implementer has sufficient knowledge of the whole system to
 // guarantee metric consistency.
@@ -183,7 +183,6 @@
 // method can then expose the gathered metrics in some way. Usually, the metrics
 // are served via HTTP on the /metrics endpoint. That's happening in the example
 // above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-// (The top-level functions in the prometheus package are deprecated.)
 //
 // Pushing to the Pushgateway
 //

+ 108 - 13
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go

@@ -14,9 +14,9 @@
 package prometheus

 import (
-	"fmt"
 	"runtime"
 	"runtime"
 	"runtime/debug"
 	"runtime/debug"
+	"sync"
 	"time"
 	"time"
 )
 )
 
 
@@ -26,16 +26,41 @@ type goCollector struct {
 	gcDesc         *Desc
 	goInfoDesc     *Desc

-	// metrics to describe and collect
-	metrics memStatsMetrics
+	// ms... are memstats related.
+	msLast          *runtime.MemStats // Previously collected memstats.
+	msLastTimestamp time.Time
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
+	msMetrics       memStatsMetrics
+	msRead          func(*runtime.MemStats) // For mocking in tests.
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
 }

-// NewGoCollector returns a collector which exports metrics about the current Go
+// NewGoCollector returns a collector that exports metrics about the current Go
 // process. This includes memory stats. To collect those, runtime.ReadMemStats
-// is called. This causes a stop-the-world, which is very short with Go1.9+
-// (~25µs). However, with older Go versions, the stop-the-world duration depends
-// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// is called. This requires a “stop the world” pause, which usually only happens for
+// garbage collection (GC). Take the following implications into account when
+// deciding whether to use the Go collector:
+//
+// 1. The performance impact of stopping the world grows with how frequently
+// metrics are collected. However, with Go1.9 or later the
+// stop-the-world time per metrics collection is very short (~25µs) so that the
+// performance impact will only matter in rare cases. However, with older Go
+// versions, the stop-the-world duration depends on the heap size and can be
+// quite significant (~1.7 ms/GiB as per
 // https://go-review.googlesource.com/c/go/+/34937).
 // https://go-review.googlesource.com/c/go/+/34937).
+// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+// metrics collection happens to coincide with GC, it will only complete after
+// GC has finished. Usually, GC is fast enough to not cause problems. However,
+// with a very large heap, GC might take multiple seconds, which is enough to
+// cause scrape timeouts in common setups. To avoid this problem, the Go
+// collector will use the memstats from a previous collection if
+// runtime.ReadMemStats takes more than 1s. However, if there are no previously
+// collected memstats, or their collection is more than 5m ago, the collection
+// will block until runtime.ReadMemStats succeeds. (The problem might be solved
+// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
+// issue.)
 func NewGoCollector() Collector {
 func NewGoCollector() Collector {
 	return &goCollector{
 		goroutinesDesc: NewDesc(
 			"go_info",
 			"go_info",
 			"Information about the Go environment.",
 			"Information about the Go environment.",
 			nil, Labels{"version": runtime.Version()}),
 			nil, Labels{"version": runtime.Version()}),
-		metrics: memStatsMetrics{
+		msLast:    &runtime.MemStats{},
+		msRead:    runtime.ReadMemStats,
+		msMaxWait: time.Second,
+		msMaxAge:  5 * time.Minute,
+		msMetrics: memStatsMetrics{
 			{
 				desc: NewDesc(
 					memstatNamespace("alloc_bytes"),
@@ -253,7 +282,7 @@ func NewGoCollector() Collector {
 }

 func memstatNamespace(s string) string {
-	return fmt.Sprintf("go_memstats_%s", s)
+	return "go_memstats_" + s
 }

 // Describe returns all descriptions of the collector.
@@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
 	ch <- c.threadsDesc
 	ch <- c.gcDesc
 	ch <- c.goInfoDesc
-	for _, i := range c.metrics {
+	for _, i := range c.msMetrics {
 		ch <- i.desc
 	}
 }

 // Collect returns the current state of all metrics of the collector.
 func (c *goCollector) Collect(ch chan<- Metric) {
+	var (
+		ms   = &runtime.MemStats{}
+		done = make(chan struct{})
+	)
+	// Start reading memstats first as it might take a while.
+	go func() {
+		c.msRead(ms)
+		c.msMtx.Lock()
+		c.msLast = ms
+		c.msLastTimestamp = time.Now()
+		c.msMtx.Unlock()
+		close(done)
+	}()
+
 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
 	n, _ := runtime.ThreadCreateProfile(nil)
 	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) {

 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)

-	ms := &runtime.MemStats{}
-	runtime.ReadMemStats(ms)
-	for _, i := range c.metrics {
+	timer := time.NewTimer(c.msMaxWait)
+	select {
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
+		c.msCollect(ch, ms)
+		return
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
+	}
+	c.msMtx.Lock()
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
+		// Last memstats are recent enough. Collect from them under the lock.
+		c.msCollect(ch, c.msLast)
+		c.msMtx.Unlock()
+		return
+	}
+	// If we are here, the last memstats are too old or don't exist. We have
+	// to wait until our own ReadMemStats finally completes. For that to
+	// happen, we have to release the lock.
+	c.msMtx.Unlock()
+	<-done
+	c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+	for _, i := range c.msMetrics {
 		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
 	}
 }
@@ -299,3 +364,33 @@ type memStatsMetrics []struct {
 	eval    func(*runtime.MemStats) float64
 	valType ValueType
 }
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := readBuildInfo()
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
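
Taken together, the collectors touched in this file register like any other. A minimal wiring sketch using the package's public registry and promhttp APIs (the port is arbitrary):

    package main

    import (
    	"log"
    	"net/http"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
    	reg := prometheus.NewRegistry()
    	reg.MustRegister(
    		prometheus.NewGoCollector(),        // memstats with the 1s/5m staleness policy above
    		prometheus.NewBuildInfoCollector(), // go_build_info{path,version,checksum}
    	)
    	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    	log.Fatal(http.ListenAndServe(":8080", nil))
    }
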

+ 45 - 73
vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 			}
 		}
 	}
-	// Finally we know the final length of h.upperBounds and can make counts
-	// for both states:
+	// Finally we know the final length of h.upperBounds and can make buckets
+	// for both counts:
 	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
 	h.counts[1].buckets = make([]uint64, len(h.upperBounds))

@@ -224,18 +224,21 @@ type histogramCounts struct {
 }

 type histogram struct {
-	// countAndHotIdx is a complicated one. For lock-free yet atomic
-	// observations, we need to save the total count of observations again,
-	// combined with the index of the currently-hot counts struct, so that
-	// we can perform the operation on both values atomically. The least
-	// significant bit defines the hot counts struct. The remaining 63 bits
-	// represent the total count of observations. This happens under the
-	// assumption that the 63bit count will never overflow. Rationale: An
-	// observations takes about 30ns. Let's assume it could happen in
-	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
-	// which is about 3000 years.
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// histogramCounts, as a marker for completion.
 	//
 	//
-	// This has to be first in the struct for 64bit alignment. See
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the histogram) swap hot and cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+	//
+	// Fields with atomic access first! See alignment constraint:
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	countAndHotIdx uint64
 
@@ -243,16 +246,14 @@ type histogram struct {
 	desc     *Desc
 	writeMtx sync.Mutex // Only used in the Write method.

-	upperBounds []float64
-
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// "cold" for writing out a dto.Metric. It has to be an array of
 	// "cold" for writing out a dto.Metric. It has to be an array of
 	// pointers to guarantee 64bit alignment of the histogramCounts, see
 	// pointers to guarantee 64bit alignment of the histogramCounts, see
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*histogramCounts
 	counts [2]*histogramCounts
-	hotIdx int // Index of currently-hot counts. Only used within Write.
 
 
-	labelPairs []*dto.LabelPair
+	upperBounds []float64
+	labelPairs  []*dto.LabelPair
 }

 func (h *histogram) Desc() *Desc {
@@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) {
 	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
 	i := sort.SearchFloat64s(h.upperBounds, v)

-	// We increment h.countAndHotIdx by 2 so that the counter in the upper
-	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&h.countAndHotIdx, 2)
-	hotCounts := h.counts[n%2]
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	hotCounts := h.counts[n>>63]
 
 
 	if i < len(h.upperBounds) {
 		atomic.AddUint64(&hotCounts.buckets[i], 1)
@@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) {
 }

 func (h *histogram) Write(out *dto.Metric) error {
-	var (
-		his                   = &dto.Histogram{}
-		buckets               = make([]*dto.Bucket, len(h.upperBounds))
-		hotCounts, coldCounts *histogramCounts
-		count                 uint64
-	)
-
-	// For simplicity, we mutex the rest of this method. It is not in the
-	// hot path, i.e.  Observe is called much more often than Write. The
-	// complication of making Write lock-free isn't worth it.
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
 	h.writeMtx.Lock()
 	defer h.writeMtx.Unlock()

-	// This is a bit arcane, which is why the following spells out this if
-	// clause in English:
-	//
-	// If the currently-hot counts struct is #0, we atomically increment
-	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
-	// struct #1. Furthermore, the atomic increment gives us the new value,
-	// which, in its most significant 63 bits, tells us the count of
-	// observations done so far up to and including currently ongoing
-	// observations still using the counts struct just changed from hot to
-	// cold. To have a normal uint64 for the count, we bitshift by 1 and
-	// save the result in count. We also set h.hotIdx to 1 for the next
-	// Write call, and we will refer to counts #1 as hotCounts and to counts
-	// #0 as coldCounts.
-	//
-	// If the currently-hot counts struct is #1, we do the corresponding
-	// things the other way round. We have to _decrement_ h.countAndHotIdx
-	// (which is a bit arcane in itself, as we have to express -1 with an
-	// unsigned int...).
-	if h.hotIdx == 0 {
-		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
-		h.hotIdx = 1
-		hotCounts = h.counts[1]
-		coldCounts = h.counts[0]
-	} else {
-		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
-		h.hotIdx = 0
-		hotCounts = h.counts[0]
-		coldCounts = h.counts[1]
-	}
-
-	// Now we have to wait for the now-declared-cold counts to actually cool
-	// down, i.e. wait for all observations still using it to finish. That's
-	// the case once the count in the cold counts struct is the same as the
-	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
-	for {
-		if count == atomic.LoadUint64(&coldCounts.count) {
-			break
-		}
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := h.counts[n>>63]
+	coldCounts := h.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
 		runtime.Gosched() // Let observations get work done.
 	}

-	his.SampleCount = proto.Uint64(count)
-	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	his := &dto.Histogram{
+		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
-		buckets[i] = &dto.Bucket{
+		his.Bucket[i] = &dto.Bucket{
 			CumulativeCount: proto.Uint64(cumCount),
 			UpperBound:      proto.Float64(upperBound),
 		}
 	}
 
 
-	his.Bucket = buckets
 	out.Histogram = his
 	out.Label = h.labelPairs
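
The countAndHotIdx scheme described above packs the hot-counts index into the top bit and the observation count into the lower 63 bits. The arithmetic can be checked in isolation (values here are illustrative):

    package main

    import "fmt"

    func main() {
    	var n uint64 = 5             // 5 observations recorded, hot index 0 (MSB clear)
    	n += 1 << 63                 // a Write flips the hot index without touching the count
    	hot := n >> 63               // 1: Observe now updates counts[1]
    	cold := (^n) >> 63           // 0: counts[0] is cold and safe to read out
    	count := n & ((1 << 63) - 1) // lower 63 bits still carry the observation count
    	fmt.Println(hot, cold, count) // 1 0 5
    }
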
 
 

+ 0 - 504
vendor/github.com/prometheus/client_golang/prometheus/http.go

@@ -1,504 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
-	"bufio"
-	"compress/gzip"
-	"io"
-	"net"
-	"net/http"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/prometheus/common/expfmt"
-)
-
-// TODO(beorn7): Remove this whole file. It is a partial mirror of
-// promhttp/http.go (to avoid circular import chains) where everything HTTP
-// related should live. The functions here are just for avoiding
-// breakage. Everything is deprecated.
-
-const (
-	contentTypeHeader     = "Content-Type"
-	contentLengthHeader   = "Content-Length"
-	contentEncodingHeader = "Content-Encoding"
-	acceptEncodingHeader  = "Accept-Encoding"
-)
-
-var gzipPool = sync.Pool{
-	New: func() interface{} {
-		return gzip.NewWriter(nil)
-	},
-}
-
-// Handler returns an HTTP handler for the DefaultGatherer. It is
-// already instrumented with InstrumentHandler (using "prometheus" as handler
-// name).
-//
-// Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using promhttp.Handler instead.
-func Handler() http.Handler {
-	return InstrumentHandler("prometheus", UninstrumentedHandler())
-}
-
-// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
-//
-// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
-// instead. See there for further documentation.
-func UninstrumentedHandler() http.Handler {
-	return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
-		mfs, err := DefaultGatherer.Gather()
-		if err != nil {
-			httpError(rsp, err)
-			return
-		}
-
-		contentType := expfmt.Negotiate(req.Header)
-		header := rsp.Header()
-		header.Set(contentTypeHeader, string(contentType))
-
-		w := io.Writer(rsp)
-		if gzipAccepted(req.Header) {
-			header.Set(contentEncodingHeader, "gzip")
-			gz := gzipPool.Get().(*gzip.Writer)
-			defer gzipPool.Put(gz)
-
-			gz.Reset(w)
-			defer gz.Close()
-
-			w = gz
-		}
-
-		enc := expfmt.NewEncoder(w, contentType)
-
-		for _, mf := range mfs {
-			if err := enc.Encode(mf); err != nil {
-				httpError(rsp, err)
-				return
-			}
-		}
-	})
-}
-
-var instLabels = []string{"method", "code"}
-
-type nower interface {
-	Now() time.Time
-}
-
-type nowFunc func() time.Time
-
-func (n nowFunc) Now() time.Time {
-	return n()
-}
-
-var now nower = nowFunc(func() time.Time {
-	return time.Now()
-})
-
-// InstrumentHandler wraps the given HTTP handler for instrumentation. It
-// registers four metric collectors (if not already done) and reports HTTP
-// metrics to the (newly or already) registered collectors: http_requests_total
-// (CounterVec), http_request_duration_microseconds (Summary),
-// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
-// has a constant label named "handler" with the provided handlerName as
-// value. http_requests_total is a metric vector partitioned by HTTP method
-// (label name "method") and HTTP status code (label name "code").
-//
-// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
-// package promhttp instead. The issues are the following: (1) It uses Summaries
-// rather than Histograms. Summaries are not useful if aggregation across
-// multiple instances is required. (2) It uses microseconds as unit, which is
-// deprecated and should be replaced by seconds. (3) The size of the request is
-// calculated in a separate goroutine. Since this calculator requires access to
-// the request header, it creates a race with any writes to the header performed
-// during request handling.  httputil.ReverseProxy is a prominent example for a
-// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
-// https://github.com/prometheus/client_golang/issues/272.
-func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
-	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFunc wraps the given function for instrumentation. It
-// otherwise works in the same way as InstrumentHandler (and shares the same
-// issues).
-//
-// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
-	return InstrumentHandlerFuncWithOpts(
-		SummaryOpts{
-			Subsystem:   "http",
-			ConstLabels: Labels{"handler": handlerName},
-			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-		},
-		handlerFunc,
-	)
-}
-
-// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
-// issues) but provides more flexibility (at the cost of a more complex call
-// syntax). As InstrumentHandler, this function registers four metric
-// collectors, but it uses the provided SummaryOpts to create them. However, the
-// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
-// by "requests_total", "request_duration_microseconds", "request_size_bytes",
-// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
-// help string. The names of the variable labels of the http_requests_total
-// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
-//
-// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
-// behavior of InstrumentHandler:
-//
-//     prometheus.InstrumentHandlerWithOpts(
-//         prometheus.SummaryOpts{
-//              Subsystem:   "http",
-//              ConstLabels: prometheus.Labels{"handler": handlerName},
-//         },
-//         handler,
-//     )
-//
-// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
-// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
-// and all its fields are set to the equally named fields in the provided
-// SummaryOpts.
-//
-// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
-	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
-// the same issues) but provides more flexibility (at the cost of a more complex
-// call syntax). See InstrumentHandlerWithOpts for details how the provided
-// SummaryOpts are used.
-//
-// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
-	reqCnt := NewCounterVec(
-		CounterOpts{
-			Namespace:   opts.Namespace,
-			Subsystem:   opts.Subsystem,
-			Name:        "requests_total",
-			Help:        "Total number of HTTP requests made.",
-			ConstLabels: opts.ConstLabels,
-		},
-		instLabels,
-	)
-	if err := Register(reqCnt); err != nil {
-		if are, ok := err.(AlreadyRegisteredError); ok {
-			reqCnt = are.ExistingCollector.(*CounterVec)
-		} else {
-			panic(err)
-		}
-	}
-
-	opts.Name = "request_duration_microseconds"
-	opts.Help = "The HTTP request latencies in microseconds."
-	reqDur := NewSummary(opts)
-	if err := Register(reqDur); err != nil {
-		if are, ok := err.(AlreadyRegisteredError); ok {
-			reqDur = are.ExistingCollector.(Summary)
-		} else {
-			panic(err)
-		}
-	}
-
-	opts.Name = "request_size_bytes"
-	opts.Help = "The HTTP request sizes in bytes."
-	reqSz := NewSummary(opts)
-	if err := Register(reqSz); err != nil {
-		if are, ok := err.(AlreadyRegisteredError); ok {
-			reqSz = are.ExistingCollector.(Summary)
-		} else {
-			panic(err)
-		}
-	}
-
-	opts.Name = "response_size_bytes"
-	opts.Help = "The HTTP response sizes in bytes."
-	resSz := NewSummary(opts)
-	if err := Register(resSz); err != nil {
-		if are, ok := err.(AlreadyRegisteredError); ok {
-			resSz = are.ExistingCollector.(Summary)
-		} else {
-			panic(err)
-		}
-	}
-
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		now := time.Now()
-
-		delegate := &responseWriterDelegator{ResponseWriter: w}
-		out := computeApproximateRequestSize(r)
-
-		_, cn := w.(http.CloseNotifier)
-		_, fl := w.(http.Flusher)
-		_, hj := w.(http.Hijacker)
-		_, rf := w.(io.ReaderFrom)
-		var rw http.ResponseWriter
-		if cn && fl && hj && rf {
-			rw = &fancyResponseWriterDelegator{delegate}
-		} else {
-			rw = delegate
-		}
-		handlerFunc(rw, r)
-
-		elapsed := float64(time.Since(now)) / float64(time.Microsecond)
-
-		method := sanitizeMethod(r.Method)
-		code := sanitizeCode(delegate.status)
-		reqCnt.WithLabelValues(method, code).Inc()
-		reqDur.Observe(elapsed)
-		resSz.Observe(float64(delegate.written))
-		reqSz.Observe(float64(<-out))
-	})
-}
-
-func computeApproximateRequestSize(r *http.Request) <-chan int {
-	// Get URL length in current goroutine for avoiding a race condition.
-	// HandlerFunc that runs in parallel may modify the URL.
-	s := 0
-	if r.URL != nil {
-		s += len(r.URL.String())
-	}
-
-	out := make(chan int, 1)
-
-	go func() {
-		s += len(r.Method)
-		s += len(r.Proto)
-		for name, values := range r.Header {
-			s += len(name)
-			for _, value := range values {
-				s += len(value)
-			}
-		}
-		s += len(r.Host)
-
-		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
-		if r.ContentLength != -1 {
-			s += int(r.ContentLength)
-		}
-		out <- s
-		close(out)
-	}()
-
-	return out
-}
-
-type responseWriterDelegator struct {
-	http.ResponseWriter
-
-	status      int
-	written     int64
-	wroteHeader bool
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
-	r.status = code
-	r.wroteHeader = true
-	r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
-	if !r.wroteHeader {
-		r.WriteHeader(http.StatusOK)
-	}
-	n, err := r.ResponseWriter.Write(b)
-	r.written += int64(n)
-	return n, err
-}
-
-type fancyResponseWriterDelegator struct {
-	*responseWriterDelegator
-}
-
-func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
-	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (f *fancyResponseWriterDelegator) Flush() {
-	f.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	return f.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
-	if !f.wroteHeader {
-		f.WriteHeader(http.StatusOK)
-	}
-	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
-	f.written += n
-	return n, err
-}
-
-func sanitizeMethod(m string) string {
-	switch m {
-	case "GET", "get":
-		return "get"
-	case "PUT", "put":
-		return "put"
-	case "HEAD", "head":
-		return "head"
-	case "POST", "post":
-		return "post"
-	case "DELETE", "delete":
-		return "delete"
-	case "CONNECT", "connect":
-		return "connect"
-	case "OPTIONS", "options":
-		return "options"
-	case "NOTIFY", "notify":
-		return "notify"
-	default:
-		return strings.ToLower(m)
-	}
-}
-
-func sanitizeCode(s int) string {
-	switch s {
-	case 100:
-		return "100"
-	case 101:
-		return "101"
-
-	case 200:
-		return "200"
-	case 201:
-		return "201"
-	case 202:
-		return "202"
-	case 203:
-		return "203"
-	case 204:
-		return "204"
-	case 205:
-		return "205"
-	case 206:
-		return "206"
-
-	case 300:
-		return "300"
-	case 301:
-		return "301"
-	case 302:
-		return "302"
-	case 304:
-		return "304"
-	case 305:
-		return "305"
-	case 307:
-		return "307"
-
-	case 400:
-		return "400"
-	case 401:
-		return "401"
-	case 402:
-		return "402"
-	case 403:
-		return "403"
-	case 404:
-		return "404"
-	case 405:
-		return "405"
-	case 406:
-		return "406"
-	case 407:
-		return "407"
-	case 408:
-		return "408"
-	case 409:
-		return "409"
-	case 410:
-		return "410"
-	case 411:
-		return "411"
-	case 412:
-		return "412"
-	case 413:
-		return "413"
-	case 414:
-		return "414"
-	case 415:
-		return "415"
-	case 416:
-		return "416"
-	case 417:
-		return "417"
-	case 418:
-		return "418"
-
-	case 500:
-		return "500"
-	case 501:
-		return "501"
-	case 502:
-		return "502"
-	case 503:
-		return "503"
-	case 504:
-		return "504"
-	case 505:
-		return "505"
-
-	case 428:
-		return "428"
-	case 429:
-		return "429"
-	case 431:
-		return "431"
-	case 511:
-		return "511"
-
-	default:
-		return strconv.Itoa(s)
-	}
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
-	a := header.Get(acceptEncodingHeader)
-	parts := strings.Split(a, ",")
-	for _, part := range parts {
-		part = strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return true
-		}
-	}
-	return false
-}
-
-// httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerErrer. Error contents is
-// supposed to be uncompressed plain text. However, same as with a plain
-// http.Error, any header settings will be void if the header has already been
-// sent. The error message will still be written to the writer, but it will
-// probably be of limited use.
-func httpError(rsp http.ResponseWriter, err error) {
-	rsp.Header().Del(contentEncodingHeader)
-	http.Error(
-		rsp,
-		"An error has occurred while serving metrics:\n\n"+err.Error(),
-		http.StatusInternalServerError,
-	)
-}

+ 4 - 57
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go

@@ -16,8 +16,6 @@ package prometheus
 import (
 	"errors"
 	"os"
-
-	"github.com/prometheus/procfs"
 )

 type processCollector struct {
@@ -59,20 +57,9 @@ type ProcessCollectorOpts struct {
 // collector for the current process with an empty namespace string and no error
 // reporting.
 //
-// Currently, the collector depends on a Linux-style proc filesystem and
-// therefore only exports metrics for Linux.
-//
-// Note: An older version of this function had the following signature:
-//
-//     NewProcessCollector(pid int, namespace string) Collector
-//
-// Most commonly, it was called as
-//
-//     NewProcessCollector(os.Getpid(), "")
-//
-// The following call of the current version is equivalent to the above:
-//
-//     NewProcessCollector(ProcessCollectorOpts{})
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
 func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	ns := ""
 	if len(opts.Namespace) > 0 {
@@ -126,7 +113,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	}

 	// Set up process metric collection if supported by the runtime.
-	if _, err := procfs.NewStat(); err == nil {
+	if canCollectProcess() {
 		c.collectFn = c.processCollect
 	} else {
 		c.collectFn = func(ch chan<- Metric) {
@@ -153,46 +140,6 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }

-func (c *processCollector) processCollect(ch chan<- Metric) {
-	pid, err := c.pidFn()
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
-
-	p, err := procfs.NewProc(pid)
-	if err != nil {
-		c.reportError(ch, nil, err)
-		return
-	}
-
-	if stat, err := p.NewStat(); err == nil {
-		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
-		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
-		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
-		if startTime, err := stat.StartTime(); err == nil {
-			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
-		} else {
-			c.reportError(ch, c.startTime, err)
-		}
-	} else {
-		c.reportError(ch, nil, err)
-	}
-
-	if fds, err := p.FileDescriptorsLen(); err == nil {
-		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
-	} else {
-		c.reportError(ch, c.openFDs, err)
-	}
-
-	if limits, err := p.NewLimits(); err == nil {
-		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
-		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
-	} else {
-		c.reportError(ch, nil, err)
-	}
-}
-
 func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
 	if !c.reportErrors {
 		return
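
A short sketch of constructing the collector with explicit options; the field names follow ProcessCollectorOpts as vendored here (PidFn, Namespace, ReportErrors), and "myapp" is a placeholder:

    package main

    import (
    	"os"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	coll := prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
    		PidFn:        func() (int, error) { return os.Getpid(), nil }, // what the default does anyway
    		Namespace:    "myapp", // placeholder metric prefix
    		ReportErrors: true,    // surface collection errors instead of dropping metrics
    	})
    	prometheus.MustRegister(coll)
    }
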

+ 65 - 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go

@@ -0,0 +1,65 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package prometheus
+
+import (
+	"github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+	_, err := procfs.NewDefaultFS()
+	return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	if stat, err := p.Stat(); err == nil {
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+		if startTime, err := stat.StartTime(); err == nil {
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if limits, err := p.Limits(); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}

+ 112 - 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go

@@ -0,0 +1,112 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+	return true
+}
+
+var (
+	modpsapi    = syscall.NewLazyDLL("psapi.dll")
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
+	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
+	_                          uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uint64
+	WorkingSetSize             uint64
+	QuotaPeakPagedPoolUsage    uint64
+	QuotaPagedPoolUsage        uint64
+	QuotaPeakNonPagedPoolUsage uint64
+	QuotaNonPagedPoolUsage     uint64
+	PagefileUsage              uint64
+	PeakPagefileUsage          uint64
+	PrivateUsage               uint64
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+	mem := processMemoryCounters{}
+	r1, _, err := procGetProcessMemoryInfo.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&mem)),
+		uintptr(unsafe.Sizeof(mem)),
+	)
+	if r1 != 1 {
+		return mem, err
+	} else {
+		return mem, nil
+	}
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+	var count uint32
+	r1, _, err := procGetProcessHandleCount.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&count)),
+	)
+	if r1 != 1 {
+		return 0, err
+	} else {
+		return count, nil
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	h, err := windows.GetCurrentProcess()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	var startTime, exitTime, kernelTime, userTime windows.Filetime
+	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+	mem, err := getProcessMemoryInfo(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+	handles, err := getProcessHandleCount(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
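
fileTimeToSeconds divides by 1e7 because a Windows FILETIME counts 100-nanosecond intervals; the high/low recombination can be sanity-checked by hand (the tick values are illustrative):

    package main

    import "fmt"

    func main() {
    	const ticksPerSecond = 1e7 // one tick = 100ns, so 1e7 ticks per second
    	var high, low uint32 = 2, 500000000
    	ticks := uint64(high)<<32 + uint64(low) // same recombination as fileTimeToSeconds
    	fmt.Printf("%.2fs\n", float64(ticks)/ticksPerSecond) // 908.99s
    }
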

+ 159 - 1
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go

@@ -38,7 +38,6 @@ type delegator interface {
 type responseWriterDelegator struct {
 	http.ResponseWriter

-	handler, method    string
 	status             int
 	written            int64
 	wroteHeader        bool
@@ -75,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator }
 type flusherDelegator struct{ *responseWriterDelegator }
 type hijackerDelegator struct{ *responseWriterDelegator }
 type readerFromDelegator struct{ *responseWriterDelegator }
+type pusherDelegator struct{ *responseWriterDelegator }

 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 func (d flusherDelegator) Flush() {
@@ -93,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	d.written += n
 	return n, err
 }
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}

 var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)

@@ -196,4 +201,157 @@ func init() {
 			http.CloseNotifier
 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
 	}
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, pusherDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, pusherDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, pusherDelegator{d}, readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
 }
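
The init() above fills pickDelegator so that newDelegator can compose a wrapper from a bitmask of detected optional interfaces. A sketch restating the bit values implied by the slot comments (slots 16 to 31 are the pusher combinations; the constant names mirror the ones used in this file):

    package main

    import "fmt"

    const (
    	closeNotifier = 1 << iota // 1
    	flusher                   // 2
    	hijacker                  // 4
    	readerFrom                // 8
    	pusher                    // 16
    )

    func main() {
    	// An http.ResponseWriter that is both a Flusher and a Pusher yields
    	// id = flusher + pusher = 18, matching the "// 18" slot registered above.
    	fmt.Println(flusher + pusher)
    }
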

+ 0 - 181
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go

@@ -1,181 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package promhttp
-
-import (
-	"io"
-	"net/http"
-)
-
-type pusherDelegator struct{ *responseWriterDelegator }
-
-func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
-	return d.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-func init() {
-	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
-		return pusherDelegator{d}
-	}
-	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Flusher
-		}{d, pusherDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-		}{d, pusherDelegator{d}, hijackerDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.Flusher
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-		}{d, pusherDelegator{d}, readerFromDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Flusher
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.Flusher
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-}
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
-	d := &responseWriterDelegator{
-		ResponseWriter:     w,
-		observeWriteHeader: observeWriteHeaderFunc,
-	}
-
-	id := 0
-	if _, ok := w.(http.CloseNotifier); ok {
-		id += closeNotifier
-	}
-	if _, ok := w.(http.Flusher); ok {
-		id += flusher
-	}
-	if _, ok := w.(http.Hijacker); ok {
-		id += hijacker
-	}
-	if _, ok := w.(io.ReaderFrom); ok {
-		id += readerFrom
-	}
-	if _, ok := w.(http.Pusher); ok {
-		id += pusher
-	}
-
-	return pickDelegator[id](d)
-}

+ 43 - 5
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go

@@ -47,7 +47,6 @@ import (
 
 
 const (
 	contentTypeHeader     = "Content-Type"
-	contentLengthHeader   = "Content-Length"
 	contentEncodingHeader = "Content-Encoding"
 	acceptEncodingHeader  = "Accept-Encoding"
 )
@@ -85,10 +84,32 @@ func Handler() http.Handler {
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	var inFlightSem chan struct{}
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
 	if opts.MaxRequestsInFlight > 0 {
 		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
 	}
+	if opts.Registry != nil {
+		// Initialize all possibilities that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}
 
 
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if inFlightSem != nil {
@@ -107,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
 			}
+			errCnt.WithLabelValues("gathering").Inc()
 			switch opts.ErrorHandling {
 			case PanicOnError:
 				panic(err)
@@ -147,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				if opts.ErrorLog != nil {
 					opts.ErrorLog.Println("error encoding and sending metric family:", err)
 				}
+				errCnt.WithLabelValues("encoding").Inc()
 				switch opts.ErrorHandling {
 				case PanicOnError:
 					panic(err)
@@ -237,9 +260,12 @@ const (
 	// Ignore errors and try to serve as many metrics as possible.  However,
 	// if no metrics can be served, serve an HTTP status code 500 and the
 	// last error message in the body. Only use this in deliberate "best
-	// effort" metrics collection scenarios. It is recommended to at least
-	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-	// errors completely.
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
 	ContinueOnError
 	// Panic upon the first error encountered (useful for "crash only" apps).
 	PanicOnError
@@ -262,6 +288,18 @@ type HandlerOpts struct {
 	// logged regardless of the configured ErrorHandling provided ErrorLog
 	// is not nil.
 	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
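
A minimal sketch of wiring up the new Registry field so the handler exports its own internal failures; the registry, route, and port are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Passing reg as both Gatherer and Registry exposes
	// promhttp_metric_handler_errors_total{cause="gathering"|"encoding"}
	// next to the application metrics.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		Registry:      reg,
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}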

+ 122 - 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go

@@ -14,7 +14,9 @@
 package promhttp
 
 import (
+	"crypto/tls"
 	"net/http"
+	"net/http/httptrace"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		return resp, err
 	})
 }
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
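
A short sketch of hooking one of the trace callbacks up to a histogram; the metric name and buckets are placeholders:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "dns_duration_seconds",
		Help:    "DNS resolution latency of outgoing requests.",
		Buckets: []float64{.001, .005, .025, .1},
	})
	prometheus.MustRegister(dnsLatency)

	// Hooks left nil in InstrumentTrace are simply skipped.
	trace := &promhttp.InstrumentTrace{DNSDone: dnsLatency.Observe}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close()
	}
}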

+ 0 - 144
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go

@@ -1,144 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package promhttp
-
-import (
-	"context"
-	"crypto/tls"
-	"net/http"
-	"net/http/httptrace"
-	"time"
-)
-
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the http request. A user
-// may choose to use separately buckets Histograms, or implement custom
-// instance labels on a per function basis.
-type InstrumentTrace struct {
-	GotConn              func(float64)
-	PutIdleConn          func(float64)
-	GotFirstResponseByte func(float64)
-	Got100Continue       func(float64)
-	DNSStart             func(float64)
-	DNSDone              func(float64)
-	ConnectStart         func(float64)
-	ConnectDone          func(float64)
-	TLSHandshakeStart    func(float64)
-	TLSHandshakeDone     func(float64)
-	WroteHeaders         func(float64)
-	Wait100Continue      func(float64)
-	WroteRequest         func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions are not using a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
-		start := time.Now()
-
-		trace := &httptrace.ClientTrace{
-			GotConn: func(_ httptrace.GotConnInfo) {
-				if it.GotConn != nil {
-					it.GotConn(time.Since(start).Seconds())
-				}
-			},
-			PutIdleConn: func(err error) {
-				if err != nil {
-					return
-				}
-				if it.PutIdleConn != nil {
-					it.PutIdleConn(time.Since(start).Seconds())
-				}
-			},
-			DNSStart: func(_ httptrace.DNSStartInfo) {
-				if it.DNSStart != nil {
-					it.DNSStart(time.Since(start).Seconds())
-				}
-			},
-			DNSDone: func(_ httptrace.DNSDoneInfo) {
-				if it.DNSDone != nil {
-					it.DNSDone(time.Since(start).Seconds())
-				}
-			},
-			ConnectStart: func(_, _ string) {
-				if it.ConnectStart != nil {
-					it.ConnectStart(time.Since(start).Seconds())
-				}
-			},
-			ConnectDone: func(_, _ string, err error) {
-				if err != nil {
-					return
-				}
-				if it.ConnectDone != nil {
-					it.ConnectDone(time.Since(start).Seconds())
-				}
-			},
-			GotFirstResponseByte: func() {
-				if it.GotFirstResponseByte != nil {
-					it.GotFirstResponseByte(time.Since(start).Seconds())
-				}
-			},
-			Got100Continue: func() {
-				if it.Got100Continue != nil {
-					it.Got100Continue(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeStart: func() {
-				if it.TLSHandshakeStart != nil {
-					it.TLSHandshakeStart(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
-				if err != nil {
-					return
-				}
-				if it.TLSHandshakeDone != nil {
-					it.TLSHandshakeDone(time.Since(start).Seconds())
-				}
-			},
-			WroteHeaders: func() {
-				if it.WroteHeaders != nil {
-					it.WroteHeaders(time.Since(start).Seconds())
-				}
-			},
-			Wait100Continue: func() {
-				if it.Wait100Continue != nil {
-					it.Wait100Continue(time.Since(start).Seconds())
-				}
-			},
-			WroteRequest: func(_ httptrace.WroteRequestInfo) {
-				if it.WroteRequest != nil {
-					it.WroteRequest(time.Since(start).Seconds())
-				}
-			},
-		}
-		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
-
-		return next.RoundTrip(r)
-	})
-}

+ 12 - 4
vendor/github.com/prometheus/client_golang/prometheus/registry.go

@@ -325,9 +325,17 @@ func (r *Registry) Register(c Collector) error {
 		return nil
 	}
 	if existing, exists := r.collectorsByID[collectorID]; exists {
-		return AlreadyRegisteredError{
-			ExistingCollector: existing,
-			NewCollector:      c,
+		switch e := existing.(type) {
+		case *wrappingCollector:
+			return AlreadyRegisteredError{
+				ExistingCollector: e.unwrapRecursively(),
+				NewCollector:      c,
+			}
+		default:
+			return AlreadyRegisteredError{
+				ExistingCollector: e,
+				NewCollector:      c,
+			}
 		}
 	}
 	// If the collectorID is new, but at least one of the descs existed
@@ -680,7 +688,7 @@ func processMetric(
 // Gatherers is a slice of Gatherer instances that implements the Gatherer
 // interface itself. Its Gather method calls Gather on all Gatherers in the
 // slice in order and returns the merged results. Errors returned from the
-// Gather calles are all returned in a flattened MultiError. Duplicate and
+// Gather calls are all returned in a flattened MultiError. Duplicate and
 // inconsistent Metrics are skipped (first occurrence in slice order wins) and
 // reported in the returned error.
 //
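
The unwrapRecursively change above matters for the common re-registration idiom, sketched here with placeholder names:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total",
		Help: "Handled requests.",
	})
	if err := reg.Register(requests); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// ExistingCollector is now the originally registered collector
			// even if it was added through a wrapping Registerer.
			requests = are.ExistingCollector.(prometheus.Counter)
		} else {
			panic(err)
		}
	}
	requests.Inc()
}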

+ 131 - 21
vendor/github.com/prometheus/client_golang/prometheus/summary.go

@@ -16,8 +16,10 @@ package prometheus
 import (
 	"fmt"
 	"math"
+	"runtime"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/beorn7/perks/quantile"
@@ -37,7 +39,7 @@ const quantileLabel = "quantile"
 // A typical use-case is the observation of request latencies. By default, a
 // Summary provides the median, the 90th and the 99th percentile of the latency
 // as rank estimations. However, the default behavior will change in the
-// upcoming v0.10 of the library. There will be no rank estimations at all by
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
 // default. For a sane transition, it is recommended to set the desired rank
 // estimations explicitly.
 //
@@ -56,16 +58,8 @@ type Summary interface {
 	Observe(float64)
 }
 
-// DefObjectives are the default Summary quantile values.
-//
-// Deprecated: DefObjectives will not be used as the default objectives in
-// v0.10 of the library. The default Summary will have no quantiles then.
-var (
-	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
-
-	errQuantileLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in summaries", quantileLabel,
-	)
+var errQuantileLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in summaries", quantileLabel,
 )
 
 // Default values for SummaryOpts.
@@ -84,7 +78,7 @@ const (
 // mandatory to set Name to a non-empty string. While all other fields are
 // optional and can safely be left at their zero value, it is recommended to set
 // a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v0.10 of the library.
+// as the default value will change in the upcoming v1.0.0 of the library.
 type SummaryOpts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Summary (created by joining these components with
@@ -121,13 +115,8 @@ type SummaryOpts struct {
 	// Objectives defines the quantile rank estimates with their respective
 	// absolute error. If Objectives[q] = e, then the value reported for q
 	// will be the φ-quantile value for some φ between q-e and q+e.  The
-	// default value is DefObjectives. It is used if Objectives is left at
-	// its zero value (i.e. nil). To create a Summary without Objectives,
-	// set it to an empty map (i.e. map[float64]float64{}).
-	//
-	// Deprecated: Note that the current value of DefObjectives is
-	// deprecated. It will be replaced by an empty map in v0.10 of the
-	// library. Please explicitly set Objectives to the desired value.
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
 	Objectives map[float64]float64
 
 	// MaxAge defines the duration for which an observation stays relevant
@@ -151,7 +140,7 @@ type SummaryOpts struct {
 	BufCap uint32
 }
 
-// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// Problem with the sliding-window decay algorithm... The Merge method of
 // perk/quantile is actually not working as advertised - and it might be
 // unfixable, as the underlying algorithm is apparently not capable of merging
 // summaries in the first place. To avoid using Merge, we are currently adding
@@ -196,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	}
 
 	if opts.Objectives == nil {
-		opts.Objectives = DefObjectives
+		opts.Objectives = map[float64]float64{}
 	}
 
 	if opts.MaxAge < 0 {
@@ -214,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 		opts.BufCap = DefBufCap
 	}
 
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: makeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+		}
+		s.init(s) // Init self-collection.
+		return s
+	}
+
 	s := &summary{
 		desc: desc,
 
@@ -382,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
 	}
 }
 
+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	return nil
+}
+
 type quantSort []*dto.Quantile
 
 func (s quantSort) Len() int {
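
Given the default change, a sketch of creating a Summary with explicit quantiles (names and values are placeholders):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
		// A nil or empty Objectives map now selects the lock-free
		// noObjectivesSummary above, which reports only count and sum.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042)
}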

+ 21 - 0
vendor/github.com/prometheus/client_golang/prometheus/wrap.go

@@ -32,6 +32,12 @@ import (
 // WrapRegistererWith provides a way to add fixed labels to a subset of
 // Collectors. It should not be used to add fixed labels to all metrics exposed.
 //
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
 // The Collector example demonstrates a use of WrapRegistererWith.
 func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 	return &wrappingRegisterer{
@@ -54,6 +60,12 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
 // fact, those metrics are already prefixed with “go_” or “process_”,
 // respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
 func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
 	return &wrappingRegisterer{
 		wrappedRegisterer: reg,
@@ -123,6 +135,15 @@ func (c *wrappingCollector) Describe(ch chan<- *Desc) {
 	}
 }
 
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}
+
 type wrappingMetric struct {
 	wrappedMetric Metric
 	prefix        string
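
A quick sketch of the wrapping Registerer whose conflict behavior the new doc comments describe (label and metric names are placeholders):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	// Everything registered through wrapped carries the fixed label.
	wrapped := prometheus.WrapRegistererWith(prometheus.Labels{"worker": "42"}, reg)
	wrapped.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "items_processed_total",
		Help: "Items processed by this worker.",
	}))
}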

+ 0 - 201
vendor/github.com/prometheus/client_model/ruby/LICENSE

@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.

+ 8 - 10
vendor/github.com/prometheus/common/expfmt/text_create.go

@@ -14,9 +14,10 @@
 package expfmt
 
 import (
-	"bytes"
+	"bufio"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"math"
 	"strconv"
 	"strings"
@@ -27,7 +28,7 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
-// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
 // implements it.
 type enhancedWriter interface {
 	io.Writer
@@ -37,14 +38,13 @@ type enhancedWriter interface {
 }
 
 const (
-	initialBufSize    = 512
 	initialNumBufSize = 24
 )
 
 var (
 	bufPool = sync.Pool{
 		New: func() interface{} {
-			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+			return bufio.NewWriter(ioutil.Discard)
 		},
 	}
 	numBufPool = sync.Pool{
@@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
 	}
 
 	// Try the interface upgrade. If it doesn't work, we'll use a
-	// bytes.Buffer from the sync.Pool and write out its content to out in a
-	// single go in the end.
+	// bufio.Writer from the sync.Pool.
 	w, ok := out.(enhancedWriter)
 	if !ok {
-		b := bufPool.Get().(*bytes.Buffer)
-		b.Reset()
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
 		w = b
 		defer func() {
-			bWritten, bErr := out.Write(b.Bytes())
-			written = bWritten
+			bErr := b.Flush()
 			if err == nil {
 				err = bErr
 			}
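
A sketch of the code path this change affects: a writer without the extra methods now gets wrapped in a pooled bufio.Writer that is flushed at the end:

package main

import (
	"log"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		// os.Stdout does not implement enhancedWriter, so it takes the
		// pooled bufio.Writer path introduced above.
		if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
			log.Fatal(err)
		}
	}
}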

+ 10 - 3
vendor/github.com/prometheus/common/expfmt/text_parse.go

@@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn {
 	// - Other labels have to be added to currentLabels for signature calculation.
 	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
 		if p.currentLabelPair.GetName() == model.QuantileLabel {
-			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 				// Create a more helpful error message.
 				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
 				return nil
@@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn {
 	// Similar special treatment of histograms.
 	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
 		if p.currentLabelPair.GetName() == model.BucketLabel {
-			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 				// Create a more helpful error message.
 				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
 				return nil
@@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn {
 	if p.readTokenUntilWhitespace(); p.err != nil {
 		return nil // Unexpected end of input.
 	}
-	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+	value, err := parseFloat(p.currentToken.String())
 	if err != nil {
 		// Create a more helpful error message.
 		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
@@ -755,3 +755,10 @@ func histogramMetricName(name string) string {
 		return name
 	}
 }
+
+func parseFloat(s string) (float64, error) {
+	if strings.ContainsAny(s, "pP_") {
+		return 0, fmt.Errorf("unsupported character in float")
+	}
+	return strconv.ParseFloat(s, 64)
+}
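
The guard exists because strconv.ParseFloat on Go 1.13+ also accepts underscores and hexadecimal floats, which are not valid in the text exposition format; a standalone sketch of the same check:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseFloat(s string) (float64, error) {
	// Reject Go-only literal forms such as "1_000" and "0x1p-3".
	if strings.ContainsAny(s, "pP_") {
		return 0, fmt.Errorf("unsupported character in float")
	}
	return strconv.ParseFloat(s, 64)
}

func main() {
	fmt.Println(parseFloat("3.14"))   // 3.14 <nil>
	fmt.Println(parseFloat("1_000"))  // 0 unsupported character in float
	fmt.Println(parseFloat("0x1p-3")) // 0 unsupported character in float
}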

+ 7 - 1
vendor/github.com/prometheus/common/model/time.go

@@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 			return err
 		}
 
-		*t = Time(v + va)
+		// If the value was something like -0.1, the negative sign is lost
+		// during parsing because of the leading zero; this ensures that we
+		// capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
 
 
 	default:
 		return fmt.Errorf("invalid time %q", string(b))
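
A quick sketch of the case being fixed, assuming model.Time's millisecond resolution:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var t model.Time
	if err := t.UnmarshalJSON([]byte("-0.5")); err != nil {
		panic(err)
	}
	// The integer part "-0" parses to 0, so without the sign check the
	// fractional 500ms would come out positive; with it, t is -500ms.
	fmt.Println(t) // -0.5
}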

+ 85 - 0
vendor/github.com/prometheus/procfs/arp.go

@@ -0,0 +1,85 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+)
+
+// ARPEntry contains a single row of the columnar data represented in
+// /proc/net/arp.
+type ARPEntry struct {
+	// IP address
+	IPAddr net.IP
+	// MAC address
+	HWAddr net.HardwareAddr
+	// Name of the device
+	Device string
+}
+
+// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
+// and then returns a slice of ARPEntry structs.
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
+	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+	if err != nil {
+		return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
+	}
+
+	return parseARPEntries(data)
+}
+
+func parseARPEntries(data []byte) ([]ARPEntry, error) {
+	lines := strings.Split(string(data), "\n")
+	entries := make([]ARPEntry, 0)
+	var err error
+	const (
+		expectedDataWidth   = 6
+		expectedHeaderWidth = 9
+	)
+	for _, line := range lines {
+		columns := strings.Fields(line)
+		width := len(columns)
+
+		if width == expectedHeaderWidth || width == 0 {
+			continue
+		} else if width == expectedDataWidth {
+			entry, err := parseARPEntry(columns)
+			if err != nil {
+				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
+			}
+			entries = append(entries, entry)
+		} else {
+			return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+		}
+
+	}
+
+	return entries, err
+}
+
+func parseARPEntry(columns []string) (ARPEntry, error) {
+	ip := net.ParseIP(columns[0])
+	mac := net.HardwareAddr(columns[3])
+
+	entry := ARPEntry{
+		IPAddr: ip,
+		HWAddr: mac,
+		Device: columns[5],
+	}
+
+	return entry, nil
+}
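
A usage sketch for the new reader (output formatting is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s %s %s\n", e.IPAddr, e.HWAddr, e.Device)
	}
}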

+ 3 - 13
vendor/github.com/prometheus/procfs/buddyinfo.go

@@ -31,19 +31,9 @@ type BuddyInfo struct {
 	Sizes []float64
 }
 
-// NewBuddyInfo reads the buddyinfo statistics.
-func NewBuddyInfo() ([]BuddyInfo, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewBuddyInfo()
-}
-
-// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
-	file, err := os.Open(fs.Path("buddyinfo"))
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
 	if err != nil {
 		return nil, err
 	}
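
The constructor moves from a package-level function to a method on FS; a sketch of the updated call site:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	buddy, err := fs.BuddyInfo() // replaces the removed NewBuddyInfo
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d buddyinfo rows\n", len(buddy))
}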

+ 167 - 0
vendor/github.com/prometheus/procfs/cpuinfo.go

@@ -0,0 +1,167 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+type CPUInfo struct {
+	Processor       uint
+	VendorID        string
+	CPUFamily       string
+	Model           string
+	ModelName       string
+	Stepping        string
+	Microcode       string
+	CPUMHz          float64
+	CacheSize       string
+	PhysicalID      string
+	Siblings        uint
+	CoreID          string
+	CPUCores        uint
+	APICID          string
+	InitialAPICID   string
+	FPU             string
+	FPUException    string
+	CPUIDLevel      uint
+	WP              string
+	Flags           []string
+	Bugs            []string
+	BogoMips        float64
+	CLFlushSize     uint
+	CacheAlignment  uint
+	AddressSizes    string
+	PowerManagement string
+}
+
+// CPUInfo returns information about current system CPUs.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) CPUInfo() ([]CPUInfo, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCPUInfo(data)
+}
+
+// parseCPUInfo parses data from /proc/cpuinfo
+func parseCPUInfo(info []byte) ([]CPUInfo, error) {
+	cpuinfo := []CPUInfo{}
+	i := -1
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.TrimSpace(line) == "" {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "vendor_id":
+			cpuinfo[i].VendorID = field[1]
+		case "cpu family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "model":
+			cpuinfo[i].Model = field[1]
+		case "model name":
+			cpuinfo[i].ModelName = field[1]
+		case "stepping":
+			cpuinfo[i].Stepping = field[1]
+		case "microcode":
+			cpuinfo[i].Microcode = field[1]
+		case "cpu MHz":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		case "cache size":
+			cpuinfo[i].CacheSize = field[1]
+		case "physical id":
+			cpuinfo[i].PhysicalID = field[1]
+		case "siblings":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Siblings = uint(v)
+		case "core id":
+			cpuinfo[i].CoreID = field[1]
+		case "cpu cores":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUCores = uint(v)
+		case "apicid":
+			cpuinfo[i].APICID = field[1]
+		case "initial apicid":
+			cpuinfo[i].InitialAPICID = field[1]
+		case "fpu":
+			cpuinfo[i].FPU = field[1]
+		case "fpu_exception":
+			cpuinfo[i].FPUException = field[1]
+		case "cpuid level":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUIDLevel = uint(v)
+		case "wp":
+			cpuinfo[i].WP = field[1]
+		case "flags":
+			cpuinfo[i].Flags = strings.Fields(field[1])
+		case "bugs":
+			cpuinfo[i].Bugs = strings.Fields(field[1])
+		case "bogomips":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		case "clflush size":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CLFlushSize = uint(v)
+		case "cache_alignment":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CacheAlignment = uint(v)
+		case "address sizes":
+			cpuinfo[i].AddressSizes = field[1]
+		case "power management":
+			cpuinfo[i].PowerManagement = field[1]
+		}
+	}
+	return cpuinfo, nil
+}

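A short usage sketch for the new CPUInfo accessor (NewDefaultFS comes from the fs.go change further down; the output formatting is illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		fmt.Println(err)
		return
	}
	// CPUInfo yields one entry per "processor" stanza in /proc/cpuinfo.
	cpus, err := fs.CPUInfo()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s @ %.2f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
	}
}
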
+ 131 - 0
vendor/github.com/prometheus/procfs/crypto.go

@@ -0,0 +1,131 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Crypto holds info parsed from /proc/crypto.
+type Crypto struct {
+	Alignmask   *uint64
+	Async       bool
+	Blocksize   *uint64
+	Chunksize   *uint64
+	Ctxsize     *uint64
+	Digestsize  *uint64
+	Driver      string
+	Geniv       string
+	Internal    string
+	Ivsize      *uint64
+	Maxauthsize *uint64
+	MaxKeysize  *uint64
+	MinKeysize  *uint64
+	Module      string
+	Name        string
+	Priority    *int64
+	Refcnt      *int64
+	Seedsize    *uint64
+	Selftest    string
+	Type        string
+	Walksize    *uint64
+}
+
+// Crypto parses a crypto file (/proc/crypto) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
+func (fs FS) Crypto() ([]Crypto, error) {
+	data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+	}
+	crypto, err := parseCrypto(data)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
+	}
+	return crypto, nil
+}
+
+func parseCrypto(cryptoData []byte) ([]Crypto, error) {
+	crypto := []Crypto{}
+
+	cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
+
+	for _, block := range cryptoBlocks {
+		var newCryptoElem Crypto
+
+		lines := strings.Split(string(block), "\n")
+		for _, line := range lines {
+			if strings.TrimSpace(line) == "" || line[0] == ' ' {
+				continue
+			}
+			fields := strings.Split(line, ":")
+			key := strings.TrimSpace(fields[0])
+			value := strings.TrimSpace(fields[1])
+			vp := util.NewValueParser(value)
+
+			switch strings.TrimSpace(key) {
+			case "async":
+				b, err := strconv.ParseBool(value)
+				if err == nil {
+					newCryptoElem.Async = b
+				}
+			case "blocksize":
+				newCryptoElem.Blocksize = vp.PUInt64()
+			case "chunksize":
+				newCryptoElem.Chunksize = vp.PUInt64()
+			case "digestsize":
+				newCryptoElem.Digestsize = vp.PUInt64()
+			case "driver":
+				newCryptoElem.Driver = value
+			case "geniv":
+				newCryptoElem.Geniv = value
+			case "internal":
+				newCryptoElem.Internal = value
+			case "ivsize":
+				newCryptoElem.Ivsize = vp.PUInt64()
+			case "maxauthsize":
+				newCryptoElem.Maxauthsize = vp.PUInt64()
+			case "max keysize":
+				newCryptoElem.MaxKeysize = vp.PUInt64()
+			case "min keysize":
+				newCryptoElem.MinKeysize = vp.PUInt64()
+			case "module":
+				newCryptoElem.Module = value
+			case "name":
+				newCryptoElem.Name = value
+			case "priority":
+				newCryptoElem.Priority = vp.PInt64()
+			case "refcnt":
+				newCryptoElem.Refcnt = vp.PInt64()
+			case "seedsize":
+				newCryptoElem.Seedsize = vp.PUInt64()
+			case "selftest":
+				newCryptoElem.Selftest = value
+			case "type":
+				newCryptoElem.Type = value
+			case "walksize":
+				newCryptoElem.Walksize = vp.PUInt64()
+			}
+		}
+		crypto = append(crypto, newCryptoElem)
+	}
+	return crypto, nil
+}

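Because absent keys leave the numeric pointer fields nil, callers need a nil check before dereferencing. A sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		fmt.Println(err)
		return
	}
	entries, err := fs.Crypto()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, c := range entries {
		// Numeric fields such as Blocksize stay nil when the key is
		// missing from a /proc/crypto block.
		if c.Blocksize != nil {
			fmt.Printf("%s (%s): blocksize=%d\n", c.Name, c.Driver, *c.Blocksize)
		}
	}
}
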
+ 17 - 20
vendor/github.com/prometheus/procfs/fs.go

@@ -14,33 +14,30 @@
 package procfs
 
 import (
-	"fmt"
-	"os"
-	"path"
+	"github.com/prometheus/procfs/internal/fs"
 )
 
 // FS represents the pseudo-filesystem proc, which provides an interface to
 // kernel data structures.
-type FS string
+type FS struct {
+	proc fs.FS
+}
 
 // DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = "/proc"
+const DefaultMountPoint = fs.DefaultProcMountPoint
+
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
 
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
 func NewFS(mountPoint string) (FS, error) {
-	info, err := os.Stat(mountPoint)
+	fs, err := fs.NewFS(mountPoint)
 	if err != nil {
-		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
-	}
-	if !info.IsDir() {
-		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+		return FS{}, err
 	}
-
-	return FS(mountPoint), nil
-}
-
-// Path returns the path of the given subsystem relative to the procfs root.
-func (fs FS) Path(p ...string) string {
-	return path.Join(append([]string{string(fs)}, p...)...)
+	return FS{fs}, nil
 }

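This is the breaking change of the bump: FS is now a struct wrapping internal/fs.FS, the exported Path helper is gone, and the package-level New* readers become methods. A migration sketch under those assumptions:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS still accepts an explicit mount point, which is handy for
	// tests that point at a fixture directory instead of /proc.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err) // mount point missing or not a directory
		return
	}
	// Accessor methods hang off the value; Path() is no longer exported.
	cpus, err := fs.CPUInfo()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("processors:", len(cpus))
}
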
+ 55 - 0
vendor/github.com/prometheus/procfs/internal/fs/fs.go

@@ -0,0 +1,55 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+
+	// DefaultConfigfsMountPoint is the common mount point of the configfs filesystem.
+	DefaultConfigfsMountPoint = "/sys/kernel/config"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+	return filepath.Join(append([]string{string(fs)}, p...)...)
+}

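A sketch of how the parent package consumes this helper; internal packages are importable only from within the procfs module, so the snippet is written as if it lived there (examplePath is a hypothetical name):

package procfs

import "github.com/prometheus/procfs/internal/fs"

// examplePath is a hypothetical helper showing how the wrapper joins
// elements under its mount point: with the default proc root this
// returns "/proc/net/ip_vs".
func examplePath() (string, error) {
	root, err := fs.NewFS(fs.DefaultProcMountPoint)
	if err != nil {
		return "", err
	}
	return root.Path("net", "ip_vs"), nil
}
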
+ 88 - 0
vendor/github.com/prometheus/procfs/internal/util/parse.go

@@ -0,0 +1,88 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+	us := make([]*int64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+	var truth bool
+	switch b {
+	case "enabled":
+		truth = true
+	case "disabled":
+		truth = false
+	default:
+		return nil
+	}
+	return &truth
+}

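A small sketch of these helpers as they might be exercised inside the module (exampleParse is a hypothetical name); note that ParseBool deliberately returns a nil pointer, not false, for unrecognized input:

package util

import "fmt"

func exampleParse() error {
	// ParseUint64s converts a whole slice of decimal strings at once.
	vals, err := ParseUint64s([]string{"1", "2", "42"})
	if err != nil {
		return err
	}
	fmt.Println(vals) // [1 2 42]

	// "enabled"/"disabled" map to true/false; anything else maps to nil.
	if b := ParseBool("enabled"); b != nil && *b {
		fmt.Println("feature is on")
	}
	if ParseBool("unknown") == nil {
		fmt.Println("unrecognized input yields nil, not false")
	}
	return nil
}
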
+ 38 - 0
vendor/github.com/prometheus/procfs/internal/util/readfile.go

@@ -0,0 +1,38 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+// ReadFileNoStat uses ioutil.ReadAll to read the contents of an entire file.
+// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
+// Reads a max file size of 512kB.  For files larger than this, a scanner
+// should be used.
+func ReadFileNoStat(filename string) ([]byte, error) {
+	const maxBufferSize = 1024 * 512
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	reader := io.LimitReader(f, maxBufferSize)
+	return ioutil.ReadAll(reader)
+}

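The helper skips the os.Stat sizing step that ioutil.ReadFile performs, since /proc and /sys files report bogus sizes (0 or 4096), and caps reads at 512kB. A sketch from inside the module (exampleRead is a hypothetical name):

package util

import "fmt"

func exampleRead() {
	// /proc/cpuinfo stats as 0 bytes yet reads back real content;
	// ReadFileNoStat ignores the reported size and reads up to 512kB.
	data, err := ReadFileNoStat("/proc/cpuinfo")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d bytes from a file whose stat size is 0\n", len(data))
}
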
+ 48 - 0
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go

@@ -0,0 +1,48 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux,!appengine
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+//
+// Note that this function will not read files larger than 128 bytes.
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN.  This causes
+	// Go's ioutil.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	const sysFileBufferSize = 128
+	b := make([]byte, sysFileBufferSize)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}

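A sketch of the sysfs use case this is built for; the hwmon path is illustrative and exampleSysRead is a hypothetical name:

package util

import "fmt"

func exampleSysRead() {
	// A single syscall.Read either returns data or fails fast with
	// EAGAIN on broken hwmon drivers, instead of polling forever.
	v, err := SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("raw value:", v) // e.g. "42000", millidegrees Celsius
}
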
+ 26 - 0
vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go

@@ -0,0 +1,26 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
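+// In +build syntax a comma means AND and a space means OR, so this
+// constraint selects (linux AND appengine) OR !linux, the exact
+// complement of sysreadfile.go's linux,!appengine tag.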
+// +build linux,appengine !linux
+
+package util
+
+import (
+	"fmt"
+)
+
+// SysReadFile is implemented here as a no-op for builds that do not support
+// the read syscall, for example Windows, or Linux on Google App Engine.
+func SysReadFile(file string) (string, error) {
+	return "", fmt.Errorf("not supported on this platform")
+}

+ 91 - 0
vendor/github.com/prometheus/procfs/internal/util/valueparser.go

@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+	v   string
+	err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+	return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	v := vp.int64()
+	return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+	if vp.err != nil {
+		return 0
+	}
+
+	// A base value of zero makes ParseInt infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseInt(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return 0
+	}
+
+	return v
+}
+
+// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	// A base value of zero makes ParseUint infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseUint(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return nil
+	}
+
+	return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+	return vp.err
+}

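The design latches the first parse error: later calls return zero values or nil pointers, and Err is checked once at the end. A sketch from inside the module (exampleValueParser is a hypothetical name):

package util

import "fmt"

func exampleValueParser() error {
	vp := NewValueParser("0x10")
	n := vp.Int()     // base 0 lets the 0x prefix select hex: 16
	p := vp.PUInt64() // pointer to 16, or nil after an earlier error
	if err := vp.Err(); err != nil {
		return err
	}
	fmt.Println(n, *p) // 16 16
	return nil
}
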
+ 12 - 30
vendor/github.com/prometheus/procfs/ipvs.go

@@ -15,6 +15,7 @@ package procfs
 
 import (
 	"bufio"
+	"bytes"
 	"encoding/hex"
 	"errors"
 	"fmt"
@@ -24,6 +25,8 @@ import (
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
 )
 
 // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
@@ -62,29 +65,18 @@ type IPVSBackendStatus struct {
 	Weight uint64
 }
 
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
-	fs, err := NewFS(DefaultMountPoint)
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
 	if err != nil {
 		return IPVSStats{}, err
 	}
 
-	return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
-	file, err := os.Open(fs.Path("net/ip_vs_stats"))
-	if err != nil {
-		return IPVSStats{}, err
-	}
-	defer file.Close()
-
-	return parseIPVSStats(file)
+	return parseIPVSStats(bytes.NewReader(data))
 }
 
 // parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
 	var (
 		statContent []byte
 		statLines   []string
@@ -92,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
 		stats       IPVSStats
 	)
 
-	statContent, err := ioutil.ReadAll(file)
+	statContent, err := ioutil.ReadAll(r)
 	if err != nil {
 		return IPVSStats{}, err
 	}
@@ -131,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
 	return stats, nil
 }
 
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return []IPVSBackendStatus{}, err
-	}
-
-	return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	file, err := os.Open(fs.Path("net/ip_vs"))
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs"))
 	if err != nil {
 		return nil, err
 	}

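The renames drop the New prefix along with the package-level convenience constructors; callers now go through an FS value. A usage sketch (the error text is illustrative; the read fails on hosts without the ip_vs module):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		fmt.Println(err)
		return
	}
	stats, err := fs.IPVSStats() // formerly fs.NewIPVSStats()
	if err != nil {
		fmt.Println(err) // e.g. open /proc/net/ip_vs_stats: no such file or directory
		return
	}
	fmt.Printf("%+v\n", stats)

	backends, err := fs.IPVSBackendStatus() // formerly fs.NewIPVSBackendStatus()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("backend pairs:", len(backends))
}
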
Some files were not shown because too many files changed in this diff