
Try using dep tool, update dependencies

Ask Bjørn Hansen committed 8 years ago
parent commit 0e90ca880a
100 changed files with 17108 additions and 969 deletions
  1. Godeps/Godeps.json (+0, -88)
  2. Gopkg.lock (+126, -0)
  3. Gopkg.toml (+118, -0)
  4. vendor/github.com/abh/geoip/.gitignore (+0, -26)
  5. vendor/github.com/abh/geoip/.travis.yml (+0, -18)
  6. vendor/github.com/abh/geoip/README.md (+0, -37)
  7. vendor/github.com/abh/geoip/const.go (+0, -45)
  8. vendor/github.com/abh/geoip/db/.gitignore (+0, -1)
  9. vendor/github.com/abh/geoip/db/download (+0, -29)
  10. vendor/github.com/abh/geoip/ex/geoip-demo.go (+0, -65)
  11. vendor/github.com/abh/geoip/geoip.go (+0, -340)
  12. vendor/github.com/abh/geoip/geoip_test.go (+0, -124)
  13. vendor/github.com/abh/geoip/test-db/GeoIP.dat (BIN)
  14. vendor/github.com/abh/geoip/test-db/GeoIPCity.dat (BIN)
  15. vendor/github.com/abh/geoip/test-db/GeoIPRegion.dat (BIN)
  16. vendor/github.com/davecgh/go-spew/.gitignore (+0, -0)
  17. vendor/github.com/davecgh/go-spew/.travis.yml (+14, -0)
  18. vendor/github.com/davecgh/go-spew/LICENSE (+1, -1)
  19. vendor/github.com/davecgh/go-spew/README.md (+205, -0)
  20. vendor/github.com/davecgh/go-spew/cov_report.sh (+22, -0)
  21. vendor/github.com/davecgh/go-spew/spew/bypass.go (+1, -1)
  22. vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (+1, -1)
  23. vendor/github.com/davecgh/go-spew/spew/common.go (+1, -1)
  24. vendor/github.com/davecgh/go-spew/spew/common_test.go (+298, -0)
  25. vendor/github.com/davecgh/go-spew/spew/config.go (+1, -1)
  26. vendor/github.com/davecgh/go-spew/spew/doc.go (+10, -1)
  27. vendor/github.com/davecgh/go-spew/spew/dump.go (+1, -1)
  28. vendor/github.com/davecgh/go-spew/spew/dump_test.go (+1042, -0)
  29. vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go (+99, -0)
  30. vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go (+26, -0)
  31. vendor/github.com/davecgh/go-spew/spew/example_test.go (+226, -0)
  32. vendor/github.com/davecgh/go-spew/spew/format.go (+1, -1)
  33. vendor/github.com/davecgh/go-spew/spew/format_test.go (+1558, -0)
  34. vendor/github.com/davecgh/go-spew/spew/internal_test.go (+87, -0)
  35. vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go (+102, -0)
  36. vendor/github.com/davecgh/go-spew/spew/spew.go (+1, -1)
  37. vendor/github.com/davecgh/go-spew/spew/spew_test.go (+320, -0)
  38. vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go (+82, -0)
  39. vendor/github.com/davecgh/go-spew/test_coverage.txt (+61, -0)
  40. vendor/github.com/golang/geo/README.md (+151, -0)
  41. vendor/github.com/golang/geo/r1/interval.go (+1, -1)
  42. vendor/github.com/golang/geo/r1/interval_test.go (+349, -0)
  43. vendor/github.com/golang/geo/r2/rect_test.go (+476, -0)
  44. vendor/github.com/golang/geo/r3/precisevector_test.go (+477, -0)
  45. vendor/github.com/golang/geo/r3/vector_test.go (+339, -0)
  46. vendor/github.com/golang/geo/s1/angle_test.go (+169, -0)
  47. vendor/github.com/golang/geo/s1/chordangle.go (+9, -3)
  48. vendor/github.com/golang/geo/s1/chordangle_test.go (+226, -0)
  49. vendor/github.com/golang/geo/s1/interval_test.go (+457, -0)
  50. vendor/github.com/golang/geo/s2/cap.go (+142, -80)
  51. vendor/github.com/golang/geo/s2/cap_test.go (+718, -0)
  52. vendor/github.com/golang/geo/s2/cell_test.go (+522, -0)
  53. vendor/github.com/golang/geo/s2/cellid.go (+168, -8)
  54. vendor/github.com/golang/geo/s2/cellid_test.go (+1052, -0)
  55. vendor/github.com/golang/geo/s2/cellunion_test.go (+723, -0)
  56. vendor/github.com/golang/geo/s2/edgeutil.go (+2, -2)
  57. vendor/github.com/golang/geo/s2/edgeutil_test.go (+1201, -0)
  58. vendor/github.com/golang/geo/s2/latlng.go (+2, -1)
  59. vendor/github.com/golang/geo/s2/latlng_test.go (+155, -0)
  60. vendor/github.com/golang/geo/s2/loop.go (+32, -12)
  61. vendor/github.com/golang/geo/s2/loop_test.go (+533, -0)
  62. vendor/github.com/golang/geo/s2/matrix3x3.go (+9, -7)
  63. vendor/github.com/golang/geo/s2/matrix3x3_test.go (+494, -0)
  64. vendor/github.com/golang/geo/s2/metric_test.go (+109, -0)
  65. vendor/github.com/golang/geo/s2/paddedcell_test.go (+197, -0)
  66. vendor/github.com/golang/geo/s2/point.go (+26, -6)
  67. vendor/github.com/golang/geo/s2/point_test.go (+384, -0)
  68. vendor/github.com/golang/geo/s2/polygon.go (+140, -5)
  69. vendor/github.com/golang/geo/s2/polygon_test.go (+342, -0)
  70. vendor/github.com/golang/geo/s2/polyline.go (+53, -33)
  71. vendor/github.com/golang/geo/s2/polyline_test.go (+144, -0)
  72. vendor/github.com/golang/geo/s2/predicates_test.go (+314, -0)
  73. vendor/github.com/golang/geo/s2/rect.go (+4, -3)
  74. vendor/github.com/golang/geo/s2/rect_test.go (+862, -0)
  75. vendor/github.com/golang/geo/s2/region.go (+2, -1)
  76. vendor/github.com/golang/geo/s2/regioncoverer.go (+5, -3)
  77. vendor/github.com/golang/geo/s2/regioncoverer_test.go (+151, -0)
  78. vendor/github.com/golang/geo/s2/s2_test.go (+414, -0)
  79. vendor/github.com/golang/geo/s2/s2_test_test.go (+196, -0)
  80. vendor/github.com/golang/geo/s2/shapeindex.go (+66, -17)
  81. vendor/github.com/golang/geo/s2/shapeindex_test.go (+84, -0)
  82. vendor/github.com/golang/geo/s2/stuv_test.go (+321, -0)
  83. vendor/github.com/hpcloud/tail/.gitignore (+3, -0)
  84. vendor/github.com/hpcloud/tail/.travis.yml (+18, -0)
  85. vendor/github.com/hpcloud/tail/CHANGES.md (+63, -0)
  86. vendor/github.com/hpcloud/tail/Dockerfile (+19, -0)
  87. vendor/github.com/hpcloud/tail/Godeps/Godeps.json (+15, -0)
  88. vendor/github.com/hpcloud/tail/Godeps/Readme (+0, -0)
  89. vendor/github.com/hpcloud/tail/LICENSE.txt (+3, -5)
  90. vendor/github.com/hpcloud/tail/Makefile (+11, -0)
  91. vendor/github.com/hpcloud/tail/README.md (+28, -0)
  92. vendor/github.com/hpcloud/tail/appveyor.yml (+11, -0)
  93. vendor/github.com/hpcloud/tail/cmd/gotail/.gitignore (+1, -0)
  94. vendor/github.com/hpcloud/tail/cmd/gotail/Makefile (+4, -0)
  95. vendor/github.com/hpcloud/tail/cmd/gotail/gotail.go (+66, -0)
  96. vendor/github.com/hpcloud/tail/ratelimiter/Licence (+7, -0)
  97. vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go (+97, -0)
  98. vendor/github.com/hpcloud/tail/ratelimiter/leakybucket_test.go (+73, -0)
  99. vendor/github.com/hpcloud/tail/ratelimiter/memory.go (+58, -0)
  100. vendor/github.com/hpcloud/tail/ratelimiter/storage.go (+6, -0)

+ 0 - 88
Godeps/Godeps.json

@@ -1,88 +0,0 @@
-{
-	"ImportPath": "github.com/abh/geodns",
-	"GoVersion": "go1.7",
-	"GodepVersion": "v75",
-	"Deps": [
-		{
-			"ImportPath": "github.com/abh/errorutil",
-			"Rev": "f9bd360d00b902548fbb80837aef90dca2c8285e"
-		},
-		{
-			"ImportPath": "github.com/abh/geoip",
-			"Rev": "e5f8ea00f476d60a1e539d74211ce24881347d9e"
-		},
-		{
-			"ImportPath": "github.com/golang/geo/r1",
-			"Rev": "0c31b84d978c2b31eab2ba8c6c6e820d8ce3acff"
-		},
-		{
-			"ImportPath": "github.com/golang/geo/r2",
-			"Rev": "0c31b84d978c2b31eab2ba8c6c6e820d8ce3acff"
-		},
-		{
-			"ImportPath": "github.com/golang/geo/r3",
-			"Rev": "0c31b84d978c2b31eab2ba8c6c6e820d8ce3acff"
-		},
-		{
-			"ImportPath": "github.com/golang/geo/s1",
-			"Rev": "0c31b84d978c2b31eab2ba8c6c6e820d8ce3acff"
-		},
-		{
-			"ImportPath": "github.com/golang/geo/s2",
-			"Rev": "0c31b84d978c2b31eab2ba8c6c6e820d8ce3acff"
-		},
-		{
-			"ImportPath": "github.com/miekg/dns",
-			"Rev": "58f52c57ce9df13460ac68200cef30a008b9c468"
-		},
-		{
-			"ImportPath": "github.com/pborman/uuid",
-			"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
-		},
-		{
-			"ImportPath": "github.com/rcrowley/go-metrics",
-			"Rev": "eeba7bd0dd01ace6e690fa833b3f22aaec29af43"
-		},
-		{
-			"ImportPath": "github.com/stretchr/testify",
-			"Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
-		},
-		{
-			"ImportPath": "golang.org/x/net/websocket",
-			"Rev": "db8e4de5b2d6653f66aea53094624468caad15d2"
-		},
-		{
-			"ImportPath": "gopkg.in/check.v1",
-			"Rev": "11d3bc7aa68e238947792f30573146a3231fc0f1"
-		},
-		{
-			"ImportPath": "gopkg.in/fsnotify.v1",
-			"Comment": "v1.2.0",
-			"Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0"
-		},
-		{
-			"ImportPath": "gopkg.in/gcfg.v1",
-			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
-		},
-		{
-			"ImportPath": "gopkg.in/gcfg.v1/scanner",
-			"Comment": "v1.0.0",
-			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
-		},
-		{
-			"ImportPath": "gopkg.in/gcfg.v1/token",
-			"Comment": "v1.0.0",
-			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
-		},
-		{
-			"ImportPath": "gopkg.in/gcfg.v1/types",
-			"Comment": "v1.0.0",
-			"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
-		},
-		{
-			"ImportPath": "gopkg.in/natefinch/lumberjack.v2",
-			"Comment": "v1.0-21-g514cbda",
-			"Rev": "514cbda263a734ae8caac038dadf05f8f3f9f738"
-		}
-	]
-}

+ 126 - 0
Gopkg.lock

@@ -0,0 +1,126 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+memo = "da057b40b78dea546d93310e141024db552a5a898a38576f408bbddb7297a5ae"
+
+[[projects]]
+  name = "github.com/abh/errorutil"
+  packages = ["."]
+  revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/geo"
+  packages = ["r1","r2","r3","s1","s2"]
+  revision = "5747e9816367bd031622778e3e538f9737814005"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/hpcloud/tail"
+  packages = [".","ratelimiter","util","watch","winfile"]
+  revision = "faf842bde7ed83bbc3c65a2c454fae39bc29a95f"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/influxdata/influxdb"
+  packages = ["client/v2","models","pkg/escape"]
+  revision = "38735b24f67ed462a310d490cf23c1b3953e53e8"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/kr/pretty"
+  packages = ["."]
+  revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/kr/text"
+  packages = ["."]
+  revision = "7cafcd837844e784b526369c9bce262804aebc60"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/miekg/dns"
+  packages = ["."]
+  revision = "f282f80e243cc2bf8f6410c30d821b93b794e168"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/oschwald/geoip2-golang"
+  packages = ["."]
+  revision = "5b1dc16861f81d05d9836bb21c2d0d65282fc0b8"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/oschwald/maxminddb-golang"
+  packages = ["."]
+  revision = "d19f6d453e836d12ee8fe895d0494421e93ef8c1"
+
+[[projects]]
+  name = "github.com/pborman/uuid"
+  packages = ["."]
+  revision = "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
+
+[[projects]]
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/rcrowley/go-metrics"
+  packages = ["."]
+  revision = "eeba7bd0dd01ace6e690fa833b3f22aaec29af43"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/stretchr/testify"
+  packages = ["assert","require"]
+  revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = ["websocket"]
+  revision = "fcc8ed8e87ee07a511396864dad3960b9632e44f"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix","windows"]
+  revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5"
+
+[[projects]]
+  branch = "master"
+  name = "gopkg.in/fsnotify.v1"
+  packages = ["."]
+  revision = "629574ca2a5df945712d3079857300b5e4da0236"
+
+[[projects]]
+  branch = "master"
+  name = "gopkg.in/gcfg.v1"
+  packages = [".","scanner","token","types"]
+  revision = "27e4946190b4a327b539185f2b5b1f7c84730728"
+
+[[projects]]
+  branch = "v2.0"
+  name = "gopkg.in/natefinch/lumberjack.v2"
+  packages = ["."]
+  revision = "514cbda263a734ae8caac038dadf05f8f3f9f738"
+
+[[projects]]
+  branch = "v1"
+  name = "gopkg.in/tomb.v1"
+  packages = ["."]
+  revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
+
+[[projects]]
+  branch = "master"
+  name = "gopkg.in/warnings.v0"
+  packages = ["."]
+  revision = "8a331561fe74dadba6edfc59f3be66c22c3b065d"

+ 118 - 0
Gopkg.toml

@@ -0,0 +1,118 @@
+
+## Gopkg.toml example (these lines may be deleted)
+
+## "metadata" defines metadata about the project that could be used by other independent
+## systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that convey data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"
+
+## "required" lists a set of packages (not projects) that must be included in
+## Gopkg.lock. This list is merged with the set of packages imported by the current
+## project. Use it when your project needs a package it doesn't explicitly import -
+## including "main" packages.
+# required = ["github.com/user/thing/cmd/thing"]
+
+## "ignored" lists a set of packages (not projects) that are ignored when
+## dep statically analyzes source code. Ignored packages can be in this project,
+## or in a dependency.
+# ignored = ["github.com/user/project/badpkg"]
+
+## Dependencies define constraints on dependent projects. They are respected by
+## dep whether coming from the Gopkg.toml of the current project or a dependency.
+# [[dependencies]]
+## Required: the root import path of the project being constrained.
+# name = "github.com/user/project"
+#
+## Recommended: the version constraint to enforce for the project.
+## Only one of "branch", "version" or "revision" can be specified.
+# version = "1.0.0"
+# branch = "master"
+# revision = "abc123"
+#
+## Optional: an alternate location (URL or import path) for the project's source.
+# source = "https://github.com/myfork/package.git"
+#
+## "metadata" defines metadata about the dependency or override that could be used
+## by other independent systems. The metadata defined here will be ignored by dep.
+# [metadata]
+# key1 = "value that convey data to other systems"
+# system1-data = "value that is used by a system"
+# system2-data = "value that is used by another system"
+
+## Overrides have the same structure as [[dependencies]], but supersede all
+## [[dependencies]] declarations from all projects. Only the current project's
+## [[overrides]] are applied.
+##
+## Overrides are a sledgehammer. Use them only as a last resort.
+# [[overrides]]
+## Required: the root import path of the project being constrained.
+# name = "github.com/user/project"
+#
+## Optional: specifying a version constraint override will cause all other
+## constraints on this project to be ignored; only the overridden constraint
+## need be satisfied.
+## Again, only one of "branch", "version" or "revision" can be specified.
+# version = "1.0.0"
+# branch = "master"
+# revision = "abc123"
+#
+## Optional: specifying an alternate source location as an override will
+## enforce that the alternate location is used for that project, regardless of
+## what source location any dependent projects specify.
+# source = "https://github.com/myfork/package.git"
+
+
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/golang/geo"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/hpcloud/tail"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/influxdata/influxdb"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/kr/pretty"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/miekg/dns"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/oschwald/geoip2-golang"
+
+[[dependencies]]
+  name = "github.com/pmezard/go-difflib"
+  version = "^1.0.0"
+
+[[dependencies]]
+  branch = "master"
+  name = "github.com/stretchr/testify"
+
+[[dependencies]]
+  branch = "master"
+  name = "golang.org/x/net"
+
+[[dependencies]]
+  branch = "master"
+  name = "gopkg.in/fsnotify.v1"
+
+[[dependencies]]
+  branch = "master"
+  name = "gopkg.in/gcfg.v1"
+
+[[dependencies]]
+  branch = "v2.0"
+  name = "gopkg.in/natefinch/lumberjack.v2"
+
+[[dependencies]]
+  branch = "v1"
+  name = "gopkg.in/tomb.v1"

+ 0 - 26
vendor/github.com/abh/geoip/.gitignore

@@ -1,26 +0,0 @@
-geoip-demo
-*~
-/db/GeoLiteCity.dat
-
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe

+ 0 - 18
vendor/github.com/abh/geoip/.travis.yml

@@ -1,18 +0,0 @@
-language: go
-
-go:
-  - 1.3
-  - 1.4
-  - tip
-
-before_install:
-  - sudo apt-get install libgeoip-dev bzr
-
-install:
-  - mkdir -p $TRAVIS_BUILD_DIR/db
-  - curl http://geodns.bitnames.com/geoip/GeoLiteCity.dat.gz  | gzip -cd > $TRAVIS_BUILD_DIR/db/GeoLiteCity.dat
-  - go get gopkg.in/check.v1
-
-script:
-  - cd $TRAVIS_BUILD_DIR && go test -gocheck.v
-  - go test -gocheck.v -gocheck.b -gocheck.btime=2s

+ 0 - 37
vendor/github.com/abh/geoip/README.md

@@ -1,37 +0,0 @@
-# GeoIP API for Go
-
-This package wraps the [libgeoip C library](http://www.maxmind.com/app/c) for
-access from Go (golang). [![Build Status](https://travis-ci.org/abh/geoip.png?branch=master)](https://travis-ci.org/abh/geoip)
-
-Install with `go get github.com/abh/geoip` and use [godoc
-geoip](http://godoc.org/github.com/abh/geoip) to read the documentation.
-
-There's a small example in the `ex/` subdirectory.
-
-You can download the free [GeoLite
-Country](http://www.maxmind.com/app/geoip_country) database or you can
-[subscribe to updates](http://www.maxmind.com/app/country).
-
-## Examples
-
-	file := "/usr/share/GeoIP/GeoIP.dat"
-
-	gi, err := geoip.Open(file)
-	if err != nil {
-		fmt.Printf("Could not open GeoIP database\n")
-	}
-
-	if gi != nil {
-		country, netmask := gi.GetCountry("207.171.7.51")
-	}
-
-	// Setup gi6 by opening the optional IPv6 database and then...
-	country := gi6.GetCountry_v6("2607:f238:2::5")
-	fmt.Println(country)
-
-
-## Contact
-
-Copyright 2012-2013 Ask Bjørn Hansen <[email protected]>. The package
-is MIT licensed, see the LICENSE file. Originally based on example code
-from [email protected].

+ 0 - 45
vendor/github.com/abh/geoip/const.go

@@ -1,45 +0,0 @@
-package geoip
-
-// GeoIPDBTypes enum in GeoIP.h
-const (
-	GEOIP_COUNTRY_EDITION            = 1
-	GEOIP_REGION_EDITION_REV0        = 7
-	GEOIP_CITY_EDITION_REV0          = 6
-	GEOIP_ORG_EDITION                = 5
-	GEOIP_ISP_EDITION                = 4
-	GEOIP_CITY_EDITION_REV1          = 2
-	GEOIP_REGION_EDITION_REV1        = 3
-	GEOIP_PROXY_EDITION              = 8
-	GEOIP_ASNUM_EDITION              = 9
-	GEOIP_NETSPEED_EDITION           = 10
-	GEOIP_DOMAIN_EDITION             = 11
-	GEOIP_COUNTRY_EDITION_V6         = 12
-	GEOIP_LOCATIONA_EDITION          = 13
-	GEOIP_ACCURACYRADIUS_EDITION     = 14
-	GEOIP_CITYCONFIDENCE_EDITION     = 15
-	GEOIP_CITYCONFIDENCEDIST_EDITION = 16
-	GEOIP_LARGE_COUNTRY_EDITION      = 17
-	GEOIP_LARGE_COUNTRY_EDITION_V6   = 18
-	GEOIP_ASNUM_EDITION_V6           = 21
-	GEOIP_ISP_EDITION_V6             = 22
-	GEOIP_ORG_EDITION_V6             = 23
-	GEOIP_DOMAIN_EDITION_V6          = 24
-	GEOIP_LOCATIONA_EDITION_V6       = 25
-	GEOIP_REGISTRAR_EDITION          = 26
-	GEOIP_REGISTRAR_EDITION_V6       = 27
-	GEOIP_USERTYPE_EDITION           = 28
-	GEOIP_USERTYPE_EDITION_V6        = 29
-	GEOIP_CITY_EDITION_REV1_V6       = 30
-	GEOIP_CITY_EDITION_REV0_V6       = 31
-	GEOIP_NETSPEED_EDITION_REV1      = 32
-	GEOIP_NETSPEED_EDITION_REV1_V6   = 33
-)
-
-// GeoIPOptions enum in GeoIP.h
-const (
-	GEOIP_STANDARD     = 0
-	GEOIP_MEMORY_CACHE = 1
-	GEOIP_CHECK_CACHE  = 2
-	GEOIP_INDEX_CACHE  = 4
-	GEOIP_MMAP_CACHE   = 8
-)

+ 0 - 1
vendor/github.com/abh/geoip/db/.gitignore

@@ -1 +0,0 @@
-*.dat.gz

+ 0 - 29
vendor/github.com/abh/geoip/db/download

@@ -1,29 +0,0 @@
-#!/usr/bin/env perl
-use strict;
-use v5.12.0;
-use LWP::Simple qw(mirror);
-use File::Basename qw(basename);
-
-my @files = qw(
-  http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz
-  http://geolite.maxmind.com/download/geoip/database/GeoIPv6.dat.gz
-  http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
-  http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz
-  http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz
-  http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNumv6.dat.gz
-);
-
-for my $url (@files) {
-    my $file       = basename($url);
-    my ($dat_name) = ($file =~ m/(.*)\.gz/);
-    my $rv         = mirror($url, $file);
-    if ($rv == 200) {
-        system("gzip -cd $file > $dat_name");
-    }
-    elsif ($rv == 304) {
-        # already updated
-    }
-    else {
-        say "$url:", $rv;
-    }
-}

+ 0 - 65
vendor/github.com/abh/geoip/ex/geoip-demo.go

@@ -1,65 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"github.com/abh/geoip"
-)
-
-func main() {
-
-	file6 := "../db/GeoIPv6.dat"
-
-	gi6, err := geoip.Open(file6)
-	if err != nil {
-		fmt.Printf("Could not open GeoIPv6 database: %s\n", err)
-	}
-
-	gi, err := geoip.Open()
-	if err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-	}
-
-	giasn, err := geoip.Open("../db/GeoIPASNum.dat")
-	if err != nil {
-		fmt.Printf("Could not open GeoIPASN database: %s\n", err)
-	}
-
-	giasn6, err := geoip.Open("../db/GeoIPASNumv6.dat")
-	if err != nil {
-		fmt.Printf("Could not open GeoIPASN database: %s\n", err)
-	}
-
-	if giasn != nil {
-		ip := "207.171.7.51"
-		asn, netmask := giasn.GetName(ip)
-		fmt.Printf("%s: %s (netmask /%d)\n", ip, asn, netmask)
-
-	}
-
-	if gi != nil {
-		test4(*gi, "207.171.7.51")
-		test4(*gi, "127.0.0.1")
-	}
-	if gi6 != nil {
-		ip := "2607:f238:2::5"
-		country, netmask := gi6.GetCountry_v6(ip)
-		var asn string
-		var asn_netmask int
-		if giasn6 != nil {
-			asn, asn_netmask = giasn6.GetNameV6(ip)
-		}
-		fmt.Printf("%s: %s/%d %s/%d\n", ip, country, netmask, asn, asn_netmask)
-
-	}
-
-}
-
-func test4(g geoip.GeoIP, ip string) {
-	test(func(s string) (string, int) { return g.GetCountry(s) }, ip)
-}
-
-func test(f func(string) (string, int), ip string) {
-	country, netmask := f(ip)
-	fmt.Printf("ip: %s is [%s] (netmask %d)\n", ip, country, netmask)
-
-}

+ 0 - 340
vendor/github.com/abh/geoip/geoip.go

@@ -1,340 +0,0 @@
-/* Go (cgo) interface to libgeoip */
-package geoip
-
-/*
-#cgo pkg-config: geoip  
-#include <stdio.h>
-#include <errno.h>
-#include <GeoIP.h>
-#include <GeoIPCity.h>
-
-//typedef GeoIP* GeoIP_pnt
-*/
-import "C"
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"runtime"
-	"sync"
-	"unsafe"
-)
-
-type GeoIP struct {
-	db *C.GeoIP
-
-	// We don't use GeoIP's thread-safe API calls, which means there is a
-	// single global netmask variable that gets clobbered in the main
-	// lookup routine.  Any calls which have _GeoIP_seek_record_gl need to
-	// be wrapped in this mutex.
-
-	mu sync.Mutex
-}
-
-func (gi *GeoIP) free() {
-	if gi == nil {
-		return
-	}
-	if gi.db == nil {
-		gi = nil
-		return
-	}
-	C.GeoIP_delete(gi.db)
-	gi = nil
-	return
-}
-
-// Default convenience wrapper around OpenDb
-func Open(files ...string) (*GeoIP, error) {
-	return OpenDb(files, GEOIP_MEMORY_CACHE)
-}
-
-// Opens a GeoIP database by filename with specified GeoIPOptions flag.
-// All formats supported by libgeoip are supported though there are only
-// functions to access some of the databases in this API.
-// If you don't pass a filename, it will try opening the database from
-// a list of common paths.
-func OpenDb(files []string, flag int) (*GeoIP, error) {
-	if len(files) == 0 {
-		files = []string{
-			"/usr/share/GeoIP/GeoIP.dat",       // Linux default
-			"/usr/share/local/GeoIP/GeoIP.dat", // source install?
-			"/usr/local/share/GeoIP/GeoIP.dat", // FreeBSD
-			"/opt/local/share/GeoIP/GeoIP.dat", // MacPorts
-			"/usr/share/GeoIP/GeoIP.dat",       // ArchLinux
-		}
-	}
-
-	g := &GeoIP{}
-	runtime.SetFinalizer(g, (*GeoIP).free)
-
-	var err error
-
-	for _, file := range files {
-
-		// libgeoip prints errors if it can't open the file, so check first
-		if _, err := os.Stat(file); err != nil {
-			if os.IsExist(err) {
-				log.Println(err)
-			}
-			continue
-		}
-
-		cbase := C.CString(file)
-		defer C.free(unsafe.Pointer(cbase))
-
-		g.db, err = C.GeoIP_open(cbase, C.int(flag))
-		if g.db != nil && err != nil {
-			break
-		}
-	}
-	if err != nil {
-		return nil, fmt.Errorf("Error opening GeoIP database (%s): %s", files, err)
-	}
-
-	if g.db == nil {
-		return nil, fmt.Errorf("Didn't open GeoIP database (%s)", files)
-	}
-
-	C.GeoIP_set_charset(g.db, C.GEOIP_CHARSET_UTF8)
-	return g, nil
-}
-
-// SetCustomDirectory sets the default location for the GeoIP .dat files used when
-// calling OpenType()
-func SetCustomDirectory(dir string) {
-	cdir := C.CString(dir)
-	// GeoIP doesn't copy the string, so don't free it when we're done here.
-	// defer C.free(unsafe.Pointer(cdir))
-	C.GeoIP_setup_custom_directory(cdir)
-}
-
-// OpenType opens a specified GeoIP database type in the default location with the
-// specified GeoIPOptions flag. Constants are defined for each database type
-// (for example GEOIP_COUNTRY_EDITION).
-func OpenTypeFlag(dbType int, flag int) (*GeoIP, error) {
-	g := &GeoIP{}
-	runtime.SetFinalizer(g, (*GeoIP).free)
-
-	var err error
-
-	g.db, err = C.GeoIP_open_type(C.int(dbType), C.int(flag))
-	if err != nil {
-		return nil, fmt.Errorf("Error opening GeoIP database (%d): %s", dbType, err)
-	}
-
-	if g.db == nil {
-		return nil, fmt.Errorf("Didn't open GeoIP database (%d)", dbType)
-	}
-
-	C.GeoIP_set_charset(g.db, C.GEOIP_CHARSET_UTF8)
-
-	return g, nil
-}
-
-// OpenType opens a specified GeoIP database type in the default location
-// and the 'memory cache' flag. Use OpenTypeFlag() to specify flag.
-func OpenType(dbType int) (*GeoIP, error) {
-	return OpenTypeFlag(dbType, GEOIP_MEMORY_CACHE)
-}
-
-// Takes an IPv4 address string and returns the organization name for that IP.
-// Requires the GeoIP organization database.
-func (gi *GeoIP) GetOrg(ip string) string {
-	name, _ := gi.GetName(ip)
-	return name
-}
-
-// Works on the ASN, Netspeed, Organization and probably other
-// databases, takes and IP string and returns a "name" and the
-// netmask.
-func (gi *GeoIP) GetName(ip string) (name string, netmask int) {
-	if gi.db == nil {
-		return
-	}
-
-	gi.mu.Lock()
-	defer gi.mu.Unlock()
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-	cname := C.GeoIP_name_by_addr(gi.db, cip)
-
-	if cname != nil {
-		name = C.GoString(cname)
-		defer C.free(unsafe.Pointer(cname))
-		netmask = int(C.GeoIP_last_netmask(gi.db))
-		return
-	}
-	return
-}
-
-type GeoIPRecord struct {
-	CountryCode   string
-	CountryCode3  string
-	CountryName   string
-	Region        string
-	City          string
-	PostalCode    string
-	Latitude      float32
-	Longitude     float32
-	MetroCode     int
-	AreaCode      int
-	CharSet       int
-	ContinentCode string
-}
-
-// Returns the "City Record" for an IP address. Requires the GeoCity(Lite)
-// database - http://www.maxmind.com/en/city
-func (gi *GeoIP) GetRecord(ip string) *GeoIPRecord {
-	if gi.db == nil {
-		return nil
-	}
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-
-	gi.mu.Lock()
-	record := C.GeoIP_record_by_addr(gi.db, cip)
-	gi.mu.Unlock()
-
-	if record == nil {
-		return nil
-	}
-	// defer C.free(unsafe.Pointer(record))
-	defer C.GeoIPRecord_delete(record)
-	rec := new(GeoIPRecord)
-	rec.CountryCode = C.GoString(record.country_code)
-	rec.CountryCode3 = C.GoString(record.country_code3)
-	rec.CountryName = C.GoString(record.country_name)
-	rec.Region = C.GoString(record.region)
-	rec.City = C.GoString(record.city)
-	rec.PostalCode = C.GoString(record.postal_code)
-	rec.Latitude = float32(record.latitude)
-	rec.Longitude = float32(record.longitude)
-	rec.CharSet = int(record.charset)
-	rec.ContinentCode = C.GoString(record.continent_code)
-
-	if gi.db.databaseType != C.GEOIP_CITY_EDITION_REV0 {
-		/* DIRTY HACK BELOW:
-		   The GeoIPRecord struct in GeoIPCity.h contains an int32 union of metro_code and dma_code.
-		   The union is unnamed, so cgo names it anon0 and assumes it's a 4-byte array.
-		*/
-		union_int := (*int32)(unsafe.Pointer(&record.anon0))
-		rec.MetroCode = int(*union_int)
-		rec.AreaCode = int(record.area_code)
-	}
-
-	return rec
-}
-
-// Returns the country code and region code for an IP address. Requires
-// the GeoIP Region database.
-func (gi *GeoIP) GetRegion(ip string) (string, string) {
-	if gi.db == nil {
-		return "", ""
-	}
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-
-	gi.mu.Lock()
-	region := C.GeoIP_region_by_addr(gi.db, cip)
-	gi.mu.Unlock()
-
-	if region == nil {
-		return "", ""
-	}
-
-	countryCode := C.GoString(&region.country_code[0])
-	regionCode := C.GoString(&region.region[0])
-	defer C.free(unsafe.Pointer(region))
-
-	return countryCode, regionCode
-}
-
-// Returns the region name given a country code and region code
-func GetRegionName(countryCode, regionCode string) string {
-
-	cc := C.CString(countryCode)
-	defer C.free(unsafe.Pointer(cc))
-
-	rc := C.CString(regionCode)
-	defer C.free(unsafe.Pointer(rc))
-
-	region := C.GeoIP_region_name_by_code(cc, rc)
-	if region == nil {
-		return ""
-	}
-
-	// it's a static string constant, don't free this
-	regionName := C.GoString(region)
-
-	return regionName
-}
-
-// Same as GetName() but for IPv6 addresses.
-func (gi *GeoIP) GetNameV6(ip string) (name string, netmask int) {
-	if gi.db == nil {
-		return
-	}
-
-	gi.mu.Lock()
-	defer gi.mu.Unlock()
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-	cname := C.GeoIP_name_by_addr_v6(gi.db, cip)
-
-	if cname != nil {
-		name = C.GoString(cname)
-		defer C.free(unsafe.Pointer(cname))
-		netmask = int(C.GeoIP_last_netmask(gi.db))
-		return
-	}
-	return
-}
-
-// Takes an IPv4 address string and returns the country code for that IP
-// and the netmask for that IP range.
-func (gi *GeoIP) GetCountry(ip string) (cc string, netmask int) {
-	if gi.db == nil {
-		return
-	}
-
-	gi.mu.Lock()
-	defer gi.mu.Unlock()
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-	ccountry := C.GeoIP_country_code_by_addr(gi.db, cip)
-
-	if ccountry != nil {
-		cc = C.GoString(ccountry)
-		netmask = int(C.GeoIP_last_netmask(gi.db))
-		return
-	}
-	return
-}
-
-// GetCountry_v6 works the same as GetCountry except for IPv6 addresses, be sure to
-// load a database with IPv6 data to get any results.
-func (gi *GeoIP) GetCountry_v6(ip string) (cc string, netmask int) {
-	if gi.db == nil {
-		return
-	}
-
-	gi.mu.Lock()
-	defer gi.mu.Unlock()
-
-	cip := C.CString(ip)
-	defer C.free(unsafe.Pointer(cip))
-	ccountry := C.GeoIP_country_code_by_addr_v6(gi.db, cip)
-	if ccountry != nil {
-		cc = C.GoString(ccountry)
-		netmask = int(C.GeoIP_last_netmask(gi.db))
-		return
-	}
-	return
-}

+ 0 - 124
vendor/github.com/abh/geoip/geoip_test.go

@@ -1,124 +0,0 @@
-package geoip
-
-import (
-	"fmt"
-	"testing"
-
-	. "gopkg.in/check.v1"
-)
-
-// Hook up gocheck into the gotest runner.
-func Test(t *testing.T) { TestingT(t) }
-
-type GeoIPSuite struct {
-}
-
-var _ = Suite(&GeoIPSuite{})
-
-func (s *GeoIPSuite) Testv4(c *C) {
-	gi, err := Open()
-	if gi == nil || err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-		return
-	}
-
-	c.Check(gi, NotNil)
-
-	country, netmask := gi.GetCountry("64.17.254.216")
-	c.Check(country, Equals, "US")
-	c.Check(netmask, Equals, 17)
-
-	country, netmask = gi.GetCountry("222.230.136.0")
-	c.Check(country, Equals, "JP")
-	c.Check(netmask, Equals, 16)
-}
-
-func (s *GeoIPSuite) TestOpenType(c *C) {
-
-	SetCustomDirectory("test-db")
-
-	// Open Country database
-	gi, err := OpenType(GEOIP_COUNTRY_EDITION)
-	c.Check(err, IsNil)
-	c.Assert(gi, NotNil)
-	country, _ := gi.GetCountry("81.2.69.160")
-	c.Check(country, Equals, "GB")
-}
-
-func (s *GeoIPSuite) Benchmark_GetCountry(c *C) {
-	gi, err := Open()
-	if gi == nil || err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-		return
-	}
-
-	for i := 0; i < c.N; i++ {
-		gi.GetCountry("207.171.7.51")
-	}
-}
-
-func (s *GeoIPSuite) Testv4Record(c *C) {
-	gi, err := Open("test-db/GeoIPCity.dat")
-	if gi == nil || err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-		return
-	}
-
-	c.Check(gi, NotNil)
-
-	record := gi.GetRecord("66.92.181.240")
-	c.Assert(record, NotNil)
-	c.Check(
-		*record,
-		Equals,
-		GeoIPRecord{
-			CountryCode:   "US",
-			CountryCode3:  "USA",
-			CountryName:   "United States",
-			Region:        "CA",
-			City:          "Fremont",
-			PostalCode:    "94538",
-			Latitude:      37.5079,
-			Longitude:     -121.96,
-			AreaCode:      510,
-			MetroCode:     807,
-			CharSet:       1,
-			ContinentCode: "NA",
-		},
-	)
-}
-
-func (s *GeoIPSuite) Benchmark_GetRecord(c *C) {
-
-	gi, err := Open("db/GeoLiteCity.dat")
-	if gi == nil || err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-		return
-	}
-
-	for i := 0; i < c.N; i++ {
-		record := gi.GetRecord("207.171.7.51")
-		if record == nil {
-			panic("")
-		}
-	}
-}
-
-func (s *GeoIPSuite) Testv4Region(c *C) {
-	gi, err := Open("test-db/GeoIPRegion.dat")
-	if gi == nil || err != nil {
-		fmt.Printf("Could not open GeoIP database: %s\n", err)
-		return
-	}
-
-	country, region := gi.GetRegion("64.17.254.223")
-	c.Check(country, Equals, "US")
-	c.Check(region, Equals, "CA")
-}
-
-func (s *GeoIPSuite) TestRegionName(c *C) {
-	regionName := GetRegionName("NL", "07")
-	c.Check(regionName, Equals, "Noord-Holland")
-	regionName = GetRegionName("CA", "ON")
-	c.Check(regionName, Equals, "Ontario")
-}

BIN
vendor/github.com/abh/geoip/test-db/GeoIP.dat


BIN
vendor/github.com/abh/geoip/test-db/GeoIPCity.dat


BIN
vendor/github.com/abh/geoip/test-db/GeoIPRegion.dat
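
With this commit the cgo-based vendor/github.com/abh/geoip package, including the test databases above, is dropped from the vendor tree, while Gopkg.lock now pins the pure-Go github.com/oschwald/geoip2-golang and github.com/oschwald/maxminddb-golang. A minimal sketch of a country lookup with that reader follows; the database path is illustrative, and the IP is the one used in the deleted demo code:

```go
package main

import (
	"fmt"
	"log"
	"net"

	geoip2 "github.com/oschwald/geoip2-golang"
)

func main() {
	// Illustrative path; GeoDNS configures its database locations elsewhere.
	db, err := geoip2.Open("GeoLite2-Country.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	record, err := db.Country(net.ParseIP("207.171.7.51"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.IsoCode) // e.g. "US"
}
```

Unlike the deleted wrapper, this reader needs no libgeoip C library, which is presumably why the cgo bindings and their .dat test databases can be removed from vendor/.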


+ 0 - 0
vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore → vendor/github.com/davecgh/go-spew/.gitignore


+ 14 - 0
vendor/github.com/davecgh/go-spew/.travis.yml

@@ -0,0 +1,14 @@
+language: go
+go:
+    - 1.5.4
+    - 1.6.3
+    - 1.7
+install:
+    - go get -v golang.org/x/tools/cmd/cover
+script:
+    - go test -v -tags=safe ./spew
+    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
+after_success:
+    - go get -v github.com/mattn/goveralls
+    - export PATH=$PATH:$HOME/gopath/bin
+    - goveralls -coverprofile=profile.cov -service=travis-ci

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE → vendor/github.com/davecgh/go-spew/LICENSE

@@ -1,6 +1,6 @@
 ISC License
 
-Copyright (c) 2012-2013 Dave Collins <[email protected]>
+Copyright (c) 2012-2016 Dave Collins <[email protected]>
 
 Permission to use, copy, modify, and distribute this software for any
 purpose with or without fee is hereby granted, provided that the above

+ 205 - 0
vendor/github.com/davecgh/go-spew/README.md

@@ -0,0 +1,205 @@
+go-spew
+=======
+
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]
+(https://travis-ci.org/davecgh/go-spew) [![ISC License]
+(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
+(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
+(https://coveralls.io/r/davecgh/go-spew?branch=master)
+
+
+Go-spew implements a deep pretty printer for Go data structures to aid in
+debugging.  A comprehensive suite of tests with 100% test coverage is provided
+to ensure proper functionality.  See `test_coverage.txt` for the gocov coverage
+report.  Go-spew is licensed under the liberal ISC license, so it may be used in
+open source or commercial projects.
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
+(http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses): 
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+    "fmt"
+    "html"
+    "net/http"
+
+    "github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set("Content-Type", "text/html")
+    fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+    fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+    http.HandleFunc("/", handler)
+    http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+  flag: (main.Flag) flagTwo,
+  data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+  (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+ 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+ 00000020  31 32                                             |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+	String to use for each indentation level for Dump functions.
+	It is a single space by default.  A popular alternative is "\t".
+
+* MaxDepth
+	Maximum number of levels to descend into nested data structures.
+	There is no limit by default.
+
+* DisableMethods
+	Disables invocation of error and Stringer interface methods.
+	Method invocation is enabled by default.
+
+* DisablePointerMethods
+	Disables invocation of error and Stringer interface methods on types
+	which only accept pointer receivers from non-pointer variables.  This option
+	relies on access to the unsafe package, so it will not have any effect when
+	running in environments without access to the unsafe package such as Google
+	App Engine or with the "safe" build tag specified.
+	Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+	DisablePointerAddresses specifies whether to disable the printing of
+	pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+	DisableCapacities specifies whether to disable the printing of capacities
+	for arrays, slices, maps and channels. This is useful when diffing data
+	structures in tests.
+
+* ContinueOnMethod
+	Enables recursion into types after invoking error and Stringer interface
+	methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+	Specifies map keys should be sorted before being printed. Use
+	this to have a more deterministic, diffable output.  Note that
+	only native types (bool, int, uint, floats, uintptr and string)
+	and types which implement error or Stringer interfaces are supported,
+	with other types sorted according to the reflect.Value.String() output
+	which guarantees display stability.  Natural map order is used by
+	default.
+
+* SpewKeys
+	SpewKeys specifies that, as a last resort attempt, map keys should be
+	spewed to strings and sorted by those strings.  This is only considered
+	if SortKeys is true.
+
+```
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available.  By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS.  The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
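
The Configuration Options section of the README above describes ConfigState only in prose. Here is a short sketch of a locally scoped ConfigState using a few of those options; the struct being dumped is invented purely for illustration:

```go
package main

import "github.com/davecgh/go-spew/spew"

type upstream struct {
	Addr    string
	Weight  int
	Targets []string
}

func main() {
	// Options mirror the README: tab indentation, sorted map keys, and the
	// DisablePointerAddresses/DisableCapacities flags added in this version
	// for diff-friendly output in tests.
	cs := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true,
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	cs.Dump(upstream{Addr: "192.0.2.1:53", Weight: 10, Targets: []string{"a", "b"}})
}
```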

+ 22 - 0
vendor/github.com/davecgh/go-spew/cov_report.sh

@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# This script uses gocov to generate a test coverage report.
+# The gocov tool my be obtained with the following command:
+#   go get github.com/axw/gocov/gocov
+#
+# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
+
+# Check for gocov.
+if ! type gocov >/dev/null 2>&1; then
+	echo >&2 "This script requires the gocov tool."
+	echo >&2 "You may obtain it with the following command:"
+	echo >&2 "go get github.com/axw/gocov/gocov"
+	exit 1
+fi
+
+# Only run the cgo tests if gcc is installed.
+if type gcc >/dev/null 2>&1; then
+	(cd spew && gocov test -tags testcgo | gocov report)
+else
+	(cd spew && gocov test | gocov report)
+fi

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go → vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <[email protected]>
+// Copyright (c) 2015-2016 Dave Collins <[email protected]>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go → vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <[email protected]>
+// Copyright (c) 2015-2016 Dave Collins <[email protected]>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go → vendor/github.com/davecgh/go-spew/spew/common.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 298 - 0
vendor/github.com/davecgh/go-spew/spew/common_test.go

@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// custom type to test Stinger interface on non-pointer receiver.
+type stringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with non-pointer receivers.
+func (s stringer) String() string {
+	return "stringer " + string(s)
+}
+
+// custom type to test Stinger interface on pointer receiver.
+type pstringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with only pointer receivers.
+func (s *pstringer) String() string {
+	return "stringer " + string(*s)
+}
+
+// xref1 and xref2 are cross referencing structs for testing circular reference
+// detection.
+type xref1 struct {
+	ps2 *xref2
+}
+type xref2 struct {
+	ps1 *xref1
+}
+
+// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
+// reference for testing detection.
+type indirCir1 struct {
+	ps2 *indirCir2
+}
+type indirCir2 struct {
+	ps3 *indirCir3
+}
+type indirCir3 struct {
+	ps1 *indirCir1
+}
+
+// embed is used to test embedded structures.
+type embed struct {
+	a string
+}
+
+// embedwrap is used to test embedded structures.
+type embedwrap struct {
+	*embed
+	e *embed
+}
+
+// panicer is used to intentionally cause a panic for testing spew properly
+// handles them
+type panicer int
+
+func (p panicer) String() string {
+	panic("test panic")
+}
+
+// customError is used to test custom error interface invocation.
+type customError int
+
+func (e customError) Error() string {
+	return fmt.Sprintf("error: %d", int(e))
+}
+
+// stringizeWants converts a slice of wanted test output into a format suitable
+// for a test error message.
+func stringizeWants(wants []string) string {
+	s := ""
+	for i, want := range wants {
+		if i > 0 {
+			s += fmt.Sprintf("want%d: %s", i+1, want)
+		} else {
+			s += "want: " + want
+		}
+	}
+	return s
+}
+
+// testFailed returns whether or not a test failed by checking if the result
+// of the test is in the slice of wanted strings.
+func testFailed(result string, wants []string) bool {
+	for _, want := range wants {
+		if result == want {
+			return false
+		}
+	}
+	return true
+}
+
+type sortableStruct struct {
+	x int
+}
+
+func (ss sortableStruct) String() string {
+	return fmt.Sprintf("ss.%d", ss.x)
+}
+
+type unsortableStruct struct {
+	x int
+}
+
+type sortTestCase struct {
+	input    []reflect.Value
+	expected []reflect.Value
+}
+
+func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
+	getInterfaces := func(values []reflect.Value) []interface{} {
+		interfaces := []interface{}{}
+		for _, v := range values {
+			interfaces = append(interfaces, v.Interface())
+		}
+		return interfaces
+	}
+
+	for _, test := range tests {
+		spew.SortValues(test.input, cs)
+		// reflect.DeepEqual cannot really make sense of reflect.Value,
+		// probably because of all the pointer tricks. For instance,
+		// v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
+		// instead.
+		input := getInterfaces(test.input)
+		expected := getInterfaces(test.expected)
+		if !reflect.DeepEqual(input, expected) {
+			t.Errorf("Sort mismatch:\n %v != %v", input, expected)
+		}
+	}
+}
+
+// TestSortValues ensures the sort functionality for relect.Value based sorting
+// works as intended.
+func TestSortValues(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	embedA := v(embed{"a"})
+	embedB := v(embed{"b"})
+	embedC := v(embed{"c"})
+	tests := []sortTestCase{
+		// No values.
+		{
+			[]reflect.Value{},
+			[]reflect.Value{},
+		},
+		// Bools.
+		{
+			[]reflect.Value{v(false), v(true), v(false)},
+			[]reflect.Value{v(false), v(false), v(true)},
+		},
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Uints.
+		{
+			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
+			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
+		},
+		// Floats.
+		{
+			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
+			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// Array
+		{
+			[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
+			[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
+		},
+		// Uintptrs.
+		{
+			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
+			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
+		},
+		// SortableStructs.
+		{
+			// Note: not sorted - DisableMethods is set.
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+		// Invalid.
+		{
+			[]reflect.Value{embedB, embedA, embedC},
+			[]reflect.Value{embedB, embedA, embedC},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithMethods ensures the sort functionality for relect.Value
+// based sorting works as intended when using string methods.
+func TestSortValuesWithMethods(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithSpew ensures the sort functionality for relect.Value
+// based sorting works as intended when using spew to stringify keys.
+func TestSortValuesWithSpew(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
+	helpTestSortValues(tests, &cs, t)
+}

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go → vendor/github.com/davecgh/go-spew/spew/config.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 10 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go → vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -91,6 +91,15 @@ The following configuration options are available:
 		which only accept pointer receivers from non-pointer variables.
 		Pointer method invocation is enabled by default.
 
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
 	* ContinueOnMethod
 		Enables recursion into types after invoking error and Stringer interface
 		methods. Recursion after method invocation is disabled by default.

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go → vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1042 - 0
vendor/github.com/davecgh/go-spew/spew/dump_test.go

@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Array containing bytes
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Slice containing bytes
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with custom formatter type on pointer receiver only keys and vals
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+  exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+*/
+
+package spew_test
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+	"unsafe"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// dumpTest is used to describe a test to be performed against the Dump method.
+type dumpTest struct {
+	in    interface{}
+	wants []string
+}
+
+// dumpTests houses all of the tests to be performed against the Dump method.
+var dumpTests = make([]dumpTest, 0)
+
+// addDumpTest is a helper method to append the passed input and desired result
+// to dumpTests.
+func addDumpTest(in interface{}, wants ...string) {
+	test := dumpTest{in, wants}
+	dumpTests = append(dumpTests, test)
+}
+
+func addIntDumpTests() {
+	// Max int8.
+	v := int8(127)
+	nv := (*int8)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "int8"
+	vs := "127"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Max int16.
+	v2 := int16(32767)
+	nv2 := (*int16)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "int16"
+	v2s := "32767"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+	// Max int32.
+	v3 := int32(2147483647)
+	nv3 := (*int32)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "int32"
+	v3s := "2147483647"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+	// Max int64.
+	v4 := int64(9223372036854775807)
+	nv4 := (*int64)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "int64"
+	v4s := "9223372036854775807"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+	// Max int.
+	v5 := int(2147483647)
+	nv5 := (*int)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "int"
+	v5s := "2147483647"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addUintDumpTests() {
+	// Max uint8.
+	v := uint8(255)
+	nv := (*uint8)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "uint8"
+	vs := "255"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Max uint16.
+	v2 := uint16(65535)
+	nv2 := (*uint16)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uint16"
+	v2s := "65535"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+	// Max uint32.
+	v3 := uint32(4294967295)
+	nv3 := (*uint32)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "uint32"
+	v3s := "4294967295"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+	// Max uint64.
+	v4 := uint64(18446744073709551615)
+	nv4 := (*uint64)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "uint64"
+	v4s := "18446744073709551615"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+	// Max uint.
+	v5 := uint(4294967295)
+	nv5 := (*uint)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "uint"
+	v5s := "4294967295"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addBoolDumpTests() {
+	// Boolean true.
+	v := bool(true)
+	nv := (*bool)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "bool"
+	vs := "true"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Boolean false.
+	v2 := bool(false)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "bool"
+	v2s := "false"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFloatDumpTests() {
+	// Standard float32.
+	v := float32(3.1415)
+	nv := (*float32)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "float32"
+	vs := "3.1415"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Standard float64.
+	v2 := float64(3.1415926)
+	nv2 := (*float64)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "float64"
+	v2s := "3.1415926"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addComplexDumpTests() {
+	// Standard complex64.
+	v := complex(float32(6), -2)
+	nv := (*complex64)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "complex64"
+	vs := "(6-2i)"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Standard complex128.
+	v2 := complex(float64(-6), 2)
+	nv2 := (*complex128)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "complex128"
+	v2s := "(-6+2i)"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addArrayDumpTests() {
+	// Array containing standard ints.
+	v := [3]int{1, 2, 3}
+	vLen := fmt.Sprintf("%d", len(v))
+	vCap := fmt.Sprintf("%d", cap(v))
+	nv := (*[3]int)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "int"
+	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
+		vt + ") 2,\n (" + vt + ") 3\n}"
+	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
+
+	// Array containing type with custom formatter on pointer receiver only.
+	v2i0 := pstringer("1")
+	v2i1 := pstringer("2")
+	v2i2 := pstringer("3")
+	v2 := [3]pstringer{v2i0, v2i1, v2i2}
+	v2i0Len := fmt.Sprintf("%d", len(v2i0))
+	v2i1Len := fmt.Sprintf("%d", len(v2i1))
+	v2i2Len := fmt.Sprintf("%d", len(v2i2))
+	v2Len := fmt.Sprintf("%d", len(v2))
+	v2Cap := fmt.Sprintf("%d", cap(v2))
+	nv2 := (*[3]pstringer)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.pstringer"
+	v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+		") (len=" + v2i0Len + ") stringer 1,\n (" + v2t +
+		") (len=" + v2i1Len + ") stringer 2,\n (" + v2t +
+		") (len=" + v2i2Len + ") " + "stringer 3\n}"
+	v2s := v2sp
+	if spew.UnsafeDisabled {
+		v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+			") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" +
+			v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len +
+			") " + "\"3\"\n}"
+	}
+	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n")
+	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n")
+	addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
+
+	// Array containing interfaces.
+	v3i0 := "one"
+	v3 := [3]interface{}{v3i0, int(2), uint(3)}
+	v3i0Len := fmt.Sprintf("%d", len(v3i0))
+	v3Len := fmt.Sprintf("%d", len(v3))
+	v3Cap := fmt.Sprintf("%d", cap(v3))
+	nv3 := (*[3]interface{})(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "[3]interface {}"
+	v3t2 := "string"
+	v3t3 := "int"
+	v3t4 := "uint"
+	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+		v3t4 + ") 3\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+	// Array containing bytes.
+	v4 := [34]byte{
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+		0x31, 0x32,
+	}
+	v4Len := fmt.Sprintf("%d", len(v4))
+	v4Cap := fmt.Sprintf("%d", cap(v4))
+	nv4 := (*[34]byte)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "[34]uint8"
+	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
+		"  |............... |\n" +
+		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
+		"  |!\"#$%&'()*+,-./0|\n" +
+		" 00000020  31 32                                           " +
+		"  |12|\n}"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+}
+
+func addSliceDumpTests() {
+	// Slice containing standard float32 values.
+	v := []float32{3.14, 6.28, 12.56}
+	vLen := fmt.Sprintf("%d", len(v))
+	vCap := fmt.Sprintf("%d", cap(v))
+	nv := (*[]float32)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "float32"
+	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
+		vt + ") 6.28,\n (" + vt + ") 12.56\n}"
+	addDumpTest(v, "([]"+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
+
+	// Slice containing type with custom formatter on pointer receiver only.
+	v2i0 := pstringer("1")
+	v2i1 := pstringer("2")
+	v2i2 := pstringer("3")
+	v2 := []pstringer{v2i0, v2i1, v2i2}
+	v2i0Len := fmt.Sprintf("%d", len(v2i0))
+	v2i1Len := fmt.Sprintf("%d", len(v2i1))
+	v2i2Len := fmt.Sprintf("%d", len(v2i2))
+	v2Len := fmt.Sprintf("%d", len(v2))
+	v2Cap := fmt.Sprintf("%d", cap(v2))
+	nv2 := (*[]pstringer)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.pstringer"
+	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
+		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
+		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
+		"stringer 3\n}"
+	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
+
+	// Slice containing interfaces.
+	v3i0 := "one"
+	v3 := []interface{}{v3i0, int(2), uint(3), nil}
+	v3i0Len := fmt.Sprintf("%d", len(v3i0))
+	v3Len := fmt.Sprintf("%d", len(v3))
+	v3Cap := fmt.Sprintf("%d", cap(v3))
+	nv3 := (*[]interface{})(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "[]interface {}"
+	v3t2 := "string"
+	v3t3 := "int"
+	v3t4 := "uint"
+	v3t5 := "interface {}"
+	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+		v3t4 + ") 3,\n (" + v3t5 + ") <nil>\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+	// Slice containing bytes.
+	v4 := []byte{
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+		0x31, 0x32,
+	}
+	v4Len := fmt.Sprintf("%d", len(v4))
+	v4Cap := fmt.Sprintf("%d", cap(v4))
+	nv4 := (*[]byte)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "[]uint8"
+	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
+		"  |............... |\n" +
+		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
+		"  |!\"#$%&'()*+,-./0|\n" +
+		" 00000020  31 32                                           " +
+		"  |12|\n}"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+	// Nil slice.
+	v5 := []int(nil)
+	nv5 := (*[]int)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "[]int"
+	v5s := "<nil>"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addStringDumpTests() {
+	// Standard string.
+	v := "test"
+	vLen := fmt.Sprintf("%d", len(v))
+	nv := (*string)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "string"
+	vs := "(len=" + vLen + ") \"test\""
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addInterfaceDumpTests() {
+	// Nil interface.
+	var v interface{}
+	nv := (*interface{})(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "interface {}"
+	vs := "<nil>"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Sub-interface.
+	v2 := interface{}(uint16(65535))
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uint16"
+	v2s := "65535"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addMapDumpTests() {
+	// Map with string keys and int vals.
+	k := "one"
+	kk := "two"
+	m := map[string]int{k: 1, kk: 2}
+	klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
+	kkLen := fmt.Sprintf("%d", len(kk))
+	mLen := fmt.Sprintf("%d", len(m))
+	nilMap := map[string]int(nil)
+	nm := (*map[string]int)(nil)
+	pm := &m
+	mAddr := fmt.Sprintf("%p", pm)
+	pmAddr := fmt.Sprintf("%p", &pm)
+	mt := "map[string]int"
+	mt1 := "string"
+	mt2 := "int"
+	ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
+		"\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
+		") \"two\": (" + mt2 + ") 2\n}"
+	ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
+		"\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
+		") \"one\": (" + mt2 + ") 1\n}"
+	addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
+	addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
+		"(*"+mt+")("+mAddr+")("+ms2+")\n")
+	addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
+		"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
+	addDumpTest(nm, "(*"+mt+")(<nil>)\n")
+	addDumpTest(nilMap, "("+mt+") <nil>\n")
+
+	// Map with custom formatter type on pointer receiver only keys and vals.
+	k2 := pstringer("one")
+	v2 := pstringer("1")
+	m2 := map[pstringer]pstringer{k2: v2}
+	k2Len := fmt.Sprintf("%d", len(k2))
+	v2Len := fmt.Sprintf("%d", len(v2))
+	m2Len := fmt.Sprintf("%d", len(m2))
+	nilMap2 := map[pstringer]pstringer(nil)
+	nm2 := (*map[pstringer]pstringer)(nil)
+	pm2 := &m2
+	m2Addr := fmt.Sprintf("%p", pm2)
+	pm2Addr := fmt.Sprintf("%p", &pm2)
+	m2t := "map[spew_test.pstringer]spew_test.pstringer"
+	m2t1 := "spew_test.pstringer"
+	m2t2 := "spew_test.pstringer"
+	m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
+		"stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
+	if spew.UnsafeDisabled {
+		m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len +
+			") " + "\"one\": (" + m2t2 + ") (len=" + v2Len +
+			") \"1\"\n}"
+	}
+	addDumpTest(m2, "("+m2t+") "+m2s+"\n")
+	addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
+	addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
+	addDumpTest(nm2, "(*"+m2t+")(<nil>)\n")
+	addDumpTest(nilMap2, "("+m2t+") <nil>\n")
+
+	// Map with interface keys and values.
+	k3 := "one"
+	k3Len := fmt.Sprintf("%d", len(k3))
+	m3 := map[interface{}]interface{}{k3: 1}
+	m3Len := fmt.Sprintf("%d", len(m3))
+	nilMap3 := map[interface{}]interface{}(nil)
+	nm3 := (*map[interface{}]interface{})(nil)
+	pm3 := &m3
+	m3Addr := fmt.Sprintf("%p", pm3)
+	pm3Addr := fmt.Sprintf("%p", &pm3)
+	m3t := "map[interface {}]interface {}"
+	m3t1 := "string"
+	m3t2 := "int"
+	m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
+		"\"one\": (" + m3t2 + ") 1\n}"
+	addDumpTest(m3, "("+m3t+") "+m3s+"\n")
+	addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
+	addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
+	addDumpTest(nm3, "(*"+m3t+")(<nil>)\n")
+	addDumpTest(nilMap3, "("+m3t+") <nil>\n")
+
+	// Map with nil interface value.
+	k4 := "nil"
+	k4Len := fmt.Sprintf("%d", len(k4))
+	m4 := map[string]interface{}{k4: nil}
+	m4Len := fmt.Sprintf("%d", len(m4))
+	nilMap4 := map[string]interface{}(nil)
+	nm4 := (*map[string]interface{})(nil)
+	pm4 := &m4
+	m4Addr := fmt.Sprintf("%p", pm4)
+	pm4Addr := fmt.Sprintf("%p", &pm4)
+	m4t := "map[string]interface {}"
+	m4t1 := "string"
+	m4t2 := "interface {}"
+	m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
+		" \"nil\": (" + m4t2 + ") <nil>\n}"
+	addDumpTest(m4, "("+m4t+") "+m4s+"\n")
+	addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
+	addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
+	addDumpTest(nm4, "(*"+m4t+")(<nil>)\n")
+	addDumpTest(nilMap4, "("+m4t+") <nil>\n")
+}
+
+func addStructDumpTests() {
+	// Struct with primitives.
+	type s1 struct {
+		a int8
+		b uint8
+	}
+	v := s1{127, 255}
+	nv := (*s1)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.s1"
+	vt2 := "int8"
+	vt3 := "uint8"
+	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Struct that contains another struct.
+	type s2 struct {
+		s1 s1
+		b  bool
+	}
+	v2 := s2{s1{127, 255}, true}
+	nv2 := (*s2)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.s2"
+	v2t2 := "spew_test.s1"
+	v2t3 := "int8"
+	v2t4 := "uint8"
+	v2t5 := "bool"
+	v2s := "{\n s1: (" + v2t2 + ") {\n  a: (" + v2t3 + ") 127,\n  b: (" +
+		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+	// Struct that contains custom type with Stringer pointer interface via both
+	// exported and unexported fields.
+	type s3 struct {
+		s pstringer
+		S pstringer
+	}
+	v3 := s3{"test", "test2"}
+	nv3 := (*s3)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.s3"
+	v3t2 := "spew_test.pstringer"
+	v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
+		") (len=5) stringer test2\n}"
+	v3sp := v3s
+	if spew.UnsafeDisabled {
+		v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+			v3t2 + ") (len=5) \"test2\"\n}"
+		v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+			v3t2 + ") (len=5) stringer test2\n}"
+	}
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+	// Struct that contains embedded struct and field to same struct.
+	e := embed{"embedstr"}
+	eLen := fmt.Sprintf("%d", len("embedstr"))
+	v4 := embedwrap{embed: &e, e: &e}
+	nv4 := (*embedwrap)(nil)
+	pv4 := &v4
+	eAddr := fmt.Sprintf("%p", &e)
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "spew_test.embedwrap"
+	v4t2 := "spew_test.embed"
+	v4t3 := "string"
+	v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n  a: (" + v4t3 +
+		") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
+		")(" + eAddr + ")({\n  a: (" + v4t3 + ") (len=" + eLen + ")" +
+		" \"embedstr\"\n })\n}"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+}
+
+func addUintptrDumpTests() {
+	// Null pointer.
+	v := uintptr(0)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "uintptr"
+	vs := "<nil>"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+
+	// Address of real variable.
+	i := 1
+	v2 := uintptr(unsafe.Pointer(&i))
+	nv2 := (*uintptr)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uintptr"
+	v2s := fmt.Sprintf("%p", &i)
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addUnsafePointerDumpTests() {
+	// Null pointer.
+	v := unsafe.Pointer(uintptr(0))
+	nv := (*unsafe.Pointer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "unsafe.Pointer"
+	vs := "<nil>"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Address of real variable.
+	i := 1
+	v2 := unsafe.Pointer(&i)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "unsafe.Pointer"
+	v2s := fmt.Sprintf("%p", &i)
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addChanDumpTests() {
+	// Nil channel.
+	var v chan int
+	pv := &v
+	nv := (*chan int)(nil)
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "chan int"
+	vs := "<nil>"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Real channel.
+	v2 := make(chan int)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "chan int"
+	v2s := fmt.Sprintf("%p", v2)
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFuncDumpTests() {
+	// Function with no params and no returns.
+	v := addIntDumpTests
+	nv := (*func())(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "func()"
+	vs := fmt.Sprintf("%p", v)
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Function with param and no returns.
+	v2 := TestDump
+	nv2 := (*func(*testing.T))(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "func(*testing.T)"
+	v2s := fmt.Sprintf("%p", v2)
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+	// Function with multiple params and multiple returns.
+	var v3 = func(i int, s string) (b bool, err error) {
+		return true, nil
+	}
+	nv3 := (*func(int, string) (bool, error))(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "func(int, string) (bool, error)"
+	v3s := fmt.Sprintf("%p", v3)
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+}
+
+func addCircularDumpTests() {
+	// Struct that is circular through self referencing.
+	type circular struct {
+		c *circular
+	}
+	v := circular{nil}
+	v.c = &v
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.circular"
+	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n  c: (*" + vt + ")(" +
+		vAddr + ")(<already shown>)\n })\n}"
+	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
+
+	// Structs that are circular through cross referencing.
+	v2 := xref1{nil}
+	ts2 := xref2{&v2}
+	v2.ps2 = &ts2
+	pv2 := &v2
+	ts2Addr := fmt.Sprintf("%p", &ts2)
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.xref1"
+	v2t2 := "spew_test.xref2"
+	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
+		")(" + v2Addr + ")({\n   ps2: (*" + v2t2 + ")(" + ts2Addr +
+		")(<already shown>)\n  })\n })\n}"
+	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
+		")(" + v2Addr + ")(<already shown>)\n })\n}"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
+
+	// Structs that are indirectly circular.
+	v3 := indirCir1{nil}
+	tic2 := indirCir2{nil}
+	tic3 := indirCir3{&v3}
+	tic2.ps3 = &tic3
+	v3.ps2 = &tic2
+	pv3 := &v3
+	tic2Addr := fmt.Sprintf("%p", &tic2)
+	tic3Addr := fmt.Sprintf("%p", &tic3)
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.indirCir1"
+	v3t2 := "spew_test.indirCir2"
+	v3t3 := "spew_test.indirCir3"
+	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
+		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
+		")({\n    ps2: (*" + v3t2 + ")(" + tic2Addr +
+		")(<already shown>)\n   })\n  })\n })\n}"
+	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
+		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
+		")(<already shown>)\n  })\n })\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
+}
+
+func addPanicDumpTests() {
+	// Type that panics in its Stringer interface.
+	v := panicer(127)
+	nv := (*panicer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.panicer"
+	vs := "(PANIC=test panic)127"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addErrorDumpTests() {
+	// Type that has a custom Error interface.
+	v := customError(127)
+	nv := (*customError)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.customError"
+	vs := "error: 127"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+// TestDump executes all of the tests described by dumpTests.
+func TestDump(t *testing.T) {
+	// Setup tests.
+	addIntDumpTests()
+	addUintDumpTests()
+	addBoolDumpTests()
+	addFloatDumpTests()
+	addComplexDumpTests()
+	addArrayDumpTests()
+	addSliceDumpTests()
+	addStringDumpTests()
+	addInterfaceDumpTests()
+	addMapDumpTests()
+	addStructDumpTests()
+	addUintptrDumpTests()
+	addUnsafePointerDumpTests()
+	addChanDumpTests()
+	addFuncDumpTests()
+	addCircularDumpTests()
+	addPanicDumpTests()
+	addErrorDumpTests()
+	addCgoDumpTests()
+
+	t.Logf("Running %d tests", len(dumpTests))
+	for i, test := range dumpTests {
+		buf := new(bytes.Buffer)
+		spew.Fdump(buf, test.in)
+		s := buf.String()
+		if testFailed(s, test.wants) {
+			t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
+			continue
+		}
+	}
+}
+
+func TestDumpSortedKeys(t *testing.T) {
+	cfg := spew.ConfigState{SortKeys: true}
+	s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
+	expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " +
+		"\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " +
+		"(len=1) \"3\"\n" +
+		"}\n"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2})
+	expected = "(map[spew_test.stringer]int) (len=3) {\n" +
+		"(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" +
+		"(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" +
+		"(spew_test.stringer) (len=1) stringer 3: (int) 3\n" +
+		"}\n"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
+	expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+		"(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" +
+		"(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" +
+		"(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" +
+		"}\n"
+	if spew.UnsafeDisabled {
+		expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+			"(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" +
+			"(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" +
+			"(spew_test.pstringer) (len=1) \"3\": (int) 3\n" +
+			"}\n"
+	}
+	if s != expected {
+		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
+	expected = "(map[spew_test.customError]int) (len=3) {\n" +
+		"(spew_test.customError) error: 1: (int) 1,\n" +
+		"(spew_test.customError) error: 2: (int) 2,\n" +
+		"(spew_test.customError) error: 3: (int) 3\n" +
+		"}\n"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
+	}
+
+}

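The dump tests above follow a register-then-run pattern: each addXxxDumpTests helper appends an input together with every rendering that would be acceptable (for example, either map iteration order), and TestDump renders each input with spew.Fdump, passing if the output matches any of the registered wants. A stripped-down, hypothetical sketch of the same pattern (the dumpCase type and TestDumpCases name are invented):

package example_test

import (
	"bytes"
	"testing"

	"github.com/davecgh/go-spew/spew"
)

// dumpCase pairs an input with every output that would be accepted.
type dumpCase struct {
	in    interface{}
	wants []string
}

var dumpCases = []dumpCase{
	// From the int tests above: spew renders int8(127) as "(int8) 127\n".
	{in: int8(127), wants: []string{"(int8) 127\n"}},
}

func TestDumpCases(t *testing.T) {
	for i, c := range dumpCases {
		var buf bytes.Buffer
		spew.Fdump(&buf, c.in)
		got := buf.String()
		matched := false
		for _, want := range c.wants {
			if got == want {
				matched = true
				break
			}
		}
		if !matched {
			t.Errorf("case #%d: got %q, want one of %q", i, got, c.wants)
		}
	}
}
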
+ 99 - 0
vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go

@@ -0,0 +1,99 @@
+// Copyright (c) 2013-2016 Dave Collins <[email protected]>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line.  This means the cgo tests are only added (and hence run) when
+// specifically requested.  This configuration is used because spew itself
+// does not require cgo to run even though it does handle certain cgo types
+// specially.  Rather than forcing all clients to require cgo and an external
+// C compiler just to run the tests, this scheme makes them optional.
+// +build cgo,testcgo
+
+package spew_test
+
+import (
+	"fmt"
+
+	"github.com/davecgh/go-spew/spew/testdata"
+)
+
+func addCgoDumpTests() {
+	// C char pointer.
+	v := testdata.GetCgoCharPointer()
+	nv := testdata.GetCgoNullCharPointer()
+	pv := &v
+	vcAddr := fmt.Sprintf("%p", v)
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "*testdata._Ctype_char"
+	vs := "116"
+	addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
+	addDumpTest(nv, "("+vt+")(<nil>)\n")
+
+	// C char array.
+	v2, v2l, v2c := testdata.GetCgoCharArray()
+	v2Len := fmt.Sprintf("%d", v2l)
+	v2Cap := fmt.Sprintf("%d", v2c)
+	v2t := "[6]testdata._Ctype_char"
+	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
+		"{\n 00000000  74 65 73 74 32 00                               " +
+		"  |test2.|\n}"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+
+	// C unsigned char array.
+	v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
+	v3Len := fmt.Sprintf("%d", v3l)
+	v3Cap := fmt.Sprintf("%d", v3c)
+	v3t := "[6]testdata._Ctype_unsignedchar"
+	v3t2 := "[6]testdata._Ctype_uchar"
+	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
+		"{\n 00000000  74 65 73 74 33 00                               " +
+		"  |test3.|\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
+
+	// C signed char array.
+	v4, v4l, v4c := testdata.GetCgoSignedCharArray()
+	v4Len := fmt.Sprintf("%d", v4l)
+	v4Cap := fmt.Sprintf("%d", v4c)
+	v4t := "[6]testdata._Ctype_schar"
+	v4t2 := "testdata._Ctype_schar"
+	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+		"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
+		") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
+		") 0\n}"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+
+	// C uint8_t array.
+	v5, v5l, v5c := testdata.GetCgoUint8tArray()
+	v5Len := fmt.Sprintf("%d", v5l)
+	v5Cap := fmt.Sprintf("%d", v5c)
+	v5t := "[6]testdata._Ctype_uint8_t"
+	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
+		"{\n 00000000  74 65 73 74 35 00                               " +
+		"  |test5.|\n}"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+
+	// C typedefed unsigned char array.
+	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
+	v6Len := fmt.Sprintf("%d", v6l)
+	v6Cap := fmt.Sprintf("%d", v6c)
+	v6t := "[6]testdata._Ctype_custom_uchar_t"
+	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
+		"{\n 00000000  74 65 73 74 36 00                               " +
+		"  |test6.|\n}"
+	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+}

+ 26 - 0
vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go

@@ -0,0 +1,26 @@
+// Copyright (c) 2013 Dave Collins <[email protected]>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either cgo is not supported or "-tags testcgo" is not added to the go
+// test command line.  This file intentionally does not set up any cgo tests in
+// this scenario.
+// +build !cgo !testcgo
+
+package spew_test
+
+func addCgoDumpTests() {
+	// Don't add any tests for cgo since this file is only compiled when
+	// there should not be any cgo tests.
+}

+ 226 - 0
vendor/github.com/davecgh/go-spew/spew/example_test.go

@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+	"fmt"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+type Flag int
+
+const (
+	flagOne Flag = iota
+	flagTwo
+)
+
+var flagStrings = map[Flag]string{
+	flagOne: "flagOne",
+	flagTwo: "flagTwo",
+}
+
+func (f Flag) String() string {
+	if s, ok := flagStrings[f]; ok {
+		return s
+	}
+	return fmt.Sprintf("Unknown flag (%d)", int(f))
+}
+
+type Bar struct {
+	data uintptr
+}
+
+type Foo struct {
+	unexportedField Bar
+	ExportedField   map[interface{}]interface{}
+}
+
+// This example demonstrates how to use Dump to dump variables to stdout.
+func ExampleDump() {
+	// The following package level declarations are assumed for this example:
+	/*
+		type Flag int
+
+		const (
+			flagOne Flag = iota
+			flagTwo
+		)
+
+		var flagStrings = map[Flag]string{
+			flagOne: "flagOne",
+			flagTwo: "flagTwo",
+		}
+
+		func (f Flag) String() string {
+			if s, ok := flagStrings[f]; ok {
+				return s
+			}
+			return fmt.Sprintf("Unknown flag (%d)", int(f))
+		}
+
+		type Bar struct {
+			data uintptr
+		}
+
+		type Foo struct {
+			unexportedField Bar
+			ExportedField   map[interface{}]interface{}
+		}
+	*/
+
+	// Setup some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+	f := Flag(5)
+	b := []byte{
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+		0x31, 0x32,
+	}
+
+	// Dump!
+	spew.Dump(s1, f, b)
+
+	// Output:
+	// (spew_test.Foo) {
+	//  unexportedField: (spew_test.Bar) {
+	//   data: (uintptr) <nil>
+	//  },
+	//  ExportedField: (map[interface {}]interface {}) (len=1) {
+	//   (string) (len=3) "one": (bool) true
+	//  }
+	// }
+	// (spew_test.Flag) Unknown flag (5)
+	// ([]uint8) (len=34 cap=34) {
+	//  00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	//  00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	//  00000020  31 32                                             |12|
+	// }
+	//
+}
+
+// This example demonstrates how to use Printf to display a variable with a
+// format string and inline formatting.
+func ExamplePrintf() {
+	// Create a double pointer to a uint8.
+	ui8 := uint8(5)
+	pui8 := &ui8
+	ppui8 := &pui8
+
+	// Create a circular data type.
+	type circular struct {
+		ui8 uint8
+		c   *circular
+	}
+	c := circular{ui8: 1}
+	c.c = &c
+
+	// Print!
+	spew.Printf("ppui8: %v\n", ppui8)
+	spew.Printf("circular: %v\n", c)
+
+	// Output:
+	// ppui8: <**>5
+	// circular: {1 <*>{1 <*><shown>}}
+}
+
+// This example demonstrates how to use a ConfigState.
+func ExampleConfigState() {
+	// Modify the indent level of the ConfigState only.  The global
+	// configuration is not modified.
+	scs := spew.ConfigState{Indent: "\t"}
+
+	// Output using the ConfigState instance.
+	v := map[string]int{"one": 1}
+	scs.Printf("v: %v\n", v)
+	scs.Dump(v)
+
+	// Output:
+	// v: map[one:1]
+	// (map[string]int) (len=1) {
+	// 	(string) (len=3) "one": (int) 1
+	// }
+}
+
+// This example demonstrates how to use ConfigState.Dump to dump variables to
+// stdout.
+func ExampleConfigState_Dump() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances with different indentation.
+	scs := spew.ConfigState{Indent: "\t"}
+	scs2 := spew.ConfigState{Indent: " "}
+
+	// Setup some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+
+	// Dump using the ConfigState instances.
+	scs.Dump(s1)
+	scs2.Dump(s1)
+
+	// Output:
+	// (spew_test.Foo) {
+	// 	unexportedField: (spew_test.Bar) {
+	// 		data: (uintptr) <nil>
+	// 	},
+	// 	ExportedField: (map[interface {}]interface {}) (len=1) {
+	//		(string) (len=3) "one": (bool) true
+	// 	}
+	// }
+	// (spew_test.Foo) {
+	//  unexportedField: (spew_test.Bar) {
+	//   data: (uintptr) <nil>
+	//  },
+	//  ExportedField: (map[interface {}]interface {}) (len=1) {
+	//   (string) (len=3) "one": (bool) true
+	//  }
+	// }
+	//
+}
+
+// This example demonstrates how to use ConfigState.Printf to display a variable
+// with a format string and inline formatting.
+func ExampleConfigState_Printf() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances and modify the method handling of the
+	// first ConfigState only.
+	scs := spew.NewDefaultConfig()
+	scs2 := spew.NewDefaultConfig()
+	scs.DisableMethods = true
+
+	// Alternatively
+	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
+	// scs2 := spew.ConfigState{Indent: " "}
+
+	// This is of type Flag which implements a Stringer and has raw value 1.
+	f := flagTwo
+
+	// Dump using the ConfigState instances.
+	scs.Printf("f: %v\n", f)
+	scs2.Printf("f: %v\n", f)
+
+	// Output:
+	// f: 1
+	// f: flagTwo
+}

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go → vendor/github.com/davecgh/go-spew/spew/format.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1558 - 0
vendor/github.com/davecgh/go-spew/spew/format_test.go

@@ -0,0 +1,1558 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with custom formatter type on pointer receiver only keys and vals
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+  exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+- Type that has a custom Error interface
+- %x passthrough with uint
+- %#x passthrough with uint
+- %f passthrough with precision
+- %f passthrough with width and precision
+- %d passthrough with width
+- %q passthrough with string
+*/
+
+package spew_test
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+	"unsafe"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// formatterTest is used to describe a test to be performed against NewFormatter.
+type formatterTest struct {
+	format string
+	in     interface{}
+	wants  []string
+}
+
+// formatterTests houses all of the tests to be performed against NewFormatter.
+var formatterTests = make([]formatterTest, 0)
+
+// addFormatterTest is a helper method to append the passed input and desired
+// result to formatterTests.
+func addFormatterTest(format string, in interface{}, wants ...string) {
+	test := formatterTest{format, in, wants}
+	formatterTests = append(formatterTests, test)
+}
+
+func addIntFormatterTests() {
+	// Max int8.
+	v := int8(127)
+	nv := (*int8)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "int8"
+	vs := "127"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Max int16.
+	v2 := int16(32767)
+	nv2 := (*int16)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "int16"
+	v2s := "32767"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Max int32.
+	v3 := int32(2147483647)
+	nv3 := (*int32)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "int32"
+	v3s := "2147483647"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Max int64.
+	v4 := int64(9223372036854775807)
+	nv4 := (*int64)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "int64"
+	v4s := "9223372036854775807"
+	addFormatterTest("%v", v4, v4s)
+	addFormatterTest("%v", pv4, "<*>"+v4s)
+	addFormatterTest("%v", &pv4, "<**>"+v4s)
+	addFormatterTest("%v", nv4, "<nil>")
+	addFormatterTest("%+v", v4, v4s)
+	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+
+	// Max int.
+	v5 := int(2147483647)
+	nv5 := (*int)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "int"
+	v5s := "2147483647"
+	addFormatterTest("%v", v5, v5s)
+	addFormatterTest("%v", pv5, "<*>"+v5s)
+	addFormatterTest("%v", &pv5, "<**>"+v5s)
+	addFormatterTest("%v", nv5, "<nil>")
+	addFormatterTest("%+v", v5, v5s)
+	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+	addFormatterTest("%+v", nv5, "<nil>")
+	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+	addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
+}
+
+func addUintFormatterTests() {
+	// Max uint8.
+	v := uint8(255)
+	nv := (*uint8)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "uint8"
+	vs := "255"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Max uint16.
+	v2 := uint16(65535)
+	nv2 := (*uint16)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uint16"
+	v2s := "65535"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Max uint32.
+	v3 := uint32(4294967295)
+	nv3 := (*uint32)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "uint32"
+	v3s := "4294967295"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Max uint64.
+	v4 := uint64(18446744073709551615)
+	nv4 := (*uint64)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "uint64"
+	v4s := "18446744073709551615"
+	addFormatterTest("%v", v4, v4s)
+	addFormatterTest("%v", pv4, "<*>"+v4s)
+	addFormatterTest("%v", &pv4, "<**>"+v4s)
+	addFormatterTest("%v", nv4, "<nil>")
+	addFormatterTest("%+v", v4, v4s)
+	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+
+	// Max uint.
+	v5 := uint(4294967295)
+	nv5 := (*uint)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "uint"
+	v5s := "4294967295"
+	addFormatterTest("%v", v5, v5s)
+	addFormatterTest("%v", pv5, "<*>"+v5s)
+	addFormatterTest("%v", &pv5, "<**>"+v5s)
+	addFormatterTest("%v", nv5, "<nil>")
+	addFormatterTest("%+v", v5, v5s)
+	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+	addFormatterTest("%+v", nv5, "<nil>")
+	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+}
+
+func addBoolFormatterTests() {
+	// Boolean true.
+	v := bool(true)
+	nv := (*bool)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "bool"
+	vs := "true"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Boolean false.
+	v2 := bool(false)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "bool"
+	v2s := "false"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addFloatFormatterTests() {
+	// Standard float32.
+	v := float32(3.1415)
+	nv := (*float32)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "float32"
+	vs := "3.1415"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Standard float64.
+	v2 := float64(3.1415926)
+	nv2 := (*float64)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "float64"
+	v2s := "3.1415926"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+}
+
+func addComplexFormatterTests() {
+	// Standard complex64.
+	v := complex(float32(6), -2)
+	nv := (*complex64)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "complex64"
+	vs := "(6-2i)"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Standard complex128.
+	v2 := complex(float64(-6), 2)
+	nv2 := (*complex128)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "complex128"
+	v2s := "(-6+2i)"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+}
+
+func addArrayFormatterTests() {
+	// Array containing standard ints.
+	v := [3]int{1, 2, 3}
+	nv := (*[3]int)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "[3]int"
+	vs := "[1 2 3]"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Array containing type with custom formatter on pointer receiver only.
+	v2 := [3]pstringer{"1", "2", "3"}
+	nv2 := (*[3]pstringer)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "[3]spew_test.pstringer"
+	v2sp := "[stringer 1 stringer 2 stringer 3]"
+	v2s := v2sp
+	if spew.UnsafeDisabled {
+		v2s = "[1 2 3]"
+	}
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2sp)
+	addFormatterTest("%v", &pv2, "<**>"+v2sp)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Array containing interfaces.
+	v3 := [3]interface{}{"one", int(2), uint(3)}
+	nv3 := (*[3]interface{})(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "[3]interface {}"
+	v3t2 := "string"
+	v3t3 := "int"
+	v3t4 := "uint"
+	v3s := "[one 2 3]"
+	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+}
+
+func addSliceFormatterTests() {
+	// Slice containing standard float32 values.
+	v := []float32{3.14, 6.28, 12.56}
+	nv := (*[]float32)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "[]float32"
+	vs := "[3.14 6.28 12.56]"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Slice containing type with custom formatter on pointer receiver only.
+	v2 := []pstringer{"1", "2", "3"}
+	nv2 := (*[]pstringer)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "[]spew_test.pstringer"
+	v2s := "[stringer 1 stringer 2 stringer 3]"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Slice containing interfaces.
+	v3 := []interface{}{"one", int(2), uint(3), nil}
+	nv3 := (*[]interface{})(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "[]interface {}"
+	v3t2 := "string"
+	v3t3 := "int"
+	v3t4 := "uint"
+	v3t5 := "interface {}"
+	v3s := "[one 2 3 <nil>]"
+	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
+		")<nil>]"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Nil slice.
+	var v4 []int
+	nv4 := (*[]int)(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "[]int"
+	v4s := "<nil>"
+	addFormatterTest("%v", v4, v4s)
+	addFormatterTest("%v", pv4, "<*>"+v4s)
+	addFormatterTest("%v", &pv4, "<**>"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%+v", v4, v4s)
+	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addStringFormatterTests() {
+	// Standard string.
+	v := "test"
+	nv := (*string)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "string"
+	vs := "test"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addInterfaceFormatterTests() {
+	// Nil interface.
+	var v interface{}
+	nv := (*interface{})(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "interface {}"
+	vs := "<nil>"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Sub-interface.
+	v2 := interface{}(uint16(65535))
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uint16"
+	v2s := "65535"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addMapFormatterTests() {
+	// Map with string keys and int vals.
+	v := map[string]int{"one": 1, "two": 2}
+	nilMap := map[string]int(nil)
+	nv := (*map[string]int)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "map[string]int"
+	vs := "map[one:1 two:2]"
+	vs2 := "map[two:2 one:1]"
+	addFormatterTest("%v", v, vs, vs2)
+	addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
+	addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
+	addFormatterTest("%+v", nilMap, "<nil>")
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs, vs2)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
+		"<**>("+pvAddr+"->"+vAddr+")"+vs2)
+	addFormatterTest("%+v", nilMap, "<nil>")
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
+	addFormatterTest("%#v", nilMap, "("+vt+")"+"<nil>")
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
+		"(*"+vt+")("+vAddr+")"+vs2)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
+		"(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
+	addFormatterTest("%#+v", nilMap, "("+vt+")"+"<nil>")
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Map with keys and vals whose type has a pointer-receiver-only formatter.
+	v2 := map[pstringer]pstringer{"one": "1"}
+	nv2 := (*map[pstringer]pstringer)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "map[spew_test.pstringer]spew_test.pstringer"
+	v2s := "map[stringer one:stringer 1]"
+	if spew.UnsafeDisabled {
+		v2s = "map[one:1]"
+	}
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Map with interface keys and values.
+	v3 := map[interface{}]interface{}{"one": 1}
+	nv3 := (*map[interface{}]interface{})(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "map[interface {}]interface {}"
+	v3t1 := "string"
+	v3t2 := "int"
+	v3s := "map[one:1]"
+	v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Map with nil interface value.
+	v4 := map[string]interface{}{"nil": nil}
+	nv4 := (*map[string]interface{})(nil)
+	pv4 := &v4
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "map[string]interface {}"
+	v4t1 := "interface {}"
+	v4s := "map[nil:<nil>]"
+	v4s2 := "map[nil:(" + v4t1 + ")<nil>]"
+	addFormatterTest("%v", v4, v4s)
+	addFormatterTest("%v", pv4, "<*>"+v4s)
+	addFormatterTest("%v", &pv4, "<**>"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%+v", v4, v4s)
+	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
+	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
+	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
+	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
+	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
+	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
+	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addStructFormatterTests() {
+	// Struct with primitives.
+	type s1 struct {
+		a int8
+		b uint8
+	}
+	v := s1{127, 255}
+	nv := (*s1)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.s1"
+	vt2 := "int8"
+	vt3 := "uint8"
+	vs := "{127 255}"
+	vs2 := "{a:127 b:255}"
+	vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs2)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs3)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs3)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Struct that contains another struct.
+	type s2 struct {
+		s1 s1
+		b  bool
+	}
+	v2 := s2{s1{127, 255}, true}
+	nv2 := (*s2)(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.s2"
+	v2t2 := "spew_test.s1"
+	v2t3 := "int8"
+	v2t4 := "uint8"
+	v2t5 := "bool"
+	v2s := "{{127 255} true}"
+	v2s2 := "{s1:{a:127 b:255} b:true}"
+	v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
+		v2t5 + ")true}"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s2)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Struct that contains a custom type implementing Stringer on a pointer
+	// receiver, via both exported and unexported fields.
+	type s3 struct {
+		s pstringer
+		S pstringer
+	}
+	v3 := s3{"test", "test2"}
+	nv3 := (*s3)(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.s3"
+	v3t2 := "spew_test.pstringer"
+	v3s := "{stringer test stringer test2}"
+	v3sp := v3s
+	v3s2 := "{s:stringer test S:stringer test2}"
+	v3s2p := v3s2
+	v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
+	v3s3p := v3s3
+	if spew.UnsafeDisabled {
+		v3s = "{test test2}"
+		v3sp = "{test stringer test2}"
+		v3s2 = "{s:test S:test2}"
+		v3s2p = "{s:test S:stringer test2}"
+		v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}"
+		v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}"
+	}
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3sp)
+	addFormatterTest("%v", &pv3, "<**>"+v3sp)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s2)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s3)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p)
+	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Struct containing an embedded struct and a field pointing to the same struct.
+	e := embed{"embedstr"}
+	v4 := embedwrap{embed: &e, e: &e}
+	nv4 := (*embedwrap)(nil)
+	pv4 := &v4
+	eAddr := fmt.Sprintf("%p", &e)
+	v4Addr := fmt.Sprintf("%p", pv4)
+	pv4Addr := fmt.Sprintf("%p", &pv4)
+	v4t := "spew_test.embedwrap"
+	v4t2 := "spew_test.embed"
+	v4t3 := "string"
+	v4s := "{<*>{embedstr} <*>{embedstr}}"
+	v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr +
+		"){a:embedstr}}"
+	v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 +
+		"){a:(" + v4t3 + ")embedstr}}"
+	v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 +
+		")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}"
+	addFormatterTest("%v", v4, v4s)
+	addFormatterTest("%v", pv4, "<*>"+v4s)
+	addFormatterTest("%v", &pv4, "<**>"+v4s)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%+v", v4, v4s2)
+	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2)
+	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2)
+	addFormatterTest("%+v", nv4, "<nil>")
+	addFormatterTest("%#v", v4, "("+v4t+")"+v4s3)
+	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3)
+	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3)
+	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4)
+	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4)
+	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4)
+	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addUintptrFormatterTests() {
+	// Null pointer.
+	v := uintptr(0)
+	nv := (*uintptr)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "uintptr"
+	vs := "<nil>"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Address of real variable.
+	i := 1
+	v2 := uintptr(unsafe.Pointer(&i))
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uintptr"
+	v2s := fmt.Sprintf("%p", &i)
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addUnsafePointerFormatterTests() {
+	// Null pointer.
+	v := unsafe.Pointer(uintptr(0))
+	nv := (*unsafe.Pointer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "unsafe.Pointer"
+	vs := "<nil>"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Address of real variable.
+	i := 1
+	v2 := unsafe.Pointer(&i)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "unsafe.Pointer"
+	v2s := fmt.Sprintf("%p", &i)
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addChanFormatterTests() {
+	// Nil channel.
+	var v chan int
+	pv := &v
+	nv := (*chan int)(nil)
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "chan int"
+	vs := "<nil>"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Real channel.
+	v2 := make(chan int)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "chan int"
+	v2s := fmt.Sprintf("%p", v2)
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addFuncFormatterTests() {
+	// Function with no params and no returns.
+	v := addIntFormatterTests
+	nv := (*func())(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "func()"
+	vs := fmt.Sprintf("%p", v)
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Function with param and no returns.
+	v2 := TestFormatter
+	nv2 := (*func(*testing.T))(nil)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "func(*testing.T)"
+	v2s := fmt.Sprintf("%p", v2)
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s)
+	addFormatterTest("%v", &pv2, "<**>"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%+v", v2, v2s)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%+v", nv2, "<nil>")
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+	// Function with multiple params and multiple returns.
+	var v3 = func(i int, s string) (b bool, err error) {
+		return true, nil
+	}
+	nv3 := (*func(int, string) (bool, error))(nil)
+	pv3 := &v3
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "func(int, string) (bool, error)"
+	v3s := fmt.Sprintf("%p", v3)
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s)
+	addFormatterTest("%v", &pv3, "<**>"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%+v", v3, v3s)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%+v", nv3, "<nil>")
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+}
+
+func addCircularFormatterTests() {
+	// Struct that is circular through self referencing.
+	type circular struct {
+		c *circular
+	}
+	v := circular{nil}
+	v.c = &v
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.circular"
+	vs := "{<*>{<*><shown>}}"
+	vs2 := "{<*><shown>}"
+	vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")<shown>}}"
+	vs4 := "{c:<*>(" + vAddr + ")<shown>}"
+	vs5 := "{c:(*" + vt + "){c:(*" + vt + ")<shown>}}"
+	vs6 := "{c:(*" + vt + ")<shown>}"
+	vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr +
+		")<shown>}}"
+	vs8 := "{c:(*" + vt + ")(" + vAddr + ")<shown>}"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs2)
+	addFormatterTest("%v", &pv, "<**>"+vs2)
+	addFormatterTest("%+v", v, vs3)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4)
+	addFormatterTest("%#v", v, "("+vt+")"+vs5)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs6)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6)
+	addFormatterTest("%#+v", v, "("+vt+")"+vs7)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8)
+
+	// Structs that are circular through cross referencing.
+	v2 := xref1{nil}
+	ts2 := xref2{&v2}
+	v2.ps2 = &ts2
+	pv2 := &v2
+	ts2Addr := fmt.Sprintf("%p", &ts2)
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.xref1"
+	v2t2 := "spew_test.xref2"
+	v2s := "{<*>{<*>{<*><shown>}}}"
+	v2s2 := "{<*>{<*><shown>}}"
+	v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" +
+		ts2Addr + ")<shown>}}}"
+	v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")<shown>}}"
+	v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 +
+		")<shown>}}}"
+	v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")<shown>}}"
+	v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
+		")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr +
+		")<shown>}}}"
+	v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
+		")(" + v2Addr + ")<shown>}}"
+	addFormatterTest("%v", v2, v2s)
+	addFormatterTest("%v", pv2, "<*>"+v2s2)
+	addFormatterTest("%v", &pv2, "<**>"+v2s2)
+	addFormatterTest("%+v", v2, v2s3)
+	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4)
+	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4)
+	addFormatterTest("%#v", v2, "("+v2t+")"+v2s5)
+	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6)
+	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6)
+	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7)
+	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8)
+	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8)
+
+	// Structs that are indirectly circular.
+	v3 := indirCir1{nil}
+	tic2 := indirCir2{nil}
+	tic3 := indirCir3{&v3}
+	tic2.ps3 = &tic3
+	v3.ps2 = &tic2
+	pv3 := &v3
+	tic2Addr := fmt.Sprintf("%p", &tic2)
+	tic3Addr := fmt.Sprintf("%p", &tic3)
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.indirCir1"
+	v3t2 := "spew_test.indirCir2"
+	v3t3 := "spew_test.indirCir3"
+	v3s := "{<*>{<*>{<*>{<*><shown>}}}}"
+	v3s2 := "{<*>{<*>{<*><shown>}}}"
+	v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+		v3Addr + "){ps2:<*>(" + tic2Addr + ")<shown>}}}}"
+	v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+		v3Addr + ")<shown>}}}"
+	v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+		"){ps2:(*" + v3t2 + ")<shown>}}}}"
+	v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+		")<shown>}}}"
+	v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 +
+		")(" + tic2Addr + ")<shown>}}}}"
+	v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")<shown>}}}"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s2)
+	addFormatterTest("%v", &pv3, "<**>"+v3s2)
+	addFormatterTest("%+v", v3, v3s3)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4)
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s5)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6)
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8)
+}
+
+func addPanicFormatterTests() {
+	// Type that panics in its Stringer interface.
+	v := panicer(127)
+	nv := (*panicer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.panicer"
+	vs := "(PANIC=test panic)127"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addErrorFormatterTests() {
+	// Type that has a custom Error interface.
+	v := customError(127)
+	nv := (*customError)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.customError"
+	vs := "error: 127"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addPassthroughFormatterTests() {
+	// %x passthrough with uint.
+	v := uint(4294967295)
+	pv := &v
+	vAddr := fmt.Sprintf("%x", pv)
+	pvAddr := fmt.Sprintf("%x", &pv)
+	vs := "ffffffff"
+	addFormatterTest("%x", v, vs)
+	addFormatterTest("%x", pv, vAddr)
+	addFormatterTest("%x", &pv, pvAddr)
+
+	// %#x passthrough with int.
+	v2 := int(2147483647)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%#x", pv2)
+	pv2Addr := fmt.Sprintf("%#x", &pv2)
+	v2s := "0x7fffffff"
+	addFormatterTest("%#x", v2, v2s)
+	addFormatterTest("%#x", pv2, v2Addr)
+	addFormatterTest("%#x", &pv2, pv2Addr)
+
+	// %f passthrough with precision.
+	addFormatterTest("%.2f", 3.1415, "3.14")
+	addFormatterTest("%.3f", 3.1415, "3.142")
+	addFormatterTest("%.4f", 3.1415, "3.1415")
+
+	// %f passthrough with width and precision.
+	addFormatterTest("%5.2f", 3.1415, " 3.14")
+	addFormatterTest("%6.3f", 3.1415, " 3.142")
+	addFormatterTest("%7.4f", 3.1415, " 3.1415")
+
+	// %d passthrough with width.
+	addFormatterTest("%3d", 127, "127")
+	addFormatterTest("%4d", 127, " 127")
+	addFormatterTest("%5d", 127, "  127")
+
+	// %q passthrough with string.
+	addFormatterTest("%q", "test", "\"test\"")
+}
+
+// TestFormatter executes all of the tests described by formatterTests.
+func TestFormatter(t *testing.T) {
+	// Setup tests.
+	addIntFormatterTests()
+	addUintFormatterTests()
+	addBoolFormatterTests()
+	addFloatFormatterTests()
+	addComplexFormatterTests()
+	addArrayFormatterTests()
+	addSliceFormatterTests()
+	addStringFormatterTests()
+	addInterfaceFormatterTests()
+	addMapFormatterTests()
+	addStructFormatterTests()
+	addUintptrFormatterTests()
+	addUnsafePointerFormatterTests()
+	addChanFormatterTests()
+	addFuncFormatterTests()
+	addCircularFormatterTests()
+	addPanicFormatterTests()
+	addErrorFormatterTests()
+	addPassthroughFormatterTests()
+
+	t.Logf("Running %d tests", len(formatterTests))
+	for i, test := range formatterTests {
+		buf := new(bytes.Buffer)
+		spew.Fprintf(buf, test.format, test.in)
+		s := buf.String()
+		if testFailed(s, test.wants) {
+			t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s,
+				stringizeWants(test.wants))
+			continue
+		}
+	}
+}
+
+type testStruct struct {
+	x int
+}
+
+func (ts testStruct) String() string {
+	return fmt.Sprintf("ts.%d", ts.x)
+}
+
+type testStructP struct {
+	x int
+}
+
+func (ts *testStructP) String() string {
+	return fmt.Sprintf("ts.%d", ts.x)
+}
+
+func TestPrintSortedKeys(t *testing.T) {
+	cfg := spew.ConfigState{SortKeys: true}
+	s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"})
+	expected := "map[1:1 2:2 3:3]"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch 1:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2})
+	expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch 2:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
+	expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
+	if spew.UnsafeDisabled {
+		expected = "map[1:1 2:2 3:3]"
+	}
+	if s != expected {
+		t.Errorf("Sorted keys mismatch 3:\n  %v %v", s, expected)
+	}
+
+	s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
+	expected = "map[ts.1:1 ts.2:2 ts.3:3]"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch 4:\n  %v %v", s, expected)
+	}
+
+	if !spew.UnsafeDisabled {
+		s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
+		expected = "map[ts.1:1 ts.2:2 ts.3:3]"
+		if s != expected {
+			t.Errorf("Sorted keys mismatch 5:\n  %v %v", s, expected)
+		}
+	}
+
+	s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
+	expected = "map[error: 1:1 error: 2:2 error: 3:3]"
+	if s != expected {
+		t.Errorf("Sorted keys mismatch 6:\n  %v %v", s, expected)
+	}
+}
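
For readers skimming this diff: the formatter tests above assert a consistent verb matrix. Below is a minimal standalone sketch of what they check, using only the spew.Sprintf API already exercised by these tests; the 0x... addresses in the comments are illustrative, not literal output.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := uint16(65535)
	pv := &v
	fmt.Println(spew.Sprintf("%v", pv))   // <*>65535              -- pointer dereferenced
	fmt.Println(spew.Sprintf("%+v", pv))  // <*>(0x...)65535       -- plus pointer address
	fmt.Println(spew.Sprintf("%#v", v))   // (uint16)65535         -- plus type annotation
	fmt.Println(spew.Sprintf("%#+v", pv)) // (*uint16)(0x...)65535 -- type and address
}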

+ 87 - 0
vendor/github.com/davecgh/go-spew/spew/internal_test.go

@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+// dummyFmtState implements a fake fmt.State to use for testing invalid
+// reflect.Value handling.  This is necessary because the fmt package catches
+// invalid values before invoking the formatter on them.
+type dummyFmtState struct {
+	bytes.Buffer
+}
+
+func (dfs *dummyFmtState) Flag(f int) bool {
+	if f == int('+') {
+		return true
+	}
+	return false
+}
+
+func (dfs *dummyFmtState) Precision() (int, bool) {
+	return 0, false
+}
+
+func (dfs *dummyFmtState) Width() (int, bool) {
+	return 0, false
+}
+
+// TestInvalidReflectValue ensures the dump and formatter code handles an
+// invalid reflect value properly.  This needs access to internal state since it
+// should never happen in real code and therefore can't be tested via the public
+// API.
+func TestInvalidReflectValue(t *testing.T) {
+	i := 1
+
+	// Dump invalid reflect value.
+	v := new(reflect.Value)
+	buf := new(bytes.Buffer)
+	d := dumpState{w: buf, cs: &Config}
+	d.dump(*v)
+	s := buf.String()
+	want := "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Formatter invalid reflect value.
+	buf2 := new(dummyFmtState)
+	f := formatState{value: *v, cs: &Config, fs: buf2}
+	f.format(*v)
+	s = buf2.String()
+	want = "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
+	}
+}
+
+// SortValues makes the internal sortValues function available to the test
+// package.
+func SortValues(values []reflect.Value, cs *ConfigState) {
+	sortValues(values, cs)
+}
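
The dummyFmtState above only fakes Flag, Width and Precision because, together with the embedded bytes.Buffer's Write method, that is enough to satisfy fmt.State, the value the standard library hands to any fmt.Formatter. A small, hypothetical formatter (not part of this diff) showing how that state is consumed:

package main

import "fmt"

// plusAware is a toy fmt.Formatter that, like the spew format state under
// test, inspects the '+' flag carried by fmt.State.
type plusAware struct{ s string }

func (p plusAware) Format(f fmt.State, verb rune) {
	if f.Flag('+') {
		fmt.Fprintf(f, "+%s", p.s) // the '+' flag was requested
		return
	}
	fmt.Fprint(f, p.s)
}

func main() {
	fmt.Printf("%v %+v\n", plusAware{"x"}, plusAware{"x"}) // prints: x +x
}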

+ 102 - 0
vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go

@@ -0,0 +1,102 @@
+// Copyright (c) 2013-2016 Dave Collins <[email protected]>
+
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not being compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line.  The
+// "disableunsafe" tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+	"unsafe"
+)
+
+// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
+// the maximum kind value which does not exist.  This is needed to test the
+// fallback code which punts to the standard fmt library for new types that
+// might get added to the language.
+func changeKind(v *reflect.Value, readOnly bool) {
+	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
+	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
+	if readOnly {
+		*rvf |= flagRO
+	} else {
+		*rvf &= ^uintptr(flagRO)
+	}
+}
+
+// TestAddedReflectValue tests functionality of the dump and formatter code which
+// falls back to the standard fmt library for new types that might get added to
+// the language.
+func TestAddedReflectValue(t *testing.T) {
+	i := 1
+
+	// Dump using a reflect.Value that is exported.
+	v := reflect.ValueOf(int8(5))
+	changeKind(&v, false)
+	buf := new(bytes.Buffer)
+	d := dumpState{w: buf, cs: &Config}
+	d.dump(v)
+	s := buf.String()
+	want := "(int8) 5"
+	if s != want {
+		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Dump using a reflect.Value that is not exported.
+	changeKind(&v, true)
+	buf.Reset()
+	d.dump(v)
+	s = buf.String()
+	want = "(int8) <int8 Value>"
+	if s != want {
+		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Formatter using a reflect.Value that is exported.
+	changeKind(&v, false)
+	buf2 := new(dummyFmtState)
+	f := formatState{value: v, cs: &Config, fs: buf2}
+	f.format(v)
+	s = buf2.String()
+	want = "5"
+	if s != want {
+		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Formatter using a reflect.Value that is not exported.
+	changeKind(&v, true)
+	buf2.Reset()
+	f = formatState{value: v, cs: &Config, fs: buf2}
+	f.format(v)
+	s = buf2.String()
+	want = "<int8 Value>"
+	if s != want {
+		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
+	}
+}
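
Several expectations in the tests branch on spew.UnsafeDisabled. A minimal sketch of that pattern is shown below; the pstringer type here is a local stand-in for the one defined in the test helpers, not the vendored definition. The point being tested: pointer-receiver Stringers on unaddressable map keys and values are only reachable when the package is built with the unsafe path enabled.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// pstringer mirrors the test helper: String is defined on the pointer receiver.
type pstringer string

func (s *pstringer) String() string { return "stringer " + string(*s) }

func main() {
	m := map[pstringer]pstringer{"one": "1"}
	want := "map[stringer one:stringer 1]"
	if spew.UnsafeDisabled {
		// Safe builds (-tags safe, App Engine, GopherJS) cannot invoke the
		// pointer-receiver method on unaddressable values, so spew falls
		// back to the raw strings.
		want = "map[one:1]"
	}
	fmt.Println(spew.Sprint(m), "expected:", want)
}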

+ 1 - 1
vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go → vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <[email protected]>
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 320 - 0
vendor/github.com/davecgh/go-spew/spew/spew_test.go

@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <[email protected]>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// spewFunc is used to identify which public function of the spew package or
+// ConfigState a test applies to.
+type spewFunc int
+
+const (
+	fCSFdump spewFunc = iota
+	fCSFprint
+	fCSFprintf
+	fCSFprintln
+	fCSPrint
+	fCSPrintln
+	fCSSdump
+	fCSSprint
+	fCSSprintf
+	fCSSprintln
+	fCSErrorf
+	fCSNewFormatter
+	fErrorf
+	fFprint
+	fFprintln
+	fPrint
+	fPrintln
+	fSdump
+	fSprint
+	fSprintf
+	fSprintln
+)
+
+// Map of spewFunc values to names for pretty printing.
+var spewFuncStrings = map[spewFunc]string{
+	fCSFdump:        "ConfigState.Fdump",
+	fCSFprint:       "ConfigState.Fprint",
+	fCSFprintf:      "ConfigState.Fprintf",
+	fCSFprintln:     "ConfigState.Fprintln",
+	fCSSdump:        "ConfigState.Sdump",
+	fCSPrint:        "ConfigState.Print",
+	fCSPrintln:      "ConfigState.Println",
+	fCSSprint:       "ConfigState.Sprint",
+	fCSSprintf:      "ConfigState.Sprintf",
+	fCSSprintln:     "ConfigState.Sprintln",
+	fCSErrorf:       "ConfigState.Errorf",
+	fCSNewFormatter: "ConfigState.NewFormatter",
+	fErrorf:         "spew.Errorf",
+	fFprint:         "spew.Fprint",
+	fFprintln:       "spew.Fprintln",
+	fPrint:          "spew.Print",
+	fPrintln:        "spew.Println",
+	fSdump:          "spew.Sdump",
+	fSprint:         "spew.Sprint",
+	fSprintf:        "spew.Sprintf",
+	fSprintln:       "spew.Sprintln",
+}
+
+func (f spewFunc) String() string {
+	if s, ok := spewFuncStrings[f]; ok {
+		return s
+	}
+	return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
+}
+
+// spewTest is used to describe a test to be performed against the public
+// functions of the spew package or ConfigState.
+type spewTest struct {
+	cs     *spew.ConfigState
+	f      spewFunc
+	format string
+	in     interface{}
+	want   string
+}
+
+// spewTests houses the tests to be performed against the public functions of
+// the spew package and ConfigState.
+//
+// These tests are only intended to ensure the public functions are exercised
+// and are intentionally not exhaustive of types.  The exhaustive type
+// tests are handled in the dump and format tests.
+var spewTests []spewTest
+
+// redirStdout is a helper function to return the standard output from f as a
+// byte slice.
+func redirStdout(f func()) ([]byte, error) {
+	tempFile, err := ioutil.TempFile("", "ss-test")
+	if err != nil {
+		return nil, err
+	}
+	fileName := tempFile.Name()
+	defer os.Remove(fileName) // Ignore error
+
+	origStdout := os.Stdout
+	os.Stdout = tempFile
+	f()
+	os.Stdout = origStdout
+	tempFile.Close()
+
+	return ioutil.ReadFile(fileName)
+}
+
+func initSpewTests() {
+	// Config states with various settings.
+	scsDefault := spew.NewDefaultConfig()
+	scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
+	scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
+	scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
+	scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
+	scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
+	scsNoCap := &spew.ConfigState{DisableCapacities: true}
+
+	// Variables for tests on types which implement Stringer interface with and
+	// without a pointer receiver.
+	ts := stringer("test")
+	tps := pstringer("test")
+
+	type ptrTester struct {
+		s *struct{}
+	}
+	tptr := &ptrTester{s: &struct{}{}}
+
+	// depthTester is used to test max depth handling for structs, array, slices
+	// and maps.
+	type depthTester struct {
+		ic    indirCir1
+		arr   [1]string
+		slice []string
+		m     map[string]int
+	}
+	dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
+		map[string]int{"one": 1}}
+
+	// Variable for tests on types which implement error interface.
+	te := customError(10)
+
+	spewTests = []spewTest{
+		{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
+		{scsDefault, fCSFprint, "", int16(32767), "32767"},
+		{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
+		{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
+		{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
+		{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
+		{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
+		{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
+		{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
+		{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
+		{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
+		{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
+		{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
+		{scsDefault, fFprint, "", float32(3.14), "3.14"},
+		{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
+		{scsDefault, fPrint, "", true, "true"},
+		{scsDefault, fPrintln, "", false, "false\n"},
+		{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
+		{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
+		{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
+		{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
+		{scsNoMethods, fCSFprint, "", ts, "test"},
+		{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
+		{scsNoMethods, fCSFprint, "", tps, "test"},
+		{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
+		{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
+		{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
+		{scsNoPmethods, fCSFprint, "", tps, "test"},
+		{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
+		{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
+		{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
+			" ic: (spew_test.indirCir1) {\n  <max depth reached>\n },\n" +
+			" arr: ([1]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
+			" slice: ([]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
+			" m: (map[string]int) (len=1) {\n  <max depth reached>\n }\n}\n"},
+		{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
+		{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
+			"(len=4) (stringer test) \"test\"\n"},
+		{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
+		{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
+			"(error: 10) 10\n"},
+		{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
+		{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
+		{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
+		{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
+	}
+}
+
+// TestSpew executes all of the tests described by spewTests.
+func TestSpew(t *testing.T) {
+	initSpewTests()
+
+	t.Logf("Running %d tests", len(spewTests))
+	for i, test := range spewTests {
+		buf := new(bytes.Buffer)
+		switch test.f {
+		case fCSFdump:
+			test.cs.Fdump(buf, test.in)
+
+		case fCSFprint:
+			test.cs.Fprint(buf, test.in)
+
+		case fCSFprintf:
+			test.cs.Fprintf(buf, test.format, test.in)
+
+		case fCSFprintln:
+			test.cs.Fprintln(buf, test.in)
+
+		case fCSPrint:
+			b, err := redirStdout(func() { test.cs.Print(test.in) })
+			if err != nil {
+				t.Errorf("%v #%d %v", test.f, i, err)
+				continue
+			}
+			buf.Write(b)
+
+		case fCSPrintln:
+			b, err := redirStdout(func() { test.cs.Println(test.in) })
+			if err != nil {
+				t.Errorf("%v #%d %v", test.f, i, err)
+				continue
+			}
+			buf.Write(b)
+
+		case fCSSdump:
+			str := test.cs.Sdump(test.in)
+			buf.WriteString(str)
+
+		case fCSSprint:
+			str := test.cs.Sprint(test.in)
+			buf.WriteString(str)
+
+		case fCSSprintf:
+			str := test.cs.Sprintf(test.format, test.in)
+			buf.WriteString(str)
+
+		case fCSSprintln:
+			str := test.cs.Sprintln(test.in)
+			buf.WriteString(str)
+
+		case fCSErrorf:
+			err := test.cs.Errorf(test.format, test.in)
+			buf.WriteString(err.Error())
+
+		case fCSNewFormatter:
+			fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
+
+		case fErrorf:
+			err := spew.Errorf(test.format, test.in)
+			buf.WriteString(err.Error())
+
+		case fFprint:
+			spew.Fprint(buf, test.in)
+
+		case fFprintln:
+			spew.Fprintln(buf, test.in)
+
+		case fPrint:
+			b, err := redirStdout(func() { spew.Print(test.in) })
+			if err != nil {
+				t.Errorf("%v #%d %v", test.f, i, err)
+				continue
+			}
+			buf.Write(b)
+
+		case fPrintln:
+			b, err := redirStdout(func() { spew.Println(test.in) })
+			if err != nil {
+				t.Errorf("%v #%d %v", test.f, i, err)
+				continue
+			}
+			buf.Write(b)
+
+		case fSdump:
+			str := spew.Sdump(test.in)
+			buf.WriteString(str)
+
+		case fSprint:
+			str := spew.Sprint(test.in)
+			buf.WriteString(str)
+
+		case fSprintf:
+			str := spew.Sprintf(test.format, test.in)
+			buf.WriteString(str)
+
+		case fSprintln:
+			str := spew.Sprintln(test.in)
+			buf.WriteString(str)
+
+		default:
+			t.Errorf("%v #%d unrecognized function", test.f, i)
+			continue
+		}
+		s := buf.String()
+		if test.want != s {
+			t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
+			continue
+		}
+	}
+}
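
The table-driven tests above exercise spew's package-level functions and the
various ConfigState options (DisableMethods, MaxDepth, DisablePointerAddresses,
and so on). For orientation only, here is a minimal sketch of how those options
might look in ordinary application code; the struct and values are illustrative
and not taken from this repository, only the exported spew API is assumed.

package main

import (
	"github.com/davecgh/go-spew/spew"
)

type inner struct{ Name string }

type outer struct {
	ID    int
	Child *inner
}

func main() {
	v := outer{ID: 7, Child: &inner{Name: "example"}}

	// Package-level default: types, lengths, capacities and pointer addresses.
	spew.Dump(v)

	// Shallow dump, mirroring the scsMaxDepth case in the tests above.
	shallow := &spew.ConfigState{Indent: " ", MaxDepth: 1}
	shallow.Dump(v)

	// Diff-friendly output without pointer addresses, mirroring scsNoPtrAddr.
	noAddr := &spew.ConfigState{Indent: " ", DisablePointerAddresses: true}
	noAddr.Dump(v)
}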

+ 82 - 0
vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go

@@ -0,0 +1,82 @@
+// Copyright (c) 2013 Dave Collins <[email protected]>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line.  This code should really only be in the dumpcgo_test.go file,
+// but unfortunately Go will not allow cgo in test files, so this is a
+// workaround to allow cgo types to be tested.  This configuration is used
+// because spew itself does not require cgo to run even though it does handle
+// certain cgo types specially.  Rather than forcing all clients to require cgo
+// and an external C compiler just to run the tests, this scheme makes them
+// optional.
+// +build cgo,testcgo
+
+package testdata
+
+/*
+#include <stdint.h>
+typedef unsigned char custom_uchar_t;
+
+char            *ncp = 0;
+char            *cp = "test";
+char             ca[6] = {'t', 'e', 's', 't', '2', '\0'};
+unsigned char    uca[6] = {'t', 'e', 's', 't', '3', '\0'};
+signed char      sca[6] = {'t', 'e', 's', 't', '4', '\0'};
+uint8_t          ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
+custom_uchar_t   tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
+*/
+import "C"
+
+// GetCgoNullCharPointer returns a null char pointer via cgo.  This is only
+// used for tests.
+func GetCgoNullCharPointer() interface{} {
+	return C.ncp
+}
+
+// GetCgoCharPointer returns a char pointer via cgo.  This is only used for
+// tests.
+func GetCgoCharPointer() interface{} {
+	return C.cp
+}
+
+// GetCgoCharArray returns a char array via cgo and the array's len and cap.
+// This is only used for tests.
+func GetCgoCharArray() (interface{}, int, int) {
+	return C.ca, len(C.ca), cap(C.ca)
+}
+
+// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
+// array's len and cap.  This is only used for tests.
+func GetCgoUnsignedCharArray() (interface{}, int, int) {
+	return C.uca, len(C.uca), cap(C.uca)
+}
+
+// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
+// and cap.  This is only used for tests.
+func GetCgoSignedCharArray() (interface{}, int, int) {
+	return C.sca, len(C.sca), cap(C.sca)
+}
+
+// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
+// cap.  This is only used for tests.
+func GetCgoUint8tArray() (interface{}, int, int) {
+	return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
+}
+
+// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
+// cgo and the array's len and cap.  This is only used for tests.
+func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
+	return C.tuca, len(C.tuca), cap(C.tuca)
+}
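
Given those build tags, a consumer test could exercise the helpers roughly as
in the sketch below. The file, test name, and assertions are hypothetical; it
compiles only when cgo is available and `go test -tags testcgo` is run.

// +build cgo,testcgo

package testdata_test

import (
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/davecgh/go-spew/spew/testdata"
)

// TestCgoCharArrayDump fetches a C char array via the helper above, checks
// that spew can render it, and that len/cap match the six elements declared
// in the C block.
func TestCgoCharArrayDump(t *testing.T) {
	v, length, capacity := testdata.GetCgoCharArray()
	if out := spew.Sdump(v); out == "" {
		t.Error("spew.Sdump returned an empty string for the cgo char array")
	}
	if length != 6 || capacity != 6 {
		t.Errorf("got len=%d cap=%d, want 6 and 6", length, capacity)
	}
}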

+ 61 - 0
vendor/github.com/davecgh/go-spew/test_coverage.txt

@@ -0,0 +1,61 @@
+
+github.com/davecgh/go-spew/spew/dump.go		 dumpState.dump			 100.00% (88/88)
+github.com/davecgh/go-spew/spew/format.go	 formatState.format		 100.00% (82/82)
+github.com/davecgh/go-spew/spew/format.go	 formatState.formatPtr		 100.00% (52/52)
+github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpPtr		 100.00% (44/44)
+github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpSlice		 100.00% (39/39)
+github.com/davecgh/go-spew/spew/common.go	 handleMethods			 100.00% (30/30)
+github.com/davecgh/go-spew/spew/common.go	 printHexPtr			 100.00% (18/18)
+github.com/davecgh/go-spew/spew/common.go	 unsafeReflectValue		 100.00% (13/13)
+github.com/davecgh/go-spew/spew/format.go	 formatState.constructOrigFormat 100.00% (12/12)
+github.com/davecgh/go-spew/spew/dump.go		 fdump				 100.00% (11/11)
+github.com/davecgh/go-spew/spew/format.go	 formatState.Format		 100.00% (11/11)
+github.com/davecgh/go-spew/spew/common.go	 init				 100.00% (10/10)
+github.com/davecgh/go-spew/spew/common.go	 printComplex			 100.00% (9/9)
+github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Less		 100.00% (8/8)
+github.com/davecgh/go-spew/spew/format.go	 formatState.buildDefaultFormat	 100.00% (7/7)
+github.com/davecgh/go-spew/spew/format.go	 formatState.unpackValue	 100.00% (5/5)
+github.com/davecgh/go-spew/spew/dump.go		 dumpState.indent		 100.00% (4/4)
+github.com/davecgh/go-spew/spew/common.go	 catchPanic			 100.00% (4/4)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.convertArgs	 100.00% (4/4)
+github.com/davecgh/go-spew/spew/spew.go		 convertArgs			 100.00% (4/4)
+github.com/davecgh/go-spew/spew/format.go	 newFormatter			 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go		 Sdump				 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go	 printBool			 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go	 sortValues			 100.00% (3/3)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sdump		 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go		 dumpState.unpackValue		 100.00% (3/3)
+github.com/davecgh/go-spew/spew/spew.go		 Printf				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Println			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Sprint				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Sprintf			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Sprintln			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go	 printFloat			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 NewDefaultConfig		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go	 printInt			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go	 printUint			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Len		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Swap		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Errorf		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprint		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintf		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintln		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Print		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Printf		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Println		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprint		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintf		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintln		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.NewFormatter	 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fdump		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go	 ConfigState.Dump		 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go		 Fdump				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go		 Dump				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Fprintln			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/format.go	 NewFormatter			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Errorf				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Fprint				 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Fprintf			 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go		 Print				 100.00% (1/1)
+github.com/davecgh/go-spew/spew			 ------------------------------- 100.00% (505/505)
+

+ 151 - 0
vendor/github.com/golang/geo/README.md

@@ -0,0 +1,151 @@
+# Overview
+
+This is a library for manipulating geometric shapes. Unlike many geometry
+libraries, S2 is primarily designed to work with _spherical geometry_, i.e.,
+shapes drawn on a sphere rather than on a planar 2D map. (In fact, the name S2
+is derived from the mathematical notation for the unit sphere.) This makes it
+especially suitable for working with geographic data.
+
+The library consists of:
+
+*   Basic representations of angles, intervals, latitude-longitude points, unit
+    3D vectors, and conversions among them.
+
+*   Various shapes over the unit sphere, such as spherical caps ("discs"),
+    latitude-longitude rectangles, polylines, and polygons. These are
+    collectively known as "regions".
+
+*   Support for spatial indexing of collections of geometry, and algorithms for
+    testing containment, finding nearby objects, finding intersections, etc.
+
+*   A hierarchical decomposition of the sphere into regions called "cells". The
+    hierarchy starts with the six faces of a projected cube and recursively
+    subdivides them in a quadtree-like fashion.
+
+*   The ability to approximate arbitrary regions as a collection of cells. This
+    is useful for building inverted indexes that allow queries over arbitrarily
+    shaped regions.
+
+The implementations attempt to be precise both in terms of mathematical
+definitions (e.g. whether regions include their boundaries, representations of
+empty and full regions) and numerical accuracy (e.g. avoiding cancellation
+error).
+
+Note that the intent of this library is to represent geometry as a mathematical
+abstraction. For example, although the unit sphere is obviously a useful
+approximation for the Earth's surface, functions that are specifically related
+to geography are not part of the core library (e.g. easting/northing
+conversions, ellipsoid approximations, geodetic vs. geocentric coordinates,
+etc).
+
+See http://godoc.org/github.com/golang/geo for specific package documentation.
+
+For an analogous library in C++, see
+https://code.google.com/archive/p/s2-geometry-library/, and in Java, see
+https://github.com/google/s2-geometry-library-java
+
+# Status of the Go Library
+
+This library is principally a port of [the C++ S2
+library](https://code.google.com/archive/p/s2-geometry-library), adapting to Go
+idioms where it makes sense. We detail the progress of this port below relative
+to that C++ library.
+
+## [ℝ¹](https://godoc.org/github.com/golang/geo/r1) - One-dimensional Cartesian coordinates
+
+Full parity with C++.
+
+## [ℝ²](https://godoc.org/github.com/golang/geo/r2) - Two-dimensional Cartesian coordinates
+
+Full parity with C++.
+
+## [ℝ³](https://godoc.org/github.com/golang/geo/r3) - Three-dimensional Cartesian coordinates
+
+Full parity with C++.
+
+## [S¹](https://godoc.org/github.com/golang/geo/s1) - Circular Geometry
+
+**Complete**
+
+*   ChordAngle
+
+**Mostly complete**
+
+*   Angle - Missing Arithmetic methods, Trigonometric methods, Conversion
+    to/from s2.Point, s2.LatLng, convenience methods from E5/E6/E7
+*   Interval - Missing ClampPoint, Complement, ComplementCenter,
+    HaussdorfDistance
+
+## [S²](https://godoc.org/github.com/golang/geo/s2) - Spherical Geometry
+
+Approximately 40% complete.
+
+**Complete** These files have full parity with the C++ implementation.
+
+*   Cap
+*   CellID
+*   LatLng
+*   matrix3x3
+*   Metric
+*   PaddedCell
+*   Region
+*   s2stuv.go (s2coords.h in C++) - This file is a collection of helper and
+    conversion methods to and from ST-space, UV-space, and XYZ-space.
+
+**Mostly Complete** Files that have almost all of the features of the original
+C++ code and are complete enough to use in live code. An up-to-date listing of
+the incomplete methods is documented at the end of each file.
+
+*   Cell - Missing Subdivide, BoundUV, DistanceToEdge, ChordDistance.
+*   CellUnion - Missing Union, Intersection, etc.
+*   Edgeutil - Missing Distance methods, LongitudePruner, FaceSegments,
+    ClosestPair.
+*   Point - Missing TurningAngle, Rotate, some Area methods.
+*   Polyline - Missing Projection, Intersects, Interpolate, etc.
+*   Rect (AKA s2latlngrect in C++) - Missing Centroid, Distance,
+    InteriorContains.
+*   RegionCoverer - Missing FloodFill and SimpleCovering.
+*   s2_test.go (AKA s2testing and s2textformat in C++) - Missing
+    ConcentricLoopsPolygon and Fractal test shape generation. This file is a
+    collection of testing helper methods.
+
+**In Progress** Files that have some work done, but are probably not complete
+enough for general use in production code.
+
+*   Loop - Loop has its basic skeleton and some tendons complete, but is
+    missing most things: Normalize, Invert, Area, Centroid, Projection,
+    Distance, Contains, Intersects, Union, etc. A significant portion of these
+    is under review now.
+*   Polygon - Polygon is at the partial-skeleton stage: the fields all exist
+    and some basic methods are implemented, but it is missing almost everything
+    else, including Init with multiple loops, Area, Centroid, Distance,
+    Projection, Intersection, Union, Contains, Normalized, etc.
+*   PolylineSimplifier - Initial work has begun on this.
+*   s2predicates.go - This file is a collection of helper methods used by other
+    parts of the library.
+*   ShapeIndex - Currently has only the minimal skeleton pieces submitted, but
+    changes are out for review for the remainder of it.
+
+**Not Started Yet.** These files (and their associated unit tests) depend on
+most of the In Progress files and cannot be started until those are further
+along.
+
+*   BoundaryOperation
+*   Builder - This is a robust tool for creating the various Shape types from a
+    collection of simpler S2 types.
+*   BuilderGraph
+*   BuilderLayers
+*   BuilderSnapFunctions
+*   ClosestEdgeQuery
+*   ClosestPointQuery
+*   ConvexHullQuery
+*   CrossingEdgeQuery
+*   EdgeTesselator
+*   PointCompression
+*   PointIndex
+*   PolygonBuilder
+*   RegionIntersection
+*   RegionUnion
+*   Projections
+*   ShapeUtil - Most of this will end up in s2_test.
+*   lexicon
+*   priorityqueuesequence

+ 1 - 1
vendor/github.com/golang/geo/r1/interval.go

@@ -84,7 +84,7 @@ func (i Interval) Intersects(oi Interval) bool {
 
 // InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
 func (i Interval) InteriorIntersects(oi Interval) bool {
-	return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= i.Hi
+	return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi
 }
 
 // Intersection returns the interval containing all points common to i and j.
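
The one-line change above swaps a conjunct that was already implied by
oi.Lo < i.Hi for a check that oi itself is non-empty (oi.Lo <= oi.Hi). A
minimal sketch of the resulting behaviour, assuming the vendored import path
github.com/golang/geo/r1:

package main

import (
	"fmt"

	"github.com/golang/geo/r1"
)

func main() {
	unit := r1.Interval{Lo: 0, Hi: 1}
	half := r1.Interval{Lo: 0.5, Hi: 0.5}
	empty := r1.EmptyInterval() // Lo > Hi, so the non-emptiness check fails

	fmt.Println(unit.InteriorIntersects(half))  // true: 0.5 lies in the open interval (0, 1)
	fmt.Println(unit.InteriorIntersects(empty)) // false: an empty interval intersects nothing
}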

+ 349 - 0
vendor/github.com/golang/geo/r1/interval_test.go

@@ -0,0 +1,349 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r1
+
+import (
+	"testing"
+)
+
+// Some standard intervals for use throughout the tests.
+var (
+	unit    = Interval{0, 1}
+	negunit = Interval{-1, 0}
+	half    = Interval{0.5, 0.5}
+	empty   = EmptyInterval()
+)
+
+func TestIsEmpty(t *testing.T) {
+	var zero Interval
+	if unit.IsEmpty() {
+		t.Errorf("%v should not be empty", unit)
+	}
+	if half.IsEmpty() {
+		t.Errorf("%v should not be empty", half)
+	}
+	if !empty.IsEmpty() {
+		t.Errorf("%v should be empty", empty)
+	}
+	if zero.IsEmpty() {
+		t.Errorf("zero Interval %v should not be empty", zero)
+	}
+}
+
+func TestCenter(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		want     float64
+	}{
+		{unit, 0.5},
+		{negunit, -0.5},
+		{half, 0.5},
+	}
+	for _, test := range tests {
+		got := test.interval.Center()
+		if got != test.want {
+			t.Errorf("%v.Center() = %v, want %v", test.interval, got, test.want)
+		}
+	}
+}
+
+func TestLength(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		want     float64
+	}{
+		{unit, 1},
+		{negunit, 1},
+		{half, 0},
+	}
+	for _, test := range tests {
+		if l := test.interval.Length(); l != test.want {
+			t.Errorf("%v.Length() = %v, want %v", test.interval, l, test.want)
+		}
+	}
+	if l := empty.Length(); l >= 0 {
+		t.Errorf("empty interval has non-negative length")
+	}
+}
+
+func TestIntervalContains(t *testing.T) {
+	tests := []struct {
+		interval         Interval
+		p                float64
+		contains         bool
+		interiorContains bool
+	}{
+		{
+			interval:         unit,
+			p:                0.5,
+			contains:         true,
+			interiorContains: true,
+		},
+		{
+			interval:         unit,
+			p:                0,
+			contains:         true,
+			interiorContains: false,
+		},
+		{
+			interval:         unit,
+			p:                1,
+			contains:         true,
+			interiorContains: false,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.interval.Contains(test.p); got != test.contains {
+			t.Errorf("%v.Contains(%v) = %t, want %t", test.interval, test.p, got, test.contains)
+		}
+		if got := test.interval.InteriorContains(test.p); got != test.interiorContains {
+			t.Errorf("%v.InteriorContains(%v) = %t, want %t", test.interval, test.p, got, test.interiorContains)
+		}
+	}
+}
+
+func TestIntervalOperations(t *testing.T) {
+	tests := []struct {
+		have               Interval
+		other              Interval
+		contains           bool
+		interiorContains   bool
+		intersects         bool
+		interiorIntersects bool
+	}{
+		{
+			have:               empty,
+			other:              empty,
+			contains:           true,
+			interiorContains:   true,
+			intersects:         false,
+			interiorIntersects: false,
+		},
+		{
+			have:               empty,
+			other:              unit,
+			contains:           false,
+			interiorContains:   false,
+			intersects:         false,
+			interiorIntersects: false,
+		},
+		{
+			have:               unit,
+			other:              half,
+			contains:           true,
+			interiorContains:   true,
+			intersects:         true,
+			interiorIntersects: true,
+		},
+		{
+			have:               unit,
+			other:              unit,
+			contains:           true,
+			interiorContains:   false,
+			intersects:         true,
+			interiorIntersects: true,
+		},
+		{
+			have:               unit,
+			other:              empty,
+			contains:           true,
+			interiorContains:   true,
+			intersects:         false,
+			interiorIntersects: false,
+		},
+		{
+			have:               unit,
+			other:              negunit,
+			contains:           false,
+			interiorContains:   false,
+			intersects:         true,
+			interiorIntersects: false,
+		},
+		{
+			have:               unit,
+			other:              Interval{0, 0.5},
+			contains:           true,
+			interiorContains:   false,
+			intersects:         true,
+			interiorIntersects: true,
+		},
+		{
+			have:               half,
+			other:              Interval{0, 0.5},
+			contains:           false,
+			interiorContains:   false,
+			intersects:         true,
+			interiorIntersects: false,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.ContainsInterval(test.other); got != test.contains {
+			t.Errorf("%v.ContainsInterval(%v) = %t, want %t", test.have, test.other, got, test.contains)
+		}
+		if got := test.have.InteriorContainsInterval(test.other); got != test.interiorContains {
+			t.Errorf("%v.InteriorContainsInterval(%v) = %t, want %t", test.have, test.other, got, test.interiorContains)
+		}
+		if got := test.have.Intersects(test.other); got != test.intersects {
+			t.Errorf("%v.Intersects(%v) = %t, want %t", test.have, test.other, got, test.intersects)
+		}
+		if got := test.have.InteriorIntersects(test.other); got != test.interiorIntersects {
+			t.Errorf("%v.InteriorIntersects(%v) = %t, want %t", test.have, test.other, got, test.interiorIntersects)
+		}
+	}
+}
+
+func TestIntersection(t *testing.T) {
+	tests := []struct {
+		x, y Interval
+		want Interval
+	}{
+		{unit, half, half},
+		{unit, negunit, Interval{0, 0}},
+		{negunit, half, empty},
+		{unit, empty, empty},
+		{empty, unit, empty},
+	}
+	for _, test := range tests {
+		if got := test.x.Intersection(test.y); !got.Equal(test.want) {
+			t.Errorf("%v.Intersection(%v) = %v, want equal to %v", test.x, test.y, got, test.want)
+		}
+	}
+}
+
+func TestUnion(t *testing.T) {
+	tests := []struct {
+		x, y Interval
+		want Interval
+	}{
+		{Interval{99, 100}, empty, Interval{99, 100}},
+		{empty, Interval{99, 100}, Interval{99, 100}},
+		{Interval{5, 3}, Interval{0, -2}, empty},
+		{Interval{0, -2}, Interval{5, 3}, empty},
+		{unit, unit, unit},
+		{unit, negunit, Interval{-1, 1}},
+		{negunit, unit, Interval{-1, 1}},
+		{half, unit, unit},
+	}
+	for _, test := range tests {
+		if got := test.x.Union(test.y); !got.Equal(test.want) {
+			t.Errorf("%v.Union(%v) = %v, want equal to %v", test.x, test.y, got, test.want)
+		}
+	}
+}
+
+func TestAddPoint(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		point    float64
+		want     Interval
+	}{
+		{empty, 5, Interval{5, 5}},
+		{Interval{5, 5}, -1, Interval{-1, 5}},
+		{Interval{-1, 5}, 0, Interval{-1, 5}},
+		{Interval{-1, 5}, 6, Interval{-1, 6}},
+	}
+	for _, test := range tests {
+		if got := test.interval.AddPoint(test.point); !got.Equal(test.want) {
+			t.Errorf("%v.AddPoint(%v) = %v, want equal to %v", test.interval, test.point, got, test.want)
+		}
+	}
+}
+
+func TestClampPoint(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		clamp    float64
+		want     float64
+	}{
+		{Interval{0.1, 0.4}, 0.3, 0.3},
+		{Interval{0.1, 0.4}, -7.0, 0.1},
+		{Interval{0.1, 0.4}, 0.6, 0.4},
+	}
+	for _, test := range tests {
+		if got := test.interval.ClampPoint(test.clamp); got != test.want {
+			t.Errorf("%v.ClampPoint(%v) = %v, want equal to %v", test.interval, test.clamp, got, test.want)
+		}
+	}
+}
+
+func TestExpanded(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		margin   float64
+		want     Interval
+	}{
+		{empty, 0.45, empty},
+		{unit, 0.5, Interval{-0.5, 1.5}},
+		{unit, -0.5, Interval{0.5, 0.5}},
+		{unit, -0.51, empty},
+	}
+	for _, test := range tests {
+		if got := test.interval.Expanded(test.margin); !got.Equal(test.want) {
+			t.Errorf("%v.Expanded(%v) = %v, want equal to %v", test.interval, test.margin, got, test.want)
+		}
+	}
+}
+
+func TestIntervalString(t *testing.T) {
+	i := Interval{2, 4.5}
+	if s, exp := i.String(), "[2.0000000, 4.5000000]"; s != exp {
+		t.Errorf("i.String() = %q, want %q", s, exp)
+	}
+}
+
+func TestApproxEqual(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		other    Interval
+		want     bool
+	}{
+		// Empty intervals.
+		{EmptyInterval(), EmptyInterval(), true},
+		{Interval{0, 0}, EmptyInterval(), true},
+		{EmptyInterval(), Interval{0, 0}, true},
+		{Interval{1, 1}, EmptyInterval(), true},
+		{EmptyInterval(), Interval{1, 1}, true},
+		{EmptyInterval(), Interval{0, 1}, false},
+		{EmptyInterval(), Interval{1, 1 + 2*epsilon}, true},
+
+		// Singleton intervals.
+		{Interval{1, 1}, Interval{1, 1}, true},
+		{Interval{1, 1}, Interval{1 - epsilon, 1 - epsilon}, true},
+		{Interval{1, 1}, Interval{1 + epsilon, 1 + epsilon}, true},
+		{Interval{1, 1}, Interval{1 - 3*epsilon, 1}, false},
+		{Interval{1, 1}, Interval{1, 1 + 3*epsilon}, false},
+		{Interval{1, 1}, Interval{1 - epsilon, 1 + epsilon}, true},
+		{Interval{0, 0}, Interval{1, 1}, false},
+
+		// Other intervals.
+		{Interval{1 - epsilon, 2 + epsilon}, Interval{1, 2}, false},
+		{Interval{1 + epsilon, 2 - epsilon}, Interval{1, 2}, true},
+		{Interval{1 - 3*epsilon, 2 + epsilon}, Interval{1, 2}, false},
+		{Interval{1 + 3*epsilon, 2 - epsilon}, Interval{1, 2}, false},
+		{Interval{1 - epsilon, 2 + 3*epsilon}, Interval{1, 2}, false},
+		{Interval{1 + epsilon, 2 - 3*epsilon}, Interval{1, 2}, false},
+	}
+
+	for _, test := range tests {
+		if got := test.interval.ApproxEqual(test.other); got != test.want {
+			t.Errorf("%v.ApproxEqual(%v) = %t, want %t",
+				test.interval, test.other, got, test.want)
+		}
+	}
+}

+ 476 - 0
vendor/github.com/golang/geo/r2/rect_test.go

@@ -0,0 +1,476 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Most of the Rect methods have trivial implementations in terms of the
+// Interval class, so most of the testing is done in that unit test.
+
+package r2
+
+import (
+	"math"
+	"reflect"
+	"testing"
+
+	"github.com/golang/geo/r1"
+)
+
+var (
+	sw = Point{0, 0.25}
+	se = Point{0.5, 0.25}
+	ne = Point{0.5, 0.75}
+	nw = Point{0, 0.75}
+
+	empty   = EmptyRect()
+	rect    = RectFromPoints(sw, ne)
+	rectMid = RectFromPoints(Point{0.25, 0.5}, Point{0.25, 0.5})
+	rectSW  = RectFromPoints(sw, sw)
+	rectNE  = RectFromPoints(ne, ne)
+)
+
+func float64Eq(x, y float64) bool { return math.Abs(x-y) < 1e-14 }
+
+func pointsApproxEqual(a, b Point) bool {
+	return float64Eq(a.X, b.X) && float64Eq(a.Y, b.Y)
+}
+
+func TestOrtho(t *testing.T) {
+	tests := []struct {
+		p    Point
+		want Point
+	}{
+		{Point{0, 0}, Point{0, 0}},
+		{Point{0, 1}, Point{-1, 0}},
+		{Point{1, 1}, Point{-1, 1}},
+		{Point{-4, 7}, Point{-7, -4}},
+		{Point{1, math.Sqrt(3)}, Point{-math.Sqrt(3), 1}},
+	}
+	for _, test := range tests {
+		if got := test.p.Ortho(); !pointsApproxEqual(got, test.want) {
+			t.Errorf("%v.Ortho() = %v, want %v", test.p, got, test.want)
+		}
+	}
+}
+
+func TestDot(t *testing.T) {
+	tests := []struct {
+		p    Point
+		op   Point
+		want float64
+	}{
+		{Point{0, 0}, Point{0, 0}, 0},
+		{Point{0, 1}, Point{0, 0}, 0},
+		{Point{1, 1}, Point{4, 3}, 7},
+		{Point{-4, 7}, Point{1, 5}, 31},
+	}
+	for _, test := range tests {
+		if got := test.p.Dot(test.op); !float64Eq(got, test.want) {
+			t.Errorf("%v.Dot(%v) = %v, want %v", test.p, test.op, got, test.want)
+		}
+	}
+}
+
+func TestCross(t *testing.T) {
+	tests := []struct {
+		p    Point
+		op   Point
+		want float64
+	}{
+		{Point{0, 0}, Point{0, 0}, 0},
+		{Point{0, 1}, Point{0, 0}, 0},
+		{Point{1, 1}, Point{-1, -1}, 0},
+		{Point{1, 1}, Point{4, 3}, -1},
+		{Point{1, 5}, Point{-2, 3}, 13},
+	}
+
+	for _, test := range tests {
+		if got := test.p.Cross(test.op); !float64Eq(got, test.want) {
+			t.Errorf("%v.Cross(%v) = %v, want %v", test.p, test.op, got, test.want)
+		}
+	}
+}
+
+func TestNorm(t *testing.T) {
+	tests := []struct {
+		p    Point
+		want float64
+	}{
+		{Point{0, 0}, 0},
+		{Point{0, 1}, 1},
+		{Point{-1, 0}, 1},
+		{Point{3, 4}, 5},
+		{Point{3, -4}, 5},
+		{Point{2, 2}, 2 * math.Sqrt(2)},
+		{Point{1, math.Sqrt(3)}, 2},
+		{Point{29, 29 * math.Sqrt(3)}, 29 * 2},
+		{Point{1, 1e15}, 1e15},
+		{Point{1e14, math.MaxFloat32 - 1}, math.MaxFloat32},
+	}
+
+	for _, test := range tests {
+		if !float64Eq(test.p.Norm(), test.want) {
+			t.Errorf("%v.Norm() = %v, want %v", test.p, test.p.Norm(), test.want)
+		}
+	}
+}
+
+func TestNormalize(t *testing.T) {
+	tests := []struct {
+		have Point
+		want Point
+	}{
+		{Point{}, Point{}},
+		{Point{0, 0}, Point{0, 0}},
+		{Point{0, 1}, Point{0, 1}},
+		{Point{-1, 0}, Point{-1, 0}},
+		{Point{3, 4}, Point{0.6, 0.8}},
+		{Point{3, -4}, Point{0.6, -0.8}},
+		{Point{2, 2}, Point{math.Sqrt(2) / 2, math.Sqrt(2) / 2}},
+		{Point{7, 7 * math.Sqrt(3)}, Point{0.5, math.Sqrt(3) / 2}},
+		{Point{1e21, 1e21 * math.Sqrt(3)}, Point{0.5, math.Sqrt(3) / 2}},
+		{Point{1, 1e16}, Point{0, 1}},
+		{Point{1e4, math.MaxFloat32 - 1}, Point{0, 1}},
+	}
+
+	for _, test := range tests {
+		if got := test.have.Normalize(); !pointsApproxEqual(got, test.want) {
+			t.Errorf("%v.Normalize() = %v, want %v", test.have, got, test.want)
+		}
+	}
+
+}
+
+func TestEmptyRect(t *testing.T) {
+	if !empty.IsValid() {
+		t.Errorf("empty Rect should be valid: %v", empty)
+	}
+	if !empty.IsEmpty() {
+		t.Errorf("empty Rect should be empty: %v", empty)
+	}
+}
+
+func TestFromVariousTypes(t *testing.T) {
+	d1 := RectFromPoints(Point{0.1, 0}, Point{0.25, 1})
+	tests := []struct {
+		r1, r2 Rect
+	}{
+		{
+			RectFromCenterSize(Point{0.3, 0.5}, Point{0.2, 0.4}),
+			RectFromPoints(Point{0.2, 0.3}, Point{0.4, 0.7}),
+		},
+		{
+			RectFromCenterSize(Point{1, 0.1}, Point{0, 2}),
+			RectFromPoints(Point{1, -0.9}, Point{1, 1.1}),
+		},
+		{
+			d1,
+			Rect{d1.X, d1.Y},
+		},
+		{
+			RectFromPoints(Point{0.15, 0.3}, Point{0.35, 0.9}),
+			RectFromPoints(Point{0.15, 0.9}, Point{0.35, 0.3}),
+		},
+		{
+			RectFromPoints(Point{0.12, 0}, Point{0.83, 0.5}),
+			RectFromPoints(Point{0.83, 0}, Point{0.12, 0.5}),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.r1.ApproxEquals(test.r2); !got {
+			t.Errorf("%v.ApproxEquals(%v); got %v want true", test.r1, test.r2, got)
+		}
+	}
+}
+
+func TestCenter(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		want Point
+	}{
+		{empty, Point{0.5, 0.5}},
+		{rect, Point{0.25, 0.5}},
+	}
+	for _, test := range tests {
+		if got := test.rect.Center(); got != test.want {
+			t.Errorf("%v.Center(); got %v want %v", test.rect, got, test.want)
+		}
+	}
+}
+
+func TestVertices(t *testing.T) {
+	want := [4]Point{sw, se, ne, nw}
+	got := rect.Vertices()
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("%v.Vertices(); got %v want %v", rect, got, want)
+	}
+}
+
+func TestContainsPoint(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		p    Point
+		want bool
+	}{
+		{rect, Point{0.2, 0.4}, true},
+		{rect, Point{0.2, 0.8}, false},
+		{rect, Point{-0.1, 0.4}, false},
+		{rect, Point{0.6, 0.1}, false},
+		{rect, Point{rect.X.Lo, rect.Y.Lo}, true},
+		{rect, Point{rect.X.Hi, rect.Y.Hi}, true},
+	}
+	for _, test := range tests {
+		if got := test.rect.ContainsPoint(test.p); got != test.want {
+			t.Errorf("%v.ContainsPoint(%v); got %v want %v", test.rect, test.p, got, test.want)
+		}
+	}
+}
+
+func TestInteriorContainsPoint(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		p    Point
+		want bool
+	}{
+		// Check corners are not contained.
+		{rect, sw, false},
+		{rect, ne, false},
+		// Check a point on the border is not contained.
+		{rect, Point{0, 0.5}, false},
+		{rect, Point{0.25, 0.25}, false},
+		{rect, Point{0.5, 0.5}, false},
+		// Check points inside are contained.
+		{rect, Point{0.125, 0.6}, true},
+	}
+	for _, test := range tests {
+		if got := test.rect.InteriorContainsPoint(test.p); got != test.want {
+			t.Errorf("%v.InteriorContainsPoint(%v); got %v want %v",
+				test.rect, test.p, got, test.want)
+		}
+	}
+}
+
+func TestIntervalOps(t *testing.T) {
+	tests := []struct {
+		r1, r2                                           Rect
+		contains, intContains, intersects, intIntersects bool
+		wantUnion, wantIntersection                      Rect
+	}{
+		{
+			rect, rectMid,
+			true, true, true, true,
+			rect, rectMid,
+		},
+		{
+			rect, rectSW,
+			true, false, true, false,
+			rect, rectSW,
+		},
+		{
+			rect, rectNE,
+			true, false, true, false,
+			rect, rectNE,
+		},
+		{
+			rect,
+			RectFromPoints(Point{0.45, 0.1}, Point{0.75, 0.3}),
+			false, false, true, true,
+			RectFromPoints(Point{0, 0.1}, Point{0.75, 0.75}),
+			RectFromPoints(Point{0.45, 0.25}, Point{0.5, 0.3}),
+		},
+		{
+			rect,
+			RectFromPoints(Point{0.5, 0.1}, Point{0.7, 0.3}),
+			false, false, true, false,
+			RectFromPoints(Point{0, 0.1}, Point{0.7, 0.75}),
+			RectFromPoints(Point{0.5, 0.25}, Point{0.5, 0.3}),
+		},
+		{
+			rect,
+			RectFromPoints(Point{0.45, 0.1}, Point{0.7, 0.25}),
+			false, false, true, false,
+			RectFromPoints(Point{0, 0.1}, Point{0.7, 0.75}),
+			RectFromPoints(Point{0.45, 0.25}, Point{0.5, 0.25}),
+		},
+		{
+			RectFromPoints(Point{0.1, 0.2}, Point{0.1, 0.3}),
+			RectFromPoints(Point{0.15, 0.7}, Point{0.2, 0.8}),
+			false, false, false, false,
+			RectFromPoints(Point{0.1, 0.2}, Point{0.2, 0.8}),
+			EmptyRect(),
+		},
+		// Check that the intersection of two rectangles that overlap in x but not y
+		// is valid, and vice versa.
+		{
+			RectFromPoints(Point{0.1, 0.2}, Point{0.4, 0.5}),
+			RectFromPoints(Point{0, 0}, Point{0.2, 0.1}),
+			false, false, false, false,
+			RectFromPoints(Point{0, 0}, Point{0.4, 0.5}),
+			EmptyRect(),
+		},
+		{
+			RectFromPoints(Point{0, 0}, Point{0.1, 0.3}),
+			RectFromPoints(Point{0.2, 0.1}, Point{0.3, 0.4}),
+			false, false, false, false,
+			RectFromPoints(Point{0, 0}, Point{0.3, 0.4}),
+			EmptyRect(),
+		},
+	}
+	for _, test := range tests {
+		if got := test.r1.Contains(test.r2); got != test.contains {
+			t.Errorf("%v.Contains(%v); got %v want %v",
+				test.r1, test.r2, got, test.contains)
+		}
+
+		if got := test.r1.InteriorContains(test.r2); got != test.intContains {
+			t.Errorf("%v.InteriorContains(%v); got %v want %v",
+				test.r1, test.r2, got, test.intContains)
+		}
+
+		if got := test.r1.Intersects(test.r2); got != test.intersects {
+			t.Errorf("%v.Intersects(%v); got %v want %v",
+				test.r1, test.r2, got, test.intersects)
+		}
+
+		if got := test.r1.InteriorIntersects(test.r2); got != test.intIntersects {
+			t.Errorf("%v.InteriorIntersects(%v); got %v want %v",
+				test.r1, test.r2, got, test.intIntersects)
+		}
+
+		tCon := test.r1.Contains(test.r2)
+		if got := test.r1.Union(test.r2).ApproxEquals(test.r1); got != tCon {
+			t.Errorf("%v.Union(%v) == %v.Contains(%v); got %v want %v",
+				test.r1, test.r2, test.r1, test.r2, got, tCon)
+		}
+
+		tInter := test.r1.Intersects(test.r2)
+		if got := !test.r1.Intersection(test.r2).IsEmpty(); got != tInter {
+			t.Errorf("%v.Intersection(%v).IsEmpty() == %v.Intersects(%v); got %v want %v",
+				test.r1, test.r2, test.r1, test.r2, got, tInter)
+		}
+
+		if got := test.r1.Union(test.r2); got != test.wantUnion {
+			t.Errorf("%v.Union(%v); got %v want %v",
+				test.r1, test.r2, got, test.wantUnion)
+		}
+
+		if got := test.r1.Intersection(test.r2); got != test.wantIntersection {
+			t.Errorf("%v.Intersection(%v); got %v want %v",
+				test.r1, test.r2, got, test.wantIntersection)
+		}
+
+		r := test.r1.AddRect(test.r2)
+
+		if r != test.wantUnion {
+			t.Errorf("%v.AddRect(%v); got %v want %v", test.r1, test.r2, r, test.wantUnion)
+		}
+	}
+}
+
+func TestAddPoint(t *testing.T) {
+	r1 := rect
+	r2 := EmptyRect()
+
+	r2 = r2.AddPoint(sw)
+	r2 = r2.AddPoint(se)
+	r2 = r2.AddPoint(nw)
+	r2 = r2.AddPoint(Point{0.1, 0.4})
+
+	if !r1.ApproxEquals(r2) {
+		t.Errorf("%v.AddPoint(%v); got false want true", r1, r2)
+	}
+}
+
+func TestClampPoint(t *testing.T) {
+	r := Rect{r1.Interval{Lo: 0, Hi: 0.5}, r1.Interval{Lo: 0.25, Hi: 0.75}}
+	tests := []struct {
+		p    Point
+		want Point
+	}{
+		{Point{-0.01, 0.24}, Point{0, 0.25}},
+		{Point{-5.0, 0.48}, Point{0, 0.48}},
+		{Point{-5.0, 2.48}, Point{0, 0.75}},
+		{Point{0.19, 2.48}, Point{0.19, 0.75}},
+
+		{Point{6.19, 2.48}, Point{0.5, 0.75}},
+		{Point{6.19, 0.53}, Point{0.5, 0.53}},
+		{Point{6.19, -2.53}, Point{0.5, 0.25}},
+		{Point{0.33, -2.53}, Point{0.33, 0.25}},
+		{Point{0.33, 0.37}, Point{0.33, 0.37}},
+	}
+	for _, test := range tests {
+		if got := r.ClampPoint(test.p); got != test.want {
+			t.Errorf("%v.ClampPoint(%v); got %v want %v", r, test.p, got, test.want)
+		}
+	}
+}
+
+func TestExpandedEmpty(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		p    Point
+	}{
+		{
+			EmptyRect(),
+			Point{0.1, 0.3},
+		},
+		{
+			EmptyRect(),
+			Point{-0.1, -0.3},
+		},
+		{
+			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
+			Point{-0.1, 0.3},
+		},
+		{
+			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
+			Point{0.1, -0.2},
+		},
+	}
+	for _, test := range tests {
+		if got := test.rect.Expanded(test.p); !got.IsEmpty() {
+			t.Errorf("%v.Expanded(%v); got %v want true", test.rect, test.p, got.IsEmpty())
+		}
+	}
+}
+
+func TestExpandedEquals(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		p    Point
+		want Rect
+	}{
+		{
+			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
+			Point{0.1, 0.3},
+			RectFromPoints(Point{0.1, 0.1}, Point{0.4, 1.0}),
+		},
+		{
+			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
+			Point{0.1, -0.1},
+			RectFromPoints(Point{0.1, 0.5}, Point{0.4, 0.6}),
+		},
+		{
+			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
+			Point{0.1, 0.1},
+			RectFromPoints(Point{0.1, 0.3}, Point{0.4, 0.8}),
+		},
+	}
+	for _, test := range tests {
+		if got := test.rect.Expanded(test.p); !got.ApproxEquals(test.want) {
+			t.Errorf("%v.Expanded(%v); got %v want %v", test.rect, test.p, got, test.want)
+		}
+	}
+}

+ 477 - 0
vendor/github.com/golang/geo/r3/precisevector_test.go

@@ -0,0 +1,477 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r3
+
+import (
+	"math/big"
+	"testing"
+)
+
+// preciseEq compares two big.Floats and reports whether they are the same.
+func preciseEq(a, b *big.Float) bool {
+	return a.SetPrec(prec).Cmp(b.SetPrec(prec)) == 0
+}
+
+func TestPreciseRoundtrip(t *testing.T) {
+	tests := []struct {
+		v Vector
+	}{
+		{Vector{0, 0, 0}},
+		{Vector{1, 2, 3}},
+		{Vector{3, -4, 12}},
+		{Vector{1, 1e-16, 1e-32}},
+	}
+
+	for _, test := range tests {
+		if got := PreciseVectorFromVector(test.v).Vector(); !got.ApproxEqual(test.v) {
+			t.Errorf("PreciseVectorFromVector(%v).Vector() = %v, want %v", test.v, got, test.v)
+		}
+	}
+}
+
+func TestPreciseIsUnit(t *testing.T) {
+	const epsilon = 1e-14
+	tests := []struct {
+		v    PreciseVector
+		want bool
+	}{
+		{
+			v:    NewPreciseVector(0, 0, 0),
+			want: false,
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			want: true,
+		},
+		{
+			v:    NewPreciseVector(0, 1, 0),
+			want: true,
+		},
+		{
+			v:    NewPreciseVector(0, 0, 1),
+			want: true,
+		},
+		{
+			v:    NewPreciseVector(1+2*epsilon, 0, 0),
+			want: false,
+		},
+		{
+			v:    NewPreciseVector(0*(1+epsilon), 0, 0),
+			want: false,
+		},
+		{
+			v:    NewPreciseVector(1, 1, 1),
+			want: false,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v.IsUnit(); got != test.want {
+			t.Errorf("%v.IsUnit() = %v, want %v", test.v, got, test.want)
+		}
+	}
+}
+
+func TestPreciseNorm2(t *testing.T) {
+	tests := []struct {
+		v    PreciseVector
+		want *big.Float
+	}{
+		{
+			v:    NewPreciseVector(0, 0, 0),
+			want: precise0,
+		},
+		{
+			v:    NewPreciseVector(0, 1, 0),
+			want: precise1,
+		},
+		{
+			v:    NewPreciseVector(1, 1, 1),
+			want: precStr("3"),
+		},
+		{
+			v:    NewPreciseVector(1, 2, 3),
+			want: precStr("14"),
+		},
+		{
+			v:    NewPreciseVector(3, -4, 12),
+			want: precStr("169"),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v.Norm2(); !preciseEq(got, test.want) {
+			t.Errorf("%v.Norm2() = %v, want %v", test.v, test.v.Norm2(), test.want)
+		}
+	}
+}
+
+func TestPreciseAdd(t *testing.T) {
+	tests := []struct {
+		v1, v2, want PreciseVector
+	}{
+		{
+			v1:   NewPreciseVector(0, 0, 0),
+			v2:   NewPreciseVector(0, 0, 0),
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(0, 0, 0),
+			want: NewPreciseVector(1, 0, 0),
+		},
+		{
+			v1:   NewPreciseVector(1, 2, 3),
+			v2:   NewPreciseVector(4, 5, 7),
+			want: NewPreciseVector(5, 7, 10),
+		},
+		{
+			v1:   NewPreciseVector(1, -3, 5),
+			v2:   NewPreciseVector(1, -6, -6),
+			want: NewPreciseVector(2, -9, -1),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v1.Add(test.v2); !got.Equals(test.want) {
+			t.Errorf("%v + %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestPreciseSub(t *testing.T) {
+	tests := []struct {
+		v1, v2, want PreciseVector
+	}{
+		{
+			v1:   NewPreciseVector(0, 0, 0),
+			v2:   NewPreciseVector(0, 0, 0),
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(0, 0, 0),
+			want: NewPreciseVector(1, 0, 0),
+		},
+		{
+			v1:   NewPreciseVector(1, 2, 3),
+			v2:   NewPreciseVector(4, 5, 7),
+			want: NewPreciseVector(-3, -3, -4),
+		},
+		{
+			v1:   NewPreciseVector(1, -3, 5),
+			v2:   NewPreciseVector(1, -6, -6),
+			want: NewPreciseVector(0, 3, 11),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v1.Sub(test.v2); !got.Equals(test.want) {
+			t.Errorf("%v - %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestPreciseMul(t *testing.T) {
+	tests := []struct {
+		v    PreciseVector
+		f    *big.Float
+		want PreciseVector
+	}{
+		{
+			v:    NewPreciseVector(0, 0, 0),
+			f:    precFloat(3),
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    precFloat(1),
+			want: NewPreciseVector(1, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    precFloat(0),
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    precFloat(3),
+			want: NewPreciseVector(3, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, -3, 5),
+			f:    precFloat(-1),
+			want: NewPreciseVector(-1, 3, -5),
+		},
+		{
+			v:    NewPreciseVector(1, -3, 5),
+			f:    precFloat(2),
+			want: NewPreciseVector(2, -6, 10),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v.Mul(test.f); !got.Equals(test.want) {
+			t.Errorf("%v.Mul(%v) = %v, want %v", test.v, test.f, got, test.want)
+		}
+	}
+}
+
+func TestPreciseMulByFloat64(t *testing.T) {
+	tests := []struct {
+		v    PreciseVector
+		f    float64
+		want PreciseVector
+	}{
+		{
+			v:    NewPreciseVector(0, 0, 0),
+			f:    3,
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    1,
+			want: NewPreciseVector(1, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    0,
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, 0, 0),
+			f:    3,
+			want: NewPreciseVector(3, 0, 0),
+		},
+		{
+			v:    NewPreciseVector(1, -3, 5),
+			f:    -1,
+			want: NewPreciseVector(-1, 3, -5),
+		},
+		{
+			v:    NewPreciseVector(1, -3, 5),
+			f:    2,
+			want: NewPreciseVector(2, -6, 10),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v.MulByFloat64(test.f); !got.Equals(test.want) {
+			t.Errorf("%v.MulByFloat64(%v) = %v, want %v", test.v, test.f, got, test.want)
+		}
+	}
+}
+
+func TestPreciseDot(t *testing.T) {
+	tests := []struct {
+		v1, v2 PreciseVector
+		want   *big.Float
+	}{
+		{
+			// Dot with self should be 1.
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(1, 0, 0),
+			want: precise1,
+		},
+		{
+			// Dot with self should be 1.
+			v1:   NewPreciseVector(0, 1, 0),
+			v2:   NewPreciseVector(0, 1, 0),
+			want: precise1,
+		},
+		{
+			// Dot with self should be 1.
+			v1:   NewPreciseVector(0, 0, 1),
+			v2:   NewPreciseVector(0, 0, 1),
+			want: precise1,
+		},
+		{
+			// Perpendicular should be 0.
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(0, 1, 0),
+			want: precise0,
+		},
+		{
+			// Perpendicular should be 0.
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(0, 1, 1),
+			want: precise0,
+		},
+		{
+			v1:   NewPreciseVector(1, 1, 1),
+			v2:   NewPreciseVector(-1, -1, -1),
+			want: precStr("-3"),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v1.Dot(test.v2); !preciseEq(got, test.want) {
+			t.Errorf("%v · %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+		if got := test.v2.Dot(test.v1); !preciseEq(got, test.want) {
+			t.Errorf("%v · %v = %v, want %v", test.v2, test.v1, got, test.want)
+		}
+	}
+}
+
+func TestPreciseCross(t *testing.T) {
+	tests := []struct {
+		v1, v2, want PreciseVector
+	}{
+		{
+			// Cross with self should be 0.
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(1, 0, 0),
+			want: NewPreciseVector(0, 0, 0),
+		},
+		{
+			// Cross with perpendicular should give the remaining axis.
+			v1:   NewPreciseVector(1, 0, 0),
+			v2:   NewPreciseVector(0, 1, 0),
+			want: NewPreciseVector(0, 0, 1),
+		},
+		{
+			// Cross with perpendicular should give the remaining axis.
+			v1:   NewPreciseVector(0, 1, 0),
+			v2:   NewPreciseVector(0, 0, 1),
+			want: NewPreciseVector(1, 0, 0),
+		},
+		{
+			// Cross with perpendicular should give the remaining axis.
+			v1:   NewPreciseVector(0, 0, 1),
+			v2:   NewPreciseVector(1, 0, 0),
+			want: NewPreciseVector(0, 1, 0),
+		},
+		{
+			v1:   NewPreciseVector(0, 1, 0),
+			v2:   NewPreciseVector(1, 0, 0),
+			want: NewPreciseVector(0, 0, -1),
+		},
+		{
+			v1:   NewPreciseVector(1, 2, 3),
+			v2:   NewPreciseVector(-4, 5, -6),
+			want: NewPreciseVector(-27, -6, 13),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v1.Cross(test.v2); !got.Equals(test.want) {
+			t.Errorf("%v ⨯ %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestPreciseIdentities(t *testing.T) {
+	tests := []struct {
+		v1, v2 PreciseVector
+	}{
+		{
+			v1: NewPreciseVector(0, 0, 0),
+			v2: NewPreciseVector(0, 0, 0),
+		},
+		{
+			v1: NewPreciseVector(0, 0, 0),
+			v2: NewPreciseVector(0, 1, 2),
+		},
+		{
+			v1: NewPreciseVector(1, 0, 0),
+			v2: NewPreciseVector(0, 1, 0),
+		},
+		{
+			v1: NewPreciseVector(1, 0, 0),
+			v2: NewPreciseVector(0, 1, 1),
+		},
+		{
+			v1: NewPreciseVector(1, 1, 1),
+			v2: NewPreciseVector(-1, -1, -1),
+		},
+		{
+			v1: NewPreciseVector(1, 2, 2),
+			v2: NewPreciseVector(-0.3, 0.4, -1.2),
+		},
+	}
+
+	for _, test := range tests {
+		c1 := test.v1.Cross(test.v2)
+		c2 := test.v2.Cross(test.v1)
+		d1 := test.v1.Dot(test.v2)
+		d2 := test.v2.Dot(test.v1)
+
+		// Dot commutes
+		if !preciseEq(d1, d2) {
+			t.Errorf("%v = %v · %v != %v · %v = %v", d1, test.v1, test.v2, test.v2, test.v1, d2)
+		}
+		// Cross anti-commutes
+		if !c1.Equals(c2.MulByFloat64(-1.0)) {
+			t.Errorf("%v = %v ⨯ %v != -(%v ⨯ %v) = -%v", c1, test.v1, test.v2, test.v2, test.v1, c2)
+		}
+		// Cross is orthogonal to original vectors
+		if got := test.v1.Dot(c1); !preciseEq(got, precise0) {
+			t.Errorf("%v · (%v ⨯ %v) = %v, want %v", test.v1, test.v1, test.v2, got, precise0)
+		}
+		if got := test.v2.Dot(c1); !preciseEq(got, precise0) {
+			t.Errorf("%v · (%v ⨯ %v) = %v, want %v", test.v2, test.v1, test.v2, got, precise0)
+		}
+	}
+}
+
+func TestPreciseLargestSmallestComponents(t *testing.T) {
+	tests := []struct {
+		v                 PreciseVector
+		largest, smallest Axis
+	}{
+		{
+			v:        NewPreciseVector(0, 0, 0),
+			largest:  ZAxis,
+			smallest: ZAxis,
+		},
+		{
+			v:        NewPreciseVector(1, 0, 0),
+			largest:  XAxis,
+			smallest: ZAxis,
+		},
+		{
+			v:        NewPreciseVector(1, -1, 0),
+			largest:  YAxis,
+			smallest: ZAxis,
+		},
+		{
+			v:        NewPreciseVector(-1, -1.1, -1.1),
+			largest:  ZAxis,
+			smallest: XAxis,
+		},
+		{
+			v:        NewPreciseVector(0.5, -0.4, -0.5),
+			largest:  ZAxis,
+			smallest: YAxis,
+		},
+		{
+			v:        NewPreciseVector(1e-15, 1e-14, 1e-13),
+			largest:  ZAxis,
+			smallest: XAxis,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.v.LargestComponent(); got != test.largest {
+			t.Errorf("%v.LargestComponent() = %v, want %v", test.v, got, test.largest)
+		}
+		if got := test.v.SmallestComponent(); got != test.smallest {
+			t.Errorf("%v.SmallestComponent() = %v, want %v", test.v, got, test.smallest)
+		}
+	}
+}
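
The tests above compare exact big.Float results. For a sense of why the precise
type exists, a minimal sketch (assuming the vendored import path
github.com/golang/geo/r3) contrasts a float64 dot product with the exact one:

package main

import (
	"fmt"
	"math/big"

	"github.com/golang/geo/r3"
)

func main() {
	a := r3.Vector{X: 1, Y: 1e-17, Z: 0}
	b := r3.Vector{X: 1, Y: -1, Z: 0}

	// In float64 the 1e-17 term is below half an ulp of 1 and rounds away.
	fmt.Println("float64 dot == 1:", a.Dot(b) == 1) // true

	// The precise type carries the tiny term through exactly.
	exact := r3.PreciseVectorFromVector(a).Dot(r3.PreciseVectorFromVector(b))
	fmt.Println("precise dot < 1:", exact.Cmp(big.NewFloat(1)) < 0) // true
}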

+ 339 - 0
vendor/github.com/golang/geo/r3/vector_test.go

@@ -0,0 +1,339 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r3
+
+import (
+	"math"
+	"testing"
+)
+
+func float64Eq(x, y float64) bool { return math.Abs(x-y) < 1e-14 }
+
+func TestVectorNorm(t *testing.T) {
+	tests := []struct {
+		v    Vector
+		want float64
+	}{
+		{Vector{0, 0, 0}, 0},
+		{Vector{0, 1, 0}, 1},
+		{Vector{3, -4, 12}, 13},
+		{Vector{1, 1e-16, 1e-32}, 1},
+	}
+	for _, test := range tests {
+		if !float64Eq(test.v.Norm(), test.want) {
+			t.Errorf("%v.Norm() = %v, want %v", test.v, test.v.Norm(), test.want)
+		}
+	}
+}
+
+func TestVectorNorm2(t *testing.T) {
+	tests := []struct {
+		v    Vector
+		want float64
+	}{
+		{Vector{0, 0, 0}, 0},
+		{Vector{0, 1, 0}, 1},
+		{Vector{1, 1, 1}, 3},
+		{Vector{1, 2, 3}, 14},
+		{Vector{3, -4, 12}, 169},
+		{Vector{1, 1e-16, 1e-32}, 1},
+	}
+	for _, test := range tests {
+		if !float64Eq(test.v.Norm2(), test.want) {
+			t.Errorf("%v.Norm2() = %v, want %v", test.v, test.v.Norm2(), test.want)
+		}
+	}
+}
+
+func TestVectorNormalize(t *testing.T) {
+	vectors := []Vector{
+		{1, 0, 0},
+		{0, 1, 0},
+		{0, 0, 1},
+		{1, 1, 1},
+		{1, 1e-16, 1e-32},
+		{12.34, 56.78, 91.01},
+	}
+	for _, v := range vectors {
+		nv := v.Normalize()
+		if !float64Eq(v.X*nv.Y, v.Y*nv.X) || !float64Eq(v.X*nv.Z, v.Z*nv.X) {
+			t.Errorf("%v.Normalize() did not preserve direction", v)
+		}
+		if !float64Eq(nv.Norm(), 1.0) {
+			t.Errorf("|%v| = %v, want 1", v, v.Norm())
+		}
+	}
+}
+
+func TestVectorIsUnit(t *testing.T) {
+	const epsilon = 1e-14
+	tests := []struct {
+		v    Vector
+		want bool
+	}{
+		{Vector{0, 0, 0}, false},
+		{Vector{0, 1, 0}, true},
+		{Vector{1 + 2*epsilon, 0, 0}, true},
+		{Vector{1 * (1 + epsilon), 0, 0}, true},
+		{Vector{1, 1, 1}, false},
+		{Vector{1, 1e-16, 1e-32}, true},
+	}
+	for _, test := range tests {
+		if got := test.v.IsUnit(); got != test.want {
+			t.Errorf("%v.IsUnit() = %v, want %v", test.v, got, test.want)
+		}
+	}
+}
+func TestVectorDot(t *testing.T) {
+	tests := []struct {
+		v1, v2 Vector
+		want   float64
+	}{
+		{Vector{1, 0, 0}, Vector{1, 0, 0}, 1},
+		{Vector{1, 0, 0}, Vector{0, 1, 0}, 0},
+		{Vector{1, 0, 0}, Vector{0, 1, 1}, 0},
+		{Vector{1, 1, 1}, Vector{-1, -1, -1}, -3},
+		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}, -1.9},
+	}
+	for _, test := range tests {
+		v1 := Vector{test.v1.X, test.v1.Y, test.v1.Z}
+		v2 := Vector{test.v2.X, test.v2.Y, test.v2.Z}
+		if !float64Eq(v1.Dot(v2), test.want) {
+			t.Errorf("%v · %v = %v, want %v", v1, v2, v1.Dot(v2), test.want)
+		}
+		if !float64Eq(v2.Dot(v1), test.want) {
+			t.Errorf("%v · %v = %v, want %v", v2, v1, v2.Dot(v1), test.want)
+		}
+	}
+}
+
+func TestVectorCross(t *testing.T) {
+	tests := []struct {
+		v1, v2, want Vector
+	}{
+		{Vector{1, 0, 0}, Vector{1, 0, 0}, Vector{0, 0, 0}},
+		{Vector{1, 0, 0}, Vector{0, 1, 0}, Vector{0, 0, 1}},
+		{Vector{0, 1, 0}, Vector{1, 0, 0}, Vector{0, 0, -1}},
+		{Vector{1, 2, 3}, Vector{-4, 5, -6}, Vector{-27, -6, 13}},
+	}
+	for _, test := range tests {
+		if got := test.v1.Cross(test.v2); !got.ApproxEqual(test.want) {
+			t.Errorf("%v ⨯ %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestVectorAdd(t *testing.T) {
+	tests := []struct {
+		v1, v2, want Vector
+	}{
+		{Vector{0, 0, 0}, Vector{0, 0, 0}, Vector{0, 0, 0}},
+		{Vector{1, 0, 0}, Vector{0, 0, 0}, Vector{1, 0, 0}},
+		{Vector{1, 2, 3}, Vector{4, 5, 7}, Vector{5, 7, 10}},
+		{Vector{1, -3, 5}, Vector{1, -6, -6}, Vector{2, -9, -1}},
+	}
+	for _, test := range tests {
+		if got := test.v1.Add(test.v2); !got.ApproxEqual(test.want) {
+			t.Errorf("%v + %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestVectorSub(t *testing.T) {
+	tests := []struct {
+		v1, v2, want Vector
+	}{
+		{Vector{0, 0, 0}, Vector{0, 0, 0}, Vector{0, 0, 0}},
+		{Vector{1, 0, 0}, Vector{0, 0, 0}, Vector{1, 0, 0}},
+		{Vector{1, 2, 3}, Vector{4, 5, 7}, Vector{-3, -3, -4}},
+		{Vector{1, -3, 5}, Vector{1, -6, -6}, Vector{0, 3, 11}},
+	}
+	for _, test := range tests {
+		if got := test.v1.Sub(test.v2); !got.ApproxEqual(test.want) {
+			t.Errorf("%v - %v = %v, want %v", test.v1, test.v2, got, test.want)
+		}
+	}
+}
+
+func TestVectorDistance(t *testing.T) {
+	tests := []struct {
+		v1, v2 Vector
+		want   float64
+	}{
+		{Vector{1, 0, 0}, Vector{1, 0, 0}, 0},
+		{Vector{1, 0, 0}, Vector{0, 1, 0}, 1.41421356237310},
+		{Vector{1, 0, 0}, Vector{0, 1, 1}, 1.73205080756888},
+		{Vector{1, 1, 1}, Vector{-1, -1, -1}, 3.46410161513775},
+		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}, 3.80657326213486},
+	}
+	for _, test := range tests {
+		v1 := Vector{test.v1.X, test.v1.Y, test.v1.Z}
+		v2 := Vector{test.v2.X, test.v2.Y, test.v2.Z}
+		if got, want := v1.Distance(v2), test.want; !float64Eq(got, want) {
+			t.Errorf("%v.Distance(%v) = %v, want %v", v1, v2, got, want)
+		}
+		if got, want := v2.Distance(v1), test.want; !float64Eq(got, want) {
+			t.Errorf("%v.Distance(%v) = %v, want %v", v2, v1, got, want)
+		}
+	}
+}
+
+func TestVectorMul(t *testing.T) {
+	tests := []struct {
+		v    Vector
+		m    float64
+		want Vector
+	}{
+		{Vector{0, 0, 0}, 3, Vector{0, 0, 0}},
+		{Vector{1, 0, 0}, 1, Vector{1, 0, 0}},
+		{Vector{1, 0, 0}, 0, Vector{0, 0, 0}},
+		{Vector{1, 0, 0}, 3, Vector{3, 0, 0}},
+		{Vector{1, -3, 5}, -1, Vector{-1, 3, -5}},
+		{Vector{1, -3, 5}, 2, Vector{2, -6, 10}},
+	}
+	for _, test := range tests {
+		if !test.v.Mul(test.m).ApproxEqual(test.want) {
+			t.Errorf("%v%v = %v, want %v", test.m, test.v, test.v.Mul(test.m), test.want)
+		}
+	}
+}
+
+func TestVectorAngle(t *testing.T) {
+	tests := []struct {
+		v1, v2 Vector
+		want   float64 // radians
+	}{
+		{Vector{1, 0, 0}, Vector{1, 0, 0}, 0},
+		{Vector{1, 0, 0}, Vector{0, 1, 0}, math.Pi / 2},
+		{Vector{1, 0, 0}, Vector{0, 1, 1}, math.Pi / 2},
+		{Vector{1, 0, 0}, Vector{-1, 0, 0}, math.Pi},
+		{Vector{1, 2, 3}, Vector{2, 3, -1}, 1.2055891055045298},
+	}
+	for _, test := range tests {
+		if a := test.v1.Angle(test.v2).Radians(); !float64Eq(a, test.want) {
+			t.Errorf("%v.Angle(%v) = %v, want %v", test.v1, test.v2, a, test.want)
+		}
+		if a := test.v2.Angle(test.v1).Radians(); !float64Eq(a, test.want) {
+			t.Errorf("%v.Angle(%v) = %v, want %v", test.v2, test.v1, a, test.want)
+		}
+	}
+}
+
+func TestVectorOrtho(t *testing.T) {
+	vectors := []Vector{
+		{1, 0, 0},
+		{1, 1, 0},
+		{1, 2, 3},
+		{1, -2, -5},
+		{0.012, 0.0053, 0.00457},
+		{-0.012, -1, -0.00457},
+	}
+	for _, v := range vectors {
+		if !float64Eq(v.Dot(v.Ortho()), 0) {
+			t.Errorf("%v = not orthogonal to %v.Ortho()", v, v)
+		}
+		if !float64Eq(v.Ortho().Norm(), 1) {
+			t.Errorf("|%v.Ortho()| = %v, want 1", v, v.Ortho().Norm())
+		}
+	}
+}
+
+func TestVectorIdentities(t *testing.T) {
+	tests := []struct {
+		v1, v2 Vector
+	}{
+		{Vector{0, 0, 0}, Vector{0, 0, 0}},
+		{Vector{0, 0, 0}, Vector{0, 1, 2}},
+		{Vector{1, 0, 0}, Vector{0, 1, 0}},
+		{Vector{1, 0, 0}, Vector{0, 1, 1}},
+		{Vector{1, 1, 1}, Vector{-1, -1, -1}},
+		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}},
+	}
+	for _, test := range tests {
+		a1 := test.v1.Angle(test.v2).Radians()
+		a2 := test.v2.Angle(test.v1).Radians()
+		c1 := test.v1.Cross(test.v2)
+		c2 := test.v2.Cross(test.v1)
+		d1 := test.v1.Dot(test.v2)
+		d2 := test.v2.Dot(test.v1)
+		// Angle commutes
+		if !float64Eq(a1, a2) {
+			t.Errorf("%v = %v.Angle(%v) != %v.Angle(%v) = %v", a1, test.v1, test.v2, test.v2, test.v1, a2)
+		}
+		// Dot commutes
+		if !float64Eq(d1, d2) {
+			t.Errorf("%v = %v · %v != %v · %v = %v", d1, test.v1, test.v2, test.v2, test.v1, d2)
+		}
+		// Cross anti-commutes
+		if !c1.ApproxEqual(c2.Mul(-1.0)) {
+			t.Errorf("%v = %v ⨯ %v != -(%v ⨯ %v) = -%v", c1, test.v1, test.v2, test.v2, test.v1, c2)
+		}
+		// Cross is orthogonal to original vectors
+		if !float64Eq(test.v1.Dot(c1), 0.0) {
+			t.Errorf("%v · (%v ⨯ %v) = %v != 0", test.v1, test.v1, test.v2, test.v1.Dot(c1))
+		}
+		if !float64Eq(test.v2.Dot(c1), 0.0) {
+			t.Errorf("%v · (%v ⨯ %v) = %v != 0", test.v2, test.v1, test.v2, test.v2.Dot(c1))
+		}
+	}
+}
+
+func TestVectorLargestSmallestComponents(t *testing.T) {
+	tests := []struct {
+		v                 Vector
+		largest, smallest Axis
+	}{
+		{Vector{0, 0, 0}, ZAxis, ZAxis},
+		{Vector{1, 0, 0}, XAxis, ZAxis},
+		{Vector{1, -1, 0}, YAxis, ZAxis},
+		{Vector{-1, -1.1, -1.1}, ZAxis, XAxis},
+		{Vector{0.5, -0.4, -0.5}, ZAxis, YAxis},
+		{Vector{1e-15, 1e-14, 1e-13}, ZAxis, XAxis},
+	}
+
+	for _, test := range tests {
+		if got := test.v.LargestComponent(); got != test.largest {
+			t.Errorf("%v.LargestComponent() = %v, want %v", test.v, got, test.largest)
+		}
+		if got := test.v.SmallestComponent(); got != test.smallest {
+			t.Errorf("%v.SmallestComponent() = %v, want %v", test.v, got, test.smallest)
+		}
+	}
+}
+
+func TestVectorCmp(t *testing.T) {
+	tests := []struct {
+		a, b Vector
+		want int
+	}{
+		{Vector{0, 0, 0}, Vector{0, 0, 0}, 0},
+		{Vector{0, 0, 0}, Vector{1, 0, 0}, -1},
+		{Vector{0, 1, 0}, Vector{0, 0, 0}, 1},
+		{Vector{1, 2, 3}, Vector{3, 2, 1}, -1},
+		{Vector{-1, 0, 0}, Vector{0, 0, -1}, -1},
+		{Vector{8, 6, 4}, Vector{7, 5, 3}, 1},
+		{Vector{-1, -0.5, 0}, Vector{0, 0, 0.1}, -1},
+		{Vector{1, 2, 3}, Vector{2, 3, 4}, -1},
+		{Vector{1.23, 4.56, 7.89}, Vector{1.23, 4.56, 7.89}, 0},
+	}
+
+	for _, test := range tests {
+		if got := test.a.Cmp(test.b); got != test.want {
+			t.Errorf("%v.Cmp(%v) = %d, want %d", test.a, test.b, got, test.want)
+		}
+	}
+}

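The Cmp cases above are all consistent with a lexicographic ordering on (X, Y, Z). As an illustration only (the comparison order is inferred from the test data, not from r3's documentation), here is a standalone sketch of that ordering:

```go
package main

import "fmt"

// cmp3 compares two triples lexicographically by X, then Y, then Z and
// returns -1, 0, or +1. It mirrors the behaviour the Cmp test cases above
// expect; it is a sketch, not the r3 implementation.
func cmp3(a, b [3]float64) int {
	for i := 0; i < 3; i++ {
		if a[i] < b[i] {
			return -1
		}
		if a[i] > b[i] {
			return 1
		}
	}
	return 0
}

func main() {
	fmt.Println(cmp3([3]float64{1, 2, 3}, [3]float64{3, 2, 1}))       // -1
	fmt.Println(cmp3([3]float64{8, 6, 4}, [3]float64{7, 5, 3}))       // 1
	fmt.Println(cmp3([3]float64{-1, -0.5, 0}, [3]float64{0, 0, 0.1})) // -1
}
```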
+ 169 - 0
vendor/github.com/golang/geo/s1/angle_test.go

@@ -0,0 +1,169 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+	"math"
+	"testing"
+)
+
+// float64Eq reports whether the two values are within the default epsilon.
+func float64Eq(x, y float64) bool {
+	return float64Near(x, y, epsilon)
+}
+
+// float64Near reports whether the two values are within the specified epsilon.
+func float64Near(x, y, eps float64) bool {
+	return math.Abs(x-y) <= eps
+}
+
+func TestEmptyValue(t *testing.T) {
+	var a Angle
+	if rad := a.Radians(); rad != 0 {
+		t.Errorf("Empty value of Angle was %v, want 0", rad)
+	}
+}
+
+func TestPiRadiansExactly180Degrees(t *testing.T) {
+	if rad := (math.Pi * Radian).Radians(); rad != math.Pi {
+		t.Errorf("(π * Radian).Radians() was %v, want π", rad)
+	}
+	if deg := (math.Pi * Radian).Degrees(); deg != 180 {
+		t.Errorf("(π * Radian).Degrees() was %v, want 180", deg)
+	}
+	if rad := (180 * Degree).Radians(); rad != math.Pi {
+		t.Errorf("(180 * Degree).Radians() was %v, want π", rad)
+	}
+	if deg := (180 * Degree).Degrees(); deg != 180 {
+		t.Errorf("(180 * Degree).Degrees() was %v, want 180", deg)
+	}
+
+	if deg := (math.Pi / 2 * Radian).Degrees(); deg != 90 {
+		t.Errorf("(π/2 * Radian).Degrees() was %v, want 90", deg)
+	}
+
+	// Check negative angles.
+	if deg := (-math.Pi / 2 * Radian).Degrees(); deg != -90 {
+		t.Errorf("(-π/2 * Radian).Degrees() was %v, want -90", deg)
+	}
+	if rad := (-45 * Degree).Radians(); rad != -math.Pi/4 {
+		t.Errorf("(-45 * Degree).Radians() was %v, want -π/4", rad)
+	}
+}
+
+func TestE5E6E7Representation(t *testing.T) {
+	// NOTE(dsymonds): This first test gives a variance in the 16th decimal place. I should track that down.
+	exp, act := (-45 * Degree).Radians(), (-4500000 * E5).Radians()
+	if math.Abs(exp-act) > 1e-15 {
+		t.Errorf("(-4500000 * E5).Radians() was %v, want %v", act, exp)
+	}
+	if exp, act := (-60 * Degree).Radians(), (-60000000 * E6).Radians(); exp != act {
+		t.Errorf("(-60000000 * E6).Radians() was %v, want %v", act, exp)
+	}
+	if exp, act := (75 * Degree).Radians(), (750000000 * E7).Radians(); exp != act {
+		t.Errorf("(-750000000 * E7).Radians() was %v, want %v", act, exp)
+	}
+
+	if exp, act := int32(-17256123), (-172.56123 * Degree).E5(); exp != act {
+		t.Errorf("(-172.56123°).E5() was %v, want %v", act, exp)
+	}
+	if exp, act := int32(12345678), (12.345678 * Degree).E6(); exp != act {
+		t.Errorf("(12.345678°).E6() was %v, want %v", act, exp)
+	}
+	if exp, act := int32(-123456789), (-12.3456789 * Degree).E7(); exp != act {
+		t.Errorf("(-12.3456789°).E7() was %v, want %v", act, exp)
+	}
+
+	roundingTests := []struct {
+		have Angle
+		want int32
+	}{
+		{0.500000001, 1},
+		{-0.500000001, -1},
+		{0.499999999, 0},
+		{-0.499999999, 0},
+	}
+	for _, test := range roundingTests {
+		if act := (test.have * 1e-5 * Degree).E5(); test.want != act {
+			t.Errorf("(%v°).E5() was %v, want %v", test.have, act, test.want)
+		}
+		if act := (test.have * 1e-6 * Degree).E6(); test.want != act {
+			t.Errorf("(%v°).E6() was %v, want %v", test.have, act, test.want)
+		}
+		if act := (test.have * 1e-7 * Degree).E7(); test.want != act {
+			t.Errorf("(%v°).E7() was %v, want %v", test.have, act, test.want)
+		}
+	}
+}
+
+func TestNormalizeCorrectlyCanonicalizesAngles(t *testing.T) {
+	tests := []struct {
+		in, want float64 // both in degrees
+	}{
+		{360, 0},
+		{-180, 180},
+		{180, 180},
+		{540, 180},
+		{-270, 90},
+	}
+	for _, test := range tests {
+		deg := (Angle(test.in) * Degree).Normalized().Degrees()
+		if deg != test.want {
+			t.Errorf("Normalized %.0f° = %v, want %v", test.in, deg, test.want)
+		}
+	}
+}
+
+func TestAngleString(t *testing.T) {
+	if s, exp := (180 * Degree).String(), "180.0000000"; s != exp {
+		t.Errorf("(180°).String() = %q, want %q", s, exp)
+	}
+}
+
+func TestDegreesVsRadians(t *testing.T) {
+	// This test checks that specific values convert exactly between degrees and radians.
+	for k := -8; k <= 8; k++ {
+		if got, want := Angle(45*k)*Degree, Angle((float64(k)*math.Pi)/4)*Radian; got != want {
+			t.Errorf("45°*%d != (%d*π)/4 radians (%f vs %f)", k, k, got, want)
+		}
+
+		if got, want := (Angle(45*k) * Degree).Degrees(), float64(45*k); got != want {
+			t.Errorf("Angle(45°*%d).Degrees() != 45*%d, (%f vs %f)", k, k, got, want)
+		}
+	}
+
+	for k := uint64(0); k < 30; k++ {
+		m := 1 << k
+		n := float64(m)
+		for _, test := range []struct{ deg, rad float64 }{
+			{180, 1},
+			{60, 3},
+			{36, 5},
+			{20, 9},
+			{4, 45},
+		} {
+			if got, want := Angle(test.deg/n)*Degree, Angle(math.Pi/(test.rad*n))*Radian; got != want {
+				t.Errorf("%v°/%d != π/%v*%d rad (%f vs %f)", test.deg, m, test.rad, m, got, want)
+			}
+		}
+	}
+
+	// We also spot check a non-identity.
+	if got := (60 * Degree).Degrees(); float64Eq(got, 60) {
+		t.Errorf("Angle(60).Degrees() == 60, but should not (%f vs %f)", got, 60.0)
+	}
+}

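TestDegreesVsRadians above asserts that 45°·k converts to exactly k·π/4 in float64. A throwaway sketch that reproduces the same float64 arithmetic outside the s1 types, assuming Degree is the constant math.Pi/180 (stated here as an assumption, since the constant is not shown in this diff):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Reproduce Angle(45*k)*Degree == Angle(float64(k)*math.Pi/4)*Radian
	// using plain float64 arithmetic.
	allEqual := true
	for k := -8; k <= 8; k++ {
		lhs := float64(45*k) * (math.Pi / 180) // 45°*k in radians
		rhs := float64(k) * math.Pi / 4
		if lhs != rhs {
			allEqual = false
			fmt.Println("mismatch at k =", k)
		}
	}
	fmt.Println("all 45°*k conversions exact:", allEqual)
}
```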
+ 9 - 3
vendor/github.com/golang/geo/s1/chordangle.go

@@ -164,8 +164,8 @@ func (c ChordAngle) Add(other ChordAngle) ChordAngle {
 	return ChordAngle(math.Min(4.0, x+y+2*math.Sqrt(x*y)))
 }
 
-// Sub subtracts the other ChordAngle from this one and returns the resulting value.
-// This method assumes the ChordAngles are not special.
+// Sub subtracts the other ChordAngle from this one and returns the resulting
+// value. This method assumes the ChordAngles are not special.
 func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
 	if other == 0 {
 		return c
@@ -181,12 +181,18 @@ func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
 // Sin returns the sine of this chord angle. This method is more efficient
 // than converting to Angle and performing the computation.
 func (c ChordAngle) Sin() float64 {
+	return math.Sqrt(c.Sin2())
+}
+
+// Sin2 returns the square of the sine of this chord angle.
+// It is more efficient than Sin.
+func (c ChordAngle) Sin2() float64 {
 	// Let a be the (non-squared) chord length, and let A be the corresponding
 	// half-angle (a = 2*sin(A)).  The formula below can be derived from:
 	//   sin(2*A) = 2 * sin(A) * cos(A)
 	//   cos^2(A) = 1 - sin^2(A)
 	// This is much faster than converting to an angle and computing its sine.
-	return math.Sqrt(float64(c * (1 - 0.25*c)))
+	return float64(c * (1 - 0.25*c))
 }
 
 // Cos returns the cosine of this chord angle. This method is more efficient

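The derivation quoted in Sin2 above can be spot-checked numerically with only the standard library; a minimal sketch (the sample angles are arbitrary):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// For an angle θ = 2A the squared chord length is c = 4*sin²(A), and
	//   sin²(θ) = (2*sin(A)*cos(A))² = 4*sin²(A)*(1 - sin²(A)) = c*(1 - c/4),
	// which is the expression ChordAngle.Sin2 returns.
	for _, theta := range []float64{0.1, 0.5, 1.0, 2.0, 3.0} {
		s := math.Sin(theta / 2)
		c := 4 * s * s
		lhs := math.Sin(theta) * math.Sin(theta)
		rhs := c * (1 - 0.25*c)
		fmt.Printf("θ=%.1f  sin²(θ)=%.15f  c*(1-c/4)=%.15f\n", theta, lhs, rhs)
	}
}
```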
+ 226 - 0
vendor/github.com/golang/geo/s1/chordangle_test.go

@@ -0,0 +1,226 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+	"math"
+	"testing"
+)
+
+func TestChordAngleBasics(t *testing.T) {
+	var zeroChord ChordAngle
+	tests := []struct {
+		a, b     ChordAngle
+		lessThan bool
+		equal    bool
+	}{
+		{NegativeChordAngle, NegativeChordAngle, false, true},
+		{NegativeChordAngle, zeroChord, true, false},
+		{NegativeChordAngle, StraightChordAngle, true, false},
+		{NegativeChordAngle, InfChordAngle(), true, false},
+
+		{zeroChord, zeroChord, false, true},
+		{zeroChord, StraightChordAngle, true, false},
+		{zeroChord, InfChordAngle(), true, false},
+
+		{StraightChordAngle, StraightChordAngle, false, true},
+		{StraightChordAngle, InfChordAngle(), true, false},
+
+		{InfChordAngle(), InfChordAngle(), false, true},
+		{InfChordAngle(), InfChordAngle(), false, true},
+	}
+
+	for _, test := range tests {
+		if got := test.a < test.b; got != test.lessThan {
+			t.Errorf("%v should be less than %v", test.a, test.b)
+		}
+		if got := test.a == test.b; got != test.equal {
+			t.Errorf("%v should be equal to %v", test.a, test.b)
+		}
+	}
+}
+
+func TestChordAngleIsFunctions(t *testing.T) {
+	var zeroChord ChordAngle
+	tests := []struct {
+		have       ChordAngle
+		isNegative bool
+		isZero     bool
+		isInf      bool
+		isSpecial  bool
+	}{
+		{zeroChord, false, true, false, false},
+		{NegativeChordAngle, true, false, false, true},
+		{zeroChord, false, true, false, false},
+		{StraightChordAngle, false, false, false, false},
+		{InfChordAngle(), false, false, true, true},
+	}
+
+	for _, test := range tests {
+		if got := test.have < 0; got != test.isNegative {
+			t.Errorf("%v.isNegative() = %t, want %t", test.have, got, test.isNegative)
+		}
+		if got := test.have == 0; got != test.isZero {
+			t.Errorf("%v.isZero() = %t, want %t", test.have, got, test.isZero)
+		}
+		if got := test.have.isInf(); got != test.isInf {
+			t.Errorf("%v.isInf() = %t, want %t", test.have, got, test.isInf)
+		}
+		if got := test.have.isSpecial(); got != test.isSpecial {
+			t.Errorf("%v.isSpecial() = %t, want %t", test.have, got, test.isSpecial)
+		}
+	}
+}
+
+func TestChordAngleFromAngle(t *testing.T) {
+	for _, angle := range []float64{0, 1, -1, math.Pi} {
+		if got := ChordAngleFromAngle(Angle(angle)).Angle().Radians(); got != angle {
+			t.Errorf("ChordAngleFromAngle(Angle(%v)) = %v, want %v", angle, got, angle)
+		}
+	}
+
+	if got := ChordAngleFromAngle(Angle(math.Pi)); got != StraightChordAngle {
+		t.Errorf("a ChordAngle from an Angle of π = %v, want %v", got, StraightChordAngle)
+	}
+
+	if InfAngle() != ChordAngleFromAngle(InfAngle()).Angle() {
+		t.Errorf("converting infinite Angle to ChordAngle should yield infinite Angle")
+	}
+}
+
+func TestChordAngleArithmetic(t *testing.T) {
+	var (
+		zero      ChordAngle
+		degree30  = ChordAngleFromAngle(30 * Degree)
+		degree60  = ChordAngleFromAngle(60 * Degree)
+		degree90  = ChordAngleFromAngle(90 * Degree)
+		degree120 = ChordAngleFromAngle(120 * Degree)
+		degree180 = StraightChordAngle
+	)
+
+	addTests := []struct {
+		a, b ChordAngle
+		want ChordAngle
+	}{
+		{zero, zero, zero},
+		{degree60, zero, degree60},
+		{zero, degree60, degree60},
+		{degree30, degree60, degree90},
+		{degree60, degree30, degree90},
+		{degree180, zero, degree180},
+		{degree60, degree30, degree90},
+		{degree90, degree90, degree180},
+		{degree120, degree90, degree180},
+		{degree120, degree120, degree180},
+		{degree30, degree180, degree180},
+		{degree180, degree180, degree180},
+	}
+
+	subTests := []struct {
+		a, b ChordAngle
+		want ChordAngle
+	}{
+		{zero, zero, zero},
+		{degree60, degree60, zero},
+		{degree180, degree180, zero},
+		{zero, degree60, zero},
+		{degree30, degree90, zero},
+		{degree90, degree30, degree60},
+		{degree90, degree60, degree30},
+		{degree180, zero, degree180},
+	}
+
+	for _, test := range addTests {
+		if got := float64(test.a.Add(test.b)); !float64Eq(got, float64(test.want)) {
+			t.Errorf("%v.Add(%v) = %0.24f, want %0.24f", test.a.Angle().Degrees(), test.b.Angle().Degrees(), got, test.want)
+		}
+	}
+	for _, test := range subTests {
+		if got := float64(test.a.Sub(test.b)); !float64Eq(got, float64(test.want)) {
+			t.Errorf("%v.Sub(%v) = %0.24f, want %0.24f", test.a.Angle().Degrees(), test.b.Angle().Degrees(), got, test.want)
+		}
+	}
+}
+
+func TestChordAngleTrigonometry(t *testing.T) {
+	// Because of the way the math works out, the 9/10ths case has slightly more
+	// difference than all the other computations, so it gets a more generous
+	// epsilon to deal with that.
+	const epsilon = 1e-14
+	const iters = 40
+	for iter := 0; iter <= iters; iter++ {
+		radians := math.Pi * float64(iter) / float64(iters)
+		angle := ChordAngleFromAngle(Angle(radians))
+		if !float64Near(math.Sin(radians), angle.Sin(), epsilon) {
+			t.Errorf("(%d/%d)*π. %v.Sin() = %v, want %v", iter, iters, angle, angle.Sin(), math.Sin(radians))
+		}
+		if !float64Near(math.Cos(radians), angle.Cos(), epsilon) {
+			t.Errorf("(%d/%d)*π. %v.Cos() = %v, want %v", iter, iters, angle, angle.Cos(), math.Cos(radians))
+		}
+		// Since tan(x) is unbounded near pi/2, we map the result back to an
+		// angle before comparing. The assertion is that the result is equal to
+		// the tangent of a nearby angle.
+		if !float64Near(math.Atan(math.Tan(radians)), math.Atan(angle.Tan()), 1e-14) {
+			t.Errorf("(%d/%d)*π. %v.Tan() = %v, want %v", iter, iters, angle, angle.Tan(), math.Tan(radians))
+		}
+	}
+
+	// Unlike Angle, ChordAngle can represent 90 and 180 degrees exactly.
+	angle90 := ChordAngleFromSquaredLength(2)
+	angle180 := ChordAngleFromSquaredLength(4)
+	if !float64Eq(1, angle90.Sin()) {
+		t.Errorf("%v.Sin() = %v, want 1", angle90, angle90.Sin())
+	}
+	if !float64Eq(0, angle90.Cos()) {
+		t.Errorf("%v.Cos() = %v, want 0", angle90, angle90.Cos())
+	}
+	if !math.IsInf(angle90.Tan(), 0) {
+		t.Errorf("%v.Tan() should be infinite, but was not.", angle90)
+	}
+	if !float64Eq(0, angle180.Sin()) {
+		t.Errorf("%v.Sin() = %v, want 0", angle180, angle180.Sin())
+	}
+	if !float64Eq(-1, angle180.Cos()) {
+		t.Errorf("%v.Cos() = %v, want -1", angle180, angle180.Cos())
+	}
+	if !float64Eq(0, angle180.Tan()) {
+		t.Errorf("%v.Tan() = %v, want 0", angle180, angle180.Tan())
+	}
+}
+
+func TestChordAngleExpanded(t *testing.T) {
+	var zero ChordAngle
+
+	tests := []struct {
+		have ChordAngle
+		add  float64
+		want ChordAngle
+	}{
+		{NegativeChordAngle, 5, NegativeChordAngle},
+		{InfChordAngle(), -5, InfChordAngle()},
+		{StraightChordAngle, 5, ChordAngleFromSquaredLength(5)},
+		{zero, -5, zero},
+		{ChordAngleFromSquaredLength(1.25), 0.25, ChordAngleFromSquaredLength(1.5)},
+		{ChordAngleFromSquaredLength(0.75), 0.25, ChordAngleFromSquaredLength(1)},
+	}
+
+	for _, test := range tests {
+		if got := test.have.Expanded(test.add); got != test.want {
+			t.Errorf("%v.Expanded(%v) = %v, want %v", test.have, test.add, got, test.want)
+		}
+	}
+}

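The exact 90° and 180° checks above rest on a simple fact: the chord between orthogonal unit vectors has squared length 2 and the chord between antipodal points has squared length 4, both exactly representable. A tiny standalone check:

```go
package main

import "fmt"

// sqDist returns the squared Euclidean distance between two 3-vectors.
func sqDist(a, b [3]float64) float64 {
	sum := 0.0
	for i := range a {
		d := a[i] - b[i]
		sum += d * d
	}
	return sum
}

func main() {
	x := [3]float64{1, 0, 0}
	y := [3]float64{0, 1, 0}
	negX := [3]float64{-1, 0, 0}
	// 90° apart -> 2, 180° apart -> 4, which is why the test can build the
	// exact chord angles with ChordAngleFromSquaredLength(2) and (4).
	fmt.Println(sqDist(x, y), sqDist(x, negX)) // 2 4
}
```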
+ 457 - 0
vendor/github.com/golang/geo/s1/interval_test.go

@@ -0,0 +1,457 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+	"math"
+	"testing"
+)
+
+// Some standard intervals for use throughout the tests.
+var (
+	empty = EmptyInterval()
+	full  = FullInterval()
+	// Single-point intervals:
+	zero  = IntervalFromEndpoints(0, 0)
+	pi2   = IntervalFromEndpoints(math.Pi/2, math.Pi/2)
+	pi    = IntervalFromEndpoints(math.Pi, math.Pi)
+	mipi  = IntervalFromEndpoints(-math.Pi, -math.Pi) // same as pi after normalization
+	mipi2 = IntervalFromEndpoints(-math.Pi/2, -math.Pi/2)
+	// Single quadrants:
+	quad1 = IntervalFromEndpoints(0, math.Pi/2)
+	quad2 = IntervalFromEndpoints(math.Pi/2, -math.Pi) // equivalent to (pi/2, pi)
+	quad3 = IntervalFromEndpoints(math.Pi, -math.Pi/2)
+	quad4 = IntervalFromEndpoints(-math.Pi/2, 0)
+	// Quadrant pairs:
+	quad12 = IntervalFromEndpoints(0, -math.Pi)
+	quad23 = IntervalFromEndpoints(math.Pi/2, -math.Pi/2)
+	quad34 = IntervalFromEndpoints(-math.Pi, 0)
+	quad41 = IntervalFromEndpoints(-math.Pi/2, math.Pi/2)
+	// Quadrant triples:
+	quad123 = IntervalFromEndpoints(0, -math.Pi/2)
+	quad234 = IntervalFromEndpoints(math.Pi/2, 0)
+	quad341 = IntervalFromEndpoints(math.Pi, math.Pi/2)
+	quad412 = IntervalFromEndpoints(-math.Pi/2, -math.Pi)
+	// Small intervals around the midpoints between quadrants,
+	// such that the center of each interval is offset slightly CCW from the midpoint.
+	mid12 = IntervalFromEndpoints(math.Pi/2-0.01, math.Pi/2+0.02)
+	mid23 = IntervalFromEndpoints(math.Pi-0.01, -math.Pi+0.02)
+	mid34 = IntervalFromEndpoints(-math.Pi/2-0.01, -math.Pi/2+0.02)
+	mid41 = IntervalFromEndpoints(-0.01, 0.02)
+)
+
+func TestConstructors(t *testing.T) {
+	// Check that [-π,-π] is normalized to [π,π].
+	if mipi.Lo != math.Pi {
+		t.Errorf("mipi.Lo = %v, want π", mipi.Lo)
+	}
+	if mipi.Hi != math.Pi {
+		t.Errorf("mipi.Hi = %v, want π", mipi.Lo)
+	}
+
+	var i Interval
+	if !i.IsValid() {
+		t.Errorf("Zero value Interval is not valid")
+	}
+}
+
+func TestIntervalFromPointPair(t *testing.T) {
+	tests := []struct {
+		a, b float64
+		want Interval
+	}{
+		{-math.Pi, math.Pi, pi},
+		{math.Pi, -math.Pi, pi},
+		{mid34.Hi, mid34.Lo, mid34},
+		{mid23.Lo, mid23.Hi, mid23},
+	}
+	for _, test := range tests {
+		got := IntervalFromPointPair(test.a, test.b)
+		if got != test.want {
+			t.Errorf("IntervalFromPointPair(%f, %f) = %v, want %v", test.a, test.b, got, test.want)
+		}
+	}
+}
+
+func TestSimplePredicates(t *testing.T) {
+	if !zero.IsValid() || zero.IsEmpty() || zero.IsFull() {
+		t.Errorf("Zero interval is invalid or empty or full")
+	}
+	if !empty.IsValid() || !empty.IsEmpty() || empty.IsFull() {
+		t.Errorf("Empty interval is invalid or not empty or full")
+	}
+	if !empty.IsInverted() {
+		t.Errorf("Empty interval is not inverted")
+	}
+	if !full.IsValid() || full.IsEmpty() || !full.IsFull() {
+		t.Errorf("Full interval is invalid or empty or not full")
+	}
+	if !pi.IsValid() || pi.IsEmpty() || pi.IsInverted() {
+		t.Errorf("pi is invalid or empty or inverted")
+	}
+	if !mipi.IsValid() || mipi.IsEmpty() || mipi.IsInverted() {
+		t.Errorf("mipi is invalid or empty or inverted")
+	}
+}
+
+func TestAlmostFullOrEmpty(t *testing.T) {
+	// Test that rounding errors don't cause intervals that are almost empty or
+	// full to be considered empty or full.  The following value is the greatest
+	// representable value less than Pi.
+	almostPi := math.Pi - 2*dblEpsilon
+
+	i := Interval{-almostPi, math.Pi}
+	if i.IsFull() {
+		t.Errorf("%v.IsFull should not be true", i)
+	}
+
+	i = Interval{-math.Pi, almostPi}
+	if i.IsFull() {
+		t.Errorf("%v.IsFull should not be true", i)
+	}
+
+	i = Interval{math.Pi, -almostPi}
+	if i.IsEmpty() {
+		t.Errorf("%v.IsEmpty should not be true", i)
+	}
+
+	i = Interval{almostPi, -math.Pi}
+	if i.IsEmpty() {
+		t.Errorf("%v.IsEmpty should not be true", i)
+	}
+}
+
+func TestCenter(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		want     float64
+	}{
+		{quad12, math.Pi / 2},
+		{IntervalFromEndpoints(3.1, 2.9), 3 - math.Pi},
+		{IntervalFromEndpoints(-2.9, -3.1), math.Pi - 3},
+		{IntervalFromEndpoints(2.1, -2.1), math.Pi},
+		{pi, math.Pi},
+		{mipi, math.Pi},
+		// TODO(dsymonds): The C++ test for quad23 uses fabs. Why?
+		{quad23, math.Pi},
+		// TODO(dsymonds): The C++ test for quad123 uses EXPECT_DOUBLE_EQ. Why?
+		{quad123, 0.75 * math.Pi},
+	}
+	for _, test := range tests {
+		got := test.interval.Center()
+		// TODO(dsymonds): Some are inaccurate in the 16th decimal place. Track it down.
+		if math.Abs(got-test.want) > 1e-15 {
+			t.Errorf("%v.Center() = %v, want %v", test.interval, got, test.want)
+		}
+	}
+}
+
+func TestLength(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		want     float64
+	}{
+		{quad12, math.Pi},
+		{pi, 0},
+		{mipi, 0},
+		// TODO(dsymonds): The C++ test for quad123 uses DOUBLE_EQ. Why?
+		{quad123, 1.5 * math.Pi},
+		// TODO(dsymonds): The C++ test for quad23 uses fabs. Why?
+		{quad23, math.Pi},
+		{full, 2 * math.Pi},
+	}
+	for _, test := range tests {
+		if l := test.interval.Length(); l != test.want {
+			t.Errorf("%v.Length() got %v, want %v", test.interval, l, test.want)
+		}
+	}
+	if l := empty.Length(); l >= 0 {
+		t.Errorf("empty interval has non-negative length %v", l)
+	}
+}
+
+func TestContains(t *testing.T) {
+	tests := []struct {
+		interval  Interval
+		in, out   []float64 // points that should be inside/outside the interval
+		iIn, iOut []float64 // points that should be inside/outside the interior
+	}{
+		{empty, nil, []float64{0, math.Pi, -math.Pi}, nil, []float64{math.Pi, -math.Pi}},
+		{full, []float64{0, math.Pi, -math.Pi}, nil, []float64{math.Pi, -math.Pi}, nil},
+		{quad12, []float64{0, math.Pi, -math.Pi}, nil,
+			[]float64{math.Pi / 2}, []float64{0, math.Pi, -math.Pi}},
+		{quad23, []float64{math.Pi / 2, -math.Pi / 2, math.Pi, -math.Pi}, []float64{0},
+			[]float64{math.Pi, -math.Pi}, []float64{math.Pi / 2, -math.Pi / 2, 0}},
+		{pi, []float64{math.Pi, -math.Pi}, []float64{0}, nil, []float64{math.Pi, -math.Pi}},
+		{mipi, []float64{math.Pi, -math.Pi}, []float64{0}, nil, []float64{math.Pi, -math.Pi}},
+		{zero, []float64{0}, nil, nil, []float64{0}},
+	}
+	for _, test := range tests {
+		for _, p := range test.in {
+			if !test.interval.Contains(p) {
+				t.Errorf("%v should contain %v", test.interval, p)
+			}
+		}
+		for _, p := range test.out {
+			if test.interval.Contains(p) {
+				t.Errorf("%v should not contain %v", test.interval, p)
+			}
+		}
+		for _, p := range test.iIn {
+			if !test.interval.InteriorContains(p) {
+				t.Errorf("interior of %v should contain %v", test.interval, p)
+			}
+		}
+		for _, p := range test.iOut {
+			if test.interval.InteriorContains(p) {
+				t.Errorf("interior %v should not contain %v", test.interval, p)
+			}
+		}
+	}
+}
+
+func TestIntervalOperations(t *testing.T) {
+	quad12eps := IntervalFromEndpoints(quad12.Lo, mid23.Hi)
+	quad2hi := IntervalFromEndpoints(mid23.Lo, quad12.Hi)
+	quad412eps := IntervalFromEndpoints(mid34.Lo, quad12.Hi)
+	quadeps12 := IntervalFromEndpoints(mid41.Lo, quad12.Hi)
+	quad1lo := IntervalFromEndpoints(quad12.Lo, mid41.Hi)
+	quad2lo := IntervalFromEndpoints(quad23.Lo, mid12.Hi)
+	quad3hi := IntervalFromEndpoints(mid34.Lo, quad23.Hi)
+	quadeps23 := IntervalFromEndpoints(mid12.Lo, quad23.Hi)
+	quad23eps := IntervalFromEndpoints(quad23.Lo, mid34.Hi)
+	quadeps123 := IntervalFromEndpoints(mid41.Lo, quad23.Hi)
+
+	// This massive list of test cases is ported directly from the C++ test case.
+	tests := []struct {
+		x, y                               Interval
+		xContainsY, xInteriorContainsY     bool
+		xIntersectsY, xInteriorIntersectsY bool
+		wantUnion, wantIntersection        Interval
+	}{
+		// 0
+		{empty, empty, true, true, false, false, empty, empty},
+		{empty, full, false, false, false, false, full, empty},
+		{empty, zero, false, false, false, false, zero, empty},
+		{empty, pi, false, false, false, false, pi, empty},
+		{empty, mipi, false, false, false, false, mipi, empty},
+
+		// 5
+		{full, empty, true, true, false, false, full, empty},
+		{full, full, true, true, true, true, full, full},
+		{full, zero, true, true, true, true, full, zero},
+		{full, pi, true, true, true, true, full, pi},
+		{full, mipi, true, true, true, true, full, mipi},
+		{full, quad12, true, true, true, true, full, quad12},
+		{full, quad23, true, true, true, true, full, quad23},
+
+		// 12
+		{zero, empty, true, true, false, false, zero, empty},
+		{zero, full, false, false, true, false, full, zero},
+		{zero, zero, true, false, true, false, zero, zero},
+		{zero, pi, false, false, false, false, IntervalFromEndpoints(0, math.Pi), empty},
+		{zero, pi2, false, false, false, false, quad1, empty},
+		{zero, mipi, false, false, false, false, quad12, empty},
+		{zero, mipi2, false, false, false, false, quad4, empty},
+		{zero, quad12, false, false, true, false, quad12, zero},
+		{zero, quad23, false, false, false, false, quad123, empty},
+
+		// 21
+		{pi2, empty, true, true, false, false, pi2, empty},
+		{pi2, full, false, false, true, false, full, pi2},
+		{pi2, zero, false, false, false, false, quad1, empty},
+		{pi2, pi, false, false, false, false, IntervalFromEndpoints(math.Pi/2, math.Pi), empty},
+		{pi2, pi2, true, false, true, false, pi2, pi2},
+		{pi2, mipi, false, false, false, false, quad2, empty},
+		{pi2, mipi2, false, false, false, false, quad23, empty},
+		{pi2, quad12, false, false, true, false, quad12, pi2},
+		{pi2, quad23, false, false, true, false, quad23, pi2},
+
+		// 30
+		{pi, empty, true, true, false, false, pi, empty},
+		{pi, full, false, false, true, false, full, pi},
+		{pi, zero, false, false, false, false, IntervalFromEndpoints(math.Pi, 0), empty},
+		{pi, pi, true, false, true, false, pi, pi},
+		{pi, pi2, false, false, false, false, IntervalFromEndpoints(math.Pi/2, math.Pi), empty},
+		{pi, mipi, true, false, true, false, pi, pi},
+		{pi, mipi2, false, false, false, false, quad3, empty},
+		{pi, quad12, false, false, true, false, IntervalFromEndpoints(0, math.Pi), pi},
+		{pi, quad23, false, false, true, false, quad23, pi},
+
+		// 39
+		{mipi, empty, true, true, false, false, mipi, empty},
+		{mipi, full, false, false, true, false, full, mipi},
+		{mipi, zero, false, false, false, false, quad34, empty},
+		{mipi, pi, true, false, true, false, mipi, mipi},
+		{mipi, pi2, false, false, false, false, quad2, empty},
+		{mipi, mipi, true, false, true, false, mipi, mipi},
+		{mipi, mipi2, false, false, false, false, IntervalFromEndpoints(-math.Pi, -math.Pi/2), empty},
+		{mipi, quad12, false, false, true, false, quad12, mipi},
+		{mipi, quad23, false, false, true, false, quad23, mipi},
+
+		// 48
+		{quad12, empty, true, true, false, false, quad12, empty},
+		{quad12, full, false, false, true, true, full, quad12},
+		{quad12, zero, true, false, true, false, quad12, zero},
+		{quad12, pi, true, false, true, false, quad12, pi},
+		{quad12, mipi, true, false, true, false, quad12, mipi},
+		{quad12, quad12, true, false, true, true, quad12, quad12},
+		{quad12, quad23, false, false, true, true, quad123, quad2},
+		{quad12, quad34, false, false, true, false, full, quad12},
+
+		// 56
+		{quad23, empty, true, true, false, false, quad23, empty},
+		{quad23, full, false, false, true, true, full, quad23},
+		{quad23, zero, false, false, false, false, quad234, empty},
+		{quad23, pi, true, true, true, true, quad23, pi},
+		{quad23, mipi, true, true, true, true, quad23, mipi},
+		{quad23, quad12, false, false, true, true, quad123, quad2},
+		{quad23, quad23, true, false, true, true, quad23, quad23},
+		{quad23, quad34, false, false, true, true, quad234, IntervalFromEndpoints(-math.Pi, -math.Pi/2)},
+
+		// 64
+		{quad1, quad23, false, false, true, false, quad123, IntervalFromEndpoints(math.Pi/2, math.Pi/2)},
+		{quad2, quad3, false, false, true, false, quad23, mipi},
+		{quad3, quad2, false, false, true, false, quad23, pi},
+		{quad2, pi, true, false, true, false, quad2, pi},
+		{quad2, mipi, true, false, true, false, quad2, mipi},
+		{quad3, pi, true, false, true, false, quad3, pi},
+		{quad3, mipi, true, false, true, false, quad3, mipi},
+
+		// 71
+		{quad12, mid12, true, true, true, true, quad12, mid12},
+		{mid12, quad12, false, false, true, true, quad12, mid12},
+
+		// 73
+		{quad12, mid23, false, false, true, true, quad12eps, quad2hi},
+		{mid23, quad12, false, false, true, true, quad12eps, quad2hi},
+
+		// This test checks that the union of two disjoint intervals is the smallest
+		// interval that contains both of them.  Note that the center of "mid34" is
+		// slightly CCW of -Pi/2 so that there is no ambiguity about the result.
+		// 75
+		{quad12, mid34, false, false, false, false, quad412eps, empty},
+		{mid34, quad12, false, false, false, false, quad412eps, empty},
+
+		// 77
+		{quad12, mid41, false, false, true, true, quadeps12, quad1lo},
+		{mid41, quad12, false, false, true, true, quadeps12, quad1lo},
+
+		// 79
+		{quad23, mid12, false, false, true, true, quadeps23, quad2lo},
+		{mid12, quad23, false, false, true, true, quadeps23, quad2lo},
+		{quad23, mid23, true, true, true, true, quad23, mid23},
+		{mid23, quad23, false, false, true, true, quad23, mid23},
+		{quad23, mid34, false, false, true, true, quad23eps, quad3hi},
+		{mid34, quad23, false, false, true, true, quad23eps, quad3hi},
+		{quad23, mid41, false, false, false, false, quadeps123, empty},
+		{mid41, quad23, false, false, false, false, quadeps123, empty},
+	}
+	should := func(b bool) string {
+		if b {
+			return "should"
+		}
+		return "should not"
+	}
+	for _, test := range tests {
+		if test.x.ContainsInterval(test.y) != test.xContainsY {
+			t.Errorf("%v %s contain %v", test.x, should(test.xContainsY), test.y)
+		}
+		if test.x.InteriorContainsInterval(test.y) != test.xInteriorContainsY {
+			t.Errorf("interior of %v %s contain %v", test.x, should(test.xInteriorContainsY), test.y)
+		}
+		if test.x.Intersects(test.y) != test.xIntersectsY {
+			t.Errorf("%v %s intersect %v", test.x, should(test.xIntersectsY), test.y)
+		}
+		if test.x.InteriorIntersects(test.y) != test.xInteriorIntersectsY {
+			t.Errorf("interior of %v %s intersect %v", test.x, should(test.xInteriorIntersectsY), test.y)
+		}
+		if u := test.x.Union(test.y); u != test.wantUnion {
+			t.Errorf("%v ∪ %v was %v, want %v", test.x, test.y, u, test.wantUnion)
+		}
+		if u := test.x.Intersection(test.y); u != test.wantIntersection {
+			t.Errorf("%v ∩ %v was %v, want %v", test.x, test.y, u, test.wantIntersection)
+		}
+	}
+}
+
+func TestAddPoint(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		points   []float64
+		want     Interval
+	}{
+		{empty, []float64{0}, zero},
+		{empty, []float64{math.Pi}, pi},
+		{empty, []float64{-math.Pi}, mipi},
+		{empty, []float64{math.Pi, -math.Pi}, pi},
+		{empty, []float64{-math.Pi, math.Pi}, mipi},
+		{empty, []float64{mid12.Lo, mid12.Hi}, mid12},
+		{empty, []float64{mid23.Lo, mid23.Hi}, mid23},
+
+		{quad1, []float64{-0.9 * math.Pi, -math.Pi / 2}, quad123},
+		{full, []float64{0}, full},
+		{full, []float64{math.Pi}, full},
+		{full, []float64{-math.Pi}, full},
+	}
+	for _, test := range tests {
+		got := test.interval
+		for _, point := range test.points {
+			got = got.AddPoint(point)
+		}
+		want := test.want
+		if math.Abs(got.Lo-want.Lo) > 1e-15 || math.Abs(got.Hi-want.Hi) > 1e-15 {
+			t.Errorf("%v.AddPoint(%v) = %v, want %v", test.interval, test.points, got, want)
+		}
+	}
+}
+
+func TestExpanded(t *testing.T) {
+	tests := []struct {
+		interval Interval
+		margin   float64
+		want     Interval
+	}{
+		{empty, 1, empty},
+		{full, 1, full},
+		{zero, 1, Interval{-1, 1}},
+		{mipi, 0.01, Interval{math.Pi - 0.01, -math.Pi + 0.01}},
+		{pi, 27, full},
+		{pi, math.Pi / 2, quad23},
+		{pi2, math.Pi / 2, quad12},
+		{mipi2, math.Pi / 2, quad34},
+
+		{empty, -1, empty},
+		{full, -1, full},
+		{quad123, -27, empty},
+		{quad234, -27, empty},
+		{quad123, -math.Pi / 2, quad2},
+		{quad341, -math.Pi / 2, quad4},
+		{quad412, -math.Pi / 2, quad1},
+	}
+	for _, test := range tests {
+		if got, want := test.interval.Expanded(test.margin), test.want; math.Abs(got.Lo-want.Lo) > 1e-15 || math.Abs(got.Hi-want.Hi) > 1e-15 {
+			t.Errorf("%v.Expanded(%v) = %v, want %v", test.interval, test.margin, got, want)
+		}
+	}
+}
+
+func TestIntervalString(t *testing.T) {
+	if s, exp := pi.String(), "[3.1415927, 3.1415927]"; s != exp {
+		t.Errorf("pi.String() = %q, want %q", s, exp)
+	}
+}

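TestAlmostFullOrEmpty above treats math.Pi - 2*dblEpsilon as the largest float64 strictly below π. Assuming dblEpsilon is the usual 2⁻⁵² (an assumption; the package constant is not shown in this diff), that claim can be verified directly:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// π lies in [2, 4), so one ulp of π is 2^-51 = 2 * 2^-52. Subtracting
	// 2*dblEpsilon therefore steps down exactly one representable value.
	dblEpsilon := math.Pow(2, -52) // assumed value of the package constant
	almostPi := math.Pi - 2*dblEpsilon
	fmt.Println(almostPi == math.Nextafter(math.Pi, 0)) // true
}
```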
+ 142 - 80
vendor/github.com/golang/geo/s2/cap.go

@@ -24,17 +24,9 @@ import (
 	"github.com/golang/geo/s1"
 )
 
-const (
-	emptyHeight = -1.0
-	zeroHeight  = 0.0
-	fullHeight  = 2.0
-
-	roundUp = 1.0 + 1.0/(1<<52)
-)
-
 var (
-	// centerPoint is the default center for S2Caps
-	centerPoint = Point{PointFromCoords(1.0, 0, 0).Normalize()}
+	// centerPoint is the default center for Caps
+	centerPoint = PointFromCoords(1.0, 0, 0)
 )
 
 // Cap represents a disc-shaped region defined by a center and radius.
@@ -53,36 +45,49 @@ var (
 // The center is a point on the surface of the unit sphere. (Hence the need for
 // it to be of unit length.)
 //
-// Internally, the cap is represented by its center and "height". The height
-// is the distance from the center point to the cutoff plane. This
-// representation is much more efficient for containment tests than the
-// (center, radius) representation. There is also support for "empty" and
-// "full" caps, which contain no points and all points respectively.
+// A cap can also be defined by its center point and height. The height is the
+// distance from the center point to the cutoff plane. There is also support for
+// "empty" and "full" caps, which contain no points and all points respectively.
+//
+// Here are some useful relationships between the cap height (h), the cap
+// radius (r), the maximum chord length from the cap's center (d), and the
+// radius of cap's base (a).
+//
+//     h = 1 - cos(r)
+//       = 2 * sin^2(r/2)
+//   d^2 = 2 * h
+//       = a^2 + h^2
 //
 // The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap.
 type Cap struct {
 	center Point
-	height float64
+	radius s1.ChordAngle
 }
 
 // CapFromPoint constructs a cap containing a single point.
 func CapFromPoint(p Point) Cap {
-	return CapFromCenterHeight(p, zeroHeight)
+	return CapFromCenterChordAngle(p, 0)
 }
 
 // CapFromCenterAngle constructs a cap with the given center and angle.
 func CapFromCenterAngle(center Point, angle s1.Angle) Cap {
-	return CapFromCenterHeight(center, radiusToHeight(angle))
+	return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle))
+}
+
+// CapFromCenterChordAngle constructs a cap where the angle is expressed as an
+// s1.ChordAngle. This constructor is more efficient than using an s1.Angle.
+func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap {
+	return Cap{
+		center: center,
+		radius: radius,
+	}
 }
 
 // CapFromCenterHeight constructs a cap with the given center and height. A
 // negative height yields an empty cap; a height of 2 or more yields a full cap.
 // The center should be unit length.
 func CapFromCenterHeight(center Point, height float64) Cap {
-	return Cap{
-		center: center,
-		height: height,
-	}
+	return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height))
 }
 
 // CapFromCenterArea constructs a cap with the given center and surface area.
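The height/radius/chord relationships quoted in the new Cap documentation (h = 1 − cos(r) = 2·sin²(r/2) and d² = 2h = a² + h²) can be checked with a few lines of plain Go. A throwaway sketch, taking the base radius as a = sin(r):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	r := 0.7 // any angular radius in (0, π) illustrates the identities
	h1 := 1 - math.Cos(r)
	h2 := 2 * math.Sin(r/2) * math.Sin(r/2)
	a := math.Sin(r) // radius of the cap's base
	d2 := 2 * h1     // maximum squared chord length from the cap's center
	fmt.Println("h two ways: ", h1, h2)
	fmt.Println("d² vs a²+h²:", d2, a*a+h1*h1)
}
```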
@@ -90,33 +95,32 @@ func CapFromCenterHeight(center Point, height float64) Cap {
 // cap (because the sphere has unit radius). A negative area yields an empty cap;
 // an area of 4*π or more yields a full cap.
 func CapFromCenterArea(center Point, area float64) Cap {
-	return CapFromCenterHeight(center, area/(math.Pi*2.0))
+	return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi))
 }
 
 // EmptyCap returns a cap that contains no points.
 func EmptyCap() Cap {
-	return CapFromCenterHeight(centerPoint, emptyHeight)
+	return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle)
 }
 
 // FullCap returns a cap that contains all points.
 func FullCap() Cap {
-	return CapFromCenterHeight(centerPoint, fullHeight)
+	return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle)
 }
 
 // IsValid reports whether the Cap is considered valid.
-// Heights are normalized so that they do not exceed 2.
 func (c Cap) IsValid() bool {
-	return c.center.Vector.IsUnit() && c.height <= fullHeight
+	return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle
 }
 
 // IsEmpty reports whether the cap is empty, i.e. it contains no points.
 func (c Cap) IsEmpty() bool {
-	return c.height < zeroHeight
+	return c.radius < 0
 }
 
 // IsFull reports whether the cap is full, i.e. it contains all points.
 func (c Cap) IsFull() bool {
-	return c.height == fullHeight
+	return c.radius == s1.StraightChordAngle
 }
 
 // Center returns the cap's center point.
@@ -124,26 +128,23 @@ func (c Cap) Center() Point {
 	return c.center
 }
 
-// Height returns the cap's "height".
+// Height returns the height of the cap. This is the distance from the center
+// point to the cutoff plane.
 func (c Cap) Height() float64 {
-	return c.height
+	return float64(0.5 * c.radius)
 }
 
-// Radius returns the cap's radius.
+// Radius returns the cap radius as an s1.Angle. (Note that the cap angle
+// is stored internally as a ChordAngle, so this method requires a trigonometric
+// operation and may yield a slightly different result than the value passed
+// to CapFromCenterAngle).
 func (c Cap) Radius() s1.Angle {
-	if c.IsEmpty() {
-		return s1.Angle(emptyHeight)
-	}
-
-	// This could also be computed as acos(1 - height_), but the following
-	// formula is much more accurate when the cap height is small. It
-	// follows from the relationship h = 1 - cos(r) = 2 sin^2(r/2).
-	return s1.Angle(2 * math.Asin(math.Sqrt(0.5*c.height)))
+	return c.radius.Angle()
 }
 
 // Area returns the surface area of the Cap on the unit sphere.
 func (c Cap) Area() float64 {
-	return 2.0 * math.Pi * math.Max(zeroHeight, c.height)
+	return 2.0 * math.Pi * math.Max(0, c.Height())
 }
 
 // Contains reports whether this cap contains the other.
@@ -152,7 +153,7 @@ func (c Cap) Contains(other Cap) bool {
 	if c.IsFull() || other.IsEmpty() {
 		return true
 	}
-	return c.Radius() >= c.center.Distance(other.center)+other.Radius()
+	return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
 }
 
 // Intersects reports whether this cap intersects the other cap.
@@ -162,27 +163,27 @@ func (c Cap) Intersects(other Cap) bool {
 		return false
 	}
 
-	return c.Radius()+other.Radius() >= c.center.Distance(other.center)
+	return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center)
 }
 
 // InteriorIntersects reports whether this caps interior intersects the other cap.
 func (c Cap) InteriorIntersects(other Cap) bool {
 	// Make sure this cap has an interior and the other cap is non-empty.
-	if c.height <= zeroHeight || other.IsEmpty() {
+	if c.radius <= 0 || other.IsEmpty() {
 		return false
 	}
 
-	return c.Radius()+other.Radius() > c.center.Distance(other.center)
+	return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center)
 }
 
 // ContainsPoint reports whether this cap contains the point.
 func (c Cap) ContainsPoint(p Point) bool {
-	return c.center.Sub(p.Vector).Norm2() <= 2*c.height
+	return ChordAngleBetweenPoints(c.center, p) <= c.radius
 }
 
 // InteriorContainsPoint reports whether the point is within the interior of this cap.
 func (c Cap) InteriorContainsPoint(p Point) bool {
-	return c.IsFull() || c.center.Sub(p.Vector).Norm2() < 2*c.height
+	return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius
 }
 
 // Complement returns the complement of the interior of the cap. A cap and its
@@ -191,11 +192,14 @@ func (c Cap) InteriorContainsPoint(p Point) bool {
 // singleton cap (containing a single point) is the same as the complement
 // of an empty cap.
 func (c Cap) Complement() Cap {
-	height := emptyHeight
-	if !c.IsFull() {
-		height = fullHeight - math.Max(c.height, zeroHeight)
+	if c.IsFull() {
+		return EmptyCap()
 	}
-	return CapFromCenterHeight(Point{c.center.Mul(-1.0)}, height)
+	if c.IsEmpty() {
+		return FullCap()
+	}
+
+	return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius))
 }
 
 // CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
@@ -241,7 +245,7 @@ func (c Cap) RectBound() Rect {
 		// minus the latitude). This formula also works for negative latitudes.
 		//
 		// The formula for sin(a) follows from the relationship h = 1 - cos(a).
-		sinA := math.Sqrt(c.height * (2 - c.height))
+		sinA := c.radius.Sin()
 		sinC := math.Cos(latitude(c.center).Radians())
 		if sinA <= sinC {
 			angleA := math.Asin(sinA / sinC)
@@ -252,31 +256,41 @@ func (c Cap) RectBound() Rect {
 	return Rect{lat, lng}
 }
 
-// ApproxEqual reports whether this cap's center and height are within
-// a reasonable epsilon from the other cap.
+// Equal reports whether this cap is equal to the other cap.
+func (c Cap) Equal(other Cap) bool {
+	return (c.radius == other.radius && c.center == other.center) ||
+		(c.IsEmpty() && other.IsEmpty()) ||
+		(c.IsFull() && other.IsFull())
+}
+
+// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance.
 func (c Cap) ApproxEqual(other Cap) bool {
-	// Caps have a wider tolerance than the usual epsilon for approximately equal.
 	const epsilon = 1e-14
+	r2 := float64(c.radius)
+	otherR2 := float64(other.radius)
 	return c.center.ApproxEqual(other.center) &&
-		math.Abs(c.height-other.height) <= epsilon ||
-		c.IsEmpty() && other.height <= epsilon ||
-		other.IsEmpty() && c.height <= epsilon ||
-		c.IsFull() && other.height >= 2-epsilon ||
-		other.IsFull() && c.height >= 2-epsilon
+		math.Abs(r2-otherR2) <= epsilon ||
+		c.IsEmpty() && otherR2 <= epsilon ||
+		other.IsEmpty() && r2 <= epsilon ||
+		c.IsFull() && otherR2 >= 2-epsilon ||
+		other.IsFull() && r2 >= 2-epsilon
 }
 
 // AddPoint increases the cap if necessary to include the given point. If this cap is empty,
 // then the center is set to the point with a zero height. p must be unit-length.
 func (c Cap) AddPoint(p Point) Cap {
 	if c.IsEmpty() {
-		return Cap{center: p}
+		c.center = p
+		c.radius = 0
+		return c
 	}
 
-	// To make sure that the resulting cap actually includes this point,
-	// we need to round up the distance calculation.  That is, after
-	// calling cap.AddPoint(p), cap.Contains(p) should be true.
-	dist2 := c.center.Sub(p.Vector).Norm2()
-	c.height = math.Max(c.height, roundUp*0.5*dist2)
+	// After calling cap.AddPoint(p), cap.Contains(p) must be true. However
+	// we don't need to do anything special to achieve this because Contains()
+	// does exactly the same distance calculation that we do here.
+	if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius {
+		c.radius = newRad
+	}
 	return c
 }
 
@@ -290,8 +304,12 @@ func (c Cap) AddCap(other Cap) Cap {
 		return c
 	}
 
-	radius := c.center.Angle(other.center.Vector) + other.Radius()
-	c.height = math.Max(c.height, roundUp*radiusToHeight(radius))
+	// We round up the distance to ensure that the cap is actually contained.
+	// TODO(roberts): Do some error analysis in order to guarantee this.
+	dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
+	if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius {
+		c.radius = newRad
+	}
 	return c
 }
 
@@ -301,7 +319,7 @@ func (c Cap) Expanded(distance s1.Angle) Cap {
 	if c.IsEmpty() {
 		return EmptyCap()
 	}
-	return CapFromCenterAngle(c.center, c.Radius()+distance)
+	return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance)))
 }
 
 func (c Cap) String() string {
@@ -311,16 +329,12 @@ func (c Cap) String() string {
 // radiusToHeight converts an s1.Angle into the height of the cap.
 func radiusToHeight(r s1.Angle) float64 {
 	if r.Radians() < 0 {
-		return emptyHeight
+		return float64(s1.NegativeChordAngle)
 	}
 	if r.Radians() >= math.Pi {
-		return fullHeight
+		return float64(s1.RightChordAngle)
 	}
-	// The height of the cap can be computed as 1 - cos(r), but this isn't very
-	// accurate for angles close to zero (where cos(r) is almost 1). The
-	// formula below has much better precision.
-	d := math.Sin(0.5 * r.Radians())
-	return 2 * d * d
+	return float64(0.5 * s1.ChordAngleFromAngle(r))
 
 }
 
@@ -357,7 +371,7 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
 	// If the cap is a hemisphere or larger, the cell and the complement of the cap
 	// are both convex. Therefore since no vertex of the cell is contained, no other
 	// interior point of the cell is contained either.
-	if c.height >= 1 {
+	if c.radius >= s1.RightChordAngle {
 		return false
 	}
 
@@ -375,7 +389,7 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
 	// At this point we know that the cell does not contain the cap center, and the cap
 	// does not contain any cell vertex. The only way that they can intersect is if the
 	// cap intersects the interior of some edge.
-	sin2Angle := c.height * (2 - c.height)
+	sin2Angle := c.radius.Sin2()
 	for k := 0; k < 4; k++ {
 		edge := cell.Edge(k).Vector
 		dot := c.center.Vector.Dot(edge)
@@ -402,5 +416,53 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
 	return false
 }
 
-// TODO(roberts): Differences from C++
-// Centroid, Union
+// Centroid returns the true centroid of the cap multiplied by its surface area
+// The result lies on the ray from the origin through the cap's center, but it
+// is not unit length. Note that if you just want the "surface centroid", i.e.
+// the normalized result, then it is simpler to call Center.
+//
+// The reason for multiplying the result by the cap area is to make it
+// easier to compute the centroid of more complicated shapes. The centroid
+// of a union of disjoint regions can be computed simply by adding their
+// Centroid() results. Caveat: for caps that contain a single point
+// (i.e., zero radius), this method always returns the origin (0, 0, 0).
+// This is because shapes with no area don't affect the centroid of a
+// union whose total area is positive.
+func (c Cap) Centroid() Point {
+	// From symmetry, the centroid of the cap must be somewhere on the line
+	// from the origin to the center of the cap on the surface of the sphere.
+	// When a sphere is divided into slices of constant thickness by a set of
+	// parallel planes, all slices have the same surface area. This implies
+	// that the radial component of the centroid is simply the midpoint of the
+	// range of radial distances spanned by the cap. That is easily computed
+	// from the cap height.
+	if c.IsEmpty() {
+		return Point{}
+	}
+	r := 1 - 0.5*c.Height()
+	return Point{c.center.Mul(r * c.Area())}
+}
+
+// Union returns the smallest cap which encloses this cap and other.
+func (c Cap) Union(other Cap) Cap {
+	// If the other cap is larger, swap c and other for the rest of the computations.
+	if c.radius < other.radius {
+		c, other = other, c
+	}
+
+	if c.IsFull() || other.IsEmpty() {
+		return c
+	}
+
+	// TODO: This calculation would be more efficient using s1.ChordAngles.
+	cRadius := c.Radius()
+	otherRadius := other.Radius()
+	distance := c.center.Distance(other.center)
+	if cRadius >= distance+otherRadius {
+		return c
+	}
+
+	resRadius := 0.5 * (distance + cRadius + otherRadius)
+	resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
+	return CapFromCenterAngle(resCenter, resRadius)
+}

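The Centroid derivation above uses the fact that slices of the unit sphere of equal thickness have equal surface area, so the radial component of the centroid is the midpoint of the cap's radial range. A throwaway numerical integration (not part of the package) that checks the resulting closed form (1 − h/2)·area:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Integrate z over a cap of height h centered on +z. By Archimedes'
	// hat-box theorem a band of thickness dz has surface area 2*π*dz, so the
	// area-weighted z integral should come out to (1 - h/2) * (2*π*h).
	h := 0.3
	const n = 1000000
	dz := h / n
	sumZ := 0.0
	for i := 0; i < n; i++ {
		z := 1 - h + (float64(i)+0.5)*dz
		sumZ += z * 2 * math.Pi * dz
	}
	fmt.Println("numeric:", sumZ, "closed form:", (1-h/2)*2*math.Pi*h)
}
```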
+ 718 - 0
vendor/github.com/golang/geo/s2/cap_test.go

@@ -0,0 +1,718 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+const (
+	tinyRad = 1e-10
+)
+
+var (
+	emptyCap   = EmptyCap()
+	fullCap    = FullCap()
+	defaultCap = EmptyCap()
+
+	zeroHeight  = 0.0
+	fullHeight  = 2.0
+	emptyHeight = -1.0
+
+	xAxisPt = Point{r3.Vector{1, 0, 0}}
+	yAxisPt = Point{r3.Vector{0, 1, 0}}
+
+	xAxis = CapFromPoint(xAxisPt)
+	yAxis = CapFromPoint(yAxisPt)
+	xComp = xAxis.Complement()
+
+	hemi    = CapFromCenterHeight(PointFromCoords(1, 0, 1), 1)
+	concave = CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(80, 10)), s1.Angle(150.0)*s1.Degree)
+	tiny    = CapFromCenterAngle(PointFromCoords(1, 2, 3), s1.Angle(tinyRad))
+)
+
+func TestCapBasicEmptyFullValid(t *testing.T) {
+	tests := []struct {
+		got                Cap
+		empty, full, valid bool
+	}{
+		{Cap{}, false, false, false},
+
+		{emptyCap, true, false, true},
+		{emptyCap.Complement(), false, true, true},
+		{fullCap, false, true, true},
+		{fullCap.Complement(), true, false, true},
+		{defaultCap, true, false, true},
+
+		{xComp, false, true, true},
+		{xComp.Complement(), true, false, true},
+
+		{tiny, false, false, true},
+		{concave, false, false, true},
+		{hemi, false, false, true},
+		{tiny, false, false, true},
+	}
+	for _, test := range tests {
+		if e := test.got.IsEmpty(); e != test.empty {
+			t.Errorf("%v.IsEmpty() = %t; want %t", test.got, e, test.empty)
+		}
+		if f := test.got.IsFull(); f != test.full {
+			t.Errorf("%v.IsFull() = %t; want %t", test.got, f, test.full)
+		}
+		if v := test.got.IsValid(); v != test.valid {
+			t.Errorf("%v.IsValid() = %t; want %t", test.got, v, test.valid)
+		}
+	}
+}
+
+func TestCapCenterHeightRadius(t *testing.T) {
+	if xAxis != xAxis.Complement().Complement() {
+		t.Errorf("the complement of the complement is not the original. %v != %v",
+			xAxis, xAxis.Complement().Complement())
+	}
+
+	if fullCap.Height() != fullHeight {
+		t.Error("full Caps should be full height")
+	}
+	if fullCap.Radius().Degrees() != 180.0 {
+		t.Error("radius of x-axis cap should be 180 degrees")
+	}
+
+	if emptyCap.center != defaultCap.center {
+		t.Error("empty Caps should be have the same center as the default")
+	}
+	if emptyCap.Height() != defaultCap.Height() {
+		t.Error("empty Caps should be have the same height as the default")
+	}
+
+	if yAxis.Height() != zeroHeight {
+		t.Error("y-axis cap should have zero height")
+	}
+
+	if xAxis.Height() != zeroHeight {
+		t.Error("x-axis cap should have zero height")
+	}
+	if xAxis.Radius().Radians() != zeroHeight {
+		t.Errorf("radius of x-axis cap got %f want %f", xAxis.Radius().Radians(), zeroHeight)
+	}
+
+	hc := Point{hemi.center.Mul(-1.0)}
+	if hc != hemi.Complement().center {
+		t.Error("hemi center and its complement should have the same center")
+	}
+	if hemi.Height() != 1.0 {
+		t.Error("hemi cap should be 1.0 in height")
+	}
+}
+
+func TestCapContains(t *testing.T) {
+	tests := []struct {
+		c1, c2 Cap
+		want   bool
+	}{
+		{emptyCap, emptyCap, true},
+		{fullCap, emptyCap, true},
+		{fullCap, fullCap, true},
+		{emptyCap, xAxis, false},
+		{fullCap, xAxis, true},
+		{xAxis, fullCap, false},
+		{xAxis, xAxis, true},
+		{xAxis, emptyCap, true},
+		{hemi, tiny, true},
+		{hemi, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/4-epsilon)), true},
+		{hemi, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/4+epsilon)), false},
+		{concave, hemi, true},
+		{concave, CapFromCenterHeight(Point{concave.center.Mul(-1.0)}, 0.1), false},
+	}
+	for _, test := range tests {
+		if got := test.c1.Contains(test.c2); got != test.want {
+			t.Errorf("%v.Contains(%v) = %t; want %t", test.c1, test.c2, got, test.want)
+		}
+	}
+}
+
+func TestCapContainsPoint(t *testing.T) {
+	// We don't use the standard epsilon in this test due to different compiler
+	// math optimizations that are permissible (FMA vs no FMA) and that yield
+	// slightly different floating point results between gccgo and gc.
+	const epsilon = 1e-14
+	tangent := tiny.center.Cross(r3.Vector{3, 2, 1}).Normalize()
+	tests := []struct {
+		c    Cap
+		p    Point
+		want bool
+	}{
+		{xAxis, xAxisPt, true},
+		{xAxis, Point{r3.Vector{1, 1e-20, 0}}, false},
+		{yAxis, xAxis.center, false},
+		{xComp, xAxis.center, true},
+		{xComp.Complement(), xAxis.center, false},
+		{tiny, Point{tiny.center.Add(tangent.Mul(tinyRad * 0.99))}, true},
+		{tiny, Point{tiny.center.Add(tangent.Mul(tinyRad * 1.01))}, false},
+		{hemi, PointFromCoords(1, 0, -(1 - epsilon)), true},
+		{hemi, xAxisPt, true},
+		{hemi.Complement(), xAxisPt, false},
+		{concave, PointFromLatLng(LatLngFromDegrees(-70*(1-epsilon), 10)), true},
+		{concave, PointFromLatLng(LatLngFromDegrees(-70*(1+epsilon), 10)), false},
+		// This test case is the one where the floating point values end up
+		// different in the 15th place and beyond.
+		{concave, PointFromLatLng(LatLngFromDegrees(-50*(1-epsilon), -170)), true},
+		{concave, PointFromLatLng(LatLngFromDegrees(-50*(1+epsilon), -170)), false},
+	}
+	for _, test := range tests {
+		if got := test.c.ContainsPoint(test.p); got != test.want {
+			t.Errorf("%v.ContainsPoint(%v) = %t, want %t", test.c, test.p, got, test.want)
+		}
+	}
+}
+
+func TestCapInteriorIntersects(t *testing.T) {
+	tests := []struct {
+		c1, c2 Cap
+		want   bool
+	}{
+		{emptyCap, emptyCap, false},
+		{emptyCap, xAxis, false},
+		{fullCap, emptyCap, false},
+		{fullCap, fullCap, true},
+		{fullCap, xAxis, true},
+		{xAxis, fullCap, false},
+		{xAxis, xAxis, false},
+		{xAxis, emptyCap, false},
+		{concave, hemi.Complement(), true},
+	}
+	for _, test := range tests {
+		if got := test.c1.InteriorIntersects(test.c2); got != test.want {
+			t.Errorf("%v.InteriorIntersects(%v); got %t want %t", test.c1, test.c2, got, test.want)
+		}
+	}
+}
+
+func TestCapInteriorContains(t *testing.T) {
+	if hemi.InteriorContainsPoint(Point{r3.Vector{1, 0, -(1 + epsilon)}}) {
+		t.Errorf("hemi (%v) should not contain point just past half way(%v)", hemi,
+			Point{r3.Vector{1, 0, -(1 + epsilon)}})
+	}
+}
+
+func TestCapExpanded(t *testing.T) {
+	cap50 := CapFromCenterAngle(xAxisPt, 50.0*s1.Degree)
+	cap51 := CapFromCenterAngle(xAxisPt, 51.0*s1.Degree)
+
+	if !emptyCap.Expanded(s1.Angle(fullHeight)).IsEmpty() {
+		t.Error("Expanding empty cap should return an empty cap")
+	}
+	if !fullCap.Expanded(s1.Angle(fullHeight)).IsFull() {
+		t.Error("Expanding a full cap should return an full cap")
+	}
+
+	if !cap50.Expanded(0).ApproxEqual(cap50) {
+		t.Error("Expanding a cap by 0° should be equal to the original")
+	}
+	if !cap50.Expanded(1 * s1.Degree).ApproxEqual(cap51) {
+		t.Error("Expanding 50° by 1° should equal the 51° cap")
+	}
+
+	if cap50.Expanded(129.99 * s1.Degree).IsFull() {
+		t.Error("Expanding 50° by 129.99° should not give a full cap")
+	}
+	if !cap50.Expanded(130.01 * s1.Degree).IsFull() {
+		t.Error("Expanding 50° by 130.01° should give a full cap")
+	}
+}
+
+func TestCapRadiusToHeight(t *testing.T) {
+	tests := []struct {
+		got  s1.Angle
+		want float64
+	}{
+		// Above/below boundary checks.
+		{s1.Angle(-0.5), emptyHeight},
+		{s1.Angle(0), 0},
+		{s1.Angle(math.Pi), fullHeight},
+		{s1.Angle(2 * math.Pi), fullHeight},
+		// Degree tests.
+		{-7.0 * s1.Degree, emptyHeight},
+		{-0.0 * s1.Degree, 0},
+		{0.0 * s1.Degree, 0},
+		{12.0 * s1.Degree, 0.0218523992661943},
+		{30.0 * s1.Degree, 0.1339745962155613},
+		{45.0 * s1.Degree, 0.2928932188134525},
+		{90.0 * s1.Degree, 1.0},
+		{179.99 * s1.Degree, 1.9999999847691292},
+		{180.0 * s1.Degree, fullHeight},
+		{270.0 * s1.Degree, fullHeight},
+		// Radians tests.
+		{-1.0 * s1.Radian, emptyHeight},
+		{-0.0 * s1.Radian, 0},
+		{0.0 * s1.Radian, 0},
+		{1.0 * s1.Radian, 0.45969769413186},
+		{math.Pi / 2.0 * s1.Radian, 1.0},
+		{2.0 * s1.Radian, 1.4161468365471424},
+		{3.0 * s1.Radian, 1.9899924966004454},
+		{math.Pi * s1.Radian, fullHeight},
+		{4.0 * s1.Radian, fullHeight},
+	}
+	for _, test := range tests {
+		// float64Eq comes from s2latlng_test.go
+		if got := radiusToHeight(test.got); !float64Eq(got, test.want) {
+			t.Errorf("radiusToHeight(%v) = %v; want %v", test.got, got, test.want)
+		}
+	}
+}
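The expected heights in the table above follow the spherical-cap relation h = 1 - cos(θ) for an opening angle θ, clamped to the empty (-1) and full (2) sentinels. A standard-library-only spot check of a few rows:

package main

import (
	"fmt"
	"math"
)

func main() {
	// h = 1 - cos(theta) reproduces the table entries, e.g. 30° and 45°.
	for _, deg := range []float64{12, 30, 45, 90, 179.99} {
		theta := deg * math.Pi / 180
		fmt.Printf("%g° -> %.16f\n", deg, 1-math.Cos(theta))
	}
}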
+
+func TestCapRectBounds(t *testing.T) {
+	const epsilon = 1e-13
+	var tests = []struct {
+		desc     string
+		have     Cap
+		latLoDeg float64
+		latHiDeg float64
+		lngLoDeg float64
+		lngHiDeg float64
+		isFull   bool
+	}{
+		{
+			"Cap that includes South Pole.",
+			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(-45, 57)), s1.Degree*50),
+			-90, 5, -180, 180, true,
+		},
+		{
+			"Cap that is tangent to the North Pole.",
+			CapFromCenterAngle(PointFromCoords(1, 0, 1), s1.Radian*(math.Pi/4.0+1e-16)),
+			0, 90, -180, 180, true,
+		},
+		{
+			"Cap that at 45 degree center that goes from equator to the pole.",
+			CapFromCenterAngle(PointFromCoords(1, 0, 1), s1.Degree*(45+5e-15)),
+			0, 90, -180, 180, true,
+		},
+		{
+			"The eastern hemisphere.",
+			CapFromCenterAngle(Point{r3.Vector{0, 1, 0}}, s1.Radian*(math.Pi/2+2e-16)),
+			-90, 90, -180, 180, true,
+		},
+		{
+			"A cap centered on the equator.",
+			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(0, 50)), s1.Degree*20),
+			-20, 20, 30, 70, false,
+		},
+		{
+			"A cap centered on the North Pole.",
+			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(90, 123)), s1.Degree*10),
+			80, 90, -180, 180, true,
+		},
+	}
+
+	for _, test := range tests {
+		r := test.have.RectBound()
+		if !float64Near(s1.Angle(r.Lat.Lo).Degrees(), test.latLoDeg, epsilon) {
+			t.Errorf("%s: %v.RectBound(), Lat.Lo not close enough, got %0.20f, want %0.20f",
+				test.desc, test.have, s1.Angle(r.Lat.Lo).Degrees(), test.latLoDeg)
+		}
+		if !float64Near(s1.Angle(r.Lat.Hi).Degrees(), test.latHiDeg, epsilon) {
+			t.Errorf("%s: %v.RectBound(), Lat.Hi not close enough, got %0.20f, want %0.20f",
+				test.desc, test.have, s1.Angle(r.Lat.Hi).Degrees(), test.latHiDeg)
+		}
+		if !float64Near(s1.Angle(r.Lng.Lo).Degrees(), test.lngLoDeg, epsilon) {
+			t.Errorf("%s: %v.RectBound(), Lng.Lo not close enough, got %0.20f, want %0.20f",
+				test.desc, test.have, s1.Angle(r.Lng.Lo).Degrees(), test.lngLoDeg)
+		}
+		if !float64Near(s1.Angle(r.Lng.Hi).Degrees(), test.lngHiDeg, epsilon) {
+			t.Errorf("%s: %v.RectBound(), Lng.Hi not close enough, got %0.20f, want %0.20f",
+				test.desc, test.have, s1.Angle(r.Lng.Hi).Degrees(), test.lngHiDeg)
+		}
+		if got := r.Lng.IsFull(); got != test.isFull {
+			t.Errorf("%s: RectBound(%v).isFull() = %t, want %t", test.desc, test.have, got, test.isFull)
+		}
+	}
+
+	// Empty and full caps.
+	if !EmptyCap().RectBound().IsEmpty() {
+		t.Errorf("RectBound() on EmptyCap should be empty.")
+	}
+
+	if !FullCap().RectBound().IsFull() {
+		t.Errorf("RectBound() on FullCap should be full.")
+	}
+}
+
+func TestCapAddPoint(t *testing.T) {
+	const epsilon = 1e-14
+	tests := []struct {
+		have Cap
+		p    Point
+		want Cap
+	}{
+		// Cap plus its center equals itself.
+		{xAxis, xAxisPt, xAxis},
+		{yAxis, yAxisPt, yAxis},
+
+		// Cap plus opposite point equals full.
+		{xAxis, Point{r3.Vector{-1, 0, 0}}, fullCap},
+		{yAxis, Point{r3.Vector{0, -1, 0}}, fullCap},
+
+		// Cap plus orthogonal axis equals half cap.
+		{xAxis, Point{r3.Vector{0, 0, 1}}, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
+		{xAxis, Point{r3.Vector{0, 0, -1}}, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
+
+		// The 45 degree angled hemisphere plus some points.
+		{
+			hemi,
+			PointFromCoords(0, 1, -1),
+			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
+				s1.Angle(120.0)*s1.Degree),
+		},
+		{
+			hemi,
+			PointFromCoords(0, -1, -1),
+			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
+				s1.Angle(120.0)*s1.Degree),
+		},
+		{
+			hemi,
+			PointFromCoords(-1, -1, -1),
+			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
+				s1.Angle(math.Acos(-math.Sqrt(2.0/3.0)))),
+		},
+		{hemi, Point{r3.Vector{0, 1, 1}}, hemi},
+		{hemi, Point{r3.Vector{1, 0, 0}}, hemi},
+	}
+
+	for _, test := range tests {
+		got := test.have.AddPoint(test.p)
+		if !got.ApproxEqual(test.want) {
+			t.Errorf("%v.AddPoint(%v) = %v, want %v", test.have, test.p, got, test.want)
+		}
+
+		if !got.ContainsPoint(test.p) {
+			t.Errorf("%v.AddPoint(%v) did not contain added point", test.have, test.p)
+		}
+	}
+}
+
+func TestCapAddCap(t *testing.T) {
+	tests := []struct {
+		have  Cap
+		other Cap
+		want  Cap
+	}{
+		// Identity cases.
+		{emptyCap, emptyCap, emptyCap},
+		{fullCap, fullCap, fullCap},
+
+		// Anything plus empty equals itself.
+		{fullCap, emptyCap, fullCap},
+		{emptyCap, fullCap, fullCap},
+		{xAxis, emptyCap, xAxis},
+		{emptyCap, xAxis, xAxis},
+		{yAxis, emptyCap, yAxis},
+		{emptyCap, yAxis, yAxis},
+
+		// Two halves make a whole.
+		{xAxis, xComp, fullCap},
+
+		// Two zero-height orthogonal axis caps make a half-cap.
+		{xAxis, yAxis, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
+	}
+
+	for _, test := range tests {
+		got := test.have.AddCap(test.other)
+		if !got.ApproxEqual(test.want) {
+			t.Errorf("%v.AddCap(%v) = %v, want %v", test.have, test.other, got, test.want)
+		}
+	}
+}
+
+func TestCapContainsCell(t *testing.T) {
+	faceRadius := math.Atan(math.Sqrt2)
+	for face := 0; face < 6; face++ {
+		// The cell consisting of the entire face.
+		rootCell := CellFromCellID(CellIDFromFace(face))
+
+		// A leaf cell at the midpoint of the v=1 edge.
+		edgeCell := CellFromPoint(Point{faceUVToXYZ(face, 0, 1-epsilon)})
+
+		// A leaf cell at the u=1, v=1 corner
+		cornerCell := CellFromPoint(Point{faceUVToXYZ(face, 1-epsilon, 1-epsilon)})
+
+		// Quick check for full and empty caps.
+		if !fullCap.ContainsCell(rootCell) {
+			t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", fullCap, rootCell, false, true)
+		}
+
+		// Check intersections with the bounding caps of the leaf cells that are adjacent to
+		// cornerCell along the Hilbert curve.  Because this corner is at (u=1,v=1), the curve
+		// stays locally within the same cube face.
+		first := cornerCell.id.Advance(-3)
+		last := cornerCell.id.Advance(4)
+		for id := first; id < last; id = id.Next() {
+			c := CellFromCellID(id).CapBound()
+			if got, want := c.ContainsCell(cornerCell), id == cornerCell.id; got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", c, cornerCell, got, want)
+			}
+		}
+
+		for capFace := 0; capFace < 6; capFace++ {
+			// A cap that barely contains all of capFace.
+			center := unitNorm(capFace)
+			covering := CapFromCenterAngle(center, s1.Angle(faceRadius+epsilon))
+			if got, want := covering.ContainsCell(rootCell), capFace == face; got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, rootCell, got, want)
+			}
+			if got, want := covering.ContainsCell(edgeCell), center.Vector.Dot(edgeCell.id.Point().Vector) > 0.1; got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
+			}
+			if got, want := covering.ContainsCell(edgeCell), covering.IntersectsCell(edgeCell); got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
+			}
+			if got, want := covering.ContainsCell(cornerCell), capFace == face; got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, cornerCell, got, want)
+			}
+
+			// A cap that barely intersects the edges of capFace.
+			bulging := CapFromCenterAngle(center, s1.Angle(math.Pi/4+epsilon))
+			if bulging.ContainsCell(rootCell) {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, rootCell, true, false)
+			}
+			if got, want := bulging.ContainsCell(edgeCell), capFace == face; got != want {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, edgeCell, got, want)
+			}
+			if bulging.ContainsCell(cornerCell) {
+				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, cornerCell, true, false)
+			}
+		}
+	}
+}
+
+func TestCapIntersectsCell(t *testing.T) {
+	faceRadius := math.Atan(math.Sqrt2)
+	for face := 0; face < 6; face++ {
+		// The cell consisting of the entire face.
+		rootCell := CellFromCellID(CellIDFromFace(face))
+
+		// A leaf cell at the midpoint of the v=1 edge.
+		edgeCell := CellFromPoint(Point{faceUVToXYZ(face, 0, 1-epsilon)})
+
+		// A leaf cell at the u=1, v=1 corner
+		cornerCell := CellFromPoint(Point{faceUVToXYZ(face, 1-epsilon, 1-epsilon)})
+
+		// Quick check for full and empty caps.
+		if emptyCap.IntersectsCell(rootCell) {
+			t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", emptyCap, rootCell, true, false)
+		}
+
+		// Check intersections with the bounding caps of the leaf cells that are adjacent to
+		// cornerCell along the Hilbert curve.  Because this corner is at (u=1,v=1), the curve
+		// stays locally within the same cube face.
+		first := cornerCell.id.Advance(-3)
+		last := cornerCell.id.Advance(4)
+		for id := first; id < last; id = id.Next() {
+			c := CellFromCellID(id).CapBound()
+			if got, want := c.IntersectsCell(cornerCell), id.immediateParent().Contains(cornerCell.id); got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", c, cornerCell, got, want)
+			}
+		}
+
+		antiFace := (face + 3) % 6
+		for capFace := 0; capFace < 6; capFace++ {
+			// A cap that barely contains all of capFace.
+			center := unitNorm(capFace)
+			covering := CapFromCenterAngle(center, s1.Angle(faceRadius+epsilon))
+			if got, want := covering.IntersectsCell(rootCell), capFace != antiFace; got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, rootCell, got, want)
+			}
+			if got, want := covering.IntersectsCell(edgeCell), covering.ContainsCell(edgeCell); got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
+			}
+			if got, want := covering.IntersectsCell(cornerCell), center.Vector.Dot(cornerCell.id.Point().Vector) > 0; got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, cornerCell, got, want)
+			}
+
+			// A cap that barely intersects the edges of capFace.
+			bulging := CapFromCenterAngle(center, s1.Angle(math.Pi/4+epsilon))
+			if got, want := bulging.IntersectsCell(rootCell), capFace != antiFace; got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, rootCell, got, want)
+			}
+			if got, want := bulging.IntersectsCell(edgeCell), center.Vector.Dot(edgeCell.id.Point().Vector) > 0.1; got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, edgeCell, got, want)
+			}
+			if bulging.IntersectsCell(cornerCell) {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, cornerCell, true, false)
+			}
+
+			// A singleton cap.
+			singleton := CapFromCenterAngle(center, 0)
+			if got, want := singleton.IntersectsCell(rootCell), capFace == face; got != want {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, rootCell, got, want)
+			}
+			if singleton.IntersectsCell(edgeCell) {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, edgeCell, true, false)
+			}
+			if singleton.IntersectsCell(cornerCell) {
+				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, cornerCell, true, false)
+			}
+		}
+	}
+}
+
+func TestCapCentroid(t *testing.T) {
+	// Empty and full caps.
+	if got, want := EmptyCap().Centroid(), (Point{}); !got.ApproxEqual(want) {
+		t.Errorf("Centroid of EmptyCap should be zero point, got %v", want)
+	}
+	if got, want := FullCap().Centroid().Norm(), 1e-15; got > want {
+		t.Errorf("Centroid of FullCap should have a Norm of 0, got %v", want)
+	}
+
+	// Random caps.
+	for i := 0; i < 100; i++ {
+		center := randomPoint()
+		height := randomUniformFloat64(0.0, 2.0)
+		c := CapFromCenterHeight(center, height)
+		got := c.Centroid()
+		want := center.Mul((1.0 - height/2.0) * c.Area())
+		if delta := got.Sub(want).Norm(); delta > 1e-15 {
+			t.Errorf("%v.Sub(%v).Norm() = %v, want %v", got, want, delta, 1e-15)
+		}
+	}
+}
+
+func TestCapUnion(t *testing.T) {
+	// Two caps which have the same center but one has a larger radius.
+	a := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.0, 10.0)), s1.Degree*0.2)
+	b := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.0, 10.0)), s1.Degree*0.3)
+	if !b.Contains(a) {
+		t.Errorf("%v.Contains(%v) = false, want true", b, a)
+	}
+	if got := b.ApproxEqual(a.Union(b)); !got {
+		t.Errorf("%v.ApproxEqual(%v) = %v, want true", b, a.Union(b), got)
+	}
+
+	// Two caps where one is the full cap.
+	if got := a.Union(FullCap()); !got.IsFull() {
+		t.Errorf("%v.Union(%v).IsFull() = %v, want true", a, got, got.IsFull())
+	}
+
+	// Two caps where one is the empty cap.
+	if got := a.Union(EmptyCap()); !a.ApproxEqual(got) {
+		t.Errorf("%v.Union(EmptyCap) = %v, want %v", a, got, a)
+	}
+
+	// Two caps which have different centers, one entirely encompasses the other.
+	c := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(51.0, 11.0)), s1.Degree*1.5)
+	if !c.Contains(a) {
+		t.Errorf("%v.Contains(%v) = false, want true", c, a)
+	}
+	if got := a.Union(c).center; !got.ApproxEqual(c.center) {
+		t.Errorf("%v.Union(%v).center = %v, want %v", a, c, got, c.center)
+	}
+	if got := a.Union(c); !float64Eq(float64(got.Radius()), float64(c.Radius())) {
+		t.Errorf("%v.Union(%v).Radius = %v, want %v", a, c, got.Radius(), c.Radius())
+	}
+
+	// Two entirely disjoint caps.
+	d := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(51.0, 11.0)), s1.Degree*0.1)
+	if d.Contains(a) {
+		t.Errorf("%v.Contains(%v) = true, want false", d, a)
+	}
+	if d.Intersects(a) {
+		t.Errorf("%v.Intersects(%v) = true, want false", d, a)
+	}
+
+	// Check union and reverse direction are the same.
+	aUnionD := a.Union(d)
+	if !aUnionD.ApproxEqual(d.Union(a)) {
+		t.Errorf("%v.Union(%v).ApproxEqual(%v.Union(%v)) = false, want true", a, d, d, a)
+	}
+	if got, want := LatLngFromPoint(aUnionD.center).Lat.Degrees(), 50.4588; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Center.Lat = %v, want %v", aUnionD, got, want)
+	}
+	if got, want := LatLngFromPoint(aUnionD.center).Lng.Degrees(), 10.4525; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Center.Lng = %v, want %v", aUnionD, got, want)
+	}
+	if got, want := aUnionD.Radius().Degrees(), 0.7425; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Radius = %v, want %v", aUnionD, got, want)
+	}
+
+	// Two partially overlapping caps.
+	e := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.3, 10.3)), s1.Degree*0.2)
+	aUnionE := a.Union(e)
+	if e.Contains(a) {
+		t.Errorf("%v.Contains(%v) = false, want true", e, a)
+	}
+	if !e.Intersects(a) {
+		t.Errorf("%v.Intersects(%v) = false, want true", e, a)
+	}
+	if !aUnionE.ApproxEqual(e.Union(a)) {
+		t.Errorf("%v.Union(%v).ApproxEqual(%v.Union(%v)) = false, want true", a, e, e, a)
+	}
+	if got, want := LatLngFromPoint(aUnionE.center).Lat.Degrees(), 50.1500; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Center.Lat = %v, want %v", aUnionE, got, want)
+	}
+	if got, want := LatLngFromPoint(aUnionE.center).Lng.Degrees(), 10.1495; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Center.Lng = %v, want %v", aUnionE, got, want)
+	}
+	if got, want := aUnionE.Radius().Degrees(), 0.3781; !float64Near(got, want, 0.001) {
+		t.Errorf("%v.Radius = %v, want %v", aUnionE, got, want)
+	}
+
+	p1 := Point{r3.Vector{0, 0, 1}}
+	p2 := Point{r3.Vector{0, 1, 0}}
+	// Two very large caps, whose radius sums to in excess of 180 degrees, and
+	// whose centers are not antipodal.
+	f := CapFromCenterAngle(p1, s1.Degree*150)
+	g := CapFromCenterAngle(p2, s1.Degree*150)
+	if !f.Union(g).IsFull() {
+		t.Errorf("%v.Union(%v).IsFull() = false, want true", f, g)
+	}
+
+	// Two non-overlapping hemisphere caps with antipodal centers.
+	hemi := CapFromCenterHeight(p1, 1)
+	if !hemi.Union(hemi.Complement()).IsFull() {
+		t.Errorf("%v.Union(%v).Complement().IsFull() = false, want true", hemi, hemi.Complement())
+	}
+}
+
+func TestCapEqual(t *testing.T) {
+	tests := []struct {
+		a, b Cap
+		want bool
+	}{
+		{EmptyCap(), EmptyCap(), true},
+		{EmptyCap(), FullCap(), false},
+		{FullCap(), FullCap(), true},
+		{
+			CapFromCenterAngle(PointFromCoords(0, 0, 1), s1.Degree*150),
+			CapFromCenterAngle(PointFromCoords(0, 0, 1), s1.Degree*151),
+			false,
+		},
+		{xAxis, xAxis, true},
+		{xAxis, yAxis, false},
+		{xComp, xAxis.Complement(), true},
+	}
+
+	for _, test := range tests {
+		if got := test.a.Equal(test.b); got != test.want {
+			t.Errorf("%v.Equal(%v) = %t, want %t", test.a, test.b, got, test.want)
+		}
+	}
+}

+ 522 - 0
vendor/github.com/golang/geo/s2/cell_test.go

@@ -0,0 +1,522 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+	"unsafe"
+
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/s1"
+)
+
+// maxCellSize is the upper bound on the number of bytes we want the Cell object to ever be.
+const maxCellSize = 48
+
+func TestCellObjectSize(t *testing.T) {
+	if sz := unsafe.Sizeof(Cell{}); sz > maxCellSize {
+		t.Errorf("Cell struct too big: %d bytes > %d bytes", sz, maxCellSize)
+	}
+}
+
+func TestCellFaces(t *testing.T) {
+	edgeCounts := make(map[Point]int)
+	vertexCounts := make(map[Point]int)
+
+	for face := 0; face < 6; face++ {
+		id := CellIDFromFace(face)
+		cell := CellFromCellID(id)
+
+		if cell.id != id {
+			t.Errorf("cell.id != id; %v != %v", cell.id, id)
+		}
+
+		if cell.face != int8(face) {
+			t.Errorf("cell.face != face: %v != %v", cell.face, face)
+		}
+
+		if cell.level != 0 {
+			t.Errorf("cell.level != 0: %v != 0", cell.level)
+		}
+
+		// Top-level faces have alternating orientations to get RHS coordinates.
+		if cell.orientation != int8(face&swapMask) {
+			t.Errorf("cell.orientation != orientation: %v != %v", cell.orientation, face&swapMask)
+		}
+
+		if cell.IsLeaf() {
+			t.Errorf("cell should not be a leaf: IsLeaf = %v", cell.IsLeaf())
+		}
+		for k := 0; k < 4; k++ {
+			edgeCounts[cell.Edge(k)]++
+			vertexCounts[cell.Vertex(k)]++
+			if d := cell.Vertex(k).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
+				t.Errorf("dot product of vertex and edge failed, got %v, want 0", d)
+			}
+			if d := cell.Vertex((k + 1) & 3).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
+				t.Errorf("dot product for edge and next vertex failed, got %v, want 0", d)
+			}
+			if d := cell.Vertex(k).Vector.Cross(cell.Vertex((k + 1) & 3).Vector).Normalize().Dot(cell.Edge(k).Vector); !float64Eq(1.0, d) {
+				t.Errorf("dot product of cross product for vertices failed, got %v, want 1.0", d)
+			}
+		}
+	}
+
+	// Check that edges have multiplicity 2 and vertices have multiplicity 3.
+	for k, v := range edgeCounts {
+		if v != 2 {
+			t.Errorf("edge %v counts wrong, got %d, want 2", k, v)
+		}
+	}
+	for k, v := range vertexCounts {
+		if v != 3 {
+			t.Errorf("vertex %v counts wrong, got %d, want 3", k, v)
+		}
+	}
+}
+
+func TestCellChildren(t *testing.T) {
+	testCellChildren(t, CellFromCellID(CellIDFromFace(0)))
+	testCellChildren(t, CellFromCellID(CellIDFromFace(3)))
+	testCellChildren(t, CellFromCellID(CellIDFromFace(5)))
+}
+
+func testCellChildren(t *testing.T, cell Cell) {
+	children, ok := cell.Children()
+	if cell.IsLeaf() && !ok {
+		return
+	}
+	if cell.IsLeaf() && ok {
+		t.Errorf("leaf cells should not be able to return children. cell %v", cell)
+	}
+
+	if !ok {
+		t.Errorf("unable to get Children for %v", cell)
+		return
+	}
+
+	childID := cell.id.ChildBegin()
+	for i, ci := range children {
+		// Check that the child geometry is consistent with its cell ID.
+		if childID != ci.id {
+			t.Errorf("%v.child[%d].id = %v, want %v", cell, i, ci.id, childID)
+		}
+
+		direct := CellFromCellID(childID)
+		if !ci.Center().ApproxEqual(childID.Point()) {
+			t.Errorf("%v.Center() = %v, want %v", ci, ci.Center(), childID.Point())
+		}
+		if ci.face != direct.face {
+			t.Errorf("%v.face = %v, want %v", ci, ci.face, direct.face)
+		}
+		if ci.level != direct.level {
+			t.Errorf("%v.level = %v, want %v", ci, ci.level, direct.level)
+		}
+		if ci.orientation != direct.orientation {
+			t.Errorf("%v.orientation = %v, want %v", ci, ci.orientation, direct.orientation)
+		}
+		if !ci.Center().ApproxEqual(direct.Center()) {
+			t.Errorf("%v.Center() = %v, want %v", ci, ci.Center(), direct.Center())
+		}
+
+		for k := 0; k < 4; k++ {
+			if !direct.Vertex(k).ApproxEqual(ci.Vertex(k)) {
+				t.Errorf("child %d %v.Vertex(%d) = %v, want %v", i, ci, k, ci.Vertex(k), direct.Vertex(k))
+			}
+			if direct.Edge(k) != ci.Edge(k) {
+				t.Errorf("child %d %v.Edge(%d) = %v, want %v", i, ci, k, ci.Edge(k), direct.Edge(k))
+			}
+		}
+
+		// Test ContainsCell() and IntersectsCell().
+		if !cell.ContainsCell(ci) {
+			t.Errorf("%v.ContainsCell(%v) = false, want true", cell, ci)
+		}
+		if !cell.IntersectsCell(ci) {
+			t.Errorf("%v.IntersectsCell(%v) = false, want true", cell, ci)
+		}
+		if ci.ContainsCell(cell) {
+			t.Errorf("%v.ContainsCell(%v) = true, want false", ci, cell)
+		}
+		if !cell.ContainsPoint(ci.Center()) {
+			t.Errorf("%v.ContainsPoint(%v) = false, want true", cell, ci.Center())
+		}
+		for j := 0; j < 4; j++ {
+			if !cell.ContainsPoint(ci.Vertex(j)) {
+				t.Errorf("%v.ContainsPoint(%v.Vertex(%d)) = false, want true", cell, ci, j)
+			}
+			if j != i {
+				if ci.ContainsPoint(children[j].Center()) {
+					t.Errorf("%v.ContainsPoint(%v[%d].Center()) = true, want false", ci, children, j)
+				}
+				if ci.IntersectsCell(children[j]) {
+					t.Errorf("%v.IntersectsCell(%v[%d]) = true, want false", ci, children, j)
+				}
+			}
+		}
+
+		// Test CapBound and RectBound.
+		parentCap := cell.CapBound()
+		parentRect := cell.RectBound()
+		if cell.ContainsPoint(PointFromCoords(0, 0, 1)) || cell.ContainsPoint(PointFromCoords(0, 0, -1)) {
+			if !parentRect.Lng.IsFull() {
+				t.Errorf("%v.Lng.IsFull() = false, want true", parentRect)
+			}
+		}
+		childCap := ci.CapBound()
+		childRect := ci.RectBound()
+		if !childCap.ContainsPoint(ci.Center()) {
+			t.Errorf("childCap %v.ContainsPoint(%v.Center()) = false, want true", childCap, ci)
+		}
+		if !childRect.ContainsPoint(ci.Center()) {
+			t.Errorf("childRect %v.ContainsPoint(%v.Center()) = false, want true", childRect, ci)
+		}
+		if !parentCap.ContainsPoint(ci.Center()) {
+			t.Errorf("parentCap %v.ContainsPoint(%v.Center()) = false, want true", parentCap, ci)
+		}
+		if !parentRect.ContainsPoint(ci.Center()) {
+			t.Errorf("parentRect %v.ContainsPoint(%v.Center()) = false, want true", parentRect, ci)
+		}
+		for j := 0; j < 4; j++ {
+			if !childCap.ContainsPoint(ci.Vertex(j)) {
+				t.Errorf("childCap %v.ContainsPoint(%v.Vertex(%d)) = false, want true", childCap, ci, j)
+			}
+			if !childRect.ContainsPoint(ci.Vertex(j)) {
+				t.Errorf("childRect %v.ContainsPoint(%v.Vertex(%d)) = false, want true", childRect, ci, j)
+			}
+			if !parentCap.ContainsPoint(ci.Vertex(j)) {
+				t.Errorf("parentCap %v.ContainsPoint(%v.Vertex(%d)) = false, want true", parentCap, ci, j)
+			}
+			if !parentRect.ContainsPoint(ci.Vertex(j)) {
+				t.Errorf("parentRect %v.ContainsPoint(%v.Vertex(%d)) = false, want true", parentRect, ci, j)
+			}
+			if j != i {
+				// The bounding caps and rectangles should be tight enough so that
+				// they exclude at least two vertices of each adjacent cell.
+				capCount := 0
+				rectCount := 0
+				for k := 0; k < 4; k++ {
+					if childCap.ContainsPoint(children[j].Vertex(k)) {
+						capCount++
+					}
+					if childRect.ContainsPoint(children[j].Vertex(k)) {
+						rectCount++
+					}
+				}
+				if capCount > 2 {
+					t.Errorf("childs bounding cap should contain no more than 2 points, got %d", capCount)
+				}
+				if childRect.Lat.Lo > -math.Pi/2 && childRect.Lat.Hi < math.Pi/2 {
+					// Bounding rectangles may be too large at the poles
+					// because the pole itself has an arbitrary longitude.
+					if rectCount > 2 {
+						t.Errorf("childs bounding rect should contain no more than 2 points, got %d", rectCount)
+					}
+				}
+			}
+		}
+
+		// Check all children for the first few levels, and then sample randomly.
+		// We also always subdivide the cells containing a few chosen points so
+		// that we have a better chance of sampling the minimum and maximum metric
+		// values. maxSizeUV is the absolute value of the u- and v-coordinate
+		// where the cell size at a given level is maximal.
+		maxSizeUV := 0.3964182625366691
+		specialUV := []r2.Point{
+			r2.Point{dblEpsilon, dblEpsilon}, // Face center
+			r2.Point{dblEpsilon, 1},          // Edge midpoint
+			r2.Point{1, 1},                   // Face corner
+			r2.Point{maxSizeUV, maxSizeUV},   // Largest cell area
+			r2.Point{dblEpsilon, maxSizeUV},  // Longest edge/diagonal
+		}
+		forceSubdivide := false
+		for _, uv := range specialUV {
+			if ci.BoundUV().ContainsPoint(uv) {
+				forceSubdivide = true
+			}
+		}
+
+		// For a more in-depth test, add an "|| oneIn(n)" to this condition
+		// to cause more children to be tested beyond the ones to level 5.
+		if forceSubdivide || cell.level < 5 {
+			testCellChildren(t, ci)
+		}
+
+		childID = childID.Next()
+	}
+}
+
+func TestCellAreas(t *testing.T) {
+	// relative error bounds for each type of area computation
+	var exactError = math.Log(1 + 1e-6)
+	var approxError = math.Log(1.03)
+	var avgError = math.Log(1 + 1e-15)
+
+	// Test 1. Check the area of a top level cell.
+	const level1Cell = CellID(0x1000000000000000)
+	const wantArea = 4 * math.Pi / 6
+	if area := CellFromCellID(level1Cell).ExactArea(); !float64Eq(area, wantArea) {
+		t.Fatalf("Area of a top-level cell %v = %f, want %f", level1Cell, area, wantArea)
+	}
+
+	// Test 2. Iterate inwards from this cell, checking at every level that
+	// the sum of the areas of the children is equal to the area of the parent.
+	childIndex := 1
+	for cell := CellID(0x1000000000000000); cell.Level() < 21; cell = cell.Children()[childIndex] {
+		var exactArea, approxArea, avgArea float64
+		for _, child := range cell.Children() {
+			exactArea += CellFromCellID(child).ExactArea()
+			approxArea += CellFromCellID(child).ApproxArea()
+			avgArea += CellFromCellID(child).AverageArea()
+		}
+
+		if area := CellFromCellID(cell).ExactArea(); !float64Eq(exactArea, area) {
+			t.Fatalf("Areas of children of a level-%d cell %v don't add up to parent's area. "+
+				"This cell: %e, sum of children: %e",
+				cell.Level(), cell, area, exactArea)
+		}
+
+		childIndex = (childIndex + 1) % 4
+
+		// For ExactArea(), the best relative error we can expect is about 1e-6
+		// because the precision of the unit vector coordinates is only about 1e-15
+		// and the edge length of a leaf cell is about 1e-9.
+		if logExact := math.Abs(math.Log(exactArea / CellFromCellID(cell).ExactArea())); logExact > exactError {
+			t.Errorf("The relative error of ExactArea for children of a level-%d "+
+				"cell %v should be less than %e, got %e. This cell: %e, children area: %e",
+				cell.Level(), cell, exactError, logExact,
+				CellFromCellID(cell).ExactArea(), exactArea)
+		}
+		// For ApproxArea(), the areas are accurate to within a few percent.
+		if logApprox := math.Abs(math.Log(approxArea / CellFromCellID(cell).ApproxArea())); logApprox > approxError {
+			t.Errorf("The relative error of ApproxArea for children of a level-%d "+
+				"cell %v should be within %e%%, got %e. This cell: %e, sum of children: %e",
+				cell.Level(), cell, approxError, logApprox,
+				CellFromCellID(cell).ApproxArea(), approxArea)
+		}
+		// For AverageArea(), the areas themselves are not very accurate, but
+		// the average area of a parent is exactly 4 times the area of a child.
+		if logAvg := math.Abs(math.Log(avgArea / CellFromCellID(cell).AverageArea())); logAvg > avgError {
+			t.Errorf("The relative error of AverageArea for children of a level-%d "+
+				"cell %v should be less than %e, got %e. This cell: %e, sum of children: %e",
+				cell.Level(), cell, avgError, logAvg,
+				CellFromCellID(cell).AverageArea(), avgArea)
+		}
+	}
+}
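The three area methods exercised above trade cost for accuracy: ExactArea integrates the cell itself, ApproxArea is good to a few percent, and AverageArea only preserves the exact 4:1 parent/child ratio. A minimal sketch of calling them on one cell (arbitrary cell choice; upstream import path assumed):

package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	c := s2.CellFromCellID(s2.CellIDFromFace(0).ChildBeginAtLevel(8))
	// Three estimates of the same spherical area, in steradians.
	fmt.Println(c.ExactArea(), c.ApproxArea(), c.AverageArea())
}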
+
+func TestCellIntersectsCell(t *testing.T) {
+	tests := []struct {
+		c    Cell
+		oc   Cell
+		want bool
+	}{
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
+			false,
+		},
+	}
+	for _, test := range tests {
+		if got := test.c.IntersectsCell(test.oc); got != test.want {
+			t.Errorf("Cell(%v).IntersectsCell(%v) = %t; want %t", test.c, test.oc, got, test.want)
+		}
+	}
+}
+
+func TestCellContainsCell(t *testing.T) {
+	tests := []struct {
+		c    Cell
+		oc   Cell
+		want bool
+	}{
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			false,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			false,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
+			false,
+		},
+	}
+	for _, test := range tests {
+		if got := test.c.ContainsCell(test.oc); got != test.want {
+			t.Errorf("Cell(%v).ContainsCell(%v) = %t; want %t", test.c, test.oc, got, test.want)
+		}
+	}
+}
+
+func TestCellRectBound(t *testing.T) {
+	tests := []struct {
+		lat float64
+		lng float64
+	}{
+		{50, 50},
+		{-50, 50},
+		{50, -50},
+		{-50, -50},
+		{0, 0},
+		{0, 180},
+		{0, -179},
+	}
+	for _, test := range tests {
+		c := CellFromLatLng(LatLngFromDegrees(test.lat, test.lng))
+		rect := c.RectBound()
+		for i := 0; i < 4; i++ {
+			if !rect.ContainsLatLng(LatLngFromPoint(c.Vertex(i))) {
+				t.Errorf("%v should contain %v", rect, c.Vertex(i))
+			}
+		}
+	}
+}
+
+func TestCellRectBoundAroundPoleMinLat(t *testing.T) {
+	tests := []struct {
+		cellID       CellID
+		latLng       LatLng
+		wantContains bool
+	}{
+		{
+			cellID:       CellIDFromFacePosLevel(2, 0, 0),
+			latLng:       LatLngFromDegrees(3, 0),
+			wantContains: false,
+		},
+		{
+			cellID:       CellIDFromFacePosLevel(2, 0, 0),
+			latLng:       LatLngFromDegrees(50, 0),
+			wantContains: true,
+		},
+		{
+			cellID:       CellIDFromFacePosLevel(5, 0, 0),
+			latLng:       LatLngFromDegrees(-3, 0),
+			wantContains: false,
+		},
+		{
+			cellID:       CellIDFromFacePosLevel(5, 0, 0),
+			latLng:       LatLngFromDegrees(-50, 0),
+			wantContains: true,
+		},
+	}
+	for _, test := range tests {
+		if got := CellFromCellID(test.cellID).RectBound().ContainsLatLng(test.latLng); got != test.wantContains {
+			t.Errorf("CellID(%v) contains %v: got %t, want %t", test.cellID, test.latLng, got, test.wantContains)
+		}
+	}
+}
+
+func TestCellCapBound(t *testing.T) {
+	c := CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(20))
+	s2Cap := c.CapBound()
+	for i := 0; i < 4; i++ {
+		if !s2Cap.ContainsPoint(c.Vertex(i)) {
+			t.Errorf("%v should contain %v", s2Cap, c.Vertex(i))
+		}
+	}
+}
+
+func TestCellContainsPoint(t *testing.T) {
+	tests := []struct {
+		c    Cell
+		p    Point
+		want bool
+	}{
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)).Vertex(1),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)).Vertex(1),
+			true,
+		},
+		{
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
+			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next().ChildBeginAtLevel(5)).Vertex(1),
+			false,
+		},
+	}
+	for _, test := range tests {
+		if got := test.c.ContainsPoint(test.p); got != test.want {
+			t.Errorf("Cell(%v).ContainsPoint(%v) = %t; want %t", test.c, test.p, got, test.want)
+		}
+	}
+}
+
+func TestCellContainsPointConsistentWithS2CellIDFromPoint(t *testing.T) {
+	// Construct many points that are nearly on a Cell edge, and verify that
+	// CellFromCellID(cellIDFromPoint(p)).Contains(p) is always true.
+	for iter := 0; iter < 1000; iter++ {
+		cell := CellFromCellID(randomCellID())
+		i1 := randomUniformInt(4)
+		i2 := (i1 + 1) & 3
+		v1 := cell.Vertex(i1)
+		v2 := samplePointFromCap(CapFromCenterAngle(cell.Vertex(i2), s1.Angle(epsilon)))
+		p := Interpolate(randomFloat64(), v1, v2)
+		if !CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) {
+			t.Errorf("For p=%v, CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) was false", p)
+		}
+	}
+}
+
+func TestCellContainsPointContainsAmbiguousPoint(t *testing.T) {
+	// This tests a case where S2CellId returns the "wrong" cell for a point
+	// that is very close to the cell edge. (ConsistentWithS2CellIdFromPoint
+	// generates more examples like this.)
+	//
+	// The Point below should have x = 0, but conversion from LatLng to
+	// (x,y,z) gives x = ~6.1e-17. When xyz is converted to uv, this gives
+	// u = -6.1e-17. However when converting to st, which has a range of [0,1],
+	// the low precision bits of u are lost and we wind up with s = 0.5.
+	// cellIDFromPoint then chooses an arbitrary neighboring cell.
+	//
+	// This tests that Cell.ContainsPoint() expands the cell bounds sufficiently
+	// so that the returned cell is still considered to contain p.
+	p := PointFromLatLng(LatLngFromDegrees(-2, 90))
+	cell := CellFromCellID(cellIDFromPoint(p).Parent(1))
+	if !cell.ContainsPoint(p) {
+		t.Errorf("For p=%v, CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) was false", p)
+	}
+}

+ 168 - 8
vendor/github.com/golang/geo/s2/cellid.go

@@ -26,6 +26,7 @@ import (
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r2"
 	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
 )
 
 // CellID uniquely identifies a cell in the S2 cell decomposition.
@@ -34,13 +35,34 @@ import (
 // along the Hilbert curve on that face. The zero value and the value
 // (1<<64)-1 are invalid cell IDs. The first compares less than any
 // valid cell ID, the second as greater than any valid cell ID.
+//
+// Sequentially increasing cell IDs follow a continuous space-filling curve
+// over the entire sphere. They have the following properties:
+//
+//  - The ID of a cell at level k consists of a 3-bit face number followed
+//    by k bit pairs that recursively select one of the four children of
+//    each cell. The next bit is always 1, and all other bits are 0.
+//    Therefore, the level of a cell is determined by the position of its
+//    lowest-numbered bit that is turned on (for a cell at level k, this
+//    position is 2 * (maxLevel - k)).
+//
+//  - The ID of a parent cell is at the midpoint of the range of IDs spanned
+//    by its children (or by its descendants at any level).
+//
+// Leaf cells are often used to represent points on the unit sphere, and
+// this type provides methods for converting directly between these two
+// representations. For cells that represent 2D regions rather than
+// discrete points, it is better to use Cells.
 type CellID uint64
 
 // TODO(dsymonds): Some of these constants should probably be exported.
 const (
-	faceBits   = 3
-	numFaces   = 6
-	maxLevel   = 30
+	faceBits = 3
+	numFaces = 6
+	maxLevel = 30
+	// The extra position bit (61 rather than 60) lets us encode each cell as its
+	// Hilbert curve position at the cell center (which is halfway along the
+	// portion of the Hilbert curve that fills that cell).
 	posBits    = 2*maxLevel + 1
 	maxSize    = 1 << maxLevel
 	wrapOffset = uint64(numFaces) << posBits
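Because of the bit layout described above, a cell's level can be read straight off the position of its trailing 1 bit. A hedged sketch (arbitrary coordinates; math/bits from the standard library; upstream import path assumed):

package main

import (
	"fmt"
	"math/bits"

	"github.com/golang/geo/s2"
)

func main() {
	id := s2.CellIDFromLatLng(s2.LatLngFromDegrees(40.7, -74.0)).Parent(16)
	// The trailing 1 bit sits at position 2*(maxLevel-level), with maxLevel == 30,
	// so the level falls out of the trailing-zero count.
	level := 30 - bits.TrailingZeros64(uint64(id))/2
	fmt.Println(level, id.Level()) // 16 16
}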
@@ -211,6 +233,59 @@ func (ci CellID) VertexNeighbors(level int) []CellID {
 	return results
 }
 
+// AllNeighbors returns all neighbors of this cell at the given level. Two
+// cells X and Y are neighbors if their boundaries intersect but their
+// interiors do not. In particular, two cells that intersect at a single
+// point are neighbors. Note that for cells adjacent to a face vertex, the
+// same neighbor may be returned more than once. There could be up to eight
+// neighbors including the diagonal ones that share the vertex.
+//
+// This requires level >= ci.Level().
+func (ci CellID) AllNeighbors(level int) []CellID {
+	var neighbors []CellID
+
+	face, i, j, _ := ci.faceIJOrientation()
+
+	// Find the coordinates of the lower left-hand leaf cell. We need to
+	// normalize (i,j) to a known position within the cell because level
+	// may be larger than this cell's level.
+	size := sizeIJ(ci.Level())
+	i &= -size
+	j &= -size
+
+	nbrSize := sizeIJ(level)
+
+	// We compute the top-bottom, left-right, and diagonal neighbors in one
+	// pass. The loop test is at the end of the loop to avoid 32-bit overflow.
+	for k := -nbrSize; ; k += nbrSize {
+		var sameFace bool
+		if k < 0 {
+			sameFace = (j+k >= 0)
+		} else if k >= size {
+			sameFace = (j+k < maxSize)
+		} else {
+			sameFace = true
+			// Top and bottom neighbors.
+			neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize,
+				j-size >= 0).Parent(level))
+			neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size,
+				j+size < maxSize).Parent(level))
+		}
+
+		// Left, right, and diagonal neighbors.
+		neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k,
+			sameFace && i-size >= 0).Parent(level))
+		neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k,
+			sameFace && i+size < maxSize).Parent(level))
+
+		if k >= size {
+			break
+		}
+	}
+
+	return neighbors
+}
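A hedged usage sketch for AllNeighbors (arbitrary location; upstream import path assumed). As noted above, duplicates are possible near cube-face vertices, so callers that want a set should dedupe:

package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	id := s2.CellIDFromLatLng(s2.LatLngFromDegrees(48.86, 2.35)).Parent(12)
	seen := make(map[s2.CellID]bool)
	for _, n := range id.AllNeighbors(12) { // level == id.Level() is allowed
		seen[n] = true
	}
	fmt.Println(len(seen)) // 8 for a cell away from cube-face corners
}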
+
 // RangeMin returns the minimum CellID that is contained within this cell.
 func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) }
 
@@ -352,12 +427,18 @@ func (ci CellID) AdvanceWrap(steps int64) CellID {
 // TODO: the methods below are not exported yet.  Settle on the entire API design
 // before doing this.  Do we want to mirror the C++ one as closely as possible?
 
+// distanceFromBegin returns the number of steps that this cell is from the first
+// node in the S2 hierarchy at our level. (i.e., CellIDFromFace(0).ChildBeginAtLevel(ci.Level())).
+// The return value is always non-negative.
+func (ci CellID) distanceFromBegin() int64 {
+	return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
+}
+
 // rawPoint returns an unnormalized r3 vector from the origin through the center
 // of the s2 cell on the sphere.
 func (ci CellID) rawPoint() r3.Vector {
 	face, si, ti := ci.faceSiTi()
 	return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti)))
-
 }
 
 // faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
@@ -677,7 +758,57 @@ func (ci CellID) centerUV() r2.Point {
 func (ci CellID) boundUV() r2.Rect {
 	_, i, j, _ := ci.faceIJOrientation()
 	return ijLevelToBoundUV(i, j, ci.Level())
+}
 
+// expandEndpoint returns a new u-coordinate u' such that the distance from the
+// line u=u' to the given edge (u,v0)-(u,v1) is exactly the given distance
+// (which is specified as the sine of the angle corresponding to the distance).
+func expandEndpoint(u, maxV, sinDist float64) float64 {
+	// This is based on solving a spherical right triangle, similar to the
+	// calculation in Cap.RectBound.
+	// Given an edge of the form (u,v0)-(u,v1), let maxV = max(abs(v0), abs(v1)).
+	sinUShift := sinDist * math.Sqrt((1+u*u+maxV*maxV)/(1+u*u))
+	cosUShift := math.Sqrt(1 - sinUShift*sinUShift)
+	// The following is an expansion of tan(atan(u) + asin(sinUShift)).
+	return (cosUShift*u + sinUShift) / (cosUShift - sinUShift*u)
+}
+
+// expandedByDistanceUV returns the smallest rectangle, expanded in (u,v)-space,
+// that contains all points within the given distance of the boundary of the
+// given rectangle. If the distance is negative, the rectangle is instead shrunk
+// so that it excludes all points within the given absolute distance of the
+// boundary.
+//
+// Distances are measured *on the sphere*, not in (u,v)-space. For example,
+// you can use this method to expand the (u,v)-bound of a CellID so that
+// it contains all points within 5km of the original cell. You can then
+// test whether a point lies within the expanded bounds like this:
+//
+//   if u, v, ok := faceXYZtoUV(face, point); ok && bound.ContainsPoint(r2.Point{u,v}) { ... }
+//
+// Limitations:
+//
+//  - Because the rectangle is drawn on one of the six cube-face planes
+//    (i.e., {x,y,z} = +/-1), it can cover at most one hemisphere. This
+//    limits the maximum amount that a rectangle can be expanded. For
+//    example, CellID bounds can be expanded safely by at most 45 degrees
+//    (about 5000 km on the Earth's surface).
+//
+//  - The implementation is not exact for negative distances. The resulting
+//    rectangle will exclude all points within the given distance of the
+//    boundary but may be slightly smaller than necessary.
+func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
+	// Expand each of the four sides of the rectangle just enough to include all
+	// points within the given distance of that side. (The rectangle may be
+	// expanded by a different amount in (u,v)-space on each side.)
+	maxU := math.Max(math.Abs(uv.X.Lo), math.Abs(uv.X.Hi))
+	maxV := math.Max(math.Abs(uv.Y.Lo), math.Abs(uv.Y.Hi))
+	sinDist := math.Sin(float64(distance))
+	return r2.Rect{
+		X: r1.Interval{expandEndpoint(uv.X.Lo, maxV, -sinDist),
+			expandEndpoint(uv.X.Hi, maxV, sinDist)},
+		Y: r1.Interval{expandEndpoint(uv.Y.Lo, maxU, -sinDist),
+			expandEndpoint(uv.Y.Hi, maxU, sinDist)}}
 }
 
 // MaxTile returns the largest cell with the same RangeMin such that
@@ -723,7 +854,36 @@ func (ci CellID) MaxTile(limit CellID) CellID {
 	return ci
 }
 
-// TODO: Differences from C++:
-// ExpandedByDistanceUV/ExpandEndpoint
-// CenterSiTi
-// AppendVertexNeighbors/AppendAllNeighbors
+// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell.
+// Note that although (si,ti) coordinates span the range [0,2**31] in general,
+// the cell center coordinates are always in the range [1,2**31-1] and
+// therefore can be represented using a signed 32-bit integer.
+func (ci CellID) centerFaceSiTi() (face, si, ti int) {
+	// First we compute the discrete (i,j) coordinates of a leaf cell contained
+	// within the given cell. Given that cells are represented by the Hilbert
+	// curve position corresponding at their center, it turns out that the cell
+	// returned by faceIJOrientation is always one of two leaf cells closest
+	// to the center of the cell (unless the given cell is a leaf cell itself,
+	// in which case there is only one possibility).
+	//
+	// Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin,
+	// jmin) be the coordinates of its lower left-hand corner, the leaf cell
+	// returned by faceIJOrientation is either (imin + s/2, jmin + s/2) or
+	// (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want.
+	// We can distinguish these two cases by looking at the low bit of i or
+	// j. In the second case the low bit is one, unless s == 2 (i.e. the
+	// level just above leaf cells) in which case the low bit is zero.
+	//
+	// In the code below, the expression ((i ^ (int(id) >> 2)) & 1) is true
+	// if we are in the second case described above.
+	face, i, j, _ := ci.faceIJOrientation()
+	delta := 0
+	if ci.IsLeaf() {
+		delta = 1
+	} else if (int64(i)^(int64(ci)>>2))&1 == 1 {
+		delta = 2
+	}
+
+	// Note that (2 * {i,j} + delta) will never overflow a 32-bit integer.
+	return face, 2*i + delta, 2*j + delta
+}

+ 1052 - 0
vendor/github.com/golang/geo/s2/cellid_test.go

@@ -0,0 +1,1052 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"reflect"
+	"sort"
+	"testing"
+
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/s1"
+)
+
+func TestCellIDFromFace(t *testing.T) {
+	for face := 0; face < 6; face++ {
+		fpl := CellIDFromFacePosLevel(face, 0, 0)
+		f := CellIDFromFace(face)
+		if fpl != f {
+			t.Errorf("CellIDFromFacePosLevel(%d, 0, 0) != CellIDFromFace(%d), got %v wanted %v", face, face, f, fpl)
+		}
+	}
+}
+
+func TestCellIDParentChildRelationships(t *testing.T) {
+	ci := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
+
+	if !ci.IsValid() {
+		t.Errorf("CellID %v should be valid", ci)
+	}
+	if f := ci.Face(); f != 3 {
+		t.Errorf("ci.Face() is %v, want 3", f)
+	}
+	if p := ci.Pos(); p != 0x12345700 {
+		t.Errorf("ci.Pos() is 0x%X, want 0x12345700", p)
+	}
+	if l := ci.Level(); l != 26 { // 26 is maxLevel - 4
+		t.Errorf("ci.Level() is %v, want 26", l)
+	}
+	if ci.IsLeaf() {
+		t.Errorf("CellID %v should not be a leaf", ci)
+	}
+
+	if kid2 := ci.ChildBeginAtLevel(ci.Level() + 2).Pos(); kid2 != 0x12345610 {
+		t.Errorf("child two levels down is 0x%X, want 0x12345610", kid2)
+	}
+	if kid0 := ci.ChildBegin().Pos(); kid0 != 0x12345640 {
+		t.Errorf("first child is 0x%X, want 0x12345640", kid0)
+	}
+	if kid0 := ci.Children()[0].Pos(); kid0 != 0x12345640 {
+		t.Errorf("first child is 0x%X, want 0x12345640", kid0)
+	}
+	if parent := ci.immediateParent().Pos(); parent != 0x12345400 {
+		t.Errorf("ci.immediateParent().Pos() = 0x%X, want 0x12345400", parent)
+	}
+	if parent := ci.Parent(ci.Level() - 2).Pos(); parent != 0x12345000 {
+		t.Errorf("ci.Parent(l-2).Pos() = 0x%X, want 0x12345000", parent)
+	}
+
+	if uint64(ci.ChildBegin()) >= uint64(ci) {
+		t.Errorf("ci.ChildBegin() is 0x%X, want < 0x%X", ci.ChildBegin(), ci)
+	}
+	if uint64(ci.ChildEnd()) <= uint64(ci) {
+		t.Errorf("ci.ChildEnd() is 0x%X, want > 0x%X", ci.ChildEnd(), ci)
+	}
+	if ci.ChildEnd() != ci.ChildBegin().Next().Next().Next().Next() {
+		t.Errorf("ci.ChildEnd() is 0x%X, want 0x%X", ci.ChildEnd(), ci.ChildBegin().Next().Next().Next().Next())
+	}
+	if ci.RangeMin() != ci.ChildBeginAtLevel(maxLevel) {
+		t.Errorf("ci.RangeMin() is 0x%X, want 0x%X", ci.RangeMin(), ci.ChildBeginAtLevel(maxLevel))
+	}
+	if ci.RangeMax().Next() != ci.ChildEndAtLevel(maxLevel) {
+		t.Errorf("ci.RangeMax().Next() is 0x%X, want 0x%X", ci.RangeMax().Next(), ci.ChildEndAtLevel(maxLevel))
+	}
+}
+
+func TestCellIDContainment(t *testing.T) {
+	a := CellID(0x80855c0000000000) // Pittsburg
+	b := CellID(0x80855d0000000000) // child of a
+	c := CellID(0x80855dc000000000) // child of b
+	d := CellID(0x8085630000000000) // part of Pittsburg disjoint from a
+	tests := []struct {
+		x, y                                 CellID
+		xContainsY, yContainsX, xIntersectsY bool
+	}{
+		{a, a, true, true, true},
+		{a, b, true, false, true},
+		{a, c, true, false, true},
+		{a, d, false, false, false},
+		{b, b, true, true, true},
+		{b, c, true, false, true},
+		{b, d, false, false, false},
+		{c, c, true, true, true},
+		{c, d, false, false, false},
+		{d, d, true, true, true},
+	}
+	should := func(b bool) string {
+		if b {
+			return "should"
+		}
+		return "should not"
+	}
+	for _, test := range tests {
+		if test.x.Contains(test.y) != test.xContainsY {
+			t.Errorf("%v %s contain %v", test.x, should(test.xContainsY), test.y)
+		}
+		if test.x.Intersects(test.y) != test.xIntersectsY {
+			t.Errorf("%v %s intersect %v", test.x, should(test.xIntersectsY), test.y)
+		}
+		if test.y.Contains(test.x) != test.yContainsX {
+			t.Errorf("%v %s contain %v", test.y, should(test.yContainsX), test.x)
+		}
+	}
+
+	// TODO(dsymonds): Test Contains, Intersects better, such as with adjacent cells.
+}
+
+func TestCellIDString(t *testing.T) {
+	ci := CellID(0xbb04000000000000)
+	if s, exp := ci.String(), "5/31200"; s != exp {
+		t.Errorf("ci.String() = %q, want %q", s, exp)
+	}
+}
+
+func TestCellIDLatLng(t *testing.T) {
+	// You can generate these with the s2cellid2latlngtestcase C++ program in this directory.
+	tests := []struct {
+		id       CellID
+		lat, lng float64
+	}{
+		{0x47a1cbd595522b39, 49.703498679, 11.770681595},
+		{0x46525318b63be0f9, 55.685376759, 12.588490937},
+		{0x52b30b71698e729d, 45.486546517, -93.449700022},
+		{0x46ed8886cfadda85, 58.299984854, 23.049300056},
+		{0x3663f18a24cbe857, 34.364439040, 108.330699969},
+		{0x10a06c0a948cf5d, -30.694551352, -30.048758753},
+		{0x2b2bfd076787c5df, -25.285264027, 133.823116966},
+		{0xb09dff882a7809e1, -75.000000031, 0.000000133},
+		{0x94daa3d000000001, -24.694439215, -47.537363213},
+		{0x87a1000000000001, 38.899730392, -99.901813021},
+		{0x4fc76d5000000001, 81.647200334, -55.631712940},
+		{0x3b00955555555555, 10.050986518, 78.293170610},
+		{0x1dcc469991555555, -34.055420593, 18.551140038},
+		{0xb112966aaaaaaaab, -69.219262171, 49.670072392},
+	}
+	for _, test := range tests {
+		l1 := LatLngFromDegrees(test.lat, test.lng)
+		l2 := test.id.LatLng()
+		if l1.Distance(l2) > 1e-9*s1.Degree { // ~0.1mm on earth.
+			t.Errorf("LatLng() for CellID %x (%s) : got %s, want %s", uint64(test.id), test.id, l2, l1)
+		}
+		c1 := test.id
+		c2 := CellIDFromLatLng(l1)
+		if c1 != c2 {
+			t.Errorf("CellIDFromLatLng(%s) = %x (%s), want %s", l1, uint64(c2), c2, c1)
+		}
+	}
+}
+
+func TestCellIDEdgeNeighbors(t *testing.T) {
+	// Check the edge neighbors of face 1.
+	faces := []int{5, 3, 2, 0}
+	for i, nbr := range cellIDFromFaceIJ(1, 0, 0).Parent(0).EdgeNeighbors() {
+		if !nbr.isFace() {
+			t.Errorf("CellID(%d) is not a face", nbr)
+		}
+		if got, want := nbr.Face(), faces[i]; got != want {
+			t.Errorf("CellID(%d).Face() = %d, want %d", nbr, got, want)
+		}
+	}
+	// Check the edge neighbors of the corner cells at all levels.  This case is
+	// trickier because it requires projecting onto adjacent faces.
+	const maxIJ = maxSize - 1
+	for level := 1; level <= maxLevel; level++ {
+		id := cellIDFromFaceIJ(1, 0, 0).Parent(level)
+		// These neighbors were determined manually using the face and axis
+		// relationships.
+		levelSizeIJ := sizeIJ(level)
+		want := []CellID{
+			cellIDFromFaceIJ(5, maxIJ, maxIJ).Parent(level),
+			cellIDFromFaceIJ(1, levelSizeIJ, 0).Parent(level),
+			cellIDFromFaceIJ(1, 0, levelSizeIJ).Parent(level),
+			cellIDFromFaceIJ(0, maxIJ, 0).Parent(level),
+		}
+		for i, nbr := range id.EdgeNeighbors() {
+			if nbr != want[i] {
+				t.Errorf("CellID(%d).EdgeNeighbors()[%d] = %v, want %v", id, i, nbr, want[i])
+			}
+		}
+	}
+}
+
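+// byCellID implements sort.Interface, ordering CellIDs by their underlying
+// uint64 value so test results can be sorted deterministically.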
+type byCellID []CellID
+
+func (v byCellID) Len() int           { return len(v) }
+func (v byCellID) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
+func (v byCellID) Less(i, j int) bool { return uint64(v[i]) < uint64(v[j]) }
+
+func TestCellIDVertexNeighbors(t *testing.T) {
+	// Check the vertex neighbors of the center of face 2 at level 5.
+	id := cellIDFromPoint(PointFromCoords(0, 0, 1))
+	neighbors := id.VertexNeighbors(5)
+	sort.Sort(byCellID(neighbors))
+
+	for n, nbr := range neighbors {
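+		// The center of face 2 is at leaf coordinates (i, j) = (2^29, 2^29), so the
+		// four vertex neighbors at level 5 are the cells touching that center vertex,
+		// with (i, j) offsets of 0 or -1 along each axis.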
+		i, j := 1<<29, 1<<29
+		if n < 2 {
+			i--
+		}
+		if n == 0 || n == 3 {
+			j--
+		}
+		want := cellIDFromFaceIJ(2, i, j).Parent(5)
+
+		if nbr != want {
+			t.Errorf("CellID(%s).VertexNeighbors()[%d] = %v, want %v", id, n, nbr, want)
+		}
+	}
+
+	// Check the vertex neighbors of the corner of faces 0, 4, and 5.
+	id = CellIDFromFacePosLevel(0, 0, maxLevel)
+	neighbors = id.VertexNeighbors(0)
+	sort.Sort(byCellID(neighbors))
+	if len(neighbors) != 3 {
+		t.Errorf("len(CellID(%d).VertexNeighbors()) = %d, wanted %d", id, len(neighbors), 3)
+	}
+	if neighbors[0] != CellIDFromFace(0) {
+		t.Errorf("CellID(%d).VertexNeighbors()[0] = %d, wanted %d", id, neighbors[0], CellIDFromFace(0))
+	}
+	if neighbors[1] != CellIDFromFace(4) {
+		t.Errorf("CellID(%d).VertexNeighbors()[1] = %d, wanted %d", id, neighbors[1], CellIDFromFace(4))
+	}
+}
+
+// dedupCellIDs returns the unique slice of CellIDs from the sorted input list.
+func dedupCellIDs(ids []CellID) []CellID {
+	var out []CellID
+	var prev CellID
+	for _, id := range ids {
+		if id != prev {
+			out = append(out, id)
+		}
+		prev = id
+	}
+
+	return out
+}
+
+func TestCellIDAllNeighbors(t *testing.T) {
+	// Check that AllNeighbors produces results that are consistent
+	// with VertexNeighbors for a bunch of random cells.
+	for i := 0; i < 1000; i++ {
+		id := randomCellID()
+		if id.IsLeaf() {
+			id = id.immediateParent()
+		}
+
+		// This check computes approximately 2**(2*(diff+1)) cell ids,
+		// so it's not reasonable to use large values of diff.
+		maxDiff := min(6, maxLevel-id.Level()-1)
+		level := id.Level() + randomUniformInt(maxDiff)
+
+		// We compute AllNeighbors, and then add in all the children of id
+		// at the given level. We then compare this against the result of finding
+		// all the vertex neighbors of all the vertices of children of id at the
+		// given level. These should give the same result.
+		var want []CellID
+		all := id.AllNeighbors(level)
+		end := id.ChildEndAtLevel(level + 1)
+		for c := id.ChildBeginAtLevel(level + 1); c != end; c = c.Next() {
+			all = append(all, c.immediateParent())
+			want = append(want, c.VertexNeighbors(level)...)
+		}
+
+		// Sort the results and eliminate duplicates.
+		sort.Sort(byCellID(all))
+		sort.Sort(byCellID(want))
+		all = dedupCellIDs(all)
+		want = dedupCellIDs(want)
+
+		if !reflect.DeepEqual(all, want) {
+			t.Errorf("%v.AllNeighbors(%d) = %v, want %v", id, level, all, want)
+		}
+	}
+}
+
+func TestCellIDTokensNominal(t *testing.T) {
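+	// A token is expected to be the hex representation of the cell id with any
+	// trailing zeros stripped, so CellIDFromToken and ToToken should round-trip
+	// exactly for every entry below.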
+	tests := []struct {
+		token string
+		id    CellID
+	}{
+		{"1", 0x1000000000000000},
+		{"3", 0x3000000000000000},
+		{"14", 0x1400000000000000},
+		{"41", 0x4100000000000000},
+		{"094", 0x0940000000000000},
+		{"537", 0x5370000000000000},
+		{"3fec", 0x3fec000000000000},
+		{"72f3", 0x72f3000000000000},
+		{"52b8c", 0x52b8c00000000000},
+		{"990ed", 0x990ed00000000000},
+		{"4476dc", 0x4476dc0000000000},
+		{"2a724f", 0x2a724f0000000000},
+		{"7d4afc4", 0x7d4afc4000000000},
+		{"b675785", 0xb675785000000000},
+		{"40cd6124", 0x40cd612400000000},
+		{"3ba32f81", 0x3ba32f8100000000},
+		{"08f569b5c", 0x08f569b5c0000000},
+		{"385327157", 0x3853271570000000},
+		{"166c4d1954", 0x166c4d1954000000},
+		{"96f48d8c39", 0x96f48d8c39000000},
+		{"0bca3c7f74c", 0x0bca3c7f74c00000},
+		{"1ae3619d12f", 0x1ae3619d12f00000},
+		{"07a77802a3fc", 0x07a77802a3fc0000},
+		{"4e7887ec1801", 0x4e7887ec18010000},
+		{"4adad7ae74124", 0x4adad7ae74124000},
+		{"90aba04afe0c5", 0x90aba04afe0c5000},
+		{"8ffc3f02af305c", 0x8ffc3f02af305c00},
+		{"6fa47550938183", 0x6fa4755093818300},
+		{"aa80a565df5e7fc", 0xaa80a565df5e7fc0},
+		{"01614b5e968e121", 0x01614b5e968e1210},
+		{"aa05238e7bd3ee7c", 0xaa05238e7bd3ee7c},
+		{"48a23db9c2963e5b", 0x48a23db9c2963e5b},
+	}
+	for _, test := range tests {
+		ci := CellIDFromToken(test.token)
+		if ci != test.id {
+			t.Errorf("CellIDFromToken(%q) = %x, want %x", test.token, uint64(ci), uint64(test.id))
+		}
+
+		token := ci.ToToken()
+		if token != test.token {
+			t.Errorf("ci.ToToken = %q, want %q", token, test.token)
+		}
+	}
+}
+
+func TestCellIDFromTokensErrorCases(t *testing.T) {
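+	// The zero (invalid) CellID is expected to encode to the special token "X",
+	// and malformed tokens should decode back to CellID(0).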
+	noneToken := CellID(0).ToToken()
+	if noneToken != "X" {
+		t.Errorf("CellID(0).Token() = %q, want X", noneToken)
+	}
+	noneID := CellIDFromToken(noneToken)
+	if noneID != CellID(0) {
+		t.Errorf("CellIDFromToken(%q) = %x, want 0", noneToken, uint64(noneID))
+	}
+	tests := []string{
+		"876b e99",
+		"876bee99\n",
+		"876[ee99",
+		" 876bee99",
+	}
+	for _, test := range tests {
+		ci := CellIDFromToken(test)
+		if uint64(ci) != 0 {
+			t.Errorf("CellIDFromToken(%q) = %x, want 0", test, uint64(ci))
+		}
+	}
+}
+
+func TestIJLevelToBoundUV(t *testing.T) {
+	maxIJ := 1<<maxLevel - 1
+
+	tests := []struct {
+		i     int
+		j     int
+		level int
+		want  r2.Rect
+	}{
+		// The i/j space is [0, 2^30 - 1], which maps to [-1, 1] for the
+		// x/y axes of the face surface. Results are scaled by the size of a cell
+		// at the given level. At level 0, everything is one cell of the full size
+		// of the space.  At maxLevel, the bounding rect is almost floating point
+		// noise.
+
+		// Values that should be out of bounds, but that the C++ code accepts as well.
+		{
+			-1, -1, 0,
+			r2.RectFromPoints(r2.Point{-5, -5}, r2.Point{-1, -1}),
+		},
+		{
+			-1 * maxIJ, -1 * maxIJ, 0,
+			r2.RectFromPoints(r2.Point{-5, -5}, r2.Point{-1, -1}),
+		},
+		{
+			-1, -1, maxLevel,
+			r2.RectFromPoints(r2.Point{-1.0000000024835267, -1.0000000024835267},
+				r2.Point{-1, -1}),
+		},
+		{
+			0, 0, maxLevel + 1,
+			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{-1, -1}),
+		},
+
+		// Minimum i,j at different levels
+		{
+			0, 0, 0,
+			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
+		},
+		{
+			0, 0, maxLevel / 2,
+			r2.RectFromPoints(r2.Point{-1, -1},
+				r2.Point{-0.999918621033430099, -0.999918621033430099}),
+		},
+		{
+			0, 0, maxLevel,
+			r2.RectFromPoints(r2.Point{-1, -1},
+				r2.Point{-0.999999997516473060, -0.999999997516473060}),
+		},
+
+		// Just a hair off the outer bounds at different levels.
+		{
+			1, 1, 0,
+			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
+		},
+		{
+			1, 1, maxLevel / 2,
+			r2.RectFromPoints(r2.Point{-1, -1},
+				r2.Point{-0.999918621033430099, -0.999918621033430099}),
+		},
+		{
+			1, 1, maxLevel,
+			r2.RectFromPoints(r2.Point{-0.9999999975164731, -0.9999999975164731},
+				r2.Point{-0.9999999950329462, -0.9999999950329462}),
+		},
+
+		// Center point of the i,j space at different levels.
+		{
+			maxIJ / 2, maxIJ / 2, 0,
+			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1})},
+		{
+			maxIJ / 2, maxIJ / 2, maxLevel / 2,
+			r2.RectFromPoints(r2.Point{-0.000040691345930099, -0.000040691345930099},
+				r2.Point{0, 0})},
+		{
+			maxIJ / 2, maxIJ / 2, maxLevel,
+			r2.RectFromPoints(r2.Point{-0.000000001241763433, -0.000000001241763433},
+				r2.Point{0, 0})},
+
+		// Maximum i, j at different levels.
+		{
+			maxIJ, maxIJ, 0,
+			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
+		},
+		{
+			maxIJ, maxIJ, maxLevel / 2,
+			r2.RectFromPoints(r2.Point{0.999918621033430099, 0.999918621033430099},
+				r2.Point{1, 1}),
+		},
+		{
+			maxIJ, maxIJ, maxLevel,
+			r2.RectFromPoints(r2.Point{0.999999997516473060, 0.999999997516473060},
+				r2.Point{1, 1}),
+		},
+	}
+
+	for _, test := range tests {
+		uv := ijLevelToBoundUV(test.i, test.j, test.level)
+		if !float64Eq(uv.X.Lo, test.want.X.Lo) ||
+			!float64Eq(uv.X.Hi, test.want.X.Hi) ||
+			!float64Eq(uv.Y.Lo, test.want.Y.Lo) ||
+			!float64Eq(uv.Y.Hi, test.want.Y.Hi) {
+			t.Errorf("ijLevelToBoundUV(%d, %d, %d), got %v, want %v",
+				test.i, test.j, test.level, uv, test.want)
+		}
+	}
+}
+
+func TestCellIDCommonAncestorLevel(t *testing.T) {
+	tests := []struct {
+		ci     CellID
+		other  CellID
+		want   int
+		wantOk bool
+	}{
+		// Identical cell IDs.
+		{
+			CellIDFromFace(0),
+			CellIDFromFace(0),
+			0,
+			true,
+		},
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(30),
+			CellIDFromFace(0).ChildBeginAtLevel(30),
+			30,
+			true,
+		},
+		// One cell is a descendant of the other.
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(30),
+			CellIDFromFace(0),
+			0,
+			true,
+		},
+		{
+			CellIDFromFace(5),
+			CellIDFromFace(5).ChildEndAtLevel(30).Prev(),
+			0,
+			true,
+		},
+		// No common ancestors.
+		{
+			CellIDFromFace(0),
+			CellIDFromFace(5),
+			0,
+			false,
+		},
+		{
+			CellIDFromFace(2).ChildBeginAtLevel(30),
+			CellIDFromFace(3).ChildBeginAtLevel(20),
+			0,
+			false,
+		},
+		// Common ancestor distinct from both.
+		{
+			CellIDFromFace(5).ChildBeginAtLevel(9).Next().ChildBeginAtLevel(15),
+			CellIDFromFace(5).ChildBeginAtLevel(9).ChildBeginAtLevel(20),
+			8,
+			true,
+		},
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(30),
+			CellIDFromFace(0).ChildBeginAtLevel(2).Next().ChildBeginAtLevel(5),
+			1,
+			true,
+		},
+	}
+	for _, test := range tests {
+		if got, ok := test.ci.CommonAncestorLevel(test.other); ok != test.wantOk || got != test.want {
+			t.Errorf("CellID(%v).CommonAncestorLevel(%v) = %d, %t; want %d, %t", test.ci, test.other, got, ok, test.want, test.wantOk)
+		}
+	}
+}
+
+func TestCellIDDistanceToBegin(t *testing.T) {
+	tests := []struct {
+		id   CellID
+		want int64
+	}{
+		{
+			// at level 0 (i.e. full faces), there are only 6 cells from
+			// the last face to the beginning of the Hilbert curve.
+			id:   CellIDFromFace(5).ChildEndAtLevel(0),
+			want: 6,
+		},
+		{
+			// from the last cell on the last face at the smallest cell size,
+			// there are the maximum number of possible cells.
+			id:   CellIDFromFace(5).ChildEndAtLevel(maxLevel),
+			want: 6 * (1 << uint(2*maxLevel)),
+		},
+		{
+			// from the first cell on the first face.
+			id:   CellIDFromFace(0).ChildBeginAtLevel(0),
+			want: 0,
+		},
+		{
+			// from the first cell at the smallest level on the first face.
+			id:   CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+			want: 0,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.id.distanceFromBegin(); got != test.want {
+			t.Errorf("%v.distanceToBegin() = %v, want %v", test.id, got, test.want)
+		}
+	}
+
+	// Test that advancing from the beginning by the distance from a cell gets
+	// us back to that cell.
+	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
+	if got := CellIDFromFace(0).ChildBeginAtLevel(id.Level()).Advance(id.distanceFromBegin()); got != id {
+		t.Errorf("advancing from the beginning by the distance of a cell should return us to that cell. got %v, want %v", got, id)
+	}
+}
+
+func TestFindMSBSetNonZero64(t *testing.T) {
+	testOne := uint64(0x8000000000000000)
+	testAll := uint64(0xFFFFFFFFFFFFFFFF)
+	testSome := uint64(0xFEDCBA9876543210)
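+	// Shift each pattern right one bit per iteration so the most significant set
+	// bit walks from position 63 down to 0.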
+	for i := 63; i >= 0; i-- {
+		if got := findMSBSetNonZero64(testOne); got != i {
+			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testOne, got, i)
+		}
+		if got := findMSBSetNonZero64(testAll); got != i {
+			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testAll, got, i)
+		}
+		if got := findMSBSetNonZero64(testSome); got != i {
+			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testSome, got, i)
+		}
+		testOne >>= 1
+		testAll >>= 1
+		testSome >>= 1
+	}
+
+	if got := findMSBSetNonZero64(1); got != 0 {
+		t.Errorf("findMSBSetNonZero64(1) = %v, want 0", got)
+	}
+
+	if got := findMSBSetNonZero64(0); got != 0 {
+		t.Errorf("findMSBSetNonZero64(0) = %v, want 0", got)
+	}
+}
+
+func TestFindLSBSetNonZero64(t *testing.T) {
+	testOne := uint64(0x0000000000000001)
+	testAll := uint64(0xFFFFFFFFFFFFFFFF)
+	testSome := uint64(0x0123456789ABCDEF)
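+	// Shift each pattern left one bit per iteration so the least significant set
+	// bit walks from position 0 up to 63.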
+	for i := 0; i < 64; i++ {
+		if got := findLSBSetNonZero64(testOne); got != i {
+			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testOne, got, i)
+		}
+		if got := findLSBSetNonZero64(testAll); got != i {
+			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testAll, got, i)
+		}
+		if got := findLSBSetNonZero64(testSome); got != i {
+			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testSome, got, i)
+		}
+		testOne <<= 1
+		testAll <<= 1
+		testSome <<= 1
+	}
+
+	if got := findLSBSetNonZero64(0); got != 0 {
+		t.Errorf("findLSBSetNonZero64(0) = %v, want 0", got)
+	}
+}
+
+func TestCellIDWrapping(t *testing.T) {
+	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
+
+	tests := []struct {
+		msg  string
+		got  CellID
+		want CellID
+	}{
+		{
+			"test wrap from beginning to end of Hilbert curve",
+			CellIDFromFace(5).ChildEndAtLevel(0).Prev(),
+			CellIDFromFace(0).ChildBeginAtLevel(0).PrevWrap(),
+		},
+		{
+			"smallest end leaf wraps to smallest first leaf using PrevWrap",
+			CellIDFromFacePosLevel(5, ^uint64(0)>>faceBits, maxLevel),
+			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).PrevWrap(),
+		},
+		{
+			"smallest end leaf wraps to smallest first leaf using AdvanceWrap",
+			CellIDFromFacePosLevel(5, ^uint64(0)>>faceBits, maxLevel),
+			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).AdvanceWrap(-1),
+		},
+		{
+			"PrevWrap is the same as AdvanceWrap(-1)",
+			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).AdvanceWrap(-1),
+			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).PrevWrap(),
+		},
+		{
+			"Prev + NextWrap stays the same at given level",
+			CellIDFromFace(0).ChildBeginAtLevel(4),
+			CellIDFromFace(5).ChildEndAtLevel(4).Prev().NextWrap(),
+		},
+		{
+			"AdvanceWrap forward and back stays the same at given level",
+			CellIDFromFace(0).ChildBeginAtLevel(4),
+			CellIDFromFace(5).ChildEndAtLevel(4).Advance(-1).AdvanceWrap(1),
+		},
+		{
+			"Prev().NextWrap() stays same for first cell at level",
+			CellIDFromFacePosLevel(0, 0, maxLevel),
+			CellIDFromFace(5).ChildEndAtLevel(maxLevel).Prev().NextWrap(),
+		},
+		{
+			"AdvanceWrap forward and back stays same for first cell at level",
+			CellIDFromFacePosLevel(0, 0, maxLevel),
+			CellIDFromFace(5).ChildEndAtLevel(maxLevel).Advance(-1).AdvanceWrap(1),
+		},
+		// Check basic properties of AdvanceWrap().
+		{
+			"advancing 7 steps around cube should end up one past start.",
+			CellIDFromFace(1),
+			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(7),
+		},
+		{
+			"twice around should end up where we started",
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(12),
+		},
+		{
+			"backwards once around plus one step should be one before we started",
+			CellIDFromFace(4),
+			CellIDFromFace(5).AdvanceWrap(-7),
+		},
+		{
+			"wrapping even multiple of times around should end where we started",
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(-12000000),
+		},
+		{
+			"wrapping combination of even times around should end where it started",
+			CellIDFromFace(0).ChildBeginAtLevel(5).AdvanceWrap(6644),
+			CellIDFromFace(0).ChildBeginAtLevel(5).AdvanceWrap(-11788),
+		},
+		{
+			"moving 256 should advance us one cell at max level",
+			id.Next().ChildBeginAtLevel(maxLevel),
+			id.ChildBeginAtLevel(maxLevel).AdvanceWrap(256),
+		},
+		{
+			"wrapping by 4 times cells per face should advance 4 faces",
+			CellIDFromFacePosLevel(1, 0, maxLevel),
+			CellIDFromFacePosLevel(5, 0, maxLevel).AdvanceWrap(2 << (2 * maxLevel)),
+		},
+	}
+
+	for _, test := range tests {
+		if test.got != test.want {
+			t.Errorf("%s: got %v want %v", test.msg, test.got, test.want)
+		}
+	}
+}
+
+func TestCellIDAdvance(t *testing.T) {
+	tests := []struct {
+		ci    CellID
+		steps int64
+		want  CellID
+	}{
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+			7,
+			CellIDFromFace(5).ChildEndAtLevel(0),
+		},
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+			12,
+			CellIDFromFace(5).ChildEndAtLevel(0),
+		},
+		{
+			CellIDFromFace(5).ChildEndAtLevel(0),
+			-7,
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+		},
+		{
+			CellIDFromFace(5).ChildEndAtLevel(0),
+			-12000000,
+			CellIDFromFace(0).ChildBeginAtLevel(0),
+		},
+		{
+			CellIDFromFace(0).ChildBeginAtLevel(5),
+			500,
+			CellIDFromFace(5).ChildEndAtLevel(5).Advance(500 - (6 << (2 * 5))),
+		},
+		{
+			CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4).ChildBeginAtLevel(maxLevel),
+			256,
+			CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4).Next().ChildBeginAtLevel(maxLevel),
+		},
+		{
+			CellIDFromFacePosLevel(1, 0, maxLevel),
+			4 << (2 * maxLevel),
+			CellIDFromFacePosLevel(5, 0, maxLevel),
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.ci.Advance(test.steps); got != test.want {
+			t.Errorf("CellID(%v).Advance(%d) = %v; want = %v", test.ci, test.steps, got, test.want)
+		}
+	}
+}
+
+func TestCellIDFaceSiTi(t *testing.T) {
+	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel)
+	// Check that the (si, ti) coordinates of the center end in a
+	// 1 followed by (30 - level) 0's.
+	for level := uint64(0); level <= maxLevel; level++ {
+		l := maxLevel - int(level)
+		want := 1 << level
+		mask := 1<<(level+1) - 1
+
+		_, si, ti := id.Parent(l).faceSiTi()
+		if want != si&mask {
+			t.Errorf("CellID.Parent(%d).faceSiTi(), si = %b, want %b", l, si&mask, want)
+		}
+		if want != ti&mask {
+			t.Errorf("CellID.Parent(%d).faceSiTi(), ti = %b, want %b", l, ti&mask, want)
+		}
+	}
+}
+
+func TestCellIDContinuity(t *testing.T) {
+	const maxWalkLevel = 8
+	const cellSize = 1.0 / (1 << maxWalkLevel)
+
+	// Make sure that sequentially increasing cell ids form a continuous
+	// path over the surface of the sphere, i.e. there are no
+	// discontinuous jumps from one region to another.
+
+	maxDist := MaxWidthMetric.Value(maxWalkLevel)
+	end := CellIDFromFace(5).ChildEndAtLevel(maxWalkLevel)
+	id := CellIDFromFace(0).ChildBeginAtLevel(maxWalkLevel)
+
+	for ; id != end; id = id.Next() {
+
+		if got := id.rawPoint().Angle(id.NextWrap().rawPoint()); float64(got) > maxDist {
+			t.Errorf("%v.rawPoint().Angle(%v.NextWrap().rawPoint()) = %v > %v", id, id, got, maxDist)
+		}
+		if id.NextWrap() != id.AdvanceWrap(1) {
+			t.Errorf("%v.NextWrap() != %v.AdvanceWrap(1) %v != %v)", id, id, id.NextWrap(), id.AdvanceWrap(1))
+		}
+		if id != id.NextWrap().AdvanceWrap(-1) {
+			t.Errorf("%v.NextWrap().AdvanceWrap(-1) = %v want %v)", id, id.NextWrap().AdvanceWrap(-1), id)
+		}
+
+		// Check that the rawPoint() returns the center of each cell
+		// in (s,t) coordinates.
+		_, u, v := xyzToFaceUV(id.rawPoint())
+		if !float64Eq(math.Remainder(uvToST(u), 0.5*cellSize), 0.0) {
+			t.Errorf("uvToST(%v) = %v, want %v", u, uvToST(u), 0.5*cellSize)
+		}
+		if !float64Eq(math.Remainder(uvToST(v), 0.5*cellSize), 0.0) {
+			t.Errorf("uvToST(%v) = %v, want %v", v, uvToST(v), 0.5*cellSize)
+		}
+	}
+}
+
+// sampleBoundary returns a random point on the boundary of the given rectangle.
+func sampleBoundary(rect r2.Rect) (u, v float64) {
+	if oneIn(2) {
+		v = randomUniformFloat64(rect.Y.Lo, rect.Y.Hi)
+		if oneIn(2) {
+			u = rect.X.Lo
+		} else {
+			u = rect.X.Hi
+		}
+	} else {
+		u = randomUniformFloat64(rect.X.Lo, rect.X.Hi)
+		if oneIn(2) {
+			v = rect.Y.Lo
+		} else {
+			v = rect.Y.Hi
+		}
+	}
+	return u, v
+}
+
+// projectToBoundary returns the closest point to uv on the boundary of rect.
+func projectToBoundary(u, v float64, rect r2.Rect) r2.Point {
+	du0 := math.Abs(u - rect.X.Lo)
+	du1 := math.Abs(u - rect.X.Hi)
+	dv0 := math.Abs(v - rect.Y.Lo)
+	dv1 := math.Abs(v - rect.Y.Hi)
+
+	dmin := math.Min(math.Min(du0, du1), math.Min(dv0, dv1))
+	if du0 == dmin {
+		return r2.Point{rect.X.Lo, rect.Y.ClampPoint(v)}
+	}
+	if du1 == dmin {
+		return r2.Point{rect.X.Hi, rect.Y.ClampPoint(v)}
+	}
+	if dv0 == dmin {
+		return r2.Point{rect.X.ClampPoint(u), rect.Y.Lo}
+	}
+
+	return r2.Point{rect.X.ClampPoint(u), rect.Y.Hi}
+}
+
+func TestCellIDExpandedByDistanceUV(t *testing.T) {
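+	// A positive distance should grow the (u,v) bound outward and a negative
+	// distance should shrink it; the loop below samples points near the original
+	// boundary and checks containment on both sides.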
+	const maxDistDegrees = 10
+	for i := 0; i < 1000; i++ {
+		id := randomCellID()
+		distance := s1.Degree * s1.Angle(randomUniformFloat64(-maxDistDegrees, maxDistDegrees))
+
+		bound := id.boundUV()
+		expanded := expandedByDistanceUV(bound, distance)
+		for iter := 0; iter < 10; iter++ {
+			// Choose a point on the boundary of the rectangle.
+			face := randomUniformInt(6)
+			centerU, centerV := sampleBoundary(bound)
+			center := Point{faceUVToXYZ(face, centerU, centerV).Normalize()}
+
+			// Now sample a point from a disc of radius (2 * distance).
+			p := samplePointFromCap(CapFromCenterHeight(center, 2*math.Abs(float64(distance))))
+
+			// Find the closest point on the boundary to the sampled point.
+			u, v, ok := faceXYZToUV(face, p)
+			if !ok {
+				continue
+			}
+
+			uv := r2.Point{u, v}
+			closestUV := projectToBoundary(u, v, bound)
+			closest := faceUVToXYZ(face, closestUV.X, closestUV.Y).Normalize()
+			actualDist := p.Distance(Point{closest})
+
+			if distance >= 0 {
+				// expanded should contain all points in the original bound,
+				// and also all points within distance of the boundary.
+				if bound.ContainsPoint(uv) || actualDist < distance {
+					if !expanded.ContainsPoint(uv) {
+						t.Errorf("expandedByDistanceUV(%v, %v).ContainsPoint(%v) = false, want true", bound, distance, uv)
+					}
+				}
+			} else {
+				// expanded should not contain any points within distance
+				// of the original boundary.
+				if actualDist < -distance {
+					if expanded.ContainsPoint(uv) {
+						t.Errorf("negatively expandedByDistanceUV(%v, %v).ContainsPoint(%v) = true, want false", bound, distance, uv)
+					}
+				}
+			}
+		}
+	}
+}
+
+func TestCellIDMaxTile(t *testing.T) {
+	// This method is also tested more thoroughly in s2cellunion_test.
+	for iter := 0; iter < 1000; iter++ {
+		id := randomCellIDForLevel(10)
+
+		// Check that limit is returned for tiles at or beyond limit.
+		if got, want := id, id.MaxTile(id); got != want {
+			t.Errorf("%v.MaxTile(%v) = %v, want %v", id, id, got, want)
+		}
+		if got, want := id, id.Children()[0].MaxTile(id); got != want {
+			t.Errorf("%v.Children()[0].MaxTile(%v) = %v, want %v", id, id, got, want)
+		}
+		if got, want := id, id.Children()[1].MaxTile(id); got != want {
+			t.Errorf("%v.Children()[1].MaxTile(%v) = %v, want %v", id, id, got, want)
+		}
+		if got, want := id, id.Next().MaxTile(id); got != want {
+			t.Errorf("%v.Next().MaxTile(%v) = %v, want %v", id, id, got, want)
+		}
+		if got, want := id.Children()[0], id.MaxTile(id.Children()[0]); got != want {
+			t.Errorf("%v.MaxTile(%v.Children()[0] = %v, want %v", id, id, got, want)
+		}
+
+		// Check that the tile size is increased when possible.
+		if got, want := id, id.Children()[0].MaxTile(id.Next()); got != want {
+			t.Errorf("%v.Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.Children()[0].MaxTile(id.Next().Children()[0]); got != want {
+			t.Errorf("%v.Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.Children()[0].MaxTile(id.Next().Children()[1].Children()[0]); got != want {
+			t.Errorf("%v.Children()[0].MaxTile(%v.Next().Children()[1].Children()[0] = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.Children()[0].Children()[0].MaxTile(id.Next()); got != want {
+			t.Errorf("%v.Children()[0].Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.Children()[0].Children()[0].Children()[0].MaxTile(id.Next()); got != want {
+			t.Errorf("%v.Children()[0].Children()[0].Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
+		}
+
+		// Check that the tile size is decreased when necessary.
+		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next()); got != want {
+			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next().Children()[0]); got != want {
+			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next().Children()[0]) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next().Children()[1]); got != want {
+			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next().Children()[1]) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id.Children()[0].Children()[0], id.MaxTile(id.Children()[0].Children()[0].Next()); got != want {
+			t.Errorf("%v.Children()[0].Children()[0], id.MaxTile(%v.Children()[0].Children()[0].Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id.Children()[0].Children()[0].Children()[0],
+			id.MaxTile(id.Children()[0].Children()[0].Children()[0].Next()); got != want {
+			t.Errorf("%v.MaxTile(%v.Children()[0].Children()[0].Children()[0].Next()) = %v, want %v", id, id, got, want)
+		}
+
+		// Check that the tile size is otherwise unchanged.
+		if got, want := id, id.MaxTile(id.Next()); got != want {
+			t.Errorf("%v.MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.MaxTile(id.Next().Children()[0]); got != want {
+			t.Errorf("%v.MaxTile(%v.Next().Children()[0]) = %v, want %v", id, id, got, want)
+		}
+
+		if got, want := id, id.MaxTile(id.Next().Children()[1].Children()[0]); got != want {
+			t.Errorf("%v.MaxTile(%v.Next().Children()[1].Children()[0]) = %v, want %v", id, id, got, want)
+		}
+	}
+}
+
+func TestCellIDCenterFaceSiTi(t *testing.T) {
+	// Check that the (si, ti) coordinates of the center end in a
+	// 1 followed by (30 - level) 0s.
+
+	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel)
+
+	tests := []struct {
+		id          CellID
+		levelOffset uint
+	}{
+		// Leaf level, 30.
+		{id, 0},
+		// Level 29.
+		{id.Parent(maxLevel - 1), 1},
+		// Level 28.
+		{id.Parent(maxLevel - 2), 2},
+		// Level 20.
+		{id.Parent(maxLevel - 10), 10},
+		// Level 10.
+		{id.Parent(maxLevel - 20), 20},
+		// Level 0.
+		{id.Parent(0), maxLevel},
+	}
+
+	for _, test := range tests {
+		_, si, ti := test.id.centerFaceSiTi()
+		want := 1 << test.levelOffset
+		mask := (1 << (test.levelOffset + 1)) - 1
+		if want != si&mask {
+			t.Errorf("Level Offset %d. %b != %b", test.levelOffset, want, si&mask)
+		}
+		if want != ti&mask {
+			t.Errorf("Level Offset: %d. %b != %b", test.levelOffset, want, ti&mask)
+		}
+	}
+}
+
+// TODO(roberts): Remaining tests to convert.
+// Coverage
+// TraversalOrder

+ 723 - 0
vendor/github.com/golang/geo/s2/cellunion_test.go

@@ -0,0 +1,723 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"reflect"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/s1"
+)
+
+func TestCellUnionNormalization(t *testing.T) {
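+	// Normalize should sort the ids, drop duplicates and cells contained in other
+	// cells (B and its descendants lie inside A), and merge the four complete
+	// children of X into X itself.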
+	cu := CellUnion{
+		0x80855c0000000000, // A: a cell over Pittsburg CA
+		0x80855d0000000000, // B, a child of A
+		0x8085634000000000, // first child of X, disjoint from A
+		0x808563c000000000, // second child of X
+		0x80855dc000000000, // a child of B
+		0x808562c000000000, // third child of X
+		0x8085624000000000, // fourth child of X
+		0x80855d0000000000, // B again
+	}
+	exp := CellUnion{
+		0x80855c0000000000, // A
+		0x8085630000000000, // X
+	}
+	cu.Normalize()
+	if !reflect.DeepEqual(cu, exp) {
+		t.Errorf("got %v, want %v", cu, exp)
+	}
+
+	// add a redundant cell
+	/* TODO(dsymonds)
+	cu.Add(0x808562c000000000)
+	if !reflect.DeepEqual(cu, exp) {
+		t.Errorf("after redundant add, got %v, want %v", cu, exp)
+	}
+	*/
+}
+
+func TestCellUnionBasic(t *testing.T) {
+	empty := CellUnion{}
+	empty.Normalize()
+	if len(empty) != 0 {
+		t.Errorf("empty CellUnion had %d cells, want 0", len(empty))
+	}
+
+	face1ID := CellIDFromFace(1)
+	face1Cell := CellFromCellID(face1ID)
+	face1Union := CellUnion{face1ID}
+	face1Union.Normalize()
+	if len(face1Union) != 1 {
+		t.Errorf("%v had %d cells, want 1", face1Union, len(face1Union))
+	}
+	if face1ID != face1Union[0] {
+		t.Errorf("%v[0] = %v, want %v", face1Union, face1Union[0], face1ID)
+	}
+	if got := face1Union.ContainsCell(face1Cell); !got {
+		t.Errorf("%v.ContainsCell(%v) = %t, want %t", face1Union, face1Cell, got, true)
+	}
+
+	face2ID := CellIDFromFace(2)
+	face2Cell := CellFromCellID(face2ID)
+	face2Union := CellUnion{face2ID}
+	face2Union.Normalize()
+	if len(face2Union) != 1 {
+		t.Errorf("%v had %d cells, want 1", face2Union, len(face2Union))
+	}
+	if face2ID != face2Union[0] {
+		t.Errorf("%v[0] = %v, want %v", face2Union, face2Union[0], face2ID)
+	}
+
+	if got := face1Union.ContainsCell(face2Cell); got {
+		t.Errorf("%v.ContainsCell(%v) = %t, want %t", face1Union, face2Cell, got, false)
+	}
+
+}
+
+func TestCellUnion(t *testing.T) {
+	tests := []struct {
+		cells     []CellID // A test CellUnion.
+		contained []CellID // List of cellIDs contained in the CellUnion.
+		overlaps  []CellID // List of CellIDs that intersect the CellUnion but are not contained in it.
+		disjoint  []CellID // List of CellIDs that are disjoint from the CellUnion.
+	}{
+		{
+			// Single cell around NYC, and some simple nearby probes
+			cells: []CellID{0x89c25c0000000000},
+			contained: []CellID{
+				CellID(0x89c25c0000000000).ChildBegin(),
+				CellID(0x89c25c0000000000).ChildBeginAtLevel(28),
+			},
+			overlaps: []CellID{
+				CellID(0x89c25c0000000000).immediateParent(),
+				CellIDFromFace(CellID(0x89c25c0000000000).Face()), // the whole face
+			},
+			disjoint: []CellID{
+				CellID(0x89c25c0000000000).Next(),                       // Cell next to this one at same level
+				CellID(0x89c25c0000000000).Next().ChildBeginAtLevel(28), // Cell next to this one at deep level
+				0x89c2700000000000,                                      // Big(er) neighbor cell
+				0x89e9000000000000,                                      // Very big next door cell.
+				0x89c1000000000000,                                      // Very big cell, smaller value than probe
+			},
+		},
+
+		{
+			// NYC and SFO:
+			cells: []CellID{
+				0x89c25b0000000000, // NYC
+				0x89c2590000000000, // NYC
+				0x89c2f70000000000, // NYC
+				0x89c2f50000000000, // NYC
+				0x8085870000000000, // SFO
+				0x8085810000000000, // SFO
+				0x808f7d0000000000, // SFO
+				0x808f7f0000000000, // SFO
+			},
+			contained: []CellID{
+				0x808f7ef300000000, // SFO
+				0x808f7e5cf0000000, // SFO
+				0x808587f000000000, // SFO
+				0x89c25ac000000000, // NYC
+				0x89c259a400000000, // NYC
+				0x89c258fa10000000, // NYC
+				0x89c258f174007000, // NYC
+			},
+			overlaps: []CellID{
+				0x808c000000000000, // Big SFO
+				0x89c4000000000000, // Big NYC
+			},
+			disjoint: []CellID{
+				0x89c15a4fcb1bb000, // outside NYC
+				0x89c15a4e4aa95000, // outside NYC
+				0x8094000000000000, // outside SFO (big)
+				0x8096f10000000000, // outside SFO (smaller)
+
+				0x87c0000000000000, // Midwest very big
+			},
+		},
+		{
+			// CellUnion with cells at many levels:
+			cells: []CellID{
+				0x8100000000000000, // starting around california
+				0x8740000000000000, // adjacent cells at increasing
+				0x8790000000000000, // levels, moving eastward.
+				0x87f4000000000000,
+				0x87f9000000000000, // going down across the midwest
+				0x87ff400000000000,
+				0x87ff900000000000,
+				0x87fff40000000000,
+				0x87fff90000000000,
+				0x87ffff4000000000,
+				0x87ffff9000000000,
+				0x87fffff400000000,
+				0x87fffff900000000,
+				0x87ffffff40000000,
+				0x87ffffff90000000,
+				0x87fffffff4000000,
+				0x87fffffff9000000,
+				0x87ffffffff400000, // to a very small cell in Wisconsin
+			},
+			contained: []CellID{
+				0x808f400000000000,
+				0x80eb118b00000000,
+				0x8136a7a11d000000,
+				0x8136a7a11dac0000,
+				0x876c7c0000000000,
+				0x87f96d0000000000,
+				0x87ffffffff400000,
+			},
+			overlaps: []CellID{
+				CellID(0x8100000000000000).immediateParent(),
+				CellID(0x8740000000000000).immediateParent(),
+			},
+			disjoint: []CellID{
+				0x52aaaaaaab300000,
+				0x52aaaaaaacd00000,
+				0x87fffffffa100000,
+				0x87ffffffed500000,
+				0x87ffffffa0100000,
+				0x87fffffed5540000,
+				0x87fffffed6240000,
+				0x52aaaacccb340000,
+				0x87a0000400000000,
+				0x87a000001f000000,
+				0x87a0000029d00000,
+				0x9500000000000000,
+			},
+		},
+	}
+	for _, test := range tests {
+		union := CellUnion(test.cells)
+		union.Normalize()
+
+		// Ensure self-containment tests are correct.
+		for _, id := range test.cells {
+			if !union.IntersectsCellID(id) {
+				t.Errorf("CellUnion %v should self-intersect %v but does not", union, id)
+			}
+			if !union.ContainsCellID(id) {
+				t.Errorf("CellUnion %v should self-contain %v but does not", union, id)
+			}
+		}
+		// Test for containment specified in test case.
+		for _, id := range test.contained {
+			if !union.IntersectsCellID(id) {
+				t.Errorf("CellUnion %v should intersect %v but does not", union, id)
+			}
+			if !union.ContainsCellID(id) {
+				t.Errorf("CellUnion %v should contain %v but does not", union, id)
+			}
+		}
+		// Make sure the CellUnion intersects these cells but does not contain them.
+		for _, id := range test.overlaps {
+			if !union.IntersectsCellID(id) {
+				t.Errorf("CellUnion %v should intersect %v but does not", union, id)
+			}
+			if union.ContainsCellID(id) {
+				t.Errorf("CellUnion %v should not contain %v but does", union, id)
+			}
+		}
+		// Negative cases: make sure the CellUnion neither contains nor intersects these cells.
+		for _, id := range test.disjoint {
+			if union.IntersectsCellID(id) {
+				t.Errorf("CellUnion %v should not intersect %v but does", union, id)
+			}
+			if union.ContainsCellID(id) {
+				t.Errorf("CellUnion %v should not contain %v but does", union, id)
+			}
+		}
+	}
+}
+
+func addCells(id CellID, selected bool, input *[]CellID, expected *[]CellID, t *testing.T) {
+	// Decides whether to add "id" and/or some of its descendants to the test case.  If "selected"
+	// is true, then the region covered by "id" *must* be added to the test case (either by adding
+	// "id" itself, or some combination of its descendants, or both).  If cell ids are to the test
+	// case "input", then the corresponding expected result after simplification is added to
+	// "expected".
+
+	if id == 0 {
+		// Initial call: decide whether to add cell(s) from each face.
+		for face := 0; face < 6; face++ {
+			addCells(CellIDFromFace(face), false, input, expected, t)
+		}
+		return
+	}
+
+	if id.IsLeaf() {
+		// The oneIn() call below ensures that the parent of a leaf cell will always be selected (if
+		// we make it that far down the hierarchy).
+		if !selected {
+			t.Errorf("id IsLeaf() and not selected")
+		}
+		*input = append(*input, id)
+		return
+	}
+
+	// The following code ensures that the probability of selecting a cell at each level is
+	// approximately the same, i.e. we test normalization of cells at all levels.
+	if !selected && oneIn(maxLevel-id.Level()) {
+		//  Once a cell has been selected, the expected output is predetermined.  We then make sure
+		//  that cells are selected that will normalize to the desired output.
+		*expected = append(*expected, id)
+		selected = true
+	}
+
+	// With the oneIn() constants below, this function adds an average
+	// of 5/6 * (maxLevel - level) cells to "input", where "level" is the
+	// level at which the cell was first selected (level 15 on average).
+	// Therefore the average number of input cells in a test case is about
+	// (5/6 * 15 * 6) = 75.  The average number of output cells is about 6.
+
+	// If a cell is selected, we add it to "input" with probability 5/6.
+	added := false
+	if selected && !oneIn(6) {
+		*input = append(*input, id)
+		added = true
+	}
+	numChildren := 0
+	for child := id.ChildBegin(); child != id.ChildEnd(); child = child.Next() {
+		// If the cell is selected, on average we recurse on 4/12 = 1/3 child.
+		// This intentionally may result in a cell and some of its children
+		// being included in the test case.
+		//
+		// If the cell is not selected, on average we recurse on one child.
+		// We also make sure that we do not recurse on all 4 children, since
+		// then we might include all 4 children in the input case by accident
+		// (in which case the expected output would not be correct).
+		recurse := false
+		if selected {
+			recurse = oneIn(12)
+		} else {
+			recurse = oneIn(4)
+		}
+		if recurse && numChildren < 3 {
+			addCells(child, selected, input, expected, t)
+			numChildren++
+		}
+		// If this cell was selected but the cell itself was not added, we
+		// must ensure that all 4 children (or some combination of their
+		// descendants) are added.
+
+		if selected && !added {
+			addCells(child, selected, input, expected, t)
+		}
+	}
+}
+
+func TestCellUnionNormalizePseudoRandom(t *testing.T) {
+	// Try a bunch of random test cases, and keep track of average statistics
+	// for normalization (to see if they agree with the analysis above).
+
+	inSum := 0
+	outSum := 0
+	iters := 2000
+
+	for i := 0; i < iters; i++ {
+		input := []CellID{}
+		expected := []CellID{}
+		addCells(CellID(0), false, &input, &expected, t)
+		inSum += len(input)
+		outSum += len(expected)
+		cellunion := CellUnion(input)
+		cellunion.Normalize()
+
+		if len(expected) != len(cellunion) {
+			t.Errorf("Expected size of union to be %d, but got %d.",
+				len(expected), len(cellunion))
+		}
+
+		// Test GetCapBound().
+		cb := cellunion.CapBound()
+		for _, ci := range cellunion {
+			if !cb.ContainsCell(CellFromCellID(ci)) {
+				t.Errorf("CapBound %v of union %v should contain cellID %v", cb, cellunion, ci)
+			}
+		}
+
+		for _, j := range input {
+			if !cellunion.ContainsCellID(j) {
+				t.Errorf("Expected containment of CellID %v", j)
+			}
+
+			if cellunion.IntersectsCellID(j) == false {
+				t.Errorf("Expected intersection with %v.", j)
+			}
+
+			if !j.isFace() {
+				if cellunion.IntersectsCellID(j.immediateParent()) == false {
+					t.Errorf("Expected intersection with parent cell %v.", j.immediateParent())
+					if j.Level() > 1 {
+						if cellunion.IntersectsCellID(j.immediateParent().immediateParent()) == false {
+							t.Errorf("Expected intersection with parent's parent %v.",
+								j.immediateParent().immediateParent())
+						}
+						if cellunion.IntersectsCellID(j.Parent(0)) == false {
+							t.Errorf("Expected intersection with parent %v at level 0.", j.Parent(0))
+						}
+					}
+				}
+			}
+
+			if !j.IsLeaf() {
+				if cellunion.ContainsCellID(j.ChildBegin()) == false {
+					t.Errorf("Expected containment of %v.", j.ChildBegin())
+				}
+				if cellunion.IntersectsCellID(j.ChildBegin()) == false {
+					t.Errorf("Expected intersection with %v.", j.ChildBegin())
+				}
+				if cellunion.ContainsCellID(j.ChildEnd().Prev()) == false {
+					t.Errorf("Expected containment of %v.", j.ChildEnd().Prev())
+				}
+				if cellunion.IntersectsCellID(j.ChildEnd().Prev()) == false {
+					t.Errorf("Expected intersection with %v.", j.ChildEnd().Prev())
+				}
+				if cellunion.ContainsCellID(j.ChildBeginAtLevel(maxLevel)) == false {
+					t.Errorf("Expected containment of %v.", j.ChildBeginAtLevel(maxLevel))
+				}
+				if cellunion.IntersectsCellID(j.ChildBeginAtLevel(maxLevel)) == false {
+					t.Errorf("Expected intersection with %v.", j.ChildBeginAtLevel(maxLevel))
+				}
+			}
+		}
+
+		for _, exp := range expected {
+			if !exp.isFace() {
+				if cellunion.ContainsCellID(exp.Parent(exp.Level() - 1)) {
+					t.Errorf("cellunion should not contain its parent %v", exp.Parent(exp.Level()-1))
+				}
+				if cellunion.ContainsCellID(exp.Parent(0)) {
+					t.Errorf("cellunion should not contain the top level parent %v", exp.Parent(0))
+				}
+			}
+		}
+
+		var test []CellID
+		var dummy []CellID
+		addCells(CellID(0), false, &test, &dummy, t)
+		for _, j := range test {
+			intersects := false
+			contains := false
+			for _, k := range expected {
+				if k.Contains(j) {
+					contains = true
+				}
+				if k.Intersects(j) {
+					intersects = true
+				}
+			}
+			if cellunion.ContainsCellID(j) != contains {
+				t.Errorf("cellunion.ContainsCellID(%v) = %t, want %t", uint64(j), !contains, contains)
+			}
+			if cellunion.IntersectsCellID(j) != intersects {
+				t.Errorf("cellunion.IntersectsCellID(%v) = %t, want %t", uint64(j), !intersects, intersects)
+			}
+		}
+	}
+	t.Logf("avg in %.2f, avg out %.2f", float64(inSum)/float64(iters), float64(outSum)/float64(iters))
+}
+
+func TestCellUnionDenormalize(t *testing.T) {
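+	// The cases below expect Denormalize to replace each cell with its descendants
+	// at the smallest level that is at least minL and for which (level - minL) is
+	// a multiple of lMod.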
+	tests := []struct {
+		name string
+		minL int
+		lMod int
+		cu   *CellUnion
+		exp  *CellUnion
+	}{
+		{
+			"not expanded, level mod == 1",
+			10,
+			1,
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(11),
+				CellIDFromFace(2).ChildBeginAtLevel(11),
+				CellIDFromFace(3).ChildBeginAtLevel(14),
+				CellIDFromFace(0).ChildBeginAtLevel(10),
+			},
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(11),
+				CellIDFromFace(2).ChildBeginAtLevel(11),
+				CellIDFromFace(3).ChildBeginAtLevel(14),
+				CellIDFromFace(0).ChildBeginAtLevel(10),
+			},
+		},
+		{
+			"not expanded, level mod > 1",
+			10,
+			2,
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(12),
+				CellIDFromFace(2).ChildBeginAtLevel(12),
+				CellIDFromFace(3).ChildBeginAtLevel(14),
+				CellIDFromFace(0).ChildBeginAtLevel(10),
+			},
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(12),
+				CellIDFromFace(2).ChildBeginAtLevel(12),
+				CellIDFromFace(3).ChildBeginAtLevel(14),
+				CellIDFromFace(0).ChildBeginAtLevel(10),
+			},
+		},
+		{
+			"expended, (level - min_level) is not multiple of level mod",
+			10,
+			3,
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(12),
+				CellIDFromFace(5).ChildBeginAtLevel(11),
+			},
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[0],
+				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[1],
+				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[2],
+				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[3],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[0],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[1],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[2],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[3],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[0],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[1],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[2],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[3],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[0],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[1],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[2],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[3],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[0],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[1],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[2],
+				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[3],
+			},
+		},
+		{
+			"expended, level < min_level",
+			10,
+			3,
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(9),
+			},
+			&CellUnion{
+				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[0],
+				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[1],
+				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[2],
+				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[3],
+			},
+		},
+	}
+	for _, test := range tests {
+		if test.cu.Denormalize(test.minL, test.lMod); !reflect.DeepEqual(test.cu, test.exp) {
+			t.Errorf("test: %s; got %v, want %v", test.name, test.cu, test.exp)
+		}
+	}
+}
+
+func TestCellUnionRectBound(t *testing.T) {
+	tests := []struct {
+		cu   *CellUnion
+		want Rect
+	}{
+		{&CellUnion{}, EmptyRect()},
+		{
+			&CellUnion{CellIDFromFace(1)},
+			Rect{
+				r1.Interval{-math.Pi / 4, math.Pi / 4},
+				s1.Interval{math.Pi / 4, 3 * math.Pi / 4},
+			},
+		},
+		{
+			&CellUnion{
+				0x808c000000000000, // Big SFO
+			},
+			Rect{
+				r1.Interval{
+					float64(s1.Degree * 34.644220547108482),
+					float64(s1.Degree * 38.011928357226651),
+				},
+				s1.Interval{
+					float64(s1.Degree * -124.508522987668428),
+					float64(s1.Degree * -121.628309835221216),
+				},
+			},
+		},
+		{
+			&CellUnion{
+				0x89c4000000000000, // Big NYC
+			},
+			Rect{
+				r1.Interval{
+					float64(s1.Degree * 38.794595155857657),
+					float64(s1.Degree * 41.747046884651063),
+				},
+				s1.Interval{
+					float64(s1.Degree * -76.456308667788633),
+					float64(s1.Degree * -73.465162142654819),
+				},
+			},
+		},
+		{
+			&CellUnion{
+				0x89c4000000000000, // Big NYC
+				0x808c000000000000, // Big SFO
+			},
+			Rect{
+				r1.Interval{
+					float64(s1.Degree * 34.644220547108482),
+					float64(s1.Degree * 41.747046884651063),
+				},
+				s1.Interval{
+					float64(s1.Degree * -124.508522987668428),
+					float64(s1.Degree * -73.465162142654819),
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.cu.RectBound(); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
+			t.Errorf("%v.RectBound() = %v, want %v", test.cu, got, test.want)
+		}
+	}
+}
+
+func TestCellUnionLeafCellsCovered(t *testing.T) {
+	tests := []struct {
+		have []CellID
+		want int64
+	}{
+		{},
+		{
+			have: []CellID{},
+			want: 0,
+		},
+		{
+			// One leaf cell on face 0.
+			have: []CellID{
+				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+			},
+			want: 1,
+		},
+		{
+			// Face 0 itself (which includes the previous leaf cell).
+			have: []CellID{
+				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+				CellIDFromFace(0),
+			},
+			want: 1 << 60,
+		},
+		/*
+			TODO(roberts): Once Expand is implemented, add the two tests for these
+			// Five faces.
+			cell_union.Expand(0),
+			want: 5 << 60,
+			// Whole world.
+			cell_union.Expand(0),
+			want: 6 << 60,
+		*/
+		{
+			// Add some disjoint cells.
+			have: []CellID{
+				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+				CellIDFromFace(0),
+				CellIDFromFace(1).ChildBeginAtLevel(1),
+				CellIDFromFace(2).ChildBeginAtLevel(2),
+				CellIDFromFace(2).ChildEndAtLevel(2).Prev(),
+				CellIDFromFace(3).ChildBeginAtLevel(14),
+				CellIDFromFace(4).ChildBeginAtLevel(27),
+				CellIDFromFace(4).ChildEndAtLevel(15).Prev(),
+				CellIDFromFace(5).ChildBeginAtLevel(30),
+			},
+			want: 1 + (1 << 6) + (1 << 30) + (1 << 32) +
+				(2 << 56) + (1 << 58) + (1 << 60),
+		},
+	}
+
+	for _, test := range tests {
+		cu := CellUnion(test.have)
+		cu.Normalize()
+		if got := cu.LeafCellsCovered(); got != test.want {
+			t.Errorf("CellUnion(%v).LeafCellsCovered() = %v, want %v", cu, got, test.want)
+		}
+	}
+}
+
+func TestCellUnionFromRange(t *testing.T) {
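+	// CellUnionFromRange should produce a normalized union covering exactly the
+	// leaf-cell range [min, max], so consecutive cells in the result must cover
+	// contiguous, non-overlapping ranges.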
+	for iter := 0; iter < 100; iter++ {
+		min := randomCellIDForLevel(maxLevel)
+		max := randomCellIDForLevel(maxLevel)
+		if min > max {
+			min, max = max, min
+		}
+
+		cu := CellUnionFromRange(min, max.Next())
+		if len(cu) <= 0 {
+			t.Errorf("len(CellUnionFromRange(%v, %v)) = %d, want > 0", min, max.Next(), len(cu))
+		}
+		if min != cu[0].RangeMin() {
+			t.Errorf("%v.RangeMin of CellUnion should not be below the minimum value it was created from %v", cu[0], min)
+		}
+		if max != cu[len(cu)-1].RangeMax() {
+			t.Errorf("%v.RangeMax of CellUnion should not be above the maximum value it was created from %v", cu[len(cu)-1], max)
+		}
+		for i := 1; i < len(cu); i++ {
+			if got, want := cu[i].RangeMin(), cu[i-1].RangeMax().Next(); got != want {
+				t.Errorf("%v.RangeMin() = %v, want %v", cu[i], got, want)
+			}
+		}
+	}
+
+	// Focus on test cases that generate an empty or full range.
+
+	// Test an empty range before the minimum CellID.
+	idBegin := CellIDFromFace(0).ChildBeginAtLevel(maxLevel)
+	cu := CellUnionFromRange(idBegin, idBegin)
+	if len(cu) != 0 {
+		t.Errorf("CellUnionFromRange with begin and end as the first CellID should be empty, got %d", len(cu))
+	}
+
+	// Test an empty range after the maximum CellID.
+	idEnd := CellIDFromFace(5).ChildEndAtLevel(maxLevel)
+	cu = CellUnionFromRange(idEnd, idEnd)
+	if len(cu) != 0 {
+		t.Errorf("CellUnionFromRange with begin and end as the last CellID should be empty, got %d", len(cu))
+	}
+
+	// Test the full sphere.
+	cu = CellUnionFromRange(idBegin, idEnd)
+	if len(cu) != 6 {
+		t.Errorf("CellUnionFromRange from first CellID to last CellID should have 6 cells, got %d", len(cu))
+	}
+
+	for i := 0; i < len(cu); i++ {
+		if !cu[i].isFace() {
+			t.Errorf("CellUnionFromRange for full sphere cu[%d].isFace() = %t, want %t", i, cu[i].isFace(), true)
+		}
+	}
+}
+
+func BenchmarkCellUnionFromRange(b *testing.B) {
+	x := CellIDFromFace(0).ChildBeginAtLevel(maxLevel)
+	y := CellIDFromFace(5).ChildEndAtLevel(maxLevel)
+	for i := 0; i < b.N; i++ {
+		CellUnionFromRange(x, y)
+	}
+}

+ 2 - 2
vendor/github.com/golang/geo/s2/edgeutil.go

@@ -292,7 +292,7 @@ func (r *RectBounder) AddPoint(b Point) {
 	// and B attains its minimum and maximum latitudes). To test whether AB
 	// crosses this plane, we compute a vector M perpendicular to this
 	// plane and then project A and B onto it.
-	m := n.Cross(PointFromCoords(0, 0, 1).Vector)
+	m := n.Cross(r3.Vector{0, 0, 1})
 	mA := m.Dot(r.a.Vector)
 	mB := m.Dot(b.Vector)
 
@@ -845,7 +845,7 @@ func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Po
 	// Otherwise find the point B' where the line AB exits the face.
 	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
 
-	p := pointUVW(PointFromCoords(uv.X, uv.Y, 1.0))
+	p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
 
 	// Determine if the exit point B' is contained within the segment. We do this
 	// by computing the dot products with two inward-facing tangent vectors at A

+ 1201 - 0
vendor/github.com/golang/geo/s2/edgeutil_test.go

@@ -0,0 +1,1201 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"fmt"
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+func TestEdgeutilCrossings(t *testing.T) {
+	na1 := math.Nextafter(1, 0)
+	na2 := math.Nextafter(1, 2)
+
+	tests := []struct {
+		msg          string
+		a, b, c, d   Point
+		simpleTest   bool
+		robust       Crossing
+		vertex       bool
+		edgeOrVertex bool
+	}{
+		{
+			"two regular edges that cross",
+			Point{r3.Vector{1, 2, 1}},
+			Point{r3.Vector{1, -3, 0.5}},
+			Point{r3.Vector{1, -0.5, -3}},
+			Point{r3.Vector{0.1, 0.5, 3}},
+			true,
+			Cross,
+			true,
+			true,
+		},
+		{
+			"two regular edges that cross antipodal points",
+			Point{r3.Vector{1, 2, 1}},
+			Point{r3.Vector{1, -3, 0.5}},
+			Point{r3.Vector{-1, 0.5, 3}},
+			Point{r3.Vector{-0.1, -0.5, -3}},
+			true,
+			DoNotCross,
+			true,
+			false,
+		},
+		{
+			"two edges on the same great circle",
+			Point{r3.Vector{0, 0, -1}},
+			Point{r3.Vector{0, 1, 0}},
+			Point{r3.Vector{0, 1, 1}},
+			Point{r3.Vector{0, 0, 1}},
+			true,
+			DoNotCross,
+			false,
+			false,
+		},
+		{
+			"two edges that cross where one vertex is the OriginPoint",
+			Point{r3.Vector{1, 0, 0}},
+			OriginPoint(),
+			Point{r3.Vector{1, -0.1, 1}},
+			Point{r3.Vector{1, 1, -0.1}},
+			true,
+			Cross,
+			true,
+			true,
+		},
+		{
+			"two edges that cross antipodal points",
+			Point{r3.Vector{1, 0, 0}},
+			Point{r3.Vector{0, 1, 0}},
+			Point{r3.Vector{0, 0, -1}},
+			Point{r3.Vector{-1, -1, 1}},
+			true,
+			DoNotCross,
+			true,
+			false,
+		},
+		{
+			"two edges that share an endpoint",
+			// The Ortho() direction is (-4,0,2) and edge CD
+			// is further CCW around (2,3,4) than AB.
+			Point{r3.Vector{2, 3, 4}},
+			Point{r3.Vector{-1, 2, 5}},
+			Point{r3.Vector{7, -2, 3}},
+			Point{r3.Vector{2, 3, 4}},
+			true,
+			MaybeCross,
+			true,
+			false,
+		},
+		{
+			"two edges that barely cross near the middle of one edge",
+			// The edge AB is approximately in the x=y plane, while CD is approximately
+			// perpendicular to it and ends exactly at the x=y plane.
+			Point{r3.Vector{1, 1, 1}},
+			Point{r3.Vector{1, na1, -1}},
+			Point{r3.Vector{11, -12, -1}},
+			Point{r3.Vector{10, 10, 1}},
+			false,
+			DoNotCross, // TODO(sbeckman): Should be 1, fix once exactSign is implemented.
+			true,
+			false, // TODO(sbeckman): Should be true, fix once exactSign is implemented.
+		},
+		{
+			"two edges that barely cross near the middle separated by a distance of about 1e-15",
+			Point{r3.Vector{1, 1, 1}},
+			Point{r3.Vector{1, na2, -1}},
+			Point{r3.Vector{1, -1, 0}},
+			Point{r3.Vector{1, 1, 0}},
+			false,
+			DoNotCross,
+			false,
+			false,
+		},
+		{
+			"two edges that barely cross each other near the end of both edges",
+			// This example cannot be handled using regular double-precision
+			// arithmetic due to floating-point underflow.
+			Point{r3.Vector{0, 0, 1}},
+			Point{r3.Vector{2, -1e-323, 1}},
+			Point{r3.Vector{1, -1, 1}},
+			Point{r3.Vector{1e-323, 0, 1}},
+			false,
+			DoNotCross, // TODO(sbeckman): Should be 1, fix once exactSign is implemented.
+			false,
+			false, // TODO(sbeckman): Should be true, fix once exactSign is implemented.
+		},
+		{
+			"two edges that barely cross each other near the end separated by a distance of about 1e-640",
+			Point{r3.Vector{0, 0, 1}},
+			Point{r3.Vector{2, 1e-323, 1}},
+			Point{r3.Vector{1, -1, 1}},
+			Point{r3.Vector{1e-323, 0, 1}},
+			false,
+			DoNotCross,
+			false,
+			false,
+		},
+		{
+			"two edges that barely cross each other near the middle of one edge",
+			// Computing the exact determinant of some of the triangles in this test
+			// requires more than 2000 bits of precision.
+			Point{r3.Vector{1, -1e-323, -1e-323}},
+			Point{r3.Vector{1e-323, 1, 1e-323}},
+			Point{r3.Vector{1, -1, 1e-323}},
+			Point{r3.Vector{1, 1, 0}},
+			false,
+			Cross,
+			true,
+			true,
+		},
+		{
+			"two edges that barely cross each other near the middle separated by a distance of about 1e-640",
+			Point{r3.Vector{1, 1e-323, -1e-323}},
+			Point{r3.Vector{-1e-323, 1, 1e-323}},
+			Point{r3.Vector{1, -1, 1e-323}},
+			Point{r3.Vector{1, 1, 0}},
+			false,
+			Cross, // TODO(sbeckman): Should be -1, fix once exactSign is implemented.
+			true,
+			true, // TODO(sbeckman): Should be false, fix once exactSign is implemented.
+		},
+	}
+
+	for _, test := range tests {
+		if err := testCrossing(test.a, test.b, test.c, test.d, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+		if err := testCrossing(test.b, test.a, test.c, test.d, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+		if err := testCrossing(test.a, test.b, test.d, test.c, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+		if err := testCrossing(test.b, test.a, test.d, test.c, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+		if err := testCrossing(test.a, test.b, test.a, test.b, MaybeCross, true, true, false); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+		if err := testCrossing(test.c, test.d, test.a, test.b, test.robust, test.vertex, test.edgeOrVertex != (test.robust == MaybeCross), test.simpleTest); err != nil {
+			t.Errorf("%s: %v", test.msg, err)
+		}
+
+		if got := VertexCrossing(test.a, test.b, test.c, test.b); got != test.vertex {
+			t.Errorf("%s: VertexCrossing(%v,%v,%v,%v) = %t, want %t", test.msg, test.a, test.b, test.c, test.b, got, test.vertex)
+		}
+	}
+}
+
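+// testCrossing checks the crossing relation of edges AB and CD against every
+// flavor of the API exercised in this file: SimpleCrossing, the
+// ChainEdgeCrosser methods, and the EdgeOrVertex variants. robust is the
+// expected CrossingSign result, edgeOrVertex the expected EdgeOrVertexCrossing
+// result, and simple controls whether SimpleCrossing is checked at all.
+//
+// As a minimal sketch of the call pattern being verified here (using only
+// identifiers from this package), deciding whether CD crosses AB looks like:
+//
+//	crosser := NewChainEdgeCrosser(a, b, c)
+//	switch crosser.ChainCrossingSign(d) {
+//	case Cross:
+//		// the edge interiors cross
+//	case MaybeCross:
+//		// the edges share a vertex; EdgeOrVertexCrossing breaks the tie
+//	case DoNotCross:
+//		// no crossing
+//	}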
+func testCrossing(a, b, c, d Point, robust Crossing, vertex, edgeOrVertex, simple bool) error {
+	input := fmt.Sprintf("a: %v, b: %v, c: %v, d: %v", a, b, c, d)
+	if got, want := SimpleCrossing(a, b, c, d), robust == Cross; simple && got != want {
+		return fmt.Errorf("%v, SimpleCrossing(a, b, c, d) = %t, want %t", input, got, want)
+	}
+
+	crosser := NewChainEdgeCrosser(a, b, c)
+	if got, want := crosser.ChainCrossingSign(d), robust; got != want {
+		return fmt.Errorf("%v, ChainCrossingSign(d) = %d, want %d", input, got, want)
+	}
+	if got, want := crosser.ChainCrossingSign(c), robust; got != want {
+		return fmt.Errorf("%v, ChainCrossingSign(c) = %d, want %d", input, got, want)
+	}
+	if got, want := crosser.CrossingSign(d, c), robust; got != want {
+		return fmt.Errorf("%v, CrossingSign(d, c) = %d, want %d", input, got, want)
+	}
+	if got, want := crosser.CrossingSign(c, d), robust; got != want {
+		return fmt.Errorf("%v, CrossingSign(c, d) = %d, want %d", input, got, want)
+	}
+
+	crosser.RestartAt(c)
+	if got, want := crosser.EdgeOrVertexChainCrossing(d), edgeOrVertex; got != want {
+		return fmt.Errorf("%v, EdgeOrVertexChainCrossing(d) = %t, want %t", input, got, want)
+	}
+	if got, want := crosser.EdgeOrVertexChainCrossing(c), edgeOrVertex; got != want {
+		return fmt.Errorf("%v, EdgeOrVertexChainCrossing(c) = %t, want %t", input, got, want)
+	}
+	if got, want := crosser.EdgeOrVertexCrossing(d, c), edgeOrVertex; got != want {
+		return fmt.Errorf("%v, EdgeOrVertexCrossing(d, c) = %t, want %t", input, got, want)
+	}
+	if got, want := crosser.EdgeOrVertexCrossing(c, d), edgeOrVertex; got != want {
+		return fmt.Errorf("%v, EdgeOrVertexCrossing(c, d) = %t, want %t", input, got, want)
+	}
+	return nil
+}
+
+func TestEdgeutilInterpolate(t *testing.T) {
+	// Choose test points designed to expose floating-point errors.
+	p1 := PointFromCoords(0.1, 1e-30, 0.3)
+	p2 := PointFromCoords(-0.7, -0.55, -1e30)
+
+	tests := []struct {
+		a, b Point
+		dist float64
+		want Point
+	}{
+		// A zero-length edge.
+		{p1, p1, 0, p1},
+		{p1, p1, 1, p1},
+		// Start, end, and middle of a medium-length edge.
+		{p1, p2, 0, p1},
+		{p1, p2, 1, p2},
+		{p1, p2, 0.5, Point{(p1.Add(p2.Vector)).Mul(0.5)}},
+
+		// Test that interpolation is done using distances on the sphere
+		// rather than linear distances.
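+		// For example, one third of the way from (1,0,0) to (0,1,0) along the
+		// great circle is the point at 30 degrees, (cos 30°, sin 30°, 0),
+		// which is proportional to (sqrt(3), 1, 0).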
+		{
+			Point{r3.Vector{1, 0, 0}},
+			Point{r3.Vector{0, 1, 0}},
+			1.0 / 3.0,
+			Point{r3.Vector{math.Sqrt(3), 1, 0}},
+		},
+		{
+			Point{r3.Vector{1, 0, 0}},
+			Point{r3.Vector{0, 1, 0}},
+			2.0 / 3.0,
+			Point{r3.Vector{1, math.Sqrt(3), 0}},
+		},
+	}
+
+	for _, test := range tests {
+		// We allow a bit more than the usual 1e-15 error tolerance because
+		// Interpolate() uses trig functions.
+		if got := Interpolate(test.dist, test.a, test.b); !pointsApproxEquals(got, test.want, 3e-15) {
+			t.Errorf("Interpolate(%v, %v, %v) = %v, want %v", test.dist, test.a, test.b, got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilInterpolateOverLongEdge(t *testing.T) {
+	lng := math.Pi - 1e-2
+	a := Point{PointFromLatLng(LatLng{0, 0}).Normalize()}
+	b := Point{PointFromLatLng(LatLng{0, s1.Angle(lng)}).Normalize()}
+
+	for f := 0.4; f > 1e-15; f *= 0.1 {
+		// Test that interpolation is accurate on a long edge (but not so long that
+		// the definition of the edge itself becomes too unstable).
+		want := Point{PointFromLatLng(LatLng{0, s1.Angle(f * lng)}).Normalize()}
+		if got := Interpolate(f, a, b); !pointsApproxEquals(got, want, 3e-15) {
+			t.Errorf("long edge Interpolate(%v, %v, %v) = %v, want %v", f, a, b, got, want)
+		}
+
+		// Test the remainder of the dist also matches.
+		wantRem := Point{PointFromLatLng(LatLng{0, s1.Angle((1 - f) * lng)}).Normalize()}
+		if got := Interpolate(1-f, a, b); !pointsApproxEquals(got, wantRem, 3e-15) {
+			t.Errorf("long edge Interpolate(%v, %v, %v) = %v, want %v", 1-f, a, b, got, wantRem)
+		}
+	}
+}
+
+func TestEdgeutilInterpolateAntipodal(t *testing.T) {
+	p1 := PointFromCoords(0.1, 1e-30, 0.3)
+
+	// Test that interpolation on a 180 degree edge (antipodal endpoints) yields
+	// a result with the correct distance from each endpoint.
+	for dist := 0.0; dist <= 1.0; dist += 0.125 {
+		actual := Interpolate(dist, p1, Point{p1.Mul(-1)})
+		if !float64Near(actual.Distance(p1).Radians(), dist*math.Pi, 3e-15) {
+			t.Errorf("antipodal points Interpolate(%v, %v, %v) = %v, want %v", dist, p1, Point{p1.Mul(-1)}, actual, dist*math.Pi)
+		}
+	}
+}
+
+func TestEdgeutilRepeatedInterpolation(t *testing.T) {
+	// Check that points do not drift away from unit length when repeated
+	// interpolations are done.
+	for i := 0; i < 100; i++ {
+		a := randomPoint()
+		b := randomPoint()
+		for j := 0; j < 1000; j++ {
+			a = Interpolate(0.01, a, b)
+		}
+		if !a.Vector.IsUnit() {
+			t.Errorf("repeated Interpolate(%v, %v, %v) calls did not stay unit length", 0.01, a, b)
+		}
+	}
+}
+
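+// rectBoundForPoints returns the RectBounder bound for the single edge AB.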
+func rectBoundForPoints(a, b Point) Rect {
+	bounder := NewRectBounder()
+	bounder.AddPoint(a)
+	bounder.AddPoint(b)
+	return bounder.RectBound()
+}
+
+func TestEdgeutilRectBounderMaxLatitudeSimple(t *testing.T) {
+	cubeLat := math.Asin(1 / math.Sqrt(3)) // 35.26 degrees
+	cubeLatRect := Rect{r1.IntervalFromPoint(-cubeLat).AddPoint(cubeLat),
+		s1.IntervalFromEndpoints(-math.Pi/4, math.Pi/4)}
+
+	tests := []struct {
+		a, b Point
+		want Rect
+	}{
+		// Check cases where the min/max latitude is attained at a vertex.
+		{
+			a:    Point{r3.Vector{1, 1, 1}},
+			b:    Point{r3.Vector{1, -1, -1}},
+			want: cubeLatRect,
+		},
+		{
+			a:    Point{r3.Vector{1, -1, 1}},
+			b:    Point{r3.Vector{1, 1, -1}},
+			want: cubeLatRect,
+		},
+	}
+
+	for _, test := range tests {
+		if got := rectBoundForPoints(test.a, test.b); !rectsApproxEqual(got, test.want, rectErrorLat, rectErrorLng) {
+			t.Errorf("RectBounder for points (%v, %v) near max lat failed: got %v, want %v", test.a, test.b, got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilRectBounderMaxLatitudeEdgeInterior(t *testing.T) {
+	// Check cases where the min/max latitude occurs in the edge interior.
+	// These tests expect the result to be pretty close to the middle of the
+	// allowable error range (i.e., by adding 0.5 * rectErrorLat).
+
+	tests := []struct {
+		want float64
+		got  float64
+	}{
+		// Max latitude, CW edge
+		{
+			math.Pi/4 + 0.5*rectErrorLat,
+			rectBoundForPoints(Point{r3.Vector{1, 1, 1}}, Point{r3.Vector{1, -1, 1}}).Lat.Hi,
+		},
+		// Min latitude, CW edge
+		{
+			-math.Pi/4 - 0.5*rectErrorLat,
+			rectBoundForPoints(Point{r3.Vector{1, -1, -1}}, Point{r3.Vector{-1, -1, -1}}).Lat.Lo,
+		},
+		// Max latitude, CCW edge
+		{
+			math.Pi/4 + 0.5*rectErrorLat,
+			rectBoundForPoints(Point{r3.Vector{1, -1, 1}}, Point{r3.Vector{1, 1, 1}}).Lat.Hi,
+		},
+		// Min latitude, CCW edge
+		{
+			-math.Pi/4 - 0.5*rectErrorLat,
+			rectBoundForPoints(Point{r3.Vector{-1, 1, -1}}, Point{r3.Vector{-1, -1, -1}}).Lat.Lo,
+		},
+
+		// Check cases where the edge passes through one of the poles.
+		{
+			math.Pi / 2,
+			rectBoundForPoints(Point{r3.Vector{.3, .4, 1}}, Point{r3.Vector{-.3, -.4, 1}}).Lat.Hi,
+		},
+		{
+			-math.Pi / 2,
+			rectBoundForPoints(Point{r3.Vector{.3, .4, -1}}, Point{r3.Vector{-.3, -.4, -1}}).Lat.Lo,
+		},
+	}
+
+	for _, test := range tests {
+		if !float64Eq(test.got, test.want) {
+			t.Errorf("RectBound for max lat on interior of edge failed; got %v want %v", test.got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilRectBounderMaxLatitudeRandom(t *testing.T) {
+	// Check that the maximum latitude of edges is computed accurately to within
+	// 3 * dblEpsilon (the expected maximum error). We concentrate on maximum
+	// latitudes near the equator and north pole since these are the extremes.
+
+	for i := 0; i < 100; i++ {
+		// Construct a right-handed coordinate frame (U,V,W) such that U points
+		// slightly above the equator, V points at the equator, and W is slightly
+		// offset from the north pole.
+		u := randomPoint()
+		u.Z = dblEpsilon * 1e-6 * math.Pow(1e12, randomFloat64())
+
+		u = Point{u.Normalize()}
+		v := Point{PointFromCoords(0, 0, 1).PointCross(u).Normalize()}
+		w := Point{u.PointCross(v).Normalize()}
+
+		// Construct a line segment AB that passes through U, and check that the
+		// maximum latitude of this segment matches the latitude of U.
+		a := Point{u.Sub(v.Mul(randomFloat64())).Normalize()}
+		b := Point{u.Add(v.Mul(randomFloat64())).Normalize()}
+		abBound := rectBoundForPoints(a, b)
+		if !float64Near(latitude(u).Radians(), abBound.Lat.Hi, rectErrorLat) {
+			t.Errorf("bound for line AB not near enough to the latitude of point %v. got %v, want %v",
+				u, abBound.Lat.Hi, latitude(u).Radians())
+		}
+
+		// Construct a line segment CD that passes through W, and check that the
+		// maximum latitude of this segment matches the latitude of W.
+		c := Point{w.Sub(v.Mul(randomFloat64())).Normalize()}
+		d := Point{w.Add(v.Mul(randomFloat64())).Normalize()}
+		cdBound := rectBoundForPoints(c, d)
+		if !float64Near(latitude(w).Radians(), cdBound.Lat.Hi, rectErrorLat) {
+			t.Errorf("bound for line CD not near enough to the latitude of point %v. got %v, want %v",
+				w, cdBound.Lat.Hi, latitude(w).Radians())
+		}
+	}
+}
+
+func TestEdgeutilExpandForSubregions(t *testing.T) {
+	// Test the full and empty bounds.
+	if !ExpandForSubregions(FullRect()).IsFull() {
+		t.Errorf("Subregion Bound of full rect should be full")
+	}
+	if !ExpandForSubregions(EmptyRect()).IsEmpty() {
+		t.Errorf("Subregion Bound of empty rect should be empty")
+	}
+
+	tests := []struct {
+		xLat, xLng, yLat, yLng float64
+		wantFull               bool
+	}{
+		// Cases where the bound does not straddle the equator (but almost does),
+		// and spans nearly 180 degrees in longitude.
+		{3e-16, 0, 1e-14, math.Pi, true},
+		{9e-16, 0, 1e-14, math.Pi, false},
+		{1e-16, 7e-16, 1e-14, math.Pi, true},
+		{3e-16, 14e-16, 1e-14, math.Pi, false},
+		{1e-100, 14e-16, 1e-14, math.Pi, true},
+		{1e-100, 22e-16, 1e-14, math.Pi, false},
+		// Cases where the bound spans at most 90 degrees in longitude, and almost
+		// 180 degrees in latitude.  Note that DBL_EPSILON is about 2.22e-16, which
+		// implies that the double-precision value just below Pi/2 can be written as
+		// (math.Pi/2 - 2e-16).
+		{-math.Pi / 2, -1e-15, math.Pi/2 - 7e-16, 0, true},
+		{-math.Pi / 2, -1e-15, math.Pi/2 - 30e-16, 0, false},
+		{-math.Pi/2 + 4e-16, 0, math.Pi/2 - 2e-16, 1e-7, true},
+		{-math.Pi/2 + 30e-16, 0, math.Pi / 2, 1e-7, false},
+		{-math.Pi/2 + 4e-16, 0, math.Pi/2 - 4e-16, math.Pi / 2, true},
+		{-math.Pi / 2, 0, math.Pi/2 - 30e-16, math.Pi / 2, false},
+		// Cases where the bound straddles the equator and spans more than 90
+		// degrees in longitude.  These are the cases where the critical distance is
+		// between a corner of the bound and the opposite longitudinal edge.  Unlike
+		// the cases above, here the bound may contain nearly-antipodal points (to
+		// within 3.055 * DBL_EPSILON) even though the latitude and longitude ranges
+		// are both significantly less than (math.Pi - 3.055 * DBL_EPSILON).
+		{-math.Pi / 2, 0, math.Pi/2 - 1e-8, math.Pi - 1e-7, true},
+		{-math.Pi / 2, 0, math.Pi/2 - 1e-7, math.Pi - 1e-7, false},
+		{-math.Pi/2 + 1e-12, -math.Pi + 1e-4, math.Pi / 2, 0, true},
+		{-math.Pi/2 + 1e-11, -math.Pi + 1e-4, math.Pi / 2, 0, true},
+	}
+
+	for _, tc := range tests {
+		in := RectFromLatLng(LatLng{s1.Angle(tc.xLat), s1.Angle(tc.xLng)})
+		in = in.AddPoint(LatLng{s1.Angle(tc.yLat), s1.Angle(tc.yLng)})
+		got := ExpandForSubregions(in)
+
+		// Test that the bound is actually expanded.
+		if !got.Contains(in) {
+			t.Errorf("Subregion bound of (%f, %f, %f, %f) should contain original rect", tc.xLat, tc.xLng, tc.yLat, tc.yLng)
+		}
+		if in.Lat == validRectLatRange && in.Lat.ContainsInterval(got.Lat) {
+			t.Errorf("Subregion bound of (%f, %f, %f, %f) shouldn't be contained by original rect", tc.xLat, tc.xLng, tc.yLat, tc.yLng)
+		}
+
+		// We check the various situations where the bound contains
+		// nearly-antipodal points. The tests are organized into pairs where
+		// the two bounds are similar except that the first bound meets the
+		// nearly-antipodal criteria while the second does not.
+		if got.IsFull() != tc.wantFull {
+			t.Errorf("Subregion Bound of (%f, %f, %f, %f).IsFull should be %t", tc.xLat, tc.xLng, tc.yLat, tc.yLng, tc.wantFull)
+		}
+	}
+
+	rectTests := []struct {
+		xLat, xLng, yLat, yLng float64
+		wantRect               Rect
+	}{
+		{1.5, -math.Pi / 2, 1.5, math.Pi/2 - 2e-16, Rect{r1.Interval{1.5, 1.5}, s1.FullInterval()}},
+		{1.5, -math.Pi / 2, 1.5, math.Pi/2 - 7e-16, Rect{r1.Interval{1.5, 1.5}, s1.Interval{-math.Pi / 2, math.Pi/2 - 7e-16}}},
+		// Check for cases where the bound is expanded to include one of the poles
+		{-math.Pi/2 + 1e-15, 0, -math.Pi/2 + 1e-15, 0, Rect{r1.Interval{-math.Pi / 2, -math.Pi/2 + 1e-15}, s1.FullInterval()}},
+		{math.Pi/2 - 1e-15, 0, math.Pi/2 - 1e-15, 0, Rect{r1.Interval{math.Pi/2 - 1e-15, math.Pi / 2}, s1.FullInterval()}},
+	}
+
+	for _, tc := range rectTests {
+		// Now we test cases where the bound does not contain nearly-antipodal
+		// points, but it does contain points that are approximately 180 degrees
+		// apart in latitude.
+		in := RectFromLatLng(LatLng{s1.Angle(tc.xLat), s1.Angle(tc.xLng)})
+		in = in.AddPoint(LatLng{s1.Angle(tc.yLat), s1.Angle(tc.yLng)})
+		got := ExpandForSubregions(in)
+		if !rectsApproxEqual(got, tc.wantRect, rectErrorLat, rectErrorLng) {
+			t.Errorf("Subregion Bound of (%f, %f, %f, %f) = (%v) should be %v", tc.xLat, tc.xLng, tc.yLat, tc.yLng, got, tc.wantRect)
+		}
+	}
+}
+
+func TestEdgeutilIntersectsFace(t *testing.T) {
+	tests := []struct {
+		a    pointUVW
+		want bool
+	}{
+		{pointUVW{r3.Vector{2.05335e-06, 3.91604e-22, 2.90553e-06}}, false},
+		{pointUVW{r3.Vector{-3.91604e-22, -2.05335e-06, -2.90553e-06}}, false},
+		{pointUVW{r3.Vector{0.169258, -0.169258, 0.664013}}, false},
+		{pointUVW{r3.Vector{0.169258, -0.169258, -0.664013}}, false},
+		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 3.88578e-16}}, true},
+		{pointUVW{r3.Vector{-3.88578e-16, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, true},
+	}
+
+	for _, test := range tests {
+		if got := test.a.intersectsFace(); got != test.want {
+			t.Errorf("%v.intersectsFace() = %v, want %v", test.a, got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilIntersectsOppositeEdges(t *testing.T) {
+	tests := []struct {
+		a    pointUVW
+		want bool
+	}{
+		{pointUVW{r3.Vector{0.169258, -0.169258, 0.664013}}, false},
+		{pointUVW{r3.Vector{0.169258, -0.169258, -0.664013}}, false},
+
+		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), 0, -math.Sqrt(4.0 / 3.0)}}, true},
+		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), 0, math.Sqrt(4.0 / 3.0)}}, true},
+
+		{pointUVW{r3.Vector{-math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 1.66533453694e-16}}, false},
+		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0), -1.66533453694e-16}}, false},
+	}
+	for _, test := range tests {
+		if got := test.a.intersectsOppositeEdges(); got != test.want {
+			t.Errorf("%v.intersectsOppositeEdges() = %v, want %v", test.a, got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilExitAxis(t *testing.T) {
+	tests := []struct {
+		a    pointUVW
+		want axis
+	}{
+		{pointUVW{r3.Vector{0, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, axisU},
+		{pointUVW{r3.Vector{0, math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0)}}, axisU},
+		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV},
+		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), math.Sqrt(4.0 / 3.0), 0}}, axisV},
+		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 0}}, axisV},
+		{pointUVW{r3.Vector{1.67968702783622, 0, 0.870988820096491}}, axisV},
+		{pointUVW{r3.Vector{0, math.Sqrt2, math.Sqrt2}}, axisU},
+	}
+
+	for _, test := range tests {
+		if got := test.a.exitAxis(); got != test.want {
+			t.Errorf("%v.exitAxis() = %v, want %v", test.a, got, test.want)
+		}
+	}
+}
+
+func TestEdgeutilExitPoint(t *testing.T) {
+	tests := []struct {
+		a        pointUVW
+		exitAxis axis
+		want     r2.Point
+	}{
+		{pointUVW{r3.Vector{-3.88578058618805e-16, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, axisU, r2.Point{-1, 1}},
+		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV, r2.Point{-1, -1}},
+		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV, r2.Point{-1, 1}},
+		{pointUVW{r3.Vector{-6.66134e-16, math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0)}}, axisU, r2.Point{1, 1}},
+	}
+
+	for _, test := range tests {
+		if got := test.a.exitPoint(test.exitAxis); !r2PointsApproxEquals(got, test.want, epsilon) {
+			t.Errorf("%v.exitPoint() = %v, want %v", test.a, got, test.want)
+		}
+	}
+}
+
+// testClipToPaddedFace performs a comprehensive set of tests across all faces and
+// with random padding for the given points.
+//
+// We do this by defining an (x,y) coordinate system for the plane containing AB,
+// and converting points along the great circle AB to angles in the range
+// [-Pi, Pi]. We then accumulate the angle intervals spanned by each
+// clipped edge; the union over all 6 faces should approximately equal the
+// interval covered by the original edge.
+func testClipToPaddedFace(t *testing.T, a, b Point) {
+	a = Point{a.Normalize()}
+	b = Point{b.Normalize()}
+	if a.Vector == b.Mul(-1) {
+		return
+	}
+
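+	// norm is the normal of the great circle through A and B; aTan is the
+	// tangent direction at A along that circle. Together (a, aTan) forms the
+	// orthonormal frame (xAxis, yAxis) used to measure angles below.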
+	norm := Point{a.PointCross(b).Normalize()}
+	aTan := Point{norm.Cross(a.Vector)}
+
+	padding := 0.0
+	if !oneIn(10) {
+		padding = 1e-10 * math.Pow(1e-5, randomFloat64())
+	}
+
+	xAxis := a
+	yAxis := aTan
+
+	// Given the points A and B, we expect all angles generated from the clipping
+	// to fall within this range.
+	expectedAngles := s1.Interval{0, float64(a.Angle(b.Vector))}
+	if expectedAngles.IsInverted() {
+		expectedAngles = s1.Interval{expectedAngles.Hi, expectedAngles.Lo}
+	}
+	maxAngles := expectedAngles.Expanded(faceClipErrorRadians)
+	var actualAngles s1.Interval
+
+	for face := 0; face < 6; face++ {
+		aUV, bUV, intersects := ClipToPaddedFace(a, b, face, padding)
+		if !intersects {
+			continue
+		}
+
+		aClip := Point{faceUVToXYZ(face, aUV.X, aUV.Y).Normalize()}
+		bClip := Point{faceUVToXYZ(face, bUV.X, bUV.Y).Normalize()}
+
+		desc := fmt.Sprintf("on face %d, a=%v, b=%v, aClip=%v, bClip=%v,", face, a, b, aClip, bClip)
+
+		if got := math.Abs(aClip.Dot(norm.Vector)); got > faceClipErrorRadians {
+			t.Errorf("%s abs(%v.Dot(%v)) = %v, want <= %v", desc, aClip, norm, got, faceClipErrorRadians)
+		}
+		if got := math.Abs(bClip.Dot(norm.Vector)); got > faceClipErrorRadians {
+			t.Errorf("%s abs(%v.Dot(%v)) = %v, want <= %v", desc, bClip, norm, got, faceClipErrorRadians)
+		}
+
+		if float64(aClip.Angle(a.Vector)) > faceClipErrorRadians {
+			if got := math.Max(math.Abs(aUV.X), math.Abs(aUV.Y)); !float64Eq(got, 1+padding) {
+				t.Errorf("%s the largest component of %v = %v, want %v", desc, aUV, got, 1+padding)
+			}
+		}
+		if float64(bClip.Angle(b.Vector)) > faceClipErrorRadians {
+			if got := math.Max(math.Abs(bUV.X), math.Abs(bUV.Y)); !float64Eq(got, 1+padding) {
+				t.Errorf("%s the largest component of %v = %v, want %v", desc, bUV, got, 1+padding)
+			}
+		}
+
+		aAngle := math.Atan2(aClip.Dot(yAxis.Vector), aClip.Dot(xAxis.Vector))
+		bAngle := math.Atan2(bClip.Dot(yAxis.Vector), bClip.Dot(xAxis.Vector))
+
+		// Rounding errors may cause bAngle to be slightly less than aAngle.
+		// We handle this by swapping the interval endpoints if it comes out
+		// inverted, which is okay since the interval length is much less
+		// than math.Pi.
+		faceAngles := s1.IntervalFromEndpoints(aAngle, bAngle)
+		if faceAngles.IsInverted() {
+			faceAngles = s1.Interval{faceAngles.Hi, faceAngles.Lo}
+		}
+		if !maxAngles.ContainsInterval(faceAngles) {
+			t.Errorf("%s %v.ContainsInterval(%v) = false, but should have contained this interval", desc, maxAngles, faceAngles)
+		}
+		actualAngles = actualAngles.Union(faceAngles)
+	}
+	if !actualAngles.Expanded(faceClipErrorRadians).ContainsInterval(expectedAngles) {
+		t.Errorf("the union of the clipped angle intervals should contain the expected angle interval")
+	}
+}
+
+func TestEdgeutilFaceClipping(t *testing.T) {
+	// Start with a few simple cases.
+	// An edge that is entirely contained within one cube face:
+	testClipToPaddedFace(t, Point{r3.Vector{1, -0.5, -0.5}}, Point{r3.Vector{1, 0.5, 0.5}})
+	testClipToPaddedFace(t, Point{r3.Vector{1, 0.5, 0.5}}, Point{r3.Vector{1, -0.5, -0.5}})
+	// An edge that crosses one cube edge:
+	testClipToPaddedFace(t, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 1, 0}})
+	testClipToPaddedFace(t, Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}})
+	// An edge that crosses two opposite edges of face 0:
+	testClipToPaddedFace(t, Point{r3.Vector{0.75, 0, -1}}, Point{r3.Vector{0.75, 0, 1}})
+	testClipToPaddedFace(t, Point{r3.Vector{0.75, 0, 1}}, Point{r3.Vector{0.75, 0, -1}})
+	// An edge that crosses two adjacent edges of face 2:
+	testClipToPaddedFace(t, Point{r3.Vector{1, 0, 0.75}}, Point{r3.Vector{0, 1, 0.75}})
+	testClipToPaddedFace(t, Point{r3.Vector{0, 1, 0.75}}, Point{r3.Vector{1, 0, 0.75}})
+	// An edge that crosses three cube edges (four faces):
+	testClipToPaddedFace(t, Point{r3.Vector{1, 0.9, 0.95}}, Point{r3.Vector{-1, 0.95, 0.9}})
+	testClipToPaddedFace(t, Point{r3.Vector{-1, 0.95, 0.9}}, Point{r3.Vector{1, 0.9, 0.95}})
+
+	// Comprehensively test edges that are difficult to handle, especially those
+	// that nearly follow one of the 12 cube edges.
+	biunit := r2.Rect{r1.Interval{-1, 1}, r1.Interval{-1, 1}}
+
+	for i := 0; i < 1000; i++ {
+		// Choose two adjacent cube corners P and Q.
+		face := randomUniformInt(6)
+		i := randomUniformInt(4)
+		j := (i + 1) & 3
+		p := Point{faceUVToXYZ(face, biunit.Vertices()[i].X, biunit.Vertices()[i].Y)}
+		q := Point{faceUVToXYZ(face, biunit.Vertices()[j].X, biunit.Vertices()[j].Y)}
+
+		// Now choose two points that are nearly in the plane of PQ, preferring
+		// points that are near cube corners, face midpoints, or edge midpoints.
+		a := perturbedCornerOrMidpoint(p, q)
+		b := perturbedCornerOrMidpoint(p, q)
+		testClipToPaddedFace(t, a, b)
+	}
+}
+
+// getFraction returns the fraction t of the given point X on the line AB such that
+// x = (1-t)*a + t*b. Returns 0 if A = B.
+func getFraction(t *testing.T, x, a, b r2.Point) float64 {
+	// A bound for the error in edge clipping plus the error in the calculation
+	// (which is similar to EdgeIntersectsRect).
+	errorDist := (edgeClipErrorUVDist + intersectsRectErrorUVDist)
+	if a == b {
+		return 0.0
+	}
+	dir := b.Sub(a).Normalize()
+	if got := math.Abs(x.Sub(a).Dot(dir.Ortho())); got > errorDist {
+		t.Errorf("getFraction(%v, %v, %v) = %v, which exceeds errorDist %v", x, a, b, got, errorDist)
+	}
+	return x.Sub(a).Dot(dir)
+}
+
+// randomPointFromInterval returns a randomly selected point from the given interval
+// with one of three possible choices. All cases have reasonable probability for any
+// interval. The choices are: randomly choose a value inside the interval, choose a
+// value outside the interval, or select one of the two endpoints.
+func randomPointFromInterval(clip r1.Interval) float64 {
+	if oneIn(5) {
+		if oneIn(2) {
+			return clip.Lo
+		}
+		return clip.Hi
+	}
+
+	switch randomUniformInt(3) {
+	case 0:
+		return clip.Lo - randomFloat64()
+	case 1:
+		return clip.Hi + randomFloat64()
+	default:
+		return clip.Lo + randomFloat64()*clip.Length()
+	}
+}
+
+// Given a rectangle "clip", choose a point that may lie in the rectangle
+// interior, along an extended edge, exactly at a vertex, or in one of the
+// eight regions exterior to "clip" that are separated by its extended edges.
+// Also sometimes return points that are exactly on one of the extended
+// diagonals of "clip". All cases are reasonably likely to occur for any
+// given rectangle "clip".
+func chooseRectEndpoint(clip r2.Rect) r2.Point {
+	if oneIn(10) {
+		// Return a point on one of the two extended diagonals.
+		diag := randomUniformInt(2)
+		t := randomUniformFloat64(-1, 2)
+		return clip.Vertices()[diag].Mul(1 - t).Add(clip.Vertices()[diag+2].Mul(t))
+	}
+	return r2.Point{randomPointFromInterval(clip.X), randomPointFromInterval(clip.Y)}
+}
+
+// Choose a random point in the rectangle defined by points A and B, sometimes
+// returning a point on the edge AB or the points A and B themselves.
+func choosePointInRect(a, b r2.Point) r2.Point {
+	if oneIn(5) {
+		if oneIn(2) {
+			return a
+		}
+		return b
+	}
+
+	if oneIn(3) {
+		return a.Add(b.Sub(a).Mul(randomFloat64()))
+	}
+	return r2.Point{randomUniformFloat64(a.X, b.X), randomUniformFloat64(a.Y, b.Y)}
+}
+
+// Given a point P representing a possibly clipped endpoint A of an edge AB,
+// verify that clip contains P, and that if clipping occurred (i.e., P != A)
+// then P is on the boundary of clip.
+func checkPointOnBoundary(t *testing.T, p, a r2.Point, clip r2.Rect) {
+	if got := clip.ContainsPoint(p); !got {
+		t.Errorf("%v.ContainsPoint(%v) = %v, want true", clip, p, got)
+	}
+	if p != a {
+		p1 := r2.Point{math.Nextafter(p.X, a.X), math.Nextafter(p.Y, a.Y)}
+		if got := clip.ContainsPoint(p1); got {
+			t.Errorf("%v.ContainsPoint(%v) = %v, want false", clip, p1, got)
+		}
+	}
+}
+
+func TestEdgeutilEdgeClipping(t *testing.T) {
+	// A bound for the error in edge clipping plus the error in the
+	// EdgeIntersectsRect calculation below.
+	errorDist := (edgeClipErrorUVDist + intersectsRectErrorUVDist)
+	testRects := []r2.Rect{
+		// Test clipping against random rectangles.
+		r2.RectFromPoints(
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
+		r2.RectFromPoints(
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
+		r2.RectFromPoints(
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
+		r2.RectFromPoints(
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
+		r2.RectFromPoints(
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
+			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
+
+		// Also clip against one-dimensional, singleton, and empty rectangles.
+		r2.Rect{r1.Interval{-0.7, -0.7}, r1.Interval{0.3, 0.35}},
+		r2.Rect{r1.Interval{0.2, 0.5}, r1.Interval{0.3, 0.3}},
+		r2.Rect{r1.Interval{-0.7, 0.3}, r1.Interval{0, 0}},
+		r2.RectFromPoints(r2.Point{0.3, 0.8}),
+		r2.EmptyRect(),
+	}
+
+	for _, r := range testRects {
+		for i := 0; i < 1000; i++ {
+			a := chooseRectEndpoint(r)
+			b := chooseRectEndpoint(r)
+
+			aClip, bClip, intersects := ClipEdge(a, b, r)
+			if !intersects {
+				if edgeIntersectsRect(a, b, r.ExpandedByMargin(-errorDist)) {
+					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = true, want false", a, b, r, -errorDist)
+				}
+			} else {
+				if !edgeIntersectsRect(a, b, r.ExpandedByMargin(errorDist)) {
+					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = false, want true", a, b, r, errorDist)
+				}
+
+				// Check that the clipped points lie on the edge AB, and
+				// that the points have the expected order along the segment AB.
+				if gotA, gotB := getFraction(t, aClip, a, b), getFraction(t, bClip, a, b); gotA > gotB {
+					t.Errorf("getFraction(%v,%v,%v) = %v, getFraction(%v, %v, %v) = %v; %v < %v = false, want true", aClip, a, b, gotA, bClip, a, b, gotB, gotA, gotB)
+				}
+
+				// Check that the clipped portion of AB is as large as possible.
+				checkPointOnBoundary(t, aClip, a, r)
+				checkPointOnBoundary(t, bClip, b, r)
+			}
+
+			// Choose a random initial bound to pass to clipEdgeBound.
+			initialClip := r2.RectFromPoints(choosePointInRect(a, b), choosePointInRect(a, b))
+			bound := clippedEdgeBound(a, b, initialClip)
+			if bound.IsEmpty() {
+				// Precondition of clipEdgeBound not met
+				continue
+			}
+			maxBound := bound.Intersection(r)
+			if bound, intersects := clipEdgeBound(a, b, r, bound); !intersects {
+				if edgeIntersectsRect(a, b, maxBound.ExpandedByMargin(-errorDist)) {
+					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = true, want false", a, b, maxBound, -errorDist)
+				}
+			} else {
+				if !edgeIntersectsRect(a, b, maxBound.ExpandedByMargin(errorDist)) {
+					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = false, want true", a, b, maxBound, errorDist)
+				}
+				// check that the bound is as large as possible.
+				ai := 0
+				if a.X > b.X {
+					ai = 1
+				}
+				aj := 0
+				if a.Y > b.Y {
+					aj = 1
+				}
+				checkPointOnBoundary(t, bound.VertexIJ(ai, aj), a, maxBound)
+				checkPointOnBoundary(t, bound.VertexIJ(1-ai, 1-aj), b, maxBound)
+			}
+		}
+	}
+}
+
+func TestCheckDistance(t *testing.T) {
+	// Uncomment once Distance / UpdateMinDistance are implemented.
+	//var zeroChordAngle s1.ChordAngle
+	tests := []struct {
+		x, a, b r3.Vector
+		distRad float64
+		want    r3.Vector
+	}{
+		{
+			x:       r3.Vector{1, 0, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: 0,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{0, 1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: 0,
+			want:    r3.Vector{0, 1, 0},
+		},
+		{
+			x:       r3.Vector{1, 3, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: 0,
+			want:    r3.Vector{1, 3, 0},
+		},
+		{
+			x:       r3.Vector{0, 0, 1},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{0, 0, -1},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{-1, -1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: 0.75 * math.Pi,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{0, 1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{1, 1, 0},
+			distRad: math.Pi / 4,
+			want:    r3.Vector{1, 1, 0},
+		},
+		{
+			x:       r3.Vector{0, -1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{1, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{0, -1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{-1, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{1, 0, 0},
+		},
+		{
+			x:       r3.Vector{-1, -1, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{-1, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{-1, 1, 0},
+		},
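+		// For X=(1,1,1), the closest point on the arc from (1,0,0) to (0,1,0)
+		// lies along (1,1,0); the angle d between the normalized points
+		// satisfies cos d = 2/sqrt(6), i.e. d = asin(sqrt(1/3)). The next
+		// case (X=(1,1,-1)) is symmetric.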
+		{
+			x:       r3.Vector{1, 1, 1},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: math.Asin(math.Sqrt(1.0 / 3.0)),
+			want:    r3.Vector{1, 1, 0},
+		},
+		{
+			x:       r3.Vector{1, 1, -1},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{0, 1, 0},
+			distRad: math.Asin(math.Sqrt(1.0 / 3.0)),
+			want:    r3.Vector{1, 1, 0},
+		},
+		{
+			x:       r3.Vector{-1, 0, 0},
+			a:       r3.Vector{1, 1, 0},
+			b:       r3.Vector{1, 1, 0},
+			distRad: 0.75 * math.Pi,
+			want:    r3.Vector{1, 1, 0},
+		},
+		{
+			x:       r3.Vector{0, 0, -1},
+			a:       r3.Vector{1, 1, 0},
+			b:       r3.Vector{1, 1, 0},
+			distRad: math.Pi / 2,
+			want:    r3.Vector{1, 1, 0},
+		},
+		{
+			x:       r3.Vector{-1, 0, 0},
+			a:       r3.Vector{1, 0, 0},
+			b:       r3.Vector{1, 0, 0},
+			distRad: math.Pi,
+			want:    r3.Vector{1, 0, 0},
+		},
+	}
+
+	for _, test := range tests {
+		x := Point{test.x.Normalize()}
+		a := Point{test.a.Normalize()}
+		b := Point{test.b.Normalize()}
+		want := Point{test.want.Normalize()}
+
+		if d := DistanceFromSegment(x, a, b).Radians(); !float64Near(d, test.distRad, 1e-15) {
+			t.Errorf("DistanceFromSegment(%v, %v, %v) = %v, want %v", x, a, b, d, test.distRad)
+		}
+
+		closest := ClosestPoint(x, a, b)
+		if !closest.ApproxEqual(want) {
+			t.Errorf("ClosestPoint(%v, %v, %v) = %v, want %v", x, a, b, closest, want)
+		}
+
+		// Uncomment these once Distance / UpdateMinDistance are implemented.
+		//minDistance := zeroChordAngle
+		//if minDistance, ok := UpdateMinDistance(x, a, b, minDistance); ok {
+		//	t.Errorf("UpdateMinDistance(%v, %v, %v, %v) = %v, want %v", x, a, b, zeroChordAngle, minDistance, zeroChordAngle)
+		//}
+		//
+		//minDistance = s1.InfChordAngle()
+		//if minDistance, ok := UpdateMinDistance(x, a, b, minDistance); !ok {
+		//	t.Errorf("UpdateMinDistance(%v, %v, %v, %v) = %v, want %v", x, a, b, s1.InfChordAngle(), minDistance, s1.InfChordAngle())
+		//}
+		//
+		//if !float64Near(test.distRad, minDistance.Angle().Radians(), 1e-15) {
+		//	t.Errorf("%v != %v", minDistance.Angle().Radians(), test.distRad)
+		//}
+	}
+}
+
+func TestEdgeUtilWedges(t *testing.T) {
+	// For simplicity, all of these tests use an origin of (0, 0, 1).
+	// This shouldn't matter as long as the lower-level primitives are
+	// implemented correctly.
+	ab1 := Point{r3.Vector{0, 0, 1}}
+
+	tests := []struct {
+		desc           string
+		a0, a1, b0, b1 Point
+		contains       bool
+		intersects     bool
+		relation       WedgeRel
+	}{
+		{
+			desc:       "Intersection in one wedge",
+			a0:         Point{r3.Vector{-1, 0, 10}},
+			a1:         Point{r3.Vector{1, 2, 10}},
+			b0:         Point{r3.Vector{0, 1, 10}},
+			b1:         Point{r3.Vector{1, -2, 10}},
+			contains:   false,
+			intersects: true,
+			relation:   WedgeProperlyOverlaps,
+		},
+		{
+			desc:       "Intersection in two wedges",
+			a0:         Point{r3.Vector{-1, -1, 10}},
+			a1:         Point{r3.Vector{1, -1, 10}},
+			b0:         Point{r3.Vector{1, 0, 10}},
+			b1:         Point{r3.Vector{-1, 1, 10}},
+			contains:   false,
+			intersects: true,
+			relation:   WedgeProperlyOverlaps,
+		},
+		{
+			desc:       "Normal containment",
+			a0:         Point{r3.Vector{-1, -1, 10}},
+			a1:         Point{r3.Vector{1, -1, 10}},
+			b0:         Point{r3.Vector{-1, 0, 10}},
+			b1:         Point{r3.Vector{1, 0, 10}},
+			contains:   true,
+			intersects: true,
+			relation:   WedgeProperlyContains,
+		},
+		{
+			desc:       "Containment with equality on one side",
+			a0:         Point{r3.Vector{2, 1, 10}},
+			a1:         Point{r3.Vector{-1, -1, 10}},
+			b0:         Point{r3.Vector{2, 1, 10}},
+			b1:         Point{r3.Vector{1, -5, 10}},
+			contains:   true,
+			intersects: true,
+			relation:   WedgeProperlyContains,
+		},
+		{
+			desc:       "Containment with equality on the other side",
+			a0:         Point{r3.Vector{2, 1, 10}},
+			a1:         Point{r3.Vector{-1, -1, 10}},
+			b0:         Point{r3.Vector{1, -2, 10}},
+			b1:         Point{r3.Vector{-1, -1, 10}},
+			contains:   true,
+			intersects: true,
+			relation:   WedgeProperlyContains,
+		},
+		{
+			desc:       "Containment with equality on both sides",
+			a0:         Point{r3.Vector{-2, 3, 10}},
+			a1:         Point{r3.Vector{4, -5, 10}},
+			b0:         Point{r3.Vector{-2, 3, 10}},
+			b1:         Point{r3.Vector{4, -5, 10}},
+			contains:   true,
+			intersects: true,
+			relation:   WedgeEquals,
+		},
+		{
+			desc:       "Disjoint with equality on one side",
+			a0:         Point{r3.Vector{-2, 3, 10}},
+			a1:         Point{r3.Vector{4, -5, 10}},
+			b0:         Point{r3.Vector{4, -5, 10}},
+			b1:         Point{r3.Vector{-2, -3, 10}},
+			contains:   false,
+			intersects: false,
+			relation:   WedgeIsDisjoint,
+		},
+		{
+			desc:       "Disjoint with equality on the other side",
+			a0:         Point{r3.Vector{-2, 3, 10}},
+			a1:         Point{r3.Vector{0, 5, 10}},
+			b0:         Point{r3.Vector{4, -5, 10}},
+			b1:         Point{r3.Vector{-2, 3, 10}},
+			contains:   false,
+			intersects: false,
+			relation:   WedgeIsDisjoint,
+		},
+		{
+			desc:       "Disjoint with equality on both sides",
+			a0:         Point{r3.Vector{-2, 3, 10}},
+			a1:         Point{r3.Vector{4, -5, 10}},
+			b0:         Point{r3.Vector{4, -5, 10}},
+			b1:         Point{r3.Vector{-2, 3, 10}},
+			contains:   false,
+			intersects: false,
+			relation:   WedgeIsDisjoint,
+		},
+		{
+			desc:       "B contains A with equality on one side",
+			a0:         Point{r3.Vector{2, 1, 10}},
+			a1:         Point{r3.Vector{1, -5, 10}},
+			b0:         Point{r3.Vector{2, 1, 10}},
+			b1:         Point{r3.Vector{-1, -1, 10}},
+			contains:   false,
+			intersects: true,
+			relation:   WedgeIsProperlyContained,
+		},
+
+		{
+			desc:       "B contains A with equality on the other side",
+			a0:         Point{r3.Vector{2, 1, 10}},
+			a1:         Point{r3.Vector{1, -5, 10}},
+			b0:         Point{r3.Vector{-2, 1, 10}},
+			b1:         Point{r3.Vector{1, -5, 10}},
+			contains:   false,
+			intersects: true,
+			relation:   WedgeIsProperlyContained,
+		},
+	}
+
+	for _, test := range tests {
+		if got := WedgeContains(test.a0, ab1, test.a1, test.b0, test.b1); got != test.contains {
+			t.Errorf("%s: WedgeContains(%v, %v, %v, %v, %v) = %t, want %t", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.contains)
+		}
+		if got := WedgeIntersects(test.a0, ab1, test.a1, test.b0, test.b1); got != test.intersects {
+			t.Errorf("%s: WedgeIntersects(%v, %v, %v, %v, %v) = %t, want %t", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.intersects)
+		}
+		if got := WedgeRelation(test.a0, ab1, test.a1, test.b0, test.b1); got != test.relation {
+			t.Errorf("%s: WedgeRelation(%v, %v, %v, %v, %v) = %v, want %v", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.relation)
+		}
+	}
+}

+ 2 - 1
vendor/github.com/golang/geo/s2/latlng.go

@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"math"
 
+	"github.com/golang/geo/r3"
 	"github.com/golang/geo/s1"
 )
 
@@ -87,7 +88,7 @@ func PointFromLatLng(ll LatLng) Point {
 	phi := ll.Lat.Radians()
 	theta := ll.Lng.Radians()
 	cosphi := math.Cos(phi)
-	return PointFromCoords(math.Cos(theta)*cosphi, math.Sin(theta)*cosphi, math.Sin(phi))
+	return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
 }
 
 // LatLngFromPoint returns an LatLng for a given Point.

+ 155 - 0
vendor/github.com/golang/geo/s2/latlng_test.go

@@ -0,0 +1,155 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/s1"
+)
+
+func TestLatLngNormalized(t *testing.T) {
+	tests := []struct {
+		desc string
+		pos  LatLng
+		want LatLng
+	}{
+		{
+			desc: "Valid lat/lng",
+			pos:  LatLngFromDegrees(21.8275043, 151.1979675),
+			want: LatLngFromDegrees(21.8275043, 151.1979675),
+		},
+		{
+			desc: "Valid lat/lng in the West",
+			pos:  LatLngFromDegrees(21.8275043, -151.1979675),
+			want: LatLngFromDegrees(21.8275043, -151.1979675),
+		},
+		{
+			desc: "Beyond the North pole",
+			pos:  LatLngFromDegrees(95, 151.1979675),
+			want: LatLngFromDegrees(90, 151.1979675),
+		},
+		{
+			desc: "Beyond the South pole",
+			pos:  LatLngFromDegrees(-95, 151.1979675),
+			want: LatLngFromDegrees(-90, 151.1979675),
+		},
+		{
+			desc: "At the date line (from East)",
+			pos:  LatLngFromDegrees(21.8275043, 180),
+			want: LatLngFromDegrees(21.8275043, 180),
+		},
+		{
+			desc: "At the date line (from West)",
+			pos:  LatLngFromDegrees(21.8275043, -180),
+			want: LatLngFromDegrees(21.8275043, -180),
+		},
+		{
+			desc: "Across the date line going East",
+			pos:  LatLngFromDegrees(21.8275043, 181.0012),
+			want: LatLngFromDegrees(21.8275043, -178.9988),
+		},
+		{
+			desc: "Across the date line going West",
+			pos:  LatLngFromDegrees(21.8275043, -181.0012),
+			want: LatLngFromDegrees(21.8275043, 178.9988),
+		},
+		{
+			desc: "All wrong",
+			pos:  LatLngFromDegrees(256, 256),
+			want: LatLngFromDegrees(90, -104),
+		},
+	}
+
+	for _, test := range tests {
+		got := test.pos.Normalized()
+		if !got.IsValid() {
+			t.Errorf("%s: A LatLng should be valid after normalization but isn't: %v", test.desc, got)
+		} else if got.Distance(test.want) > 1e-13*s1.Degree {
+			t.Errorf("%s: %v.Normalized() = %v, want %v", test.desc, test.pos, got, test.want)
+		}
+	}
+}
+
+func TestLatLngString(t *testing.T) {
+	const expected string = "[1.4142136, -2.2360680]"
+	s := LatLngFromDegrees(math.Sqrt2, -math.Sqrt(5)).String()
+	if s != expected {
+		t.Errorf("LatLng{√2, -√5}.String() = %q, want %q", s, expected)
+	}
+}
+
+func TestLatLngPointConversion(t *testing.T) {
+	// All test cases here have been verified against the C++ S2 implementation.
+	tests := []struct {
+		lat, lng float64 // degrees
+		x, y, z  float64
+	}{
+		{0, 0, 1, 0, 0},
+		{90, 0, 6.12323e-17, 0, 1},
+		{-90, 0, 6.12323e-17, 0, -1},
+		{0, 180, -1, 1.22465e-16, 0},
+		{0, -180, -1, -1.22465e-16, 0},
+		{90, 180, -6.12323e-17, 7.4988e-33, 1},
+		{90, -180, -6.12323e-17, -7.4988e-33, 1},
+		{-90, 180, -6.12323e-17, 7.4988e-33, -1},
+		{-90, -180, -6.12323e-17, -7.4988e-33, -1},
+		{-81.82750430354997, 151.19796752929685,
+			-0.12456788151479525, 0.0684875268284729, -0.989844584550441},
+	}
+	for _, test := range tests {
+		ll := LatLngFromDegrees(test.lat, test.lng)
+		p := PointFromLatLng(ll)
+		// TODO(mikeperrow): Port Point.ApproxEquals, then use here.
+		if !float64Eq(p.X, test.x) || !float64Eq(p.Y, test.y) || !float64Eq(p.Z, test.z) {
+			t.Errorf("PointFromLatLng({%v°, %v°}) = %v, want %v, %v, %v",
+				test.lat, test.lng, p, test.x, test.y, test.z)
+		}
+		ll = LatLngFromPoint(p)
+		// We need to be careful here, since if the latitude is +/- 90, any longitude
+		// is now a valid conversion.
+		isPolar := (test.lat == 90 || test.lat == -90)
+		if !float64Eq(ll.Lat.Degrees(), test.lat) ||
+			(!isPolar && (!float64Eq(ll.Lng.Degrees(), test.lng))) {
+			t.Errorf("Converting ll %v,%v to point (%v) and back gave %v.",
+				test.lat, test.lng, p, ll)
+		}
+	}
+}
+
+func TestLatLngDistance(t *testing.T) {
+	// Based on C++ S2LatLng::TestDistance.
+	tests := []struct {
+		lat1, lng1, lat2, lng2 float64
+		want, tolerance        float64
+	}{
+		{90, 0, 90, 0, 0, 0},
+		{-37, 25, -66, -155, 77, 1e-13},
+		{0, 165, 0, -80, 115, 1e-13},
+		{47, -127, -47, 53, 180, 2e-6},
+	}
+	for _, test := range tests {
+		ll1 := LatLngFromDegrees(test.lat1, test.lng1)
+		ll2 := LatLngFromDegrees(test.lat2, test.lng2)
+		d := ll1.Distance(ll2).Degrees()
+		if math.Abs(d-test.want) > test.tolerance {
+			t.Errorf("LatLng{%v, %v}.Distance(LatLng{%v, %v}).Degrees() = %v, want %v",
+				test.lat1, test.lng1, test.lat2, test.lng2, d, test.want)
+		}
+	}
+}

+ 32 - 12
vendor/github.com/golang/geo/s2/loop.go

@@ -189,17 +189,17 @@ func (l *Loop) initBound() {
 }
 
 // ContainsOrigin reports true if this loop contains s2.OriginPoint().
-func (l Loop) ContainsOrigin() bool {
+func (l *Loop) ContainsOrigin() bool {
 	return l.originInside
 }
 
 // HasInterior returns true because all loops have an interior.
-func (l Loop) HasInterior() bool {
+func (l *Loop) HasInterior() bool {
 	return true
 }
 
 // NumEdges returns the number of edges in this shape.
-func (l Loop) NumEdges() int {
+func (l *Loop) NumEdges() int {
 	if l.isEmptyOrFull() {
 		return 0
 	}
@@ -207,52 +207,72 @@ func (l Loop) NumEdges() int {
 }
 
 // Edge returns the endpoints for the given edge index.
-func (l Loop) Edge(i int) (a, b Point) {
+func (l *Loop) Edge(i int) (a, b Point) {
 	return l.Vertex(i), l.Vertex(i + 1)
 }
 
+// dimension returns the dimension of the geometry represented by this Loop.
+func (l *Loop) dimension() dimension { return polygonGeometry }
+
+// numChains reports the number of contiguous edge chains in the Loop.
+func (l *Loop) numChains() int {
+	if l.isEmptyOrFull() {
+		return 0
+	}
+	return 1
+}
+
+// chainStart returns the id of the first edge in the i-th edge chain in this Loop.
+func (l *Loop) chainStart(i int) int {
+	if i == 0 {
+		return 0
+	}
+
+	return l.NumEdges()
+}
+
 // IsEmpty reports true if this is the special "empty" loop that contains no points.
-func (l Loop) IsEmpty() bool {
+func (l *Loop) IsEmpty() bool {
 	return l.isEmptyOrFull() && !l.ContainsOrigin()
 }
 
 // IsFull reports true if this is the special "full" loop that contains all points.
-func (l Loop) IsFull() bool {
+func (l *Loop) IsFull() bool {
 	return l.isEmptyOrFull() && l.ContainsOrigin()
 }
 
 // isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops.
-func (l Loop) isEmptyOrFull() bool {
+func (l *Loop) isEmptyOrFull() bool {
 	return len(l.vertices) == 1
 }
 
 // RectBound returns a tight bounding rectangle. If the loop contains the point,
 // the bound also contains it.
-func (l Loop) RectBound() Rect {
+func (l *Loop) RectBound() Rect {
 	return l.bound
 }
 
 // CapBound returns a bounding cap that may have more padding than the corresponding
 // RectBound. The bound is conservative such that if the loop contains a point P,
 // the bound also contains it.
-func (l Loop) CapBound() Cap {
+func (l *Loop) CapBound() Cap {
 	return l.bound.CapBound()
 }
 
 // Vertex returns the vertex for the given index. For convenience, the vertex indices
 // wrap automatically for methods that do index math such as Edge.
 // i.e., Vertex(NumEdges() + n) is the same as Vertex(n).
-func (l Loop) Vertex(i int) Point {
+func (l *Loop) Vertex(i int) Point {
 	return l.vertices[i%len(l.vertices)]
 }
 
 // Vertices returns the vertices in the loop.
-func (l Loop) Vertices() []Point {
+func (l *Loop) Vertices() []Point {
 	return l.vertices
 }
 
 // ContainsPoint returns true if the loop contains the point.
-func (l Loop) ContainsPoint(p Point) bool {
+func (l *Loop) ContainsPoint(p Point) bool {
 	// TODO(sbeckman): Move to bruteForceContains and update with ShapeIndex when available.
 	// Empty and full loops don't need a special case, but invalid loops with
 	// zero vertices do, so we might as well handle them all at once.

+ 533 - 0
vendor/github.com/golang/geo/s2/loop_test.go

@@ -0,0 +1,533 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+var (
+	// The northern hemisphere, defined using two pairs of antipodal points.
+	northHemi = LoopFromPoints(parsePoints("0:-180, 0:-90, 0:0, 0:90"))
+
+	// The northern hemisphere, defined using three points 120 degrees apart.
+	northHemi3 = LoopFromPoints(parsePoints("0:-180, 0:-60, 0:60"))
+
+	// The southern hemisphere, defined using two pairs of antipodal points.
+	southHemi = LoopFromPoints(parsePoints("0:90, 0:0, 0:-90, 0:-180"))
+
+	// The western hemisphere, defined using two pairs of antipodal points.
+	westHemi = LoopFromPoints(parsePoints("0:-180, -90:0, 0:0, 90:0"))
+
+	// The eastern hemisphere, defined using two pairs of antipodal points.
+	eastHemi = LoopFromPoints(parsePoints("90:0, 0:0, -90:0, 0:-180"))
+
+	// The "near" hemisphere, defined using two pairs of antipodal points.
+	nearHemi = LoopFromPoints(parsePoints("0:-90, -90:0, 0:90, 90:0"))
+
+	// The "far" hemisphere, defined using two pairs of antipodal points.
+	farHemi = LoopFromPoints(parsePoints("90:0, 0:90, -90:0, 0:-90"))
+
+	// A spiral stripe that slightly over-wraps the equator.
+	candyCane = LoopFromPoints(parsePoints("-20:150, -20:-70, 0:70, 10:-150, 10:70, -10:-70"))
+
+	// A small clockwise loop in the northern & eastern hemispheres.
+	smallNECW = LoopFromPoints(parsePoints("35:20, 45:20, 40:25"))
+
+	// Loop around the north pole at 80 degrees.
+	arctic80 = LoopFromPoints(parsePoints("80:-150, 80:-30, 80:90"))
+
+	// Loop around the south pole at 80 degrees.
+	antarctic80 = LoopFromPoints(parsePoints("-80:120, -80:0, -80:-120"))
+
+	// A completely degenerate triangle along the equator that RobustCCW()
+	// considers to be CCW.
+	lineTriangle = LoopFromPoints(parsePoints("0:1, 0:2, 0:3"))
+
+	// A nearly-degenerate CCW chevron near the equator with very long sides
+	// (about 80 degrees).  Its area is less than 1e-640, which is too small
+	// to represent in double precision.
+	skinnyChevron = LoopFromPoints(parsePoints("0:0, -1e-320:80, 0:1e-320, 1e-320:80"))
+
+	// A diamond-shaped loop around the point 0:180.
+	loopA = LoopFromPoints(parsePoints("0:178, -1:180, 0:-179, 1:-180"))
+
+	// Like loopA, but the vertices are at leaf cell centers.
+	snappedLoopA = LoopFromPoints([]Point{
+		CellIDFromLatLng(parseLatLngs("0:178")[0]).Point(),
+		CellIDFromLatLng(parseLatLngs("-1:180")[0]).Point(),
+		CellIDFromLatLng(parseLatLngs("0:-179")[0]).Point(),
+		CellIDFromLatLng(parseLatLngs("1:-180")[0]).Point(),
+	})
+
+	// A different diamond-shaped loop around the point 0:180.
+	loopB = LoopFromPoints(parsePoints("0:179, -1:180, 0:-178, 1:-180"))
+
+	// The intersection of A and B.
+	aIntersectB = LoopFromPoints(parsePoints("0:179, -1:180, 0:-179, 1:-180"))
+
+	// The union of A and B.
+	aUnionB = LoopFromPoints(parsePoints("0:178, -1:180, 0:-178, 1:-180"))
+
+	// A minus B (concave).
+	aMinusB = LoopFromPoints(parsePoints("0:178, -1:180, 0:179, 1:-180"))
+
+	// B minus A (concave).
+	bMinusA = LoopFromPoints(parsePoints("0:-179, -1:180, 0:-178, 1:-180"))
+
+	// A shape gotten from A by adding a triangle to one edge, and
+	// subtracting a triangle from the opposite edge.
+	loopC = LoopFromPoints(parsePoints("0:178, 0:180, -1:180, 0:-179, 1:-179, 1:-180"))
+
+	// A shape gotten from A by adding a triangle to one edge, and
+	// adding another triangle to the opposite edge.
+	loopD = LoopFromPoints(parsePoints("0:178, -1:178, -1:180, 0:-179, 1:-179, 1:-180"))
+
+	//   3------------2
+	//   |            |               ^
+	//   |  7-8  b-c  |               |
+	//   |  | |  | |  |      Latitude |
+	//   0--6-9--a-d--1               |
+	//   |  | |       |               |
+	//   |  f-e       |               +----------->
+	//   |            |                 Longitude
+	//   4------------5
+	//
+	// Important: It is not okay to skip over collinear vertices when
+	// defining these loops (e.g. to define loop E as "0,1,2,3") because S2
+	// uses symbolic perturbations to ensure that no three vertices are
+	// *ever* considered collinear (e.g., vertices 0, 6, 9 are not
+	// collinear).  In other words, it is unpredictable (modulo knowing the
+	// details of the symbolic perturbations) whether 0123 contains 06123
+	// for example.
+
+	// Loop E:  0,6,9,a,d,1,2,3
+	// Loop F:  0,4,5,1,d,a,9,6
+	// Loop G:  0,6,7,8,9,a,b,c,d,1,2,3
+	// Loop H:  0,6,f,e,9,a,b,c,d,1,2,3
+	// Loop I:  7,6,f,e,9,8
+	loopE = LoopFromPoints(parsePoints("0:30, 0:34, 0:36, 0:39, 0:41, 0:44, 30:44, 30:30"))
+	loopF = LoopFromPoints(parsePoints("0:30, -30:30, -30:44, 0:44, 0:41, 0:39, 0:36, 0:34"))
+	loopG = LoopFromPoints(parsePoints("0:30, 0:34, 10:34, 10:36, 0:36, 0:39, 10:39, 10:41, 0:41, 0:44, 30:44, 30:30"))
+	loopH = LoopFromPoints(parsePoints("0:30, 0:34, -10:34, -10:36, 0:36, 0:39, 10:39, 10:41, 0:41, 0:44, 30:44, 30:30"))
+
+	loopI = LoopFromPoints(parsePoints("10:34, 0:34, -10:34, -10:36, 0:36, 10:36"))
+)
+
+func TestLoopEmptyAndFull(t *testing.T) {
+	emptyLoop := EmptyLoop()
+
+	if !emptyLoop.IsEmpty() {
+		t.Errorf("empty loop should be empty")
+	}
+	if emptyLoop.IsFull() {
+		t.Errorf("empty loop should not be full")
+	}
+	if !emptyLoop.isEmptyOrFull() {
+		t.Errorf("empty loop should pass IsEmptyOrFull")
+	}
+
+	fullLoop := FullLoop()
+
+	if fullLoop.IsEmpty() {
+		t.Errorf("full loop should not be empty")
+	}
+	if !fullLoop.IsFull() {
+		t.Errorf("full loop should be full")
+	}
+	if !fullLoop.isEmptyOrFull() {
+		t.Errorf("full loop should pass IsEmptyOrFull")
+	}
+	if emptyLoop.NumEdges() != 0 {
+		t.Errorf("empty loops should have no edges")
+	}
+	if emptyLoop.numChains() != 0 {
+		t.Errorf("empty loops should have no edge chains")
+	}
+	if fullLoop.NumEdges() != 0 {
+		t.Errorf("full loops should have no edges")
+	}
+	if fullLoop.numChains() != 0 {
+		t.Errorf("full loops should have no edge chains")
+	}
+}
+
+func TestLoopBasic(t *testing.T) {
+	shape := Shape(makeLoop("0:0, 0:1, 1:0"))
+
+	if got := shape.NumEdges(); got != 3 {
+		t.Errorf("shape.NumEdges = %d, want 3", got)
+	}
+	if got := shape.numChains(); got != 1 {
+		t.Errorf("shape.numChains = %d, want 1", got)
+	}
+	if got := shape.chainStart(0); got != 0 {
+		t.Errorf("shape.chainStart(0) = %d, want 0", got)
+	}
+	if got := shape.chainStart(1); got != 3 {
+		t.Errorf("shape.chainStart(1) = %d, want 3", got)
+	}
+
+	v2, v3 := shape.Edge(2)
+	if want := PointFromLatLng(LatLngFromDegrees(1, 0)); !v2.ApproxEqual(want) {
+		t.Errorf("shape.Edge(2) end A = %v, want %v", v2, want)
+	}
+	if want := PointFromLatLng(LatLngFromDegrees(0, 0)); !v3.ApproxEqual(want) {
+		t.Errorf("shape.Edge(2) end B = %v, want %v", v3, want)
+	}
+
+	if got := shape.dimension(); got != polygonGeometry {
+		t.Errorf("shape.dimension() = %d, want %v", got, polygonGeometry)
+	}
+	if !shape.HasInterior() {
+		t.Errorf("shape.HasInterior() = false, want true")
+	}
+	if shape.ContainsOrigin() {
+		t.Errorf("shape.ContainsOrigin() = true, want false")
+	}
+}
+
+func TestLoopRectBound(t *testing.T) {
+	if !EmptyLoop().RectBound().IsEmpty() {
+		t.Errorf("empty loop's RectBound should be empty")
+	}
+	if !FullLoop().RectBound().IsFull() {
+		t.Errorf("full loop's RectBound should be full")
+	}
+	if !candyCane.RectBound().Lng.IsFull() {
+		t.Errorf("candy cane loop's RectBound should have a full longitude range")
+	}
+	if got := candyCane.RectBound().Lat.Lo; got >= -0.349066 {
+		t.Errorf("candy cane loop's RectBound should have a lower latitude (%v) under -0.349066 radians", got)
+	}
+	if got := candyCane.RectBound().Lat.Hi; got <= 0.174533 {
+		t.Errorf("candy cane loop's RectBound should have an upper latitude (%v) over 0.174533 radians", got)
+	}
+	if !smallNECW.RectBound().IsFull() {
+		t.Errorf("small northeast clockwise loop's RectBound should be full")
+	}
+	if got, want := arctic80.RectBound(), rectFromDegrees(80, -180, 90, 180); !rectsApproxEqual(got, want, rectErrorLat, rectErrorLng) {
+		t.Errorf("arctic 80 loop's RectBound (%v) should be %v", got, want)
+	}
+	if got, want := antarctic80.RectBound(), rectFromDegrees(-90, -180, -80, 180); !rectsApproxEqual(got, want, rectErrorLat, rectErrorLng) {
+		t.Errorf("antarctic 80 loop's RectBound (%v) should be %v", got, want)
+	}
+	if !southHemi.RectBound().Lng.IsFull() {
+		t.Errorf("south hemi loop's RectBound should have a full longitude range")
+	}
+	got, want := southHemi.RectBound().Lat, r1.Interval{-math.Pi / 2, 0}
+	if !got.ApproxEqual(want) {
+		t.Errorf("south hemi loop's RectBound latitude interval (%v) should be %v", got, want)
+	}
+
+	// Create a loop that contains the complement of the arctic80 loop.
+	arctic80Inv := invert(arctic80)
+	// The highest latitude of each edge is attained at its midpoint.
+	mid := Point{arctic80Inv.vertices[0].Vector.Add(arctic80Inv.vertices[1].Vector).Mul(.5)}
+	if got, want := arctic80Inv.RectBound().Lat.Hi, float64(LatLngFromPoint(mid).Lat); math.Abs(got-want) > 10*dblEpsilon {
+		t.Errorf("arctic 80 inverse loop's RectBound latitude hi = %v, want %v", got, want)
+	}
+}
+
+func TestLoopCapBound(t *testing.T) {
+	if !EmptyLoop().CapBound().IsEmpty() {
+		t.Errorf("empty loop's CapBound should be empty")
+	}
+	if !FullLoop().CapBound().IsFull() {
+		t.Errorf("full loop's CapBound should be full")
+	}
+	if !smallNECW.CapBound().IsFull() {
+		t.Errorf("small northeast clockwise loop's CapBound should be full")
+	}
+	if got, want := arctic80.CapBound(), rectFromDegrees(80, -180, 90, 180).CapBound(); !got.ApproxEqual(want) {
+		t.Errorf("arctic 80 loop's CapBound (%v) should be %v", got, want)
+	}
+	if got, want := antarctic80.CapBound(), rectFromDegrees(-90, -180, -80, 180).CapBound(); !got.ApproxEqual(want) {
+		t.Errorf("antarctic 80 loop's CapBound (%v) should be %v", got, want)
+	}
+}
+
+func invert(l *Loop) *Loop {
+	vertices := make([]Point, 0, len(l.vertices))
+	for i := len(l.vertices) - 1; i >= 0; i-- {
+		vertices = append(vertices, l.vertices[i])
+	}
+	return LoopFromPoints(vertices)
+}
+
+func TestLoopOriginInside(t *testing.T) {
+	if !northHemi.originInside {
+		t.Errorf("north hemisphere loop should include origin")
+	}
+	if !northHemi3.originInside {
+		t.Errorf("north hemisphere 3 loop should include origin")
+	}
+	if southHemi.originInside {
+		t.Errorf("south hemisphere loop should not include origin")
+	}
+	if westHemi.originInside {
+		t.Errorf("west hemisphere loop should not include origin")
+	}
+	if !eastHemi.originInside {
+		t.Errorf("east hemisphere loop should include origin")
+	}
+	if nearHemi.originInside {
+		t.Errorf("near hemisphere loop should not include origin")
+	}
+	if !farHemi.originInside {
+		t.Errorf("far hemisphere loop should include origin")
+	}
+	if candyCane.originInside {
+		t.Errorf("candy cane loop should not include origin")
+	}
+	if !smallNECW.originInside {
+		t.Errorf("small northeast clockwise loop should include origin")
+	}
+	if !arctic80.originInside {
+		t.Errorf("arctic 80 loop should include origin")
+	}
+	if antarctic80.originInside {
+		t.Errorf("antarctic 80 loop should not include origin")
+	}
+	if loopA.originInside {
+		t.Errorf("loop A should not include origin")
+	}
+}
+
+func TestLoopContainsPoint(t *testing.T) {
+	north := Point{r3.Vector{0, 0, 1}}
+	south := Point{r3.Vector{0, 0, -1}}
+
+	if EmptyLoop().ContainsPoint(north) {
+		t.Errorf("empty loop should not contain any points")
+	}
+	if !FullLoop().ContainsPoint(south) {
+		t.Errorf("full loop should contain all points")
+	}
+
+	for _, tc := range []struct {
+		name string
+		l    *Loop
+		in   Point
+		out  Point
+	}{
+		{
+			"north hemisphere",
+			northHemi,
+			Point{r3.Vector{0, 0, 1}},
+			Point{r3.Vector{0, 0, -1}},
+		},
+		{
+			"south hemisphere",
+			southHemi,
+			Point{r3.Vector{0, 0, -1}},
+			Point{r3.Vector{0, 0, 1}},
+		},
+		{
+			"west hemisphere",
+			westHemi,
+			Point{r3.Vector{0, -1, 0}},
+			Point{r3.Vector{0, 1, 0}},
+		},
+		{
+			"east hemisphere",
+			eastHemi,
+			Point{r3.Vector{0, 1, 0}},
+			Point{r3.Vector{0, -1, 0}},
+		},
+		{
+			"candy cane",
+			candyCane,
+			PointFromLatLng(LatLngFromDegrees(5, 71)),
+			PointFromLatLng(LatLngFromDegrees(-8, 71)),
+		},
+	} {
+		l := tc.l
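+		// Check containment at each of the four rotations of the loop's vertex order.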
+		for i := 0; i < 4; i++ {
+			if !l.ContainsPoint(tc.in) {
+				t.Errorf("%s loop should contain %v at rotation %d", tc.name, tc.in, i)
+			}
+			if l.ContainsPoint(tc.out) {
+				t.Errorf("%s loop shouldn't contain %v at rotation %d", tc.name, tc.out, i)
+			}
+			l = rotate(l)
+		}
+	}
+}
+
+func TestLoopVertex(t *testing.T) {
+	tests := []struct {
+		loop   *Loop
+		vertex int
+		want   Point
+	}{
+		{EmptyLoop(), 0, Point{r3.Vector{0, 0, 1}}},
+		{EmptyLoop(), 1, Point{r3.Vector{0, 0, 1}}},
+		{FullLoop(), 0, Point{r3.Vector{0, 0, -1}}},
+		{FullLoop(), 1, Point{r3.Vector{0, 0, -1}}},
+		{arctic80, 0, parsePoint("80:-150")},
+		{arctic80, 1, parsePoint("80:-30")},
+		{arctic80, 2, parsePoint("80:90")},
+		{arctic80, 3, parsePoint("80:-150")},
+	}
+
+	for _, test := range tests {
+		if got := test.loop.Vertex(test.vertex); !pointsApproxEquals(got, test.want, epsilon) {
+			t.Errorf("%v.Vertex(%d) = %v, want %v", test.loop, test.vertex, got, test.want)
+		}
+	}
+
+	// Check that wrapping is correct.
+	if !pointsApproxEquals(arctic80.Vertex(2), arctic80.Vertex(5), epsilon) {
+		t.Errorf("Vertex should wrap values. %v.Vertex(2) = %v != %v.Vertex(5) = %v",
+			arctic80, arctic80.Vertex(2), arctic80, arctic80.Vertex(5))
+	}
+
+	loopAroundThrice := 2 + 3*len(arctic80.vertices)
+	if !pointsApproxEquals(arctic80.Vertex(2), arctic80.Vertex(loopAroundThrice), epsilon) {
+		t.Errorf("Vertex should wrap values. %v.Vertex(2) = %v != %v.Vertex(%d) = %v",
+			arctic80, arctic80.Vertex(2), arctic80, loopAroundThrice, arctic80.Vertex(loopAroundThrice))
+	}
+}
+
+func TestLoopNumEdges(t *testing.T) {
+	tests := []struct {
+		loop *Loop
+		want int
+	}{
+		{EmptyLoop(), 0},
+		{FullLoop(), 0},
+		{farHemi, 4},
+		{candyCane, 6},
+		{smallNECW, 3},
+		{arctic80, 3},
+		{antarctic80, 3},
+		{lineTriangle, 3},
+		{skinnyChevron, 4},
+	}
+
+	for _, test := range tests {
+		if got := test.loop.NumEdges(); got != test.want {
+			t.Errorf("%v.NumEdges() = %v, want %v", test.loop, got, test.want)
+		}
+	}
+}
+
+func TestLoopEdge(t *testing.T) {
+	tests := []struct {
+		loop  *Loop
+		edge  int
+		wantA Point
+		wantB Point
+	}{
+		{
+			loop:  farHemi,
+			edge:  2,
+			wantA: Point{r3.Vector{0, 0, -1}},
+			wantB: Point{r3.Vector{0, -1, 0}},
+		},
+		{
+			loop:  candyCane,
+			edge:  0,
+			wantA: parsePoint("-20:150"),
+			wantB: parsePoint("-20:-70"),
+		},
+		{
+			loop:  candyCane,
+			edge:  1,
+			wantA: parsePoint("-20:-70"),
+			wantB: parsePoint("0:70"),
+		},
+		{
+			loop:  candyCane,
+			edge:  2,
+			wantA: parsePoint("0:70"),
+			wantB: parsePoint("10:-150"),
+		},
+		{
+			loop:  candyCane,
+			edge:  3,
+			wantA: parsePoint("10:-150"),
+			wantB: parsePoint("10:70"),
+		},
+		{
+			loop:  candyCane,
+			edge:  4,
+			wantA: parsePoint("10:70"),
+			wantB: parsePoint("-10:-70"),
+		},
+		{
+			loop:  candyCane,
+			edge:  5,
+			wantA: parsePoint("-10:-70"),
+			wantB: parsePoint("-20:150"),
+		},
+		{
+			loop:  skinnyChevron,
+			edge:  2,
+			wantA: parsePoint("0:1e-320"),
+			wantB: parsePoint("1e-320:80"),
+		},
+		{
+			loop:  skinnyChevron,
+			edge:  3,
+			wantA: parsePoint("1e-320:80"),
+			wantB: parsePoint("0:0"),
+		},
+	}
+
+	for _, test := range tests {
+		if a, b := test.loop.Edge(test.edge); !(pointsApproxEquals(a, test.wantA, epsilon) && pointsApproxEquals(b, test.wantB, epsilon)) {
+			t.Errorf("%v.Edge(%d) = (%v, %v), want (%v, %v)", test.loop, test.edge, a, b, test.wantA, test.wantB)
+		}
+	}
+}
+
+func rotate(l *Loop) *Loop {
+	vertices := make([]Point, 0, len(l.vertices))
+	for i := 1; i < len(l.vertices); i++ {
+		vertices = append(vertices, l.vertices[i])
+	}
+	vertices = append(vertices, l.vertices[0])
+	return LoopFromPoints(vertices)
+}
+
+func TestLoopFromCell(t *testing.T) {
+	cell := CellFromCellID(CellIDFromLatLng(LatLng{40.565459 * s1.Degree, -74.645276 * s1.Degree}))
+	loopFromCell := LoopFromCell(cell)
+
+	// Demonstrates the reason for this test; the cell bounds are more
+	// conservative than the resulting loop bounds.
+	if loopFromCell.RectBound().Contains(cell.RectBound()) {
+		t.Errorf("loopFromCell's RectBound contains the original cell's RectBound, but should not")
+	}
+}
+
+func TestLoopRegularLoop(t *testing.T) {
+	loop := RegularLoop(PointFromLatLng(LatLngFromDegrees(80, 135)), 20*s1.Degree, 4)
+	if len(loop.vertices) != 4 {
+		t.Errorf("RegularLoop with 4 vertices should have 4 vertices, got %d", len(loop.vertices))
+	}
+	// The actual Points values are already tested in the s2point_test method TestRegularPoints.
+}

+ 9 - 7
vendor/github.com/golang/geo/s2/matrix3x3.go

@@ -18,6 +18,8 @@ package s2
 
 import (
 	"fmt"
+
+	"github.com/golang/geo/r3"
 )
 
 // matrix3x3 represents a traditional 3x3 matrix of floating point values.
@@ -27,12 +29,12 @@ type matrix3x3 [3][3]float64
 
 // col returns the given column as a Point.
 func (m *matrix3x3) col(col int) Point {
-	return PointFromCoords(m[0][col], m[1][col], m[2][col])
+	return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}}
 }
 
 // row returns the given row as a Point.
 func (m *matrix3x3) row(row int) Point {
-	return PointFromCoords(m[row][0], m[row][1], m[row][2])
+	return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}}
 }
 
 // setCol sets the specified column to the value in the given Point.
@@ -65,11 +67,11 @@ func (m *matrix3x3) scale(f float64) *matrix3x3 {
 // mul returns the multiplication of m by the Point p and converts the
 // resulting 1x3 matrix into a Point.
 func (m *matrix3x3) mul(p Point) Point {
-	return PointFromCoords(
-		m[0][0]*p.X+m[0][1]*p.Y+m[0][2]*p.Z,
-		m[1][0]*p.X+m[1][1]*p.Y+m[1][2]*p.Z,
-		m[2][0]*p.X+m[2][1]*p.Y+m[2][2]*p.Z,
-	)
+	return Point{r3.Vector{
+		m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z,
+		m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z,
+		m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z,
+	}}
 }
 
 // det returns the determinant of this matrix.

+ 494 - 0
vendor/github.com/golang/geo/s2/matrix3x3_test.go

@@ -0,0 +1,494 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+)
+
+func TestCol(t *testing.T) {
+	tests := []struct {
+		have *matrix3x3
+		col  int
+		want Point
+	}{
+		{&matrix3x3{}, 0, OriginPoint()},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			0,
+			Point{r3.Vector{1, 4, 7}},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			2,
+			Point{r3.Vector{3, 6, 9}},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.col(test.col); !got.ApproxEqual(test.want) {
+			t.Errorf("%v.col(%d) = %v, want %v", test.have, test.col, got, test.want)
+		}
+	}
+}
+
+func TestRow(t *testing.T) {
+	tests := []struct {
+		have *matrix3x3
+		row  int
+		want Point
+	}{
+		{&matrix3x3{}, 0, OriginPoint()},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			0,
+			Point{r3.Vector{1, 2, 3}},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			2,
+			Point{r3.Vector{7, 8, 9}},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.row(test.row); !got.ApproxEqual(test.want) {
+			t.Errorf("%v.row(%d) = %v, want %v", test.have, test.row, got, test.want)
+		}
+	}
+}
+
+func TestSetCol(t *testing.T) {
+	tests := []struct {
+		have  *matrix3x3
+		col   int
+		point Point
+		want  *matrix3x3
+	}{
+		{
+			&matrix3x3{},
+			0,
+			Point{r3.Vector{1, 1, 0}},
+			&matrix3x3{
+				{1, 0, 0},
+				{1, 0, 0},
+				{0, 0, 0},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			2,
+			Point{r3.Vector{1, 1, 0}},
+			&matrix3x3{
+				{1, 2, 1},
+				{4, 5, 1},
+				{7, 8, 0},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.setCol(test.col, test.point); !matricesApproxEqual(got, test.want) {
+			t.Errorf("%v.setCol(%d, %v) = %v, want %v", test.have, test.col, test.point, got, test.want)
+		}
+	}
+}
+
+func TestSetRow(t *testing.T) {
+	tests := []struct {
+		have  *matrix3x3
+		row   int
+		point Point
+		want  *matrix3x3
+	}{
+		{
+			&matrix3x3{},
+			0,
+			Point{r3.Vector{1, 1, 0}},
+			&matrix3x3{
+				{1, 1, 0},
+				{0, 0, 0},
+				{0, 0, 0},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			2,
+			Point{r3.Vector{1, 1, 0}},
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{1, 1, 0},
+			},
+		},
+	}
+	for _, test := range tests {
+		if got := test.have.setRow(test.row, test.point); !matricesApproxEqual(got, test.want) {
+			t.Errorf("%v.setRow(%d, %v) = %v, want %v", test.have, test.row, test.point, got, test.want)
+		}
+	}
+}
+
+func TestScale(t *testing.T) {
+	tests := []struct {
+		have  *matrix3x3
+		scale float64
+		want  *matrix3x3
+	}{
+		{
+			&matrix3x3{},
+			0,
+			&matrix3x3{},
+		},
+		{
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+			0,
+			&matrix3x3{},
+		},
+		{
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+			1,
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+			5,
+			&matrix3x3{
+				{5, 5, 5},
+				{5, 5, 5},
+				{5, 5, 5},
+			},
+		},
+		{
+			&matrix3x3{
+				{-2, 2, -3},
+				{-1, 1, 3},
+				{2, 0, -1},
+			},
+			2.75,
+			&matrix3x3{
+				{-5.5, 5.5, -8.25},
+				{-2.75, 2.75, 8.25},
+				{5.5, 0, -2.75},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.scale(test.scale); !matricesApproxEqual(got, test.want) {
+			t.Errorf("%v.scale(%f) = %v, want %v", test.have, test.scale, got, test.want)
+		}
+	}
+}
+
+func TestMul(t *testing.T) {
+	tests := []struct {
+		have  *matrix3x3
+		point Point
+		want  Point
+	}{
+		{&matrix3x3{}, Point{}, Point{}},
+		{
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+			Point{},
+			Point{},
+		},
+		{
+			// Multiplying by the identity gives back the original point.
+			&matrix3x3{
+				{1, 0, 0},
+				{0, 1, 0},
+				{0, 0, 1},
+			},
+			Point{},
+			Point{},
+		},
+		{
+			// Multiplying by the identity gives back the original point.
+			&matrix3x3{
+				{1, 0, 0},
+				{0, 1, 0},
+				{0, 0, 1},
+			},
+			Point{r3.Vector{1, 2, 3}},
+			Point{r3.Vector{1, 2, 3}},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			Point{r3.Vector{1, 1, 1}},
+			Point{r3.Vector{6, 15, 24}},
+		},
+	}
+	for _, test := range tests {
+		if got := test.have.mul(test.point); !got.ApproxEqual(test.want) {
+			t.Errorf("%v.mul(%v) = %v, want %v", test.have, test.point, got, test.want)
+		}
+	}
+}
+
+func TestDet(t *testing.T) {
+	tests := []struct {
+		have *matrix3x3
+		want float64
+	}{
+		{
+			&matrix3x3{},
+			0,
+		},
+		{
+			// Matrix of all the same values has det of 0.
+			&matrix3x3{
+				{1, 1, 1},
+				{1, 1, 1},
+				{1, 1, 1},
+			},
+			0,
+		},
+		{
+			// Identity matrix has det of 1.
+			&matrix3x3{
+				{1, 0, 0},
+				{0, 1, 0},
+				{0, 0, 1},
+			},
+			1,
+		},
+		{
+			&matrix3x3{
+				{-2, 2, -3},
+				{-1, 1, 3},
+				{2, 0, -1},
+			},
+			18,
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			0,
+		},
+		{
+			&matrix3x3{
+				{9, 8, 7},
+				{6, 5, 4},
+				{3, 2, 1},
+			},
+			0,
+		},
+		{
+			&matrix3x3{
+				{1.74, math.E, 42},
+				{math.Pi, math.Sqrt2, math.Ln10},
+				{3, math.SqrtPhi, 9.8976},
+			},
+			-56.838525224123096,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.det(); !float64Eq(got, test.want) {
+			t.Errorf("%v.det() = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestTranspose(t *testing.T) {
+	tests := []struct {
+		have *matrix3x3
+		want *matrix3x3
+	}{
+		{&matrix3x3{}, &matrix3x3{}},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			&matrix3x3{
+				{1, 4, 7},
+				{2, 5, 8},
+				{3, 6, 9},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 0, 0},
+				{0, 2, 0},
+				{0, 0, 3},
+			},
+			&matrix3x3{
+				{1, 0, 0},
+				{0, 2, 0},
+				{0, 0, 3},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{0, 4, 5},
+				{0, 0, 6},
+			},
+			&matrix3x3{
+				{1, 0, 0},
+				{2, 4, 0},
+				{3, 5, 6},
+			},
+		},
+		{
+			&matrix3x3{
+				{1, 1, 1},
+				{0, 0, 0},
+				{0, 0, 0},
+			},
+			&matrix3x3{
+				{1, 0, 0},
+				{1, 0, 0},
+				{1, 0, 0},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.transpose().transpose(); !matricesApproxEqual(got, test.have) {
+			t.Errorf("%v.transpose().transpose() = %v, want %v", test.have, got, test.have)
+		}
+
+		if got := test.have.transpose(); !matricesApproxEqual(got, test.want) {
+			t.Errorf("%v.transpose() = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestString(t *testing.T) {
+	tests := []struct {
+		have *matrix3x3
+		want string
+	}{
+		{
+			&matrix3x3{
+				{1, 2, 3},
+				{4, 5, 6},
+				{7, 8, 9},
+			},
+			`[ 1.0000 2.0000 3.0000 ] [ 4.0000 5.0000 6.0000 ] [ 7.0000 8.0000 9.0000 ]`,
+		},
+		{
+			&matrix3x3{
+				{1, 4, 7},
+				{2, 5, 8},
+				{3, 6, 9},
+			},
+			`[ 1.0000 4.0000 7.0000 ] [ 2.0000 5.0000 8.0000 ] [ 3.0000 6.0000 9.0000 ]`,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.have.String(); got != test.want {
+			t.Errorf("%v.String() = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestFrames(t *testing.T) {
+	z := PointFromCoords(0.2, 0.5, -3.3)
+	m := getFrame(z)
+
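+	// The checks below verify that the frame has unit-length columns, a determinant of 1, and col(2) equal to z.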
+	if !m.col(0).IsUnit() {
+		t.Errorf("col(0) of frame not unit length")
+	}
+	if !m.col(1).IsUnit() {
+		t.Errorf("col(1) of frame not unit length")
+	}
+	if !float64Eq(m.det(), 1) {
+		t.Errorf("determinant of frame = %v, want %v", m.det(), 1)
+	}
+
+	tests := []struct {
+		a Point
+		b Point
+	}{
+		{m.col(2), z},
+
+		{toFrame(m, m.col(0)), Point{r3.Vector{1, 0, 0}}},
+		{toFrame(m, m.col(1)), Point{r3.Vector{0, 1, 0}}},
+		{toFrame(m, m.col(2)), Point{r3.Vector{0, 0, 1}}},
+
+		{fromFrame(m, Point{r3.Vector{1, 0, 0}}), m.col(0)},
+		{fromFrame(m, Point{r3.Vector{0, 1, 0}}), m.col(1)},
+		{fromFrame(m, Point{r3.Vector{0, 0, 1}}), m.col(2)},
+	}
+
+	for _, test := range tests {
+		if !pointsApproxEquals(test.a, test.b, epsilon) {
+			t.Errorf("%v != %v", test.a, test.b)
+		}
+	}
+}

+ 109 - 0
vendor/github.com/golang/geo/s2/metric_test.go

@@ -0,0 +1,109 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+)
+
+func TestMetric(t *testing.T) {
+	if got := MinWidthMetric.MaxLevel(0.001256); got != 9 {
+		t.Errorf("MinWidthMetric.MaxLevel(0.001256) = %d, want 9", got)
+	}
+
+	// Check that the maximum aspect ratio of an individual cell is consistent
+	// with the global minimums and maximums.
+	if MaxEdgeAspect < 1 {
+		t.Errorf("MaxEdgeAspect = %v, want >= 1", MaxEdgeAspect)
+	}
+	if got := MaxEdgeMetric.Deriv / MinEdgeMetric.Deriv; MaxEdgeAspect > got {
+		t.Errorf("Edge Aspect: %v/%v = %v, want <= %v", MaxEdgeMetric.Deriv, MinEdgeMetric.Deriv, got, MaxEdgeAspect)
+	}
+	if MaxDiagAspect < 1 {
+		t.Errorf("MaxDiagAspect = %v, want >= 1", MaxDiagAspect)
+	}
+	if got := MaxDiagMetric.Deriv / MinDiagMetric.Deriv; MaxDiagAspect > got {
+		t.Errorf("Diag Aspect: %v/%v = %v, want <= %v", MaxDiagMetric.Deriv, MinDiagMetric.Deriv, got, MaxDiagAspect)
+	}
+
+	// Check that area is consistent with edge and width.
+	if got := MinWidthMetric.Deriv*MinEdgeMetric.Deriv - 1e-15; MinAreaMetric.Deriv < got {
+		t.Errorf("Min Area: %v*%v = %v, want >= %v", MinWidthMetric.Deriv, MinEdgeMetric.Deriv, got, MinAreaMetric.Deriv)
+	}
+	if got := MaxWidthMetric.Deriv*MaxEdgeMetric.Deriv + 1e-15; MaxAreaMetric.Deriv > got {
+		t.Errorf("Max Area: %v*%v = %v, want <= %v", MaxWidthMetric.Deriv, MaxEdgeMetric.Deriv, got, MaxAreaMetric.Deriv)
+	}
+
+	for level := -2; level <= maxLevel+3; level++ {
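+		// The width corresponding to this level is MinWidthMetric.Deriv * 2^-level.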
+		width := MinWidthMetric.Deriv * math.Pow(2, float64(-level))
+		if level >= maxLevel+3 {
+			width = 0
+		}
+
+		// Check boundary cases (exactly equal to a threshold value).
+		expected := int(math.Max(0, math.Min(maxLevel, float64(level))))
+
+		if MinWidthMetric.MinLevel(width) != expected {
+			t.Errorf("MinWidthMetric.MinLevel(%v) = %v, want %v", width, MinWidthMetric.MinLevel(width), expected)
+		}
+		if MinWidthMetric.MaxLevel(width) != expected {
+			t.Errorf("MinWidthMetric.MaxLevel(%v) = %v, want %v", width, MinWidthMetric.MaxLevel(width), expected)
+		}
+		if MinWidthMetric.ClosestLevel(width) != expected {
+			t.Errorf("MinWidthMetric.ClosestLevel(%v) = %v, want %v", width, MinWidthMetric.ClosestLevel(width), expected)
+		}
+
+		// Also check non-boundary cases.
+		if got := MinWidthMetric.MinLevel(1.2 * width); got != expected {
+			t.Errorf("non-boundary MinWidthMetric.MinLevel(%v) = %v, want %v", 1.2*width, got, expected)
+		}
+		if got := MinWidthMetric.MaxLevel(0.8 * width); got != expected {
+			t.Errorf("non-boundary MinWidthMetric.MaxLevel(%v) = %v, want %v", 0.8*width, got, expected)
+		}
+		if got := MinWidthMetric.ClosestLevel(1.2 * width); got != expected {
+			t.Errorf("non-boundary larger MinWidthMetric.ClosestLevel(%v) = %v, want %v", 1.2*width, got, expected)
+		}
+		if got := MinWidthMetric.ClosestLevel(0.8 * width); got != expected {
+			t.Errorf("non-boundary smaller MinWidthMetric.ClosestLevel(%v) = %v, want %v", 0.8*width, got, expected)
+		}
+	}
+}
+
+func TestMetricSizeRelations(t *testing.T) {
+	// check that min <= avg <= max for each metric.
+	tests := []struct {
+		min Metric
+		avg Metric
+		max Metric
+	}{
+		{MinAngleSpanMetric, AvgAngleSpanMetric, MaxAngleSpanMetric},
+		{MinWidthMetric, AvgWidthMetric, MaxWidthMetric},
+		{MinEdgeMetric, AvgEdgeMetric, MaxEdgeMetric},
+		{MinDiagMetric, AvgDiagMetric, MaxDiagMetric},
+		{MinAreaMetric, AvgAreaMetric, MaxAreaMetric},
+	}
+
+	for _, test := range tests {
+		if test.min.Deriv > test.avg.Deriv {
+			t.Errorf("Min %v > Avg %v", test.min.Deriv, test.avg.Deriv)
+		}
+		if test.avg.Deriv > test.max.Deriv {
+			t.Errorf("Avg %v > Max %v", test.avg.Deriv, test.max.Deriv)
+		}
+	}
+}

+ 197 - 0
vendor/github.com/golang/geo/s2/paddedcell_test.go

@@ -0,0 +1,197 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+)
+
+func TestPaddedCellMethods(t *testing.T) {
+	// Test the PaddedCell methods that have approximate Cell equivalents.
+	for i := 0; i < 1000; i++ {
+		cid := randomCellID()
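+		// The padding is chosen roughly log-uniformly between 1e-15 and 1.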
+		padding := math.Pow(1e-15, randomFloat64())
+		cell := CellFromCellID(cid)
+		pCell := PaddedCellFromCellID(cid, padding)
+
+		if cell.id != pCell.id {
+			t.Errorf("%v.id = %v, want %v", pCell, pCell.id, cell.id)
+		}
+		if cell.id.Level() != pCell.Level() {
+			t.Errorf("%v.Level() = %v, want %v", pCell, pCell.Level(), cell.id.Level())
+		}
+
+		if padding != pCell.Padding() {
+			t.Errorf("%v.Padding() = %v, want %v", pCell, pCell.Padding(), padding)
+		}
+
+		if got, want := pCell.Bound(), cell.BoundUV().ExpandedByMargin(padding); got != want {
+			t.Errorf("%v.Bound() = %v, want %v", pCell, got, want)
+		}
+
+		r := r2.RectFromPoints(cell.id.centerUV()).ExpandedByMargin(padding)
+		if r != pCell.Middle() {
+			t.Errorf("%v.Middle() = %v, want %v", pCell, pCell.Middle(), r)
+		}
+
+		if cell.id.Point() != pCell.Center() {
+			t.Errorf("%v.Center() = %v, want %v", pCell, pCell.Center(), cell.id.Point())
+		}
+		if cid.IsLeaf() {
+			continue
+		}
+
+		children, ok := cell.Children()
+		if !ok {
+			t.Errorf("%v.Children() failed but should not have", cell)
+			continue
+		}
+		for pos := 0; pos < 4; pos++ {
+			i, j := pCell.ChildIJ(pos)
+
+			cellChild := children[pos]
+			pCellChild := PaddedCellFromParentIJ(pCell, i, j)
+			if cellChild.id != pCellChild.id {
+				t.Errorf("%v.id = %v, want %v", pCellChild, pCellChild.id, cellChild.id)
+			}
+			if cellChild.id.Level() != pCellChild.Level() {
+				t.Errorf("%v.Level() = %v, want %v", pCellChild, pCellChild.Level(), cellChild.id.Level())
+			}
+
+			if padding != pCellChild.Padding() {
+				t.Errorf("%v.Padding() = %v, want %v", pCellChild, pCellChild.Padding(), padding)
+			}
+
+			if got, want := pCellChild.Bound(), cellChild.BoundUV().ExpandedByMargin(padding); got != want {
+				t.Errorf("%v.Bound() = %v, want %v", pCellChild, got, want)
+			}
+
+			r := r2.RectFromPoints(cellChild.id.centerUV()).ExpandedByMargin(padding)
+			if got := pCellChild.Middle(); !r.ApproxEquals(got) {
+				t.Errorf("%v.Middle() = %v, want %v", pCellChild, got, r)
+			}
+
+			if cellChild.id.Point() != pCellChild.Center() {
+				t.Errorf("%v.Center() = %v, want %v", pCellChild, pCellChild.Center(), cellChild.id.Point())
+			}
+		}
+	}
+}
+
+func TestPaddedCellEntryExitVertices(t *testing.T) {
+	for i := 0; i < 1000; i++ {
+		id := randomCellID()
+		unpadded := PaddedCellFromCellID(id, 0)
+		padded := PaddedCellFromCellID(id, 0.5)
+
+		// Check that entry/exit vertices do not depend on padding.
+		if unpadded.EntryVertex() != padded.EntryVertex() {
+			t.Errorf("entry vertex should not depend on padding; %v != %v", unpadded.EntryVertex(), padded.EntryVertex())
+		}
+
+		if unpadded.ExitVertex() != padded.ExitVertex() {
+			t.Errorf("exit vertex should not depend on padding; %v != %v", unpadded.ExitVertex(), padded.ExitVertex())
+		}
+
+		// Check that the exit vertex of one cell is the same as the entry vertex
+		// of the immediately following cell. This also tests wrapping from the
+		// end to the start of the CellID curve with high probability.
+		if got := PaddedCellFromCellID(id.NextWrap(), 0).EntryVertex(); unpadded.ExitVertex() != got {
+			t.Errorf("PaddedCellFromCellID(%v.NextWrap(), 0).EntryVertex() = %v, want %v", id, got, unpadded.ExitVertex())
+		}
+
+		// Check that the entry vertex of a cell is the same as the entry vertex
+		// of its first child, and similarly for the exit vertex.
+		if id.IsLeaf() {
+			continue
+		}
+		if got := PaddedCellFromCellID(id.Children()[0], 0).EntryVertex(); unpadded.EntryVertex() != got {
+			t.Errorf("PaddedCellFromCellID(%v.Children()[0], 0).EntryVertex() = %v, want %v", id, got, unpadded.EntryVertex())
+		}
+		if got := PaddedCellFromCellID(id.Children()[3], 0).ExitVertex(); unpadded.ExitVertex() != got {
+			t.Errorf("PaddedCellFromCellID(%v.Children()[3], 0).ExitVertex() = %v, want %v", id, got, unpadded.ExitVertex())
+		}
+	}
+}
+
+func TestPaddedCellShrinkToFit(t *testing.T) {
+	for iter := 0; iter < 1000; iter++ {
+		// Start with the desired result and work backwards.
+		result := randomCellID()
+		resultUV := result.boundUV()
+		sizeUV := resultUV.Size()
+
+		// Find the biggest rectangle that fits in "result" after padding.
+		// (These calculations ignore numerical errors.)
+		maxPadding := 0.5 * math.Min(sizeUV.X, sizeUV.Y)
+		padding := maxPadding * randomFloat64()
+		maxRect := resultUV.ExpandedByMargin(-padding)
+
+		// Start with a random subset of the maximum rectangle.
+		a := r2.Point{
+			randomUniformFloat64(maxRect.X.Lo, maxRect.X.Hi),
+			randomUniformFloat64(maxRect.Y.Lo, maxRect.Y.Hi),
+		}
+		b := r2.Point{
+			randomUniformFloat64(maxRect.X.Lo, maxRect.X.Hi),
+			randomUniformFloat64(maxRect.Y.Lo, maxRect.Y.Hi),
+		}
+
+		if !result.IsLeaf() {
+			// If the result is not a leaf cell, we must ensure that no child of
+			// result also satisfies the conditions of ShrinkToFit().  We do this
+			// by ensuring that rect intersects at least two children of result
+			// (after padding).
+			useY := oneIn(2)
+			center := result.centerUV().X
+			if useY {
+				center = result.centerUV().Y
+			}
+
+			// Find the range of coordinates that are shared between child cells
+			// along that axis.
+			shared := r1.Interval{center - padding, center + padding}
+			if useY {
+				shared = shared.Intersection(maxRect.Y)
+			} else {
+				shared = shared.Intersection(maxRect.X)
+			}
+			mid := randomUniformFloat64(shared.Lo, shared.Hi)
+
+			if useY {
+				a.Y = randomUniformFloat64(maxRect.Y.Lo, mid)
+				b.Y = randomUniformFloat64(mid, maxRect.Y.Hi)
+			} else {
+				a.X = randomUniformFloat64(maxRect.X.Lo, mid)
+				b.X = randomUniformFloat64(mid, maxRect.X.Hi)
+			}
+		}
+		rect := r2.RectFromPoints(a, b)
+
+		// Choose an arbitrary ancestor as the PaddedCell.
+		initialID := result.Parent(randomUniformInt(result.Level() + 1))
+		pCell := PaddedCellFromCellID(initialID, padding)
+		if got := pCell.ShrinkToFit(rect); got != result {
+			t.Errorf("%v.ShrinkToFit(%v) = %v, want %v", pCell, rect, got, result)
+		}
+	}
+}

+ 26 - 6
vendor/github.com/golang/geo/s2/point.go

@@ -24,7 +24,6 @@ import (
 )
 
 // Point represents a point on the unit sphere as a normalized 3D vector.
-// Points are guaranteed to be close to normalized.
 // Fields should be treated as read-only. Use one of the factory methods for creation.
 type Point struct {
 	r3.Vector
@@ -59,8 +58,7 @@ func OriginPoint() Point {
 // PointCross returns a Point that is orthogonal to both p and op. This is similar to
 // p.Cross(op) (the true cross product) except that it does a better job of
 // ensuring orthogonality when the Point is nearly parallel to op, it returns
-// a non-zero result even when p == op or p == -op and the result is a Point,
-// so it will have norm 1.
+// a non-zero result even when p == op or p == -op and the result is a Point.
 //
 // It satisfies the following properties (f == PointCross):
 //
@@ -73,13 +71,14 @@ func (p Point) PointCross(op Point) Point {
 	// but PointCross more accurately describes how this method is used.
 	x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
 
-	if x.ApproxEqual(r3.Vector{0, 0, 0}) {
+	// Compare exactly to the 0 vector.
+	if x == (r3.Vector{}) {
 		// The only result that makes sense mathematically is to return zero, but
 		// we find it more convenient to return an arbitrary orthogonal vector.
 		return Point{p.Ortho()}
 	}
 
-	return Point{x.Normalize()}
+	return Point{x}
 }
 
 // OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
@@ -277,13 +276,34 @@ func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []
 
 	for i := 0; i < numVertices; i++ {
 		angle := float64(i) * radianStep
-		p := PointFromCoords(r*math.Cos(angle), r*math.Sin(angle), z)
+		p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}}
 		vertices = append(vertices, Point{fromFrame(frame, p).Normalize()})
 	}
 
 	return vertices
 }
 
+// CapBound returns a bounding cap for this point.
+func (p Point) CapBound() Cap {
+	return CapFromPoint(p)
+}
+
+// RectBound returns a bounding latitude-longitude rectangle from this point.
+func (p Point) RectBound() Rect {
+	return RectFromLatLng(LatLngFromPoint(p))
+}
+
+// ContainsCell returns false as Points do not contain any other S2 types.
+func (p Point) ContainsCell(c Cell) bool { return false }
+
+// IntersectsCell reports whether this Point intersects the given cell.
+func (p Point) IntersectsCell(c Cell) bool {
+	return c.ContainsPoint(p)
+}
+
+// Contains reports if this Point contains the other Point.
+func (p Point) Contains(other Point) bool { return p == other }
+
 // TODO: Differences from C++
 // Rotate
 // Angle

+ 384 - 0
vendor/github.com/golang/geo/s2/point_test.go

@@ -0,0 +1,384 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+func TestOriginPoint(t *testing.T) {
+	if math.Abs(OriginPoint().Norm()-1) > 1e-15 {
+		t.Errorf("Origin point norm = %v, want 1", OriginPoint().Norm())
+	}
+
+	// The point chosen below is about 66km from the north pole towards the East
+	// Siberian Sea. The purpose of the stToUV(2/3) calculation is to keep the
+	// origin as far away as possible from the longitudinal edges of large
+	// Cells. (The line of longitude through the chosen point is always 1/3
+	// or 2/3 of the way across any Cell with longitudinal edges that it
+	// passes through.)
+	p := Point{r3.Vector{-0.01, 0.01 * stToUV(2.0/3), 1}}
+	if !p.ApproxEqual(OriginPoint()) {
+		t.Errorf("Origin point should fall in the Siberian Sea, but does not.")
+	}
+
+	// Check that the origin is not too close to either pole.
+	// The Earth's mean radius in kilometers (according to NASA).
+	const earthRadiusKm = 6371.01
+	if dist := math.Acos(OriginPoint().Z) * earthRadiusKm; dist <= 50 {
+		t.Errorf("Origin point is too close to the North Pole. Got %v, want >= 50km", dist)
+	}
+}
+
+func TestPointCross(t *testing.T) {
+	tests := []struct {
+		p1x, p1y, p1z, p2x, p2y, p2z, norm float64
+	}{
+		{1, 0, 0, 1, 0, 0, 1},
+		{1, 0, 0, 0, 1, 0, 2},
+		{0, 1, 0, 1, 0, 0, 2},
+		{1, 2, 3, -4, 5, -6, 2 * math.Sqrt(934)},
+	}
+	for _, test := range tests {
+		p1 := Point{r3.Vector{test.p1x, test.p1y, test.p1z}}
+		p2 := Point{r3.Vector{test.p2x, test.p2y, test.p2z}}
+		result := p1.PointCross(p2)
+		if !float64Eq(result.Norm(), test.norm) {
+			t.Errorf("|%v ⨯ %v| = %v, want %v", p1, p2, result.Norm(), test.norm)
+		}
+		if x := result.Dot(p1.Vector); !float64Eq(x, 0) {
+			t.Errorf("|(%v ⨯ %v) · %v| = %v, want 0", p1, p2, p1, x)
+		}
+		if x := result.Dot(p2.Vector); !float64Eq(x, 0) {
+			t.Errorf("|(%v ⨯ %v) · %v| = %v, want 0", p1, p2, p2, x)
+		}
+	}
+}
+
+func TestPointDistance(t *testing.T) {
+	tests := []struct {
+		x1, y1, z1 float64
+		x2, y2, z2 float64
+		want       float64 // radians
+	}{
+		{1, 0, 0, 1, 0, 0, 0},
+		{1, 0, 0, 0, 1, 0, math.Pi / 2},
+		{1, 0, 0, 0, 1, 1, math.Pi / 2},
+		{1, 0, 0, -1, 0, 0, math.Pi},
+		{1, 2, 3, 2, 3, -1, 1.2055891055045298},
+	}
+	for _, test := range tests {
+		p1 := Point{r3.Vector{test.x1, test.y1, test.z1}}
+		p2 := Point{r3.Vector{test.x2, test.y2, test.z2}}
+		if a := p1.Distance(p2).Radians(); !float64Eq(a, test.want) {
+			t.Errorf("%v.Distance(%v) = %v, want %v", p1, p2, a, test.want)
+		}
+		if a := p2.Distance(p1).Radians(); !float64Eq(a, test.want) {
+			t.Errorf("%v.Distance(%v) = %v, want %v", p2, p1, a, test.want)
+		}
+	}
+}
+
+func TestChordAngleBetweenPoints(t *testing.T) {
+	for iter := 0; iter < 10; iter++ {
+		m := randomFrame()
+		x := m.col(0)
+		y := m.col(1)
+		z := m.col(2)
+
+		if got := ChordAngleBetweenPoints(z, z).Angle(); got != 0 {
+			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want 0", z, z, got)
+		}
+		if got, want := ChordAngleBetweenPoints(Point{z.Mul(-1)}, z).Angle().Radians(), math.Pi; !float64Near(got, want, 1e-7) {
+			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", z.Mul(-1), z, got, want)
+		}
+		if got, want := ChordAngleBetweenPoints(x, z).Angle().Radians(), math.Pi/2; !float64Eq(got, want) {
+			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", x, z, got, want)
+		}
+		w := Point{y.Add(z.Vector).Normalize()}
+		if got, want := ChordAngleBetweenPoints(w, z).Angle().Radians(), math.Pi/4; !float64Eq(got, want) {
+			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", w, z, got, want)
+		}
+	}
+}
+
+func TestPointApproxEqual(t *testing.T) {
+	tests := []struct {
+		x1, y1, z1 float64
+		x2, y2, z2 float64
+		want       bool
+	}{
+		{1, 0, 0, 1, 0, 0, true},
+		{1, 0, 0, 0, 1, 0, false},
+		{1, 0, 0, 0, 1, 1, false},
+		{1, 0, 0, -1, 0, 0, false},
+		{1, 2, 3, 2, 3, -1, false},
+		{1, 0, 0, 1 * (1 + epsilon), 0, 0, true},
+		{1, 0, 0, 1 * (1 - epsilon), 0, 0, true},
+		{1, 0, 0, 1 + epsilon, 0, 0, true},
+		{1, 0, 0, 1 - epsilon, 0, 0, true},
+		{1, 0, 0, 1, epsilon, 0, true},
+		{1, 0, 0, 1, epsilon, epsilon, false},
+		{1, epsilon, 0, 1, -epsilon, epsilon, false},
+	}
+	for _, test := range tests {
+		p1 := Point{r3.Vector{test.x1, test.y1, test.z1}}
+		p2 := Point{r3.Vector{test.x2, test.y2, test.z2}}
+		if got := p1.ApproxEqual(p2); got != test.want {
+			t.Errorf("%v.ApproxEqual(%v), got %v want %v", p1, p2, got, test.want)
+		}
+	}
+}
+
+var (
+	pz   = Point{r3.Vector{0, 0, 1}}
+	p000 = Point{r3.Vector{1, 0, 0}}
+	p045 = Point{r3.Vector{1, 1, 0}}
+	p090 = Point{r3.Vector{0, 1, 0}}
+	p180 = Point{r3.Vector{-1, 0, 0}}
+	// Degenerate triangles.
+	pr = Point{r3.Vector{0.257, -0.5723, 0.112}}
+	pq = Point{r3.Vector{-0.747, 0.401, 0.2235}}
+
+	// For testing the Girard area fall through case.
+	g1 = Point{r3.Vector{1, 1, 1}}
+	g2 = Point{g1.Add(pr.Mul(1e-15)).Normalize()}
+	g3 = Point{g1.Add(pq.Mul(1e-15)).Normalize()}
+)
+
+func TestPointArea(t *testing.T) {
+	epsilon := 1e-10
+	tests := []struct {
+		a, b, c  Point
+		want     float64
+		nearness float64
+	}{
+		{p000, p090, pz, math.Pi / 2.0, 0},
+		// This test case should have a nearness of 0, but either Go or C++'s value for Pi,
+		// or the accuracy of the multiplications along the way, causes a difference around
+		// the 15th decimal place, so the result is not quite 0.
+		{p045, pz, p180, 3.0 * math.Pi / 4.0, 1e-14},
+		// Make sure that Area has good *relative* accuracy even for very small areas.
+		{Point{r3.Vector{epsilon, 0, 1}}, Point{r3.Vector{0, epsilon, 1}}, pz, 0.5 * epsilon * epsilon, 1e-14},
+		// Make sure that it can handle degenerate triangles.
+		{pr, pr, pr, 0.0, 0},
+		{pr, pq, pr, 0.0, 1e-15},
+		{p000, p045, p090, 0.0, 0},
+		// Try a very long and skinny triangle.
+		{p000, Point{r3.Vector{1, 1, epsilon}}, p090, 5.8578643762690495119753e-11, 1e-9},
+		// TODO(roberts):
+		// C++ includes a 10,000 iteration loop of perturbations to test that the Girard
+		// area computation stays below some noise threshold.
+		// Do we need that many? Will one or two suffice?
+		{g1, g2, g3, 0.0, 1e-15},
+	}
+	for _, test := range tests {
+		if got := PointArea(test.a, test.b, test.c); !float64Near(got, test.want, test.nearness) {
+			t.Errorf("PointArea(%v, %v, %v), got %v want %v", test.a, test.b, test.c, got, test.want)
+		}
+	}
+}
+
+func TestPointAreaQuarterHemisphere(t *testing.T) {
+	tests := []struct {
+		a, b, c, d, e Point
+		want          float64
+	}{
+		// Triangles with near-180 degree edges that sum to a quarter-sphere.
+		{Point{r3.Vector{1, 0.1 * epsilon, epsilon}}, p000, p045, p180, pz, math.Pi},
+		// Four other triangles that sum to a quarter-sphere.
+		{Point{r3.Vector{1, 1, epsilon}}, p000, p045, p180, pz, math.Pi},
+		// TODO(roberts):
+		// C++ includes a loop of 100 perturbations on a hemisphere for more tests.
+	}
+	for _, test := range tests {
+		area := PointArea(test.a, test.b, test.c) +
+			PointArea(test.a, test.c, test.d) +
+			PointArea(test.a, test.d, test.e) +
+			PointArea(test.a, test.e, test.b)
+
+		if !float64Eq(area, test.want) {
+			t.Errorf("Adding up 4 quarter hemispheres with PointArea(), got %v want %v", area, test.want)
+		}
+	}
+}
+
+func TestPointPlanarCentroid(t *testing.T) {
+	tests := []struct {
+		name             string
+		p0, p1, p2, want Point
+	}{
+		{
+			name: "xyz axis",
+			p0:   Point{r3.Vector{0, 0, 1}},
+			p1:   Point{r3.Vector{0, 1, 0}},
+			p2:   Point{r3.Vector{1, 0, 0}},
+			want: Point{r3.Vector{1. / 3, 1. / 3, 1. / 3}},
+		},
+		{
+			name: "Same point",
+			p0:   Point{r3.Vector{1, 0, 0}},
+			p1:   Point{r3.Vector{1, 0, 0}},
+			p2:   Point{r3.Vector{1, 0, 0}},
+			want: Point{r3.Vector{1, 0, 0}},
+		},
+	}
+
+	for _, test := range tests {
+		got := PlanarCentroid(test.p0, test.p1, test.p2)
+		if !got.ApproxEqual(test.want) {
+			t.Errorf("%s: PlanarCentroid(%v, %v, %v) = %v, want %v", test.name, test.p0, test.p1, test.p2, got, test.want)
+		}
+	}
+}
+
+func TestPointTrueCentroid(t *testing.T) {
+	// Test TrueCentroid with very small triangles. This test assumes that
+	// the triangle is small enough so that it is nearly planar.
+	// The centroid of a planar triangle is at the intersection of its
+	// medians, which is two-thirds of the way along each median.
+	for i := 0; i < 100; i++ {
+		f := randomFrame()
+		p := f.col(0)
+		x := f.col(1)
+		y := f.col(2)
+		d := 1e-4 * math.Pow(1e-4, randomFloat64())
+
+		// Make a triangle with two equal sides.
+		p0 := Point{p.Sub(x.Mul(d)).Normalize()}
+		p1 := Point{p.Add(x.Mul(d)).Normalize()}
+		p2 := Point{p.Add(y.Mul(d * 3)).Normalize()}
+		want := Point{p.Add(y.Mul(d)).Normalize()}
+
+		got := TrueCentroid(p0, p1, p2).Normalize()
+		if got.Distance(want.Vector) >= 2e-8 {
+			t.Errorf("TrueCentroid(%v, %v, %v).Normalize() = %v, want %v", p0, p1, p2, got, want)
+		}
+
+		// Make a triangle with a right angle.
+		p0 = p
+		p1 = Point{p.Add(x.Mul(d * 3)).Normalize()}
+		p2 = Point{p.Add(y.Mul(d * 6)).Normalize()}
+		want = Point{p.Add(x.Add(y.Mul(2)).Mul(d)).Normalize()}
+
+		got = TrueCentroid(p0, p1, p2).Normalize()
+		if got.Distance(want.Vector) >= 2e-8 {
+			t.Errorf("TrueCentroid(%v, %v, %v).Normalize() = %v, want %v", p0, p1, p2, got, want)
+		}
+	}
+}
+
+func TestPointRegularPoints(t *testing.T) {
+	// Conversion to/from degrees has a little more variability than the default epsilon.
+	const epsilon = 1e-13
+	center := PointFromLatLng(LatLngFromDegrees(80, 135))
+	radius := s1.Degree * 20
+	pts := regularPoints(center, radius, 4)
+
+	if len(pts) != 4 {
+		t.Errorf("regularPoints with 4 vertices should have 4 vertices, got %d", len(pts))
+	}
+
+	lls := []LatLng{
+		LatLngFromPoint(pts[0]),
+		LatLngFromPoint(pts[1]),
+		LatLngFromPoint(pts[2]),
+		LatLngFromPoint(pts[3]),
+	}
+	cll := LatLngFromPoint(center)
+
+	// Make sure that the radius is correct.
+	wantDist := 20.0
+	for i, ll := range lls {
+		if got := cll.Distance(ll).Degrees(); !float64Near(got, wantDist, epsilon) {
+			t.Errorf("Vertex %d distance from center = %v, want %v", i, got, wantDist)
+		}
+	}
+
+	// Make sure the angle between each point is correct.
+	wantAngle := math.Pi / 2
+	for i := 0; i < len(pts); i++ {
+		// Mod the index by 4 to wrap the values at each end.
+		v0, v1, v2 := pts[(4+i+1)%4], pts[(4+i)%4], pts[(4+i-1)%4]
+		if got := float64(v0.Sub(v1.Vector).Angle(v2.Sub(v1.Vector))); !float64Eq(got, wantAngle) {
+			t.Errorf("(%v-%v).Angle(%v-%v) = %v, want %v", v0, v1, v1, v2, got, wantAngle)
+		}
+	}
+
+	// Make sure that all edges of the polygon have the same length.
+	wantLength := 27.990890717782829
+	for i := 0; i < len(lls); i++ {
+		ll1, ll2 := lls[i], lls[(i+1)%4]
+		if got := ll1.Distance(ll2).Degrees(); !float64Near(got, wantLength, epsilon) {
+			t.Errorf("%v.Distance(%v) = %v, want %v", ll1, ll2, got, wantLength)
+		}
+	}
+
+	// Spot check an actual coordinate now that we know the points are spaced
+	// evenly apart at the same angles and radii.
+	if got, want := lls[0].Lat.Degrees(), 62.162880741097204; !float64Near(got, want, epsilon) {
+		t.Errorf("%v.Lat = %v, want %v", lls[0], got, want)
+	}
+	if got, want := lls[0].Lng.Degrees(), 103.11051028343407; !float64Near(got, want, epsilon) {
+		t.Errorf("%v.Lng = %v, want %v", lls[0], got, want)
+	}
+}
+
+func TestPointRegion(t *testing.T) {
+	p := Point{r3.Vector{1, 0, 0}}
+	r := Point{r3.Vector{1, 0, 0}}
+	if !r.Contains(p) {
+		t.Errorf("%v.Contains(%v) = false, want true", r, p)
+	}
+	if !r.Contains(r) {
+		t.Errorf("%v.Contains(%v) = false, want true", r, r)
+	}
+	if s := (Point{r3.Vector{1, 0, 1}}); r.Contains(s) {
+		t.Errorf("%v.Contains(%v) = true, want false", r, s)
+	}
+	if got, want := r.CapBound(), CapFromPoint(p); !got.ApproxEqual(want) {
+		t.Errorf("%v.CapBound() = %v, want %v", r, got, want)
+	}
+	if got, want := r.RectBound(), RectFromLatLng(LatLngFromPoint(p)); !rectsApproxEqual(got, want, epsilon, epsilon) {
+		t.Errorf("%v.RectBound() = %v, want %v", r, got, want)
+	}
+
+	// The leaf cell containing a point is still much larger than the point.
+	cell := CellFromPoint(p)
+	if r.ContainsCell(cell) {
+		t.Errorf("%v.ContainsCell(%v) = true, want false", r, cell)
+	}
+	if !r.IntersectsCell(cell) {
+		t.Errorf("%v.IntersectsCell(%v) = false, want true", r, cell)
+	}
+}
+
+func BenchmarkPointArea(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		PointArea(p000, p090, pz)
+	}
+}
+
+func BenchmarkPointAreaGirardCase(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		PointArea(g1, g2, g3)
+	}
+}

+ 140 - 5
vendor/github.com/golang/geo/s2/polygon.go

@@ -62,6 +62,9 @@ type Polygon struct {
 	// numVertices keeps the running total of all of the vertices of the contained loops.
 	numVertices int
 
+	// numEdges tracks the total number of edges in all the loops in this polygon.
+	numEdges int
+
 	// bound is a conservative bound on all points contained by this loop.
 	// If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
 	bound Rect
@@ -71,6 +74,11 @@ type Polygon struct {
 	// has been expanded sufficiently to account for this error, i.e.
 	// if A.Contains(B), then A.subregionBound.Contains(B.bound).
 	subregionBound Rect
+
+	// A slice where element i is the cumulative number of edges in the
+	// preceding loops in the polygon. This field is used for polygons that
+	// have a large number of loops, and may be empty for polygons with few loops.
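+	// For example, a many-loop polygon whose first loops have 4, 3, and 5 edges starts this slice with [0, 4, 7].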
+	cumulativeEdges []int
 }
 
 // PolygonFromLoops constructs a polygon from the given hierarchically nested
@@ -85,17 +93,33 @@ type Polygon struct {
 // panic if given a slice of length > 1.
 func PolygonFromLoops(loops []*Loop) *Polygon {
 	if len(loops) > 1 {
-		panic("s2.PolygonFromLoops for multiple loops is not yet implemented")
+		panic("PolygonFromLoops for multiple loops is not yet implemented")
 	}
-	return &Polygon{
+
+	p := &Polygon{
 		loops: loops,
 		// TODO(roberts): This is explicitly set as depth of 0 for the one loop in
 		// the polygon. When multiple loops are supported, fix this to set the depths.
-		loopDepths:     []int{0},
-		numVertices:    len(loops[0].Vertices()), // TODO(roberts): Once multi-loop is supported, fix this.
-		bound:          EmptyRect(),
+		loopDepths:  []int{0},
+		numVertices: len(loops[0].Vertices()), // TODO(roberts): Once multi-loop is supported, fix this.
+		// TODO(roberts): Compute these bounds.
+		bound:          loops[0].RectBound(),
 		subregionBound: EmptyRect(),
 	}
+
+	const maxLinearSearchLoops = 12 // Based on benchmarks.
+	if len(loops) > maxLinearSearchLoops {
+		p.cumulativeEdges = make([]int, 0, len(loops))
+	}
+
+	for _, l := range loops {
+		if p.cumulativeEdges != nil {
+			p.cumulativeEdges = append(p.cumulativeEdges, p.numEdges)
+		}
+		p.numEdges += len(l.Vertices())
+	}
+
+	return p
 }
 
 // FullPolygon returns a special "full" polygon.
@@ -209,3 +233,114 @@ func (p *Polygon) RectBound() Rect { return p.bound }
 // IntersectsCell reports whether the polygon intersects the given cell.
 // TODO(roberts)
 //func (p *Polygon) IntersectsCell(c Cell) bool { ... }
+
+// Shape Interface
+
+// NumEdges returns the number of edges in this shape.
+func (p *Polygon) NumEdges() int {
+	return p.numEdges
+}
+
+// Edge returns endpoints for the given edge index.
+func (p *Polygon) Edge(e int) (a, b Point) {
+	var i int
+
+	if len(p.cumulativeEdges) > 0 {
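+		// Find the loop whose cumulative range contains edge e, then make e relative to that loop's first edge.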
+		for i = range p.cumulativeEdges {
+			if i+1 >= len(p.cumulativeEdges) || e < p.cumulativeEdges[i+1] {
+				e -= p.cumulativeEdges[i]
+				break
+			}
+		}
+	} else {
+		// When the number of loops is small, use linear search. Most often
+		// there is exactly one loop and the code below executes zero times.
+		for i = 0; e >= len(p.Loop(i).vertices); i++ {
+			e -= len(p.Loop(i).vertices)
+		}
+	}
+
+	// TODO(roberts): C++ uses the oriented vertices from Loop. Move to those when
+	// they are implemented here.
+	return p.Loop(i).Vertex(e), p.Loop(i).Vertex(e + 1)
+}
+
+// HasInterior reports whether this Polygon has an interior.
+func (p *Polygon) HasInterior() bool {
+	return p.dimension() == polygonGeometry
+}
+
+// ContainsOrigin returns whether this shape contains the origin.
+func (p *Polygon) ContainsOrigin() bool {
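+	// The origin is contained iff an odd number of loops contain it, so XOR the per-loop results.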
+	containsOrigin := false
+	for _, l := range p.loops {
+		containsOrigin = containsOrigin != l.ContainsOrigin()
+	}
+	return containsOrigin
+}
+
+// dimension returns the dimension of the geometry represented by this Polygon.
+func (p *Polygon) dimension() dimension { return polygonGeometry }
+
+// numChains reports the number of contiguous edge chains in the Polygon.
+func (p *Polygon) numChains() int {
+	if p.IsFull() {
+		return 0
+	}
+
+	return p.NumLoops()
+}
+
+// chainStart returns the id of the first edge in the i-th edge chain in this Polygon.
+func (p *Polygon) chainStart(i int) int {
+	if p.cumulativeEdges != nil {
+		if i == p.NumLoops() {
+			return p.numEdges
+		}
+		return p.cumulativeEdges[i]
+	}
+
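+	// Without precomputed cumulativeEdges, sum the edge counts of the loops before chain i.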
+	e := 0
+	for i--; i >= 0; i-- {
+		e += len(p.Loop(i).vertices)
+	}
+	return e
+}
+
+// TODO(roberts): Differences from C++
+// InitNestedFromLoops
+// InitFromLoop
+// InitOrientedFromLoops
+// IsValid
+// Area
+// Centroid
+// SnapLevel
+// DistanceToPoint
+// DistanceToBoundary
+// Project
+// ProjectToBoundary
+// Contains/ApproxContains/Intersects/ApproxDisjoint for Polygons
+// InitTo{Intersection/ApproxIntersection/Union/ApproxUnion/Diff/ApproxDiff}
+// InitToSimplified
+// InitToSnapped
+// IntersectWithPolyline
+// ApproxIntersectWithPolyline
+// SubtractFromPolyline
+// ApproxSubtractFromPolyline
+// DestructiveUnion
+// DestructiveApproxUnion
+// InitToCellUnionBorder
+// IsNormalized
+// Equals/BoundaryEquals/BoundaryApproxEquals/BoundaryNear Polygons
+// BreakEdgesAndAddToBuilder
+// clearLoops
+// findLoopNestingError
+// initLoops
+// initToSimplifiedInternal
+// internalClipPolyline
+// compareBoundary
+// containsBoundary
+// excludesBoundary
+// containsNonCrossingBoundary
+// excludesNonCrossingShells

+ 342 - 0
vendor/github.com/golang/geo/s2/polygon_test.go

@@ -0,0 +1,342 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"testing"
+)
+
+const (
+	// A set of nested loops around the LatLng point 0:0.
+	// Every vertex of nearLoop0 is also a vertex of nearLoop1.
+	nearPoint    = "0:0"
+	nearLoop0    = "-1:0, 0:1, 1:0, 0:-1;"
+	nearLoop1    = "-1:-1, -1:0, -1:1, 0:1, 1:1, 1:0, 1:-1, 0:-1;"
+	nearLoop2    = "-1:-2, -2:5, 5:-2;"
+	nearLoop3    = "-2:-2, -3:6, 6:-3;"
+	nearLoopHemi = "0:-90, -90:0, 0:90, 90:0;"
+
+	// A set of nested loops around the LatLng point 0:180. Every vertex of
+	// farLoop0 and farLoop2 belongs to farLoop1, and all the loops except
+	// farLoop2 are non-convex.
+	farPoint    = "0:180"
+	farLoop0    = "0:179, 1:180, 0:-179, 2:-180;"
+	farLoop1    = "0:179, -1:179, 1:180, -1:-179, 0:-179, 3:-178, 2:-180, 3:178;"
+	farLoop2    = "3:-178, 3:178, -1:179, -1:-179;"
+	farLoop3    = "-3:-178, 4:-177, 4:177, -3:178, -2:179;"
+	farLoopHemi = "0:-90, 60:90, -60:90;"
+
+	// A set of nested loops around the LatLng point -90:0.
+	southLoopPoint = "-89.9999:0.001"
+	southLoop0a    = "-90:0, -89.99:0.01, -89.99:0;"
+	southLoop0b    = "-90:0, -89.99:0.03, -89.99:0.02;"
+	southLoop0c    = "-90:0, -89.99:0.05, -89.99:0.04;"
+	southLoop1     = "-90:0, -89.9:0.1, -89.9:-0.1;"
+	southLoop2     = "-90:0, -89.8:0.2, -89.8:-0.2;"
+	southLoopHemi  = "0:-180, 0:60, 0:-60;"
+
+	// Two different loops that surround all the near and far loops except
+	// for the hemispheres.
+	nearFarLoop1 = "-1:-9, -9:-9, -9:9, 9:9, 9:-9, 1:-9, " +
+		"1:-175, 9:-175, 9:175, -9:175, -9:-175, -1:-175;"
+	nearFarLoop2 = "-2:15, -2:170, -8:-175, 8:-175, " +
+		"2:170, 2:15, 8:-4, -8:-4;"
+
+	// Loop that results from intersection of other loops.
+	farHemiSouthHemiLoop = "0:-180, 0:90, -60:90, 0:-90;"
+
+	// Rectangles that form a cross, with only shared vertices, no crossing edges.
+	// Optional holes outside the intersecting region. 1 is the horizontal rectangle,
+	// and 2 is the vertical. The intersections are shared vertices.
+	//       x---x
+	//       | 2 |
+	//   +---*---*---+
+	//   | 1 |1+2| 1 |
+	//   +---*---*---+
+	//       | 2 |
+	//       x---x
+	loopCross1          = "-2:1, -1:1, 1:1, 2:1, 2:-1, 1:-1, -1:-1, -2:-1;"
+	loopCross1SideHole  = "-1.5:0.5, -1.2:0.5, -1.2:-0.5, -1.5:-0.5;"
+	loopCrossCenterHole = "-0.5:0.5, 0.5:0.5, 0.5:-0.5, -0.5:-0.5;"
+	loopCross2SideHole  = "0.5:-1.5, 0.5:-1.2, -0.5:-1.2, -0.5:-1.5;"
+	loopCross2          = "1:-2, 1:-1, 1:1, 1:2, -1:2, -1:1, -1:-1, -1:-2;"
+
+	// Two rectangles that intersect, but no edges cross and there's always
+	// local containment (rather than crossing) at each shared vertex.
+	// In this ugly ASCII art, 1 is A+B, 2 is B+C:
+	//   +---+---+---+
+	//   | A | B | C |
+	//   +---+---+---+
+	loopOverlap1          = "0:1, 1:1, 2:1, 2:0, 1:0, 0:0;"
+	loopOverlap1SideHole  = "0.2:0.8, 0.8:0.8, 0.8:0.2, 0.2:0.2;"
+	loopOverlapCenterHole = "1.2:0.8, 1.8:0.8, 1.8:0.2, 1.2:0.2;"
+	loopOverlap2SideHole  = "2.2:0.8, 2.8:0.8, 2.8:0.2, 2.2:0.2;"
+	loopOverlap2          = "1:1, 2:1, 3:1, 3:0, 2:0, 1:0;"
+
+	// By symmetry, the intersection of the two polygons has almost half the area
+	// of either polygon.
+	//   +---+
+	//   | 3 |
+	//   +---+---+
+	//   |3+4| 4 |
+	//   +---+---+
+	loopOverlap3 = "-10:10, 0:10, 0:-10, -10:-10, -10:0"
+	loopOverlap4 = "-10:0, 10:0, 10:-10, -10:-10"
+)
+
+// Some shared polygons used in the tests.
+var (
+	emptyPolygon = &Polygon{}
+	fullPolygon  = FullPolygon()
+
+	// TODO(roberts): Uncomment once Polygons with multiple loops are supported.
+	/*
+		near0Polygon     = makePolygon(nearLoop0, true)
+		near01Polygon    = makePolygon(nearLoop0+nearLoop1, true)
+		near30Polygon    = makePolygon(nearLoop3+nearLoop0, true)
+		near23Polygon    = makePolygon(nearLoop2+nearLoop3, true)
+		near0231Polygon  = makePolygon(nearLoop0+nearLoop2+nearLoop3+nearLoop1, true)
+		near023H1Polygon = makePolygon(nearLoop0+nearLoop2+nearLoop3+nearLoopHemi+nearLoop1, true)
+
+		far01Polygon    = makePolygon(farLoop0+farLoop1, true)
+		far21Polygon    = makePolygon(farLoop2+farLoop1, true)
+		far231Polygon   = makePolygon(farLoop2+farLoop3+farLoop1, true)
+		far2H0Polygon   = makePolygon(farLoop2+farLoopHemi+farLoop0, true)
+		far2H013Polygon = makePolygon(farLoop2+farLoopHemi+farLoop0+farLoop1+farLoop3, true)
+
+		south0abPolygon     = makePolygon(southLoop0a+southLoop0b, true)
+		south2Polygon       = makePolygon(southLoop2, true)
+		south20b1Polygon    = makePolygon(southLoop2+southLoop0b+southLoop1, true)
+		south2H1Polygon     = makePolygon(southLoop2+southLoopHemi+southLoop1, true)
+		south20bH0acPolygon = makePolygon(southLoop2+southLoop0b+southLoopHemi+
+			southLoop0a+southLoop0c, true)
+
+		nf1N10F2S10abcPolygon = makePolygon(southLoop0c+farLoop2+nearLoop1+
+			nearFarLoop1+nearLoop0+southLoop1+southLoop0b+southLoop0a, true)
+
+		nf2N2F210S210abPolygon = makePolygon(farLoop2+southLoop0a+farLoop1+
+			southLoop1+farLoop0+southLoop0b+nearFarLoop2+southLoop2+nearLoop2, true)
+
+		f32n0Polygon  = makePolygon(farLoop2+nearLoop0+farLoop3, true)
+		n32s0bPolygon = makePolygon(nearLoop3+southLoop0b+nearLoop2, true)
+
+		cross1Polygon           = makePolygon(loopCross1, true)
+		cross1SideHolePolygon   = makePolygon(loopCross1+loopCross1SideHole, true)
+		cross1CenterHolePolygon = makePolygon(loopCross1+loopCrossCenterHole, true)
+		cross2Polygon           = makePolygon(loopCross2, true)
+		cross2SideHolePolygon   = makePolygon(loopCross2+loopCross2SideHole, true)
+		cross2CenterHolePolygon = makePolygon(loopCross2+loopCrossCenterHole, true)
+
+		overlap1Polygon           = makePolygon(loopOverlap1, true)
+		overlap1SideHolePolygon   = makePolygon(loopOverlap1+loopOverlap1SideHole, true)
+		overlap1CenterHolePolygon = makePolygon(loopOverlap1+loopOverlapCenterHole, true)
+		overlap2Polygon           = makePolygon(loopOverlap2, true)
+		overlap2SideHolePolygon   = makePolygon(loopOverlap2+loopOverlap2SideHole, true)
+		overlap2CenterHolePolygon = makePolygon(loopOverlap2+loopOverlapCenterHole, true)
+
+		overlap3Polygon = makePolygon(loopOverlap3, true)
+		overlap4Polygon = makePolygon(loopOverlap4, true)
+
+		farHemiPolygon      = makePolygon(farLoopHemi, true)
+		southHemiPolygon    = makePolygon(southLoopHemi, true)
+		farSouthHemiPolygon = makePolygon(farHemiSouthHemiLoop, true)
+	*/
+)
+
+func TestPolygonEmptyAndFull(t *testing.T) {
+	if !emptyPolygon.IsEmpty() {
+		t.Errorf("empty polygon should be empty")
+	}
+	if emptyPolygon.IsFull() {
+		t.Errorf("empty polygon should not be full")
+	}
+
+	if emptyPolygon.ContainsOrigin() {
+		t.Errorf("emptyPolygon.ContainsOrigin() = true, want false")
+	}
+	if got, want := emptyPolygon.NumEdges(), 0; got != want {
+		t.Errorf("emptyPolygon.NumEdges() = %v, want %v", got, want)
+	}
+
+	if got := emptyPolygon.dimension(); got != polygonGeometry {
+		t.Errorf("emptyPolygon.dimension() = %v, want %v", got, polygonGeometry)
+	}
+	if got, want := emptyPolygon.numChains(), 0; got != want {
+		t.Errorf("emptyPolygon.numChains() = %v, want %v", got, want)
+	}
+
+	if fullPolygon.IsEmpty() {
+		t.Errorf("full polygon should not be emtpy")
+	}
+	if !fullPolygon.IsFull() {
+		t.Errorf("full polygon should be full")
+	}
+
+	if !fullPolygon.ContainsOrigin() {
+		t.Errorf("fullPolygon.ContainsOrigin() = false, want true")
+	}
+	if got, want := fullPolygon.NumEdges(), 0; got != want {
+		t.Errorf("fullPolygon.NumEdges() = %v, want %v", got, want)
+	}
+
+	if got := fullPolygon.dimension(); got != polygonGeometry {
+		t.Errorf("emptyPolygon.dimension() = %v, want %v", got, polygonGeometry)
+	}
+	if got, want := fullPolygon.numChains(), 0; got != want {
+		t.Errorf("emptyPolygon.numChains() = %v, want %v", got, want)
+	}
+}
+
+func TestPolygonShape(t *testing.T) {
+	p := makePolygon("0:0, 1:0, 1:1, 2:1", true)
+	shape := Shape(p)
+
+	if got, want := shape.NumEdges(), 4; got != want {
+		t.Errorf("%v.NumEdges() = %v, want %d", shape, got, want)
+	}
+
+	if p.numVertices != shape.NumEdges() {
+		t.Errorf("the number of vertices in a polygon should equal the number of edges")
+	}
+	if p.NumLoops() != shape.numChains() {
+		t.Errorf("the number of loops in a polygon should equal the number of chains")
+	}
+	e := 0
+	v2, v3 := shape.Edge(2)
+	if want := PointFromLatLng(LatLngFromDegrees(1, 1)); !v2.ApproxEqual(want) {
+		t.Errorf("%v.Edge(%d) point A = %v  want %v", shape, 2, v2, want)
+	}
+	if want := PointFromLatLng(LatLngFromDegrees(2, 1)); !v3.ApproxEqual(want) {
+		t.Errorf("%v.Edge(%d) point B = %v  want %v", shape, 2, v3, want)
+	}
+	for i, l := range p.loops {
+		if e != shape.chainStart(i) {
+			t.Errorf("the edge id of the start of loop(%d) should equal the sum of vertices so far in the polygon. got %d, want %d", i, shape.chainStart(i), e)
+		}
+		for j := 0; j < len(l.Vertices()); j++ {
+			v0, v1 := shape.Edge(e)
+			// TODO(roberts): Update once Loop implements orientedVertex.
+			//if l.orientedVertex(j) != v0 {
+			if l.Vertex(j) != v0 {
+				t.Errorf("l.Vertex(%d) = %v, want %v", j, l.Vertex(j), v0)
+			}
+			// TODO(roberts): Update once Loop implements orientedVertex.
+			//if l.orientedVertex(j+1) != v1 {
+			if l.Vertex(j+1) != v1 {
+				t.Errorf("l.Vertex(%d) = %v, want %v", j+1, l.Vertex(j+1), v1)
+			}
+			e++
+		}
+		if e != shape.chainStart(i+1) {
+			t.Errorf("the edge id of the start of the next loop(%d+1) should equal the sum of vertices so far in the polygon. got %d, want %d", i, shape.chainStart(i+1), e)
+		}
+	}
+	if shape.dimension() != polygonGeometry {
+		t.Errorf("polygon.dimension() = %v, want %v", shape.dimension(), polygonGeometry)
+	}
+	if !shape.HasInterior() {
+		t.Errorf("polygons should always have interiors")
+	}
+	if !shape.ContainsOrigin() {
+		t.Errorf("polygon %v should contain the origin", shape)
+	}
+}
+
+func TestPolygonLoop(t *testing.T) {
+	if fullPolygon.NumLoops() != 1 {
+		t.Errorf("full polygon should have one loop")
+	}
+
+	l := &Loop{}
+	p1 := PolygonFromLoops([]*Loop{l})
+	if p1.NumLoops() != 1 {
+		t.Errorf("polygon with one loop should have one loop")
+	}
+	if p1.Loop(0) != l {
+		t.Errorf("polygon with one loop should return it")
+	}
+
+	// TODO: When multiple loops are supported, add more test cases.
+}
+
+func TestPolygonParent(t *testing.T) {
+	p1 := PolygonFromLoops([]*Loop{&Loop{}})
+	tests := []struct {
+		p    *Polygon
+		have int
+		want int
+		ok   bool
+	}{
+		{fullPolygon, 0, -1, false},
+		{p1, 0, -1, false},
+
+		// TODO: When multiple loops are supported, add more test cases to
+		// more fully show the parent levels.
+	}
+
+	for _, test := range tests {
+		if got, ok := test.p.Parent(test.have); ok != test.ok || got != test.want {
+			t.Errorf("%v.Parent(%d) = %d,%v, want %d,%v", test.p, test.have, got, ok, test.want, test.ok)
+		}
+	}
+}
+
+func TestPolygonLastDescendant(t *testing.T) {
+	p1 := PolygonFromLoops([]*Loop{&Loop{}})
+
+	tests := []struct {
+		p    *Polygon
+		have int
+		want int
+	}{
+		{fullPolygon, 0, 0},
+		{fullPolygon, -1, 0},
+
+		{p1, 0, 0},
+		{p1, -1, 0},
+
+		// TODO: When multiple loops are supported, add more test cases.
+	}
+
+	for _, test := range tests {
+		if got := test.p.LastDescendant(test.have); got != test.want {
+			t.Errorf("%v.LastDescendant(%d) = %d, want %d", test.p, test.have, got, test.want)
+		}
+	}
+}
+
+func TestPolygonLoopIsHoleAndLoopSign(t *testing.T) {
+	if fullPolygon.loopIsHole(0) {
+		t.Errorf("the full polygons only loop should not be a hole")
+	}
+	if fullPolygon.loopSign(0) != 1 {
+		t.Errorf("the full polygons only loop should be postitive")
+	}
+
+	loop := LoopFromPoints(parsePoints("30:20, 40:20, 39:43, 33:35"))
+	p := PolygonFromLoops([]*Loop{loop})
+
+	if p.loopIsHole(0) {
+		t.Errorf("first loop in a polygon should not start out as a hole")
+	}
+	if p.loopSign(0) != 1 {
+		t.Errorf("first loop in a polygon should start out as positive")
+	}
+
+	// TODO: When multiple loops are supported, add more test cases to
+	// more fully show the parent levels.
+}
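
Note on the loop strings above: makePolygon and parsePoints (test helpers not shown in this diff) consume loops written as comma-separated lat:lng pairs in degrees, with multiple loops separated by semicolons. The sketch below is only a hypothetical illustration of that format, built from exported functions that do appear in this diff (LatLngFromDegrees, PointFromLatLng, LoopFromPoints); the real helpers may differ.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/golang/geo/s2"
)

// loopFromDegreesString parses one "lat:lng, lat:lng, ..." loop string.
// Hypothetical helper for illustration only; parse errors are ignored for brevity.
func loopFromDegreesString(s string) *s2.Loop {
	var pts []s2.Point
	for _, pair := range strings.Split(strings.TrimSuffix(strings.TrimSpace(s), ";"), ",") {
		ll := strings.SplitN(strings.TrimSpace(pair), ":", 2)
		lat, _ := strconv.ParseFloat(ll[0], 64)
		lng, _ := strconv.ParseFloat(ll[1], 64)
		pts = append(pts, s2.PointFromLatLng(s2.LatLngFromDegrees(lat, lng)))
	}
	return s2.LoopFromPoints(pts)
}

func main() {
	loop := loopFromDegreesString("0:1, 1:1, 2:1, 2:0, 1:0, 0:0;")
	fmt.Println(len(loop.Vertices())) // 6
}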

+ 53 - 33
vendor/github.com/golang/geo/s2/polyline.go

@@ -28,27 +28,27 @@ import (
 type Polyline []Point
 
 // PolylineFromLatLngs creates a new Polyline from the given LatLngs.
-func PolylineFromLatLngs(points []LatLng) Polyline {
+func PolylineFromLatLngs(points []LatLng) *Polyline {
 	p := make(Polyline, len(points))
 	for k, v := range points {
 		p[k] = PointFromLatLng(v)
 	}
-	return p
+	return &p
 }
 
 // Reverse reverses the order of the Polyline vertices.
-func (p Polyline) Reverse() {
-	for i := 0; i < len(p)/2; i++ {
-		p[i], p[len(p)-i-1] = p[len(p)-i-1], p[i]
+func (p *Polyline) Reverse() {
+	for i := 0; i < len(*p)/2; i++ {
+		(*p)[i], (*p)[len(*p)-i-1] = (*p)[len(*p)-i-1], (*p)[i]
 	}
 }
 
 // Length returns the length of this Polyline.
-func (p Polyline) Length() s1.Angle {
+func (p *Polyline) Length() s1.Angle {
 	var length s1.Angle
 
-	for i := 1; i < len(p); i++ {
-		length += p[i-1].Distance(p[i])
+	for i := 1; i < len(*p); i++ {
+		length += (*p)[i-1].Distance((*p)[i])
 	}
 	return length
 }
@@ -58,14 +58,14 @@ func (p Polyline) Length() s1.Angle {
 //
 // Scaling by the Polyline length makes it easy to compute the centroid
 // of several Polylines (by simply adding up their centroids).
-func (p Polyline) Centroid() Point {
+func (p *Polyline) Centroid() Point {
 	var centroid Point
-	for i := 1; i < len(p); i++ {
+	for i := 1; i < len(*p); i++ {
 		// The centroid (multiplied by length) is a vector toward the midpoint
 		// of the edge, whose length is twice the sin of half the angle between
 		// the two vertices. Defining theta to be this angle, we have:
-		vSum := p[i-1].Add(p[i].Vector)  // Length == 2*cos(theta)
-		vDiff := p[i-1].Sub(p[i].Vector) // Length == 2*sin(theta)
+		vSum := (*p)[i-1].Add((*p)[i].Vector)  // Length == 2*cos(theta)
+		vDiff := (*p)[i-1].Sub((*p)[i].Vector) // Length == 2*sin(theta)
 
 		// Length == 2*sin(theta)
 		centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))}
@@ -74,12 +74,12 @@ func (p Polyline) Centroid() Point {
 }
 
 // Equals reports whether the given Polyline is exactly the same as this one.
-func (p Polyline) Equals(b Polyline) bool {
-	if len(p) != len(b) {
+func (p *Polyline) Equals(b *Polyline) bool {
+	if len(*p) != len(*b) {
 		return false
 	}
-	for i, v := range p {
-		if v != b[i] {
+	for i, v := range *p {
+		if v != (*b)[i] {
 			return false
 		}
 	}
@@ -88,14 +88,14 @@ func (p Polyline) Equals(b Polyline) bool {
 }
 
 // CapBound returns the bounding Cap for this Polyline.
-func (p Polyline) CapBound() Cap {
+func (p *Polyline) CapBound() Cap {
 	return p.RectBound().CapBound()
 }
 
 // RectBound returns the bounding Rect for this Polyline.
-func (p Polyline) RectBound() Rect {
+func (p *Polyline) RectBound() Rect {
 	rb := NewRectBounder()
-	for _, v := range p {
+	for _, v := range *p {
 		rb.AddPoint(v)
 	}
 	return rb.RectBound()
@@ -103,20 +103,20 @@ func (p Polyline) RectBound() Rect {
 
 // ContainsCell reports whether this Polyline contains the given Cell. Always returns false
 // because "containment" is not numerically well-defined except at the Polyline vertices.
-func (p Polyline) ContainsCell(cell Cell) bool {
+func (p *Polyline) ContainsCell(cell Cell) bool {
 	return false
 }
 
 // IntersectsCell reports whether this Polyline intersects the given Cell.
-func (p Polyline) IntersectsCell(cell Cell) bool {
-	if len(p) == 0 {
+func (p *Polyline) IntersectsCell(cell Cell) bool {
+	if len(*p) == 0 {
 		return false
 	}
 
 	// We only need to check whether the cell contains vertex 0 for correctness,
 	// but these tests are cheap compared to edge crossings so we might as well
 	// check all the vertices.
-	for _, v := range p {
+	for _, v := range *p {
 		if cell.ContainsPoint(v) {
 			return true
 		}
@@ -130,9 +130,9 @@ func (p Polyline) IntersectsCell(cell Cell) bool {
 	}
 
 	for j := 0; j < 4; j++ {
-		crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], p[0])
-		for i := 1; i < len(p); i++ {
-			if crosser.ChainCrossingSign(p[i]) != DoNotCross {
+		crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], (*p)[0])
+		for i := 1; i < len(*p); i++ {
+			if crosser.ChainCrossingSign((*p)[i]) != DoNotCross {
 				// There is a proper crossing, or two vertices were the same.
 				return true
 			}
@@ -142,25 +142,45 @@ func (p Polyline) IntersectsCell(cell Cell) bool {
 }
 
 // NumEdges returns the number of edges in this shape.
-func (p Polyline) NumEdges() int {
-	if len(p) == 0 {
+func (p *Polyline) NumEdges() int {
+	if len(*p) == 0 {
 		return 0
 	}
-	return len(p) - 1
+	return len(*p) - 1
 }
 
 // Edge returns endpoints for the given edge index.
-func (p Polyline) Edge(i int) (a, b Point) {
-	return p[i], p[i+1]
+func (p *Polyline) Edge(i int) (a, b Point) {
+	return (*p)[i], (*p)[i+1]
+}
+
+// dimension returns the dimension of the geometry represented by this Polyline.
+func (p *Polyline) dimension() dimension { return polylineGeometry }
+
+// numChains reports the number of contiguous edge chains in this Polyline.
+func (p *Polyline) numChains() int {
+	if p.NumEdges() >= 1 {
+		return 1
+	}
+	return 0
+}
+
+// chainStart returns the id of the first edge in the i-th edge chain in this Polyline.
+func (p *Polyline) chainStart(i int) int {
+	if i == 0 {
+		return 0
+	}
+
+	return p.NumEdges()
 }
 
 // HasInterior returns false as Polylines are not closed.
-func (p Polyline) HasInterior() bool {
+func (p *Polyline) HasInterior() bool {
 	return false
 }
 
 // ContainsOrigin returns false because there is no interior to contain s2.Origin.
-func (p Polyline) ContainsOrigin() bool {
+func (p *Polyline) ContainsOrigin() bool {
 	return false
 }
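
With the receiver change above, it is *Polyline rather than Polyline that carries the Shape-style methods (the region.go hunk later in this diff asserts _ Region = (*Polyline)(nil) accordingly). A minimal usage sketch, relying only on the exported methods visible in this hunk and making no claims beyond them:

package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// PolylineFromLatLngs now returns *Polyline, the type the methods above
	// are defined on.
	line := s2.PolylineFromLatLngs([]s2.LatLng{
		s2.LatLngFromDegrees(0, 0),
		s2.LatLngFromDegrees(0, 90),
		s2.LatLngFromDegrees(0, 180),
	})

	fmt.Println(line.NumEdges())         // 2
	fmt.Println(line.Length().Degrees()) // ~180
	a, b := line.Edge(1)                 // endpoints of the second segment
	fmt.Println(a, b)
}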
 

+ 144 - 0
vendor/github.com/golang/geo/s2/polyline_test.go

@@ -0,0 +1,144 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+)
+
+func TestPolylineBasics(t *testing.T) {
+	empty := Polyline{}
+	if empty.RectBound() != EmptyRect() {
+		t.Errorf("empty.RectBound() = %v, want %v", empty.RectBound(), EmptyRect())
+	}
+	if len(empty) != 0 {
+		t.Errorf("empty Polyline should have no vertices")
+	}
+	empty.Reverse()
+	if len(empty) != 0 {
+		t.Errorf("reveresed empty Polyline should have no vertices")
+	}
+
+	latlngs := []LatLng{
+		LatLngFromDegrees(0, 0),
+		LatLngFromDegrees(0, 90),
+		LatLngFromDegrees(0, 180),
+	}
+
+	semiEquator := PolylineFromLatLngs(latlngs)
+	//if got, want := semiEquator.Interpolate(0.5), Point{r3.Vector{0, 1, 0}}; !got.ApproxEqual(want) {
+	//	t.Errorf("semiEquator.Interpolate(0.5) = %v, want %v", got, want)
+	//}
+	semiEquator.Reverse()
+	if got, want := (*semiEquator)[2], (Point{r3.Vector{1, 0, 0}}); !got.ApproxEqual(want) {
+		t.Errorf("semiEquator[2] = %v, want %v", got, want)
+	}
+}
+
+func TestPolylineShape(t *testing.T) {
+	var shape Shape = makePolyline("0:0, 1:0, 1:1, 2:1")
+	if got, want := shape.NumEdges(), 3; got != want {
+		t.Errorf("%v.NumEdges() = %v, want %d", shape, got, want)
+	}
+
+	if got, want := shape.numChains(), 1; got != want {
+		t.Errorf("%v.numChains() = %d, want %d", shape, got, want)
+	}
+	if got, want := shape.chainStart(0), 0; got != want {
+		t.Errorf("%v.chainStart(0) = %d, want %d", shape, got, want)
+	}
+	if got, want := shape.chainStart(1), 3; got != want {
+		t.Errorf("%v.chainStart(1) = %d, want %d", shape, got, want)
+	}
+
+	v2, v3 := shape.Edge(2)
+	if want := PointFromLatLng(LatLngFromDegrees(1, 1)); !v2.ApproxEqual(want) {
+		t.Errorf("%v.Edge(%d) point A = %v  want %v", shape, 2, v2, want)
+	}
+	if want := PointFromLatLng(LatLngFromDegrees(2, 1)); !v3.ApproxEqual(want) {
+		t.Errorf("%v.Edge(%d) point B = %v  want %v", shape, 2, v3, want)
+	}
+
+	if shape.HasInterior() {
+		t.Errorf("polylines should not have an interior")
+	}
+	if shape.ContainsOrigin() {
+		t.Errorf("polylines should not contain the origin")
+	}
+
+	if shape.dimension() != polylineGeometry {
+		t.Errorf("polylines should have PolylineGeometry")
+	}
+
+	empty := &Polyline{}
+	if got, want := empty.NumEdges(), 0; got != want {
+		t.Errorf("%v.NumEdges() = %d, want %d", empty, got, want)
+	}
+	if got, want := empty.numChains(), 0; got != want {
+		t.Errorf("%v.numChains() = %d, want %d", empty, got, want)
+	}
+}
+
+func TestPolylineLengthAndCentroid(t *testing.T) {
+	// Construct random great circles and divide them randomly into segments.
+	// Then make sure that the length and centroid are correct.  Note that
+	// because of the way the centroid is computed, it does not matter how
+	// we split the great circle into segments.
+
+	for i := 0; i < 100; i++ {
+		// Choose a coordinate frame for the great circle.
+		f := randomFrame()
+
+		var line Polyline
+		for theta := 0.0; theta < 2*math.Pi; theta += math.Pow(randomFloat64(), 10) {
+			p := Point{f.row(0).Mul(math.Cos(theta)).Add(f.row(1).Mul(math.Sin(theta)))}
+			if len(line) == 0 || !p.ApproxEqual(line[len(line)-1]) {
+				line = append(line, p)
+			}
+		}
+
+		// Close the circle.
+		line = append(line, line[0])
+
+		length := line.Length()
+		if got, want := math.Abs(length.Radians()-2*math.Pi), 2e-14; got > want {
+			t.Errorf("%v.Length() = %v, want < %v", line, got, want)
+		}
+
+		centroid := line.Centroid()
+		if got, want := centroid.Norm(), 2e-14; got > want {
+			t.Errorf("%v.Norm() = %v, want < %v", centroid, got, want)
+		}
+	}
+}
+
+func TestPolylineIntersectsCell(t *testing.T) {
+	pline := Polyline{
+		Point{r3.Vector{1, -1.1, 0.8}.Normalize()},
+		Point{r3.Vector{1, -0.8, 1.1}.Normalize()},
+	}
+
+	for face := 0; face < 6; face++ {
+		cell := CellFromCellID(CellIDFromFace(face))
+		if got, want := pline.IntersectsCell(cell), face&1 == 0; got != want {
+			t.Errorf("%v.IntersectsCell(%v) = %v, want %v", pline, cell, got, want)
+		}
+	}
+}

+ 314 - 0
vendor/github.com/golang/geo/s2/predicates_test.go

@@ -0,0 +1,314 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+)
+
+func TestPredicatesSign(t *testing.T) {
+	tests := []struct {
+		p1x, p1y, p1z, p2x, p2y, p2z, p3x, p3y, p3z float64
+		want                                        bool
+	}{
+		{1, 0, 0, 0, 1, 0, 0, 0, 1, true},
+		{0, 1, 0, 0, 0, 1, 1, 0, 0, true},
+		{0, 0, 1, 1, 0, 0, 0, 1, 0, true},
+		{1, 1, 0, 0, 1, 1, 1, 0, 1, true},
+		{-3, -1, 4, 2, -1, -3, 1, -2, 0, true},
+
+		// All degenerate cases of Sign(). Let M_1, M_2, ... be the sequence of
+		// submatrices whose determinant sign is tested by that function. Then the
+		// i-th test below is a 3x3 matrix M (with rows A, B, C) such that:
+		//
+		//    det(M) = 0
+		//    det(M_j) = 0 for j < i
+		//    det(M_i) != 0
+		//    A < B < C in lexicographic order.
+		// det(M_1) = b0*c1 - b1*c0
+		{-3, -1, 0, -2, 1, 0, 1, -2, 0, false},
+		// det(M_2) = b2*c0 - b0*c2
+		{-6, 3, 3, -4, 2, -1, -2, 1, 4, false},
+		// det(M_3) = b1*c2 - b2*c1
+		{0, -1, -1, 0, 1, -2, 0, 2, 1, false},
+		// From this point onward, B or C must be zero, or B is proportional to C.
+		// det(M_4) = c0*a1 - c1*a0
+		{-1, 2, 7, 2, 1, -4, 4, 2, -8, false},
+		// det(M_5) = c0
+		{-4, -2, 7, 2, 1, -4, 4, 2, -8, false},
+		// det(M_6) = -c1
+		{0, -5, 7, 0, -4, 8, 0, -2, 4, false},
+		// det(M_7) = c2*a0 - c0*a2
+		{-5, -2, 7, 0, 0, -2, 0, 0, -1, false},
+		// det(M_8) = c2
+		{0, -2, 7, 0, 0, 1, 0, 0, 2, false},
+	}
+
+	for _, test := range tests {
+		p1 := Point{r3.Vector{test.p1x, test.p1y, test.p1z}}
+		p2 := Point{r3.Vector{test.p2x, test.p2y, test.p2z}}
+		p3 := Point{r3.Vector{test.p3x, test.p3y, test.p3z}}
+		result := Sign(p1, p2, p3)
+		if result != test.want {
+			t.Errorf("Sign(%v, %v, %v) = %v, want %v", p1, p2, p3, result, test.want)
+		}
+		if test.want {
+			// For these cases we can test the reversibility condition
+			result = Sign(p3, p2, p1)
+			if result == test.want {
+				t.Errorf("Sign(%v, %v, %v) = %v, want %v", p3, p2, p1, result, !test.want)
+			}
+		}
+	}
+}
+
+// Points used in the various RobustSign tests.
+var (
+	// The following points happen to be *exactly collinear* along a line that it
+	// approximate tangent to the surface of the unit sphere. In fact, C is the
+	// exact midpoint of the line segment AB. All of these points are close
+	// enough to unit length to satisfy r3.Vector.IsUnit().
+	poA = Point{r3.Vector{0.72571927877036835, 0.46058825605889098, 0.51106749730504852}}
+	poB = Point{r3.Vector{0.7257192746638208, 0.46058826573818168, 0.51106749441312738}}
+	poC = Point{r3.Vector{0.72571927671709457, 0.46058826089853633, 0.51106749585908795}}
+
+	// The points "x1" and "x2" are exactly proportional, i.e. they both lie
+	// on a common line through the origin. Both points are considered to be
+	// normalized, and in fact they both satisfy (x == x.Normalize()).
+	// Therefore the triangle (x1, x2, -x1) consists of three distinct points
+	// that all lie on a common line through the origin.
+	x1 = Point{r3.Vector{0.99999999999999989, 1.4901161193847655e-08, 0}}
+	x2 = Point{r3.Vector{1, 1.4901161193847656e-08, 0}}
+
+	// Here are two more points that are distinct, exactly proportional, and
+	// that satisfy (x == x.Normalize()).
+	x3 = Point{r3.Vector{1, 1, 1}.Normalize()}
+	x4 = Point{x3.Mul(0.99999999999999989)}
+
+	// The following three points demonstrate that Normalize() is not idempotent, i.e.
+	// y0.Normalize() != y0.Normalize().Normalize(). Both points are exactly proportional.
+	y0 = Point{r3.Vector{1, 1, 0}}
+	y1 = Point{y0.Normalize()}
+	y2 = Point{y1.Normalize()}
+)
+
+func TestPredicatesRobustSignEqualities(t *testing.T) {
+	tests := []struct {
+		p1, p2 Point
+		want   bool
+	}{
+		{Point{poC.Sub(poA.Vector)}, Point{poB.Sub(poC.Vector)}, true},
+		{x1, Point{x1.Normalize()}, true},
+		{x2, Point{x2.Normalize()}, true},
+		{x3, Point{x3.Normalize()}, true},
+		{x4, Point{x4.Normalize()}, true},
+		{x3, x4, false},
+		{y1, y2, false},
+		{y2, Point{y2.Normalize()}, true},
+	}
+
+	for _, test := range tests {
+		if got := test.p1.Vector == test.p2.Vector; got != test.want {
+			t.Errorf("Testing equality for RobustSign. %v = %v, got %v want %v", test.p1, test.p2, got, test.want)
+		}
+	}
+}
+
+func TestPredicatesRobustSign(t *testing.T) {
+	x := Point{r3.Vector{1, 0, 0}}
+	y := Point{r3.Vector{0, 1, 0}}
+	z := Point{r3.Vector{0, 0, 1}}
+
+	tests := []struct {
+		p1, p2, p3 Point
+		want       Direction
+	}{
+		// Simple collinear points test cases.
+		// a == b != c
+		{x, x, z, Indeterminate},
+		// a != b == c
+		{x, y, y, Indeterminate},
+		// c == a != b
+		{z, x, z, Indeterminate},
+		// CCW
+		{x, y, z, CounterClockwise},
+		// CW
+		{z, y, x, Clockwise},
+
+		// Edge cases:
+		// The following points happen to be *exactly collinear* along a line that is
+		// approximately tangent to the surface of the unit sphere. In fact, C is the
+		// exact midpoint of the line segment AB. All of these points are close
+		// enough to unit length to satisfy S2::IsUnitLength().
+		{
+			// Until we get ExactSign, this will only return Indeterminate.
+			// It should be Clockwise.
+			poA, poB, poC, Indeterminate,
+		},
+
+		// The points "x1" and "x2" are exactly proportional, i.e. they both lie
+		// on a common line through the origin. Both points are considered to be
+		// normalized, and in fact they both satisfy (x == x.Normalize()).
+		// Therefore the triangle (x1, x2, -x1) consists of three distinct points
+		// that all lie on a common line through the origin.
+		{
+			// Until we get ExactSign, this will only return Indeterminate.
+			// It should be CounterClockwise.
+			x1, x2, Point{x1.Mul(-1.0)}, Indeterminate,
+		},
+
+		// Here are two more points that are distinct, exactly proportional, and
+		// that satisfy (x == x.Normalize()).
+		{
+			// Until we get ExactSign, this will only return Indeterminate.
+			// It should be Clockwise.
+			x3, x4, Point{x3.Mul(-1.0)}, Indeterminate,
+		},
+
+		// The following points demonstrate that Normalize() is not idempotent,
+		// i.e. y0.Normalize() != y0.Normalize().Normalize(). Both points satisfy
+		// S2::IsNormalized(), though, and the two points are exactly proportional.
+		{
+			// Until we get ExactSign, this will only return Indeterminate.
+			// It should be CounterClockwise.
+			y1, y2, Point{y1.Mul(-1.0)}, Indeterminate,
+		},
+	}
+
+	for _, test := range tests {
+		result := RobustSign(test.p1, test.p2, test.p3)
+		if result != test.want {
+			t.Errorf("RobustSign(%v, %v, %v) got %v, want %v",
+				test.p1, test.p2, test.p3, result, test.want)
+		}
+		// Test RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
+		rotated := RobustSign(test.p2, test.p3, test.p1)
+		if rotated != result {
+			t.Errorf("RobustSign(%v, %v, %v) vs Rotated RobustSign(%v, %v, %v) got %v, want %v",
+				test.p1, test.p2, test.p3, test.p2, test.p3, test.p1, rotated, result)
+		}
+		// Test RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
+		want := Clockwise
+		if result == Clockwise {
+			want = CounterClockwise
+		} else if result == Indeterminate {
+			want = Indeterminate
+		}
+		reversed := RobustSign(test.p3, test.p2, test.p1)
+		if reversed != want {
+			t.Errorf("RobustSign(%v, %v, %v) vs Reversed RobustSign(%v, %v, %v) got %v, want %v",
+				test.p1, test.p2, test.p3, test.p3, test.p2, test.p1, reversed, -1*result)
+		}
+	}
+
+	// Test cases that should not be indeterminate.
+	/*
+		Uncomment these tests once RobustSign is completed.
+		if got := RobustSign(poA, poB, poC); got == Indeterminate {
+			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", poA, poA, poA, got)
+		}
+		if got := RobustSign(x1, x2, Point{x1.Mul(-1)}); got == Indeterminate {
+			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x1, x2, x1.Mul(-1), got)
+		}
+		if got := RobustSign(x3, x4, Point{x3.Mul(-1)}); got == Indeterminate {
+			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x3, x4, x3.Mul(-1), got)
+		}
+		if got := RobustSign(y1, y2, Point{y1.Mul(-1)}); got == Indeterminate {
+			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x1, x2, y1.Mul(-1), got)
+		}
+	*/
+}
+
+func TestPredicatesStableSignFailureRate(t *testing.T) {
+	const earthRadiusKm = 6371.01
+	const iters = 1000
+
+	// Verify that stableSign is able to handle most cases where the three
+	// points are as collinear as possible. (For reference, triageSign fails
+	// almost 100% of the time on this test.)
+	//
+	// Note that the failure rate *decreases* as the points get closer together,
+	// and the decrease is approximately linear. For example, the failure rate
+	// is 0.4% for collinear points spaced 1km apart, but only 0.0004% for
+	// collinear points spaced 1 meter apart.
+	//
+	//  1km spacing: <  1% (actual is closer to 0.4%)
+	// 10km spacing: < 10% (actual is closer to 4%)
+	want := 0.01
+	spacing := 1.0
+
+	// Estimate the probability that stableSign will not be able to compute
+	// the determinant sign of a triangle A, B, C consisting of three points
+	// that are as collinear as possible and spaced the given distance apart
+	// by counting up the times it returns Indeterminate.
+	failureCount := 0
+	m := math.Tan(spacing / earthRadiusKm)
+	for iter := 0; iter < iters; iter++ {
+		f := randomFrame()
+		a := f.col(0)
+		x := f.col(1)
+
+		b := Point{a.Sub(x.Mul(m)).Normalize()}
+		c := Point{a.Add(x.Mul(m)).Normalize()}
+		sign := stableSign(a, b, c)
+		if sign != Indeterminate {
+			// TODO(roberts): Once exactSign is implemented, uncomment this case.
+			//if got := exactSign(a, b, c, true); got != sign {
+			//	t.Errorf("exactSign(%v, %v, %v, true) = %v, want %v", a, b, c, got, sign)
+			//}
+		} else {
+			failureCount++
+		}
+	}
+
+	rate := float64(failureCount) / float64(iters)
+	if rate >= want {
+		t.Errorf("stableSign failure rate for spacing %v km = %v, want %v", spacing, rate, want)
+	}
+}
+
+func BenchmarkSign(b *testing.B) {
+	p1 := Point{r3.Vector{-3, -1, 4}}
+	p2 := Point{r3.Vector{2, -1, -3}}
+	p3 := Point{r3.Vector{1, -2, 0}}
+	for i := 0; i < b.N; i++ {
+		Sign(p1, p2, p3)
+	}
+}
+
+// BenchmarkRobustSignSimple runs the benchmark for points that satisfy the first
+// checks in RobustSign to compare the performance to that of Sign().
+func BenchmarkRobustSignSimple(b *testing.B) {
+	p1 := Point{r3.Vector{-3, -1, 4}}
+	p2 := Point{r3.Vector{2, -1, -3}}
+	p3 := Point{r3.Vector{1, -2, 0}}
+	for i := 0; i < b.N; i++ {
+		RobustSign(p1, p2, p3)
+	}
+}
+
+// BenchmarkRobustSignNearCollinear runs the benchmark for points that are almost but not
+// quite collinear, so the tests have to use most of the calculations of RobustSign
+// before getting to an answer.
+func BenchmarkRobustSignNearCollinear(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		RobustSign(poA, poB, poC)
+	}
+}

+ 4 - 3
vendor/github.com/golang/geo/s2/rect.go

@@ -21,6 +21,7 @@ import (
 	"math"
 
 	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r3"
 	"github.com/golang/geo/s1"
 )
 
@@ -239,7 +240,7 @@ func (r Rect) CapBound() Cap {
 		poleZ = 1
 		poleAngle = math.Pi/2 - r.Lat.Lo
 	}
-	poleCap := CapFromCenterAngle(PointFromCoords(0, 0, poleZ), s1.Angle(poleAngle)*s1.Radian)
+	poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian)
 
 	// For bounding rectangles that span 180 degrees or less in longitude, the
 	// maximum cap size is achieved at one of the rectangle vertices.  For
@@ -298,14 +299,14 @@ func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
 	// the sphere. They can intersect a straight edge in 0, 1, or 2 points.
 
 	// First, compute the normal to the plane AB that points vaguely north.
-	z := a.PointCross(b)
+	z := Point{a.PointCross(b).Normalize()}
 	if z.Z < 0 {
 		z = Point{z.Mul(-1)}
 	}
 
 	// Extend this to an orthonormal frame (x,y,z) where x is the direction
 	// where the great circle through AB achieves its maximum latitude.
-	y := z.PointCross(PointFromCoords(0, 0, 1))
+	y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()}
 	x := y.Cross(z.Vector)
 
 	// Compute the angle "theta" from the x-axis (in the x-y plane defined

+ 862 - 0
vendor/github.com/golang/geo/s2/rect_test.go

@@ -0,0 +1,862 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+func TestRectEmptyAndFull(t *testing.T) {
+	tests := []struct {
+		rect  Rect
+		valid bool
+		empty bool
+		full  bool
+		point bool
+	}{
+		{EmptyRect(), true, true, false, false},
+		{FullRect(), true, false, true, false},
+	}
+
+	for _, test := range tests {
+		if got := test.rect.IsValid(); got != test.valid {
+			t.Errorf("%v.IsValid() = %v, want %v", test.rect, got, test.valid)
+		}
+		if got := test.rect.IsEmpty(); got != test.empty {
+			t.Errorf("%v.IsEmpty() = %v, want %v", test.rect, got, test.empty)
+		}
+		if got := test.rect.IsFull(); got != test.full {
+			t.Errorf("%v.IsFull() = %v, want %v", test.rect, got, test.full)
+		}
+		if got := test.rect.IsPoint(); got != test.point {
+			t.Errorf("%v.IsPoint() = %v, want %v", test.rect, got, test.point)
+		}
+	}
+}
+
+func TestRectArea(t *testing.T) {
+	tests := []struct {
+		rect Rect
+		want float64
+	}{
+		{Rect{}, 0},
+		{FullRect(), 4 * math.Pi},
+		{Rect{r1.Interval{0, math.Pi / 2}, s1.Interval{0, math.Pi / 2}}, math.Pi / 2},
+	}
+	for _, test := range tests {
+		if got := test.rect.Area(); !float64Eq(got, test.want) {
+			t.Errorf("%v.Area() = %v, want %v", test.rect, got, test.want)
+		}
+	}
+}
+
+func TestRectString(t *testing.T) {
+	const want = "[Lo[-90.0000000, -180.0000000], Hi[90.0000000, 180.0000000]]"
+	if s := FullRect().String(); s != want {
+		t.Errorf("FullRect().String() = %q, want %q", s, want)
+	}
+}
+
+func TestRectFromLatLng(t *testing.T) {
+	ll := LatLngFromDegrees(23, 47)
+	got := RectFromLatLng(ll)
+	if got.Center() != ll {
+		t.Errorf("RectFromLatLng(%v).Center() = %v, want %v", ll, got.Center(), ll)
+	}
+	if !got.IsPoint() {
+		t.Errorf("RectFromLatLng(%v) = %v, want a point", ll, got)
+	}
+}
+
+func rectFromDegrees(latLo, lngLo, latHi, lngHi float64) Rect {
+	// Convenience method to construct a rectangle. This method is
+	// intentionally *not* in the S2LatLngRect interface because the
+	// argument order is ambiguous, but is fine for the test.
+	return Rect{
+		Lat: r1.Interval{
+			Lo: (s1.Angle(latLo) * s1.Degree).Radians(),
+			Hi: (s1.Angle(latHi) * s1.Degree).Radians(),
+		},
+		Lng: s1.IntervalFromEndpoints(
+			(s1.Angle(lngLo) * s1.Degree).Radians(),
+			(s1.Angle(lngHi) * s1.Degree).Radians(),
+		),
+	}
+}
+
+func TestRectFromCenterSize(t *testing.T) {
+	tests := []struct {
+		center, size LatLng
+		want         Rect
+	}{
+		{
+			LatLngFromDegrees(80, 170),
+			LatLngFromDegrees(40, 60),
+			rectFromDegrees(60, 140, 90, -160),
+		},
+		{
+			LatLngFromDegrees(10, 40),
+			LatLngFromDegrees(210, 400),
+			FullRect(),
+		},
+		{
+			LatLngFromDegrees(-90, 180),
+			LatLngFromDegrees(20, 50),
+			rectFromDegrees(-90, 155, -80, -155),
+		},
+	}
+	for _, test := range tests {
+		if got := RectFromCenterSize(test.center, test.size); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
+			t.Errorf("RectFromCenterSize(%v,%v) was %v, want %v", test.center, test.size, got, test.want)
+		}
+	}
+}
+
+func TestRectAddPoint(t *testing.T) {
+	tests := []struct {
+		input Rect
+		point LatLng
+		want  Rect
+	}{
+		{
+			Rect{r1.EmptyInterval(), s1.EmptyInterval()},
+			LatLngFromDegrees(0, 0),
+			rectFromDegrees(0, 0, 0, 0),
+		},
+		{
+			rectFromDegrees(0, 0, 0, 0),
+			LatLng{0 * s1.Radian, (-math.Pi / 2) * s1.Radian},
+			rectFromDegrees(0, -90, 0, 0),
+		},
+		{
+			rectFromDegrees(0, -90, 0, 0),
+			LatLng{(math.Pi / 4) * s1.Radian, (-math.Pi) * s1.Radian},
+			rectFromDegrees(0, -180, 45, 0),
+		},
+		{
+			rectFromDegrees(0, -180, 45, 0),
+			LatLng{(math.Pi / 2) * s1.Radian, 0 * s1.Radian},
+			rectFromDegrees(0, -180, 90, 0),
+		},
+	}
+	for _, test := range tests {
+		if got, want := test.input.AddPoint(test.point), test.want; !rectsApproxEqual(got, want, epsilon, epsilon) {
+			t.Errorf("%v.AddPoint(%v) was %v, want %v", test.input, test.point, got, want)
+		}
+	}
+}
+func TestRectVertex(t *testing.T) {
+	r1 := Rect{r1.Interval{0, math.Pi / 2}, s1.IntervalFromEndpoints(-math.Pi, 0)}
+	tests := []struct {
+		r    Rect
+		i    int
+		want LatLng
+	}{
+		{r1, 0, LatLng{0, math.Pi}},
+		{r1, 1, LatLng{0, 0}},
+		{r1, 2, LatLng{math.Pi / 2, 0}},
+		{r1, 3, LatLng{math.Pi / 2, math.Pi}},
+	}
+
+	for _, test := range tests {
+		if got := test.r.Vertex(test.i); got != test.want {
+			t.Errorf("%v.Vertex(%d) = %v, want %v", test.r, test.i, got, test.want)
+		}
+	}
+}
+func TestRectVertexCCWOrder(t *testing.T) {
+	for i := 0; i < 4; i++ {
+		lat := math.Pi / 4 * float64(i-2)
+		lng := math.Pi/2*float64(i-2) + 0.2
+		r := Rect{
+			r1.Interval{lat, lat + math.Pi/4},
+			s1.Interval{
+				math.Remainder(lng, 2*math.Pi),
+				math.Remainder(lng+math.Pi/2, 2*math.Pi),
+			},
+		}
+
+		for k := 0; k < 4; k++ {
+			if !Sign(PointFromLatLng(r.Vertex((k-1)&3)), PointFromLatLng(r.Vertex(k)), PointFromLatLng(r.Vertex((k+1)&3))) {
+				t.Errorf("%v.Vertex(%v), vertices were not in CCW order", r, k)
+			}
+		}
+	}
+}
+
+func TestRectContainsLatLng(t *testing.T) {
+	tests := []struct {
+		input Rect
+		ll    LatLng
+		want  bool
+	}{
+		{
+			rectFromDegrees(0, -180, 90, 0),
+			LatLngFromDegrees(30, -45),
+			true,
+		},
+		{
+			rectFromDegrees(0, -180, 90, 0),
+			LatLngFromDegrees(30, 45),
+			false,
+		},
+		{
+			rectFromDegrees(0, -180, 90, 0),
+			LatLngFromDegrees(0, -180),
+			true,
+		},
+		{
+			rectFromDegrees(0, -180, 90, 0),
+			LatLngFromDegrees(90, 0),
+			true,
+		},
+	}
+	for _, test := range tests {
+		if got, want := test.input.ContainsLatLng(test.ll), test.want; got != want {
+			t.Errorf("%v.ContainsLatLng(%v) was %v, want %v", test.input, test.ll, got, want)
+		}
+	}
+}
+
+func TestRectExpanded(t *testing.T) {
+	tests := []struct {
+		input  Rect
+		margin LatLng
+		want   Rect
+	}{
+		{
+			rectFromDegrees(70, 150, 80, 170),
+			LatLngFromDegrees(20, 30),
+			rectFromDegrees(50, 120, 90, -160),
+		},
+		{
+			EmptyRect(),
+			LatLngFromDegrees(20, 30),
+			EmptyRect(),
+		},
+		{
+			FullRect(),
+			LatLngFromDegrees(500, 500),
+			FullRect(),
+		},
+		{
+			rectFromDegrees(-90, 170, 10, 20),
+			LatLngFromDegrees(30, 80),
+			rectFromDegrees(-90, -180, 40, 180),
+		},
+
+		// Negative margins.
+		{
+			rectFromDegrees(10, -50, 60, 70),
+			LatLngFromDegrees(-10, -10),
+			rectFromDegrees(20, -40, 50, 60),
+		},
+		{
+			rectFromDegrees(-20, -180, 20, 180),
+			LatLngFromDegrees(-10, -10),
+			rectFromDegrees(-10, -180, 10, 180),
+		},
+		{
+			rectFromDegrees(-20, -180, 20, 180),
+			LatLngFromDegrees(-30, -30),
+			EmptyRect(),
+		},
+		{
+			rectFromDegrees(-90, 10, 90, 11),
+			LatLngFromDegrees(-10, -10),
+			EmptyRect(),
+		},
+		{
+			rectFromDegrees(-90, 10, 90, 100),
+			LatLngFromDegrees(-10, -10),
+			rectFromDegrees(-80, 20, 80, 90),
+		},
+		{
+			EmptyRect(),
+			LatLngFromDegrees(-50, -500),
+			EmptyRect(),
+		},
+		{
+			FullRect(),
+			LatLngFromDegrees(-50, -50),
+			rectFromDegrees(-40, -180, 40, 180),
+		},
+
+		// Mixed margins.
+		{
+			rectFromDegrees(10, -50, 60, 70),
+			LatLngFromDegrees(-10, 30),
+			rectFromDegrees(20, -80, 50, 100),
+		},
+		{
+			rectFromDegrees(-20, -180, 20, 180),
+			LatLngFromDegrees(10, -500),
+			rectFromDegrees(-30, -180, 30, 180),
+		},
+		{
+			rectFromDegrees(-90, -180, 80, 180),
+			LatLngFromDegrees(-30, 500),
+			rectFromDegrees(-60, -180, 50, 180),
+		},
+		{
+			rectFromDegrees(-80, -100, 80, 150),
+			LatLngFromDegrees(30, -50),
+			rectFromDegrees(-90, -50, 90, 100),
+		},
+		{
+			rectFromDegrees(0, -180, 50, 180),
+			LatLngFromDegrees(-30, 500),
+			EmptyRect(),
+		},
+		{
+			rectFromDegrees(-80, 10, 70, 20),
+			LatLngFromDegrees(30, -200),
+			EmptyRect(),
+		},
+		{
+			EmptyRect(),
+			LatLngFromDegrees(100, -100),
+			EmptyRect(),
+		},
+		{
+			FullRect(),
+			LatLngFromDegrees(100, -100),
+			FullRect(),
+		},
+	}
+	for _, test := range tests {
+		if got, want := test.input.expanded(test.margin), test.want; !rectsApproxEqual(got, want, epsilon, epsilon) {
+			t.Errorf("%v.Expanded(%v) was %v, want %v", test.input, test.margin, got, want)
+		}
+	}
+}
+
+func TestRectPolarClosure(t *testing.T) {
+	tests := []struct {
+		r    Rect
+		want Rect
+	}{
+		{
+			rectFromDegrees(-89, 0, 89, 1),
+			rectFromDegrees(-89, 0, 89, 1),
+		},
+		{
+			rectFromDegrees(-90, -30, -45, 100),
+			rectFromDegrees(-90, -180, -45, 180),
+		},
+		{
+			rectFromDegrees(89, 145, 90, 146),
+			rectFromDegrees(89, -180, 90, 180),
+		},
+		{
+			rectFromDegrees(-90, -145, 90, -144),
+			FullRect(),
+		},
+	}
+	for _, test := range tests {
+		if got := test.r.PolarClosure(); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
+			t.Errorf("%v.PolarClosure() was %v, want %v", test.r, got, test.want)
+		}
+	}
+}
+
+func TestRectCapBound(t *testing.T) {
+	tests := []struct {
+		r    Rect
+		want Cap
+	}{
+		{ // Bounding cap at center is smaller.
+			rectFromDegrees(-45, -45, 45, 45),
+			CapFromCenterHeight(Point{r3.Vector{1, 0, 0}}, 0.5),
+		},
+		{ // Bounding cap at north pole is smaller.
+			rectFromDegrees(88, -80, 89, 80),
+			CapFromCenterAngle(Point{r3.Vector{0, 0, 1}}, s1.Angle(2)*s1.Degree),
+		},
+		{ // Longitude span > 180 degrees.
+			rectFromDegrees(-30, -150, -10, 50),
+			CapFromCenterAngle(Point{r3.Vector{0, 0, -1}}, s1.Angle(80)*s1.Degree),
+		},
+	}
+	for _, test := range tests {
+		if got := test.r.CapBound(); !test.want.ApproxEqual(got) {
+			t.Errorf("%v.CapBound() was %v, want %v", test.r, got, test.want)
+		}
+	}
+}
+
+func TestRectIntervalOps(t *testing.T) {
+	// Rectangle that covers one-quarter of the sphere.
+	rect := rectFromDegrees(0, -180, 90, 0)
+
+	// Test operations where one rectangle consists of a single point.
+	rectMid := rectFromDegrees(45, -90, 45, -90)
+	rect180 := rectFromDegrees(0, -180, 0, -180)
+	northPole := rectFromDegrees(90, 0, 90, 0)
+
+	tests := []struct {
+		rect         Rect
+		other        Rect
+		contains     bool
+		intersects   bool
+		union        Rect
+		intersection Rect
+	}{
+		{
+			rect:         rect,
+			other:        rectMid,
+			contains:     true,
+			intersects:   true,
+			union:        rect,
+			intersection: rectMid,
+		},
+		{
+			rect:         rect,
+			other:        rect180,
+			contains:     true,
+			intersects:   true,
+			union:        rect,
+			intersection: rect180,
+		},
+		{
+			rect:         rect,
+			other:        northPole,
+			contains:     true,
+			intersects:   true,
+			union:        rect,
+			intersection: northPole,
+		},
+		{
+			rect:         rect,
+			other:        rectFromDegrees(-10, -1, 1, 20),
+			contains:     false,
+			intersects:   true,
+			union:        rectFromDegrees(-10, 180, 90, 20),
+			intersection: rectFromDegrees(0, -1, 1, 0),
+		},
+		{
+			rect:         rect,
+			other:        rectFromDegrees(-10, -1, 0, 20),
+			contains:     false,
+			intersects:   true,
+			union:        rectFromDegrees(-10, 180, 90, 20),
+			intersection: rectFromDegrees(0, -1, 0, 0),
+		},
+		{
+			rect:         rect,
+			other:        rectFromDegrees(-10, 0, 1, 20),
+			contains:     false,
+			intersects:   true,
+			union:        rectFromDegrees(-10, 180, 90, 20),
+			intersection: rectFromDegrees(0, 0, 1, 0),
+		},
+		{
+			rect:         rectFromDegrees(-15, -160, -15, -150),
+			other:        rectFromDegrees(20, 145, 25, 155),
+			contains:     false,
+			intersects:   false,
+			union:        rectFromDegrees(-15, 145, 25, -150),
+			intersection: EmptyRect(),
+		},
+		{
+			rect:         rectFromDegrees(70, -10, 90, -140),
+			other:        rectFromDegrees(60, 175, 80, 5),
+			contains:     false,
+			intersects:   true,
+			union:        rectFromDegrees(60, -180, 90, 180),
+			intersection: rectFromDegrees(70, 175, 80, 5),
+		},
+
+		// Check that the intersection of two rectangles that overlap in latitude
+		// but not longitude is valid, and vice versa.
+		{
+			rect:         rectFromDegrees(12, 30, 60, 60),
+			other:        rectFromDegrees(0, 0, 30, 18),
+			contains:     false,
+			intersects:   false,
+			union:        rectFromDegrees(0, 0, 60, 60),
+			intersection: EmptyRect(),
+		},
+		{
+			rect:         rectFromDegrees(0, 0, 18, 42),
+			other:        rectFromDegrees(30, 12, 42, 60),
+			contains:     false,
+			intersects:   false,
+			union:        rectFromDegrees(0, 0, 42, 60),
+			intersection: EmptyRect(),
+		},
+	}
+	for _, test := range tests {
+		if got := test.rect.Contains(test.other); got != test.contains {
+			t.Errorf("%v.Contains(%v) = %t, want %t", test.rect, test.other, got, test.contains)
+		}
+
+		if got := test.rect.Intersects(test.other); got != test.intersects {
+			t.Errorf("%v.Intersects(%v) = %t, want %t", test.rect, test.other, got, test.intersects)
+		}
+
+		if got := test.rect.Union(test.other) == test.rect; test.rect.Contains(test.other) != got {
+			t.Errorf("%v.Union(%v) == %v = %t, want %t",
+				test.rect, test.other, test.other, got, test.rect.Contains(test.other),
+			)
+		}
+
+		if got := test.rect.Intersection(test.other).IsEmpty(); test.rect.Intersects(test.other) == got {
+			t.Errorf("%v.Intersection(%v).IsEmpty() = %t, want %t",
+				test.rect, test.other, got, test.rect.Intersects(test.other))
+		}
+
+		if got := test.rect.Union(test.other); got != test.union {
+			t.Errorf("%v.Union(%v) = %v, want %v", test.rect, test.other, got, test.union)
+		}
+
+		if got := test.rect.Intersection(test.other); got != test.intersection {
+			t.Errorf("%v.Intersection(%v) = %v, want %v", test.rect, test.other, got, test.intersection)
+		}
+	}
+}
+
+func TestRectCellOps(t *testing.T) {
+	cell0 := CellFromPoint(Point{r3.Vector{1 + 1e-12, 1, 1}})
+	v0 := LatLngFromPoint(cell0.Vertex(0))
+
+	cell202 := CellFromCellID(CellIDFromFacePosLevel(2, 0, 2))
+	bound202 := cell202.RectBound()
+
+	tests := []struct {
+		r          Rect
+		c          Cell
+		contains   bool
+		intersects bool
+	}{
+		// Special cases
+		{
+			r:          EmptyRect(),
+			c:          CellFromCellID(CellIDFromFacePosLevel(3, 0, 0)),
+			contains:   false,
+			intersects: false,
+		},
+		{
+			r:          FullRect(),
+			c:          CellFromCellID(CellIDFromFacePosLevel(2, 0, 0)),
+			contains:   true,
+			intersects: true,
+		},
+		{
+			r:          FullRect(),
+			c:          CellFromCellID(CellIDFromFacePosLevel(5, 0, 25)),
+			contains:   true,
+			intersects: true,
+		},
+		// This rectangle includes the first quadrant of face 0.  It's expanded
+		// slightly because cell bounding rectangles are slightly conservative.
+		{
+			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
+			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 0)),
+			contains:   false,
+			intersects: true,
+		},
+		{
+			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
+			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 1)),
+			contains:   true,
+			intersects: true,
+		},
+		{
+			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
+			c:          CellFromCellID(CellIDFromFacePosLevel(1, 0, 1)),
+			contains:   false,
+			intersects: false,
+		},
+		// This rectangle intersects the first quadrant of face 0.
+		{
+			r:          rectFromDegrees(-10, -45, 10, 0),
+			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 0)),
+			contains:   false,
+			intersects: true,
+		},
+		{
+			r:          rectFromDegrees(-10, -45, 10, 0),
+			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 1)),
+			contains:   false,
+			intersects: true,
+		},
+		{
+			r:          rectFromDegrees(-10, -45, 10, 0),
+			c:          CellFromCellID(CellIDFromFacePosLevel(1, 0, 1)),
+			contains:   false,
+			intersects: false,
+		},
+		// Rectangle consisting of a single point.
+		{
+			r:          rectFromDegrees(4, 4, 4, 4),
+			c:          CellFromCellID(CellIDFromFace(0)),
+			contains:   false,
+			intersects: true,
+		},
+		// Rectangles that intersect the bounding rectangle of a face
+		// but not the face itself.
+		{
+			r:          rectFromDegrees(41, -87, 42, -79),
+			c:          CellFromCellID(CellIDFromFace(2)),
+			contains:   false,
+			intersects: false,
+		},
+		{
+			r:          rectFromDegrees(-41, 160, -40, -160),
+			c:          CellFromCellID(CellIDFromFace(5)),
+			contains:   false,
+			intersects: false,
+		},
+		{
+			// This is the leaf cell at the top right hand corner of face 0.
+			// It has two angles of 60 degrees and two of 120 degrees.
+			r: rectFromDegrees(v0.Lat.Degrees()-1e-8,
+				v0.Lng.Degrees()-1e-8,
+				v0.Lat.Degrees()-2e-10,
+				v0.Lng.Degrees()+1e-10),
+			c:          cell0,
+			contains:   false,
+			intersects: false,
+		},
+		{
+			// Rectangles that intersect a face but where no vertex of one region
+			// is contained by the other region.  The first one passes through
+			// a corner of one of the face cells.
+			r:          rectFromDegrees(-37, -70, -36, -20),
+			c:          CellFromCellID(CellIDFromFace(5)),
+			contains:   false,
+			intersects: true,
+		},
+		{
+			// These two intersect like a diamond and a square.
+			r: rectFromDegrees(bound202.Lo().Lat.Degrees()+3,
+				bound202.Lo().Lng.Degrees()+3,
+				bound202.Hi().Lat.Degrees()-3,
+				bound202.Hi().Lng.Degrees()-3),
+			c:          cell202,
+			contains:   false,
+			intersects: true,
+		},
+		{
+			// from a bug report
+			r:          rectFromDegrees(34.2572864, 135.2673642, 34.2707907, 135.2995742),
+			c:          CellFromCellID(0x6007500000000000),
+			contains:   false,
+			intersects: true,
+		},
+	}
+
+	for _, test := range tests {
+		if got := test.r.ContainsCell(test.c); got != test.contains {
+			t.Errorf("%v.ContainsCell(%v) = %t, want %t", test.r, test.c, got, test.contains)
+		}
+
+		if got := test.r.IntersectsCell(test.c); got != test.intersects {
+			t.Errorf("%v.IntersectsCell(%v) = %t, want %t", test.r, test.c, got, test.intersects)
+		}
+	}
+
+}
+
+func TestRectContainsPoint(t *testing.T) {
+	r1 := rectFromDegrees(0, -180, 90, 0)
+
+	tests := []struct {
+		r    Rect
+		p    Point
+		want bool
+	}{
+		{r1, Point{r3.Vector{0.5, -0.3, 0.1}}, true},
+		{r1, Point{r3.Vector{0.5, 0.2, 0.1}}, false},
+	}
+	for _, test := range tests {
+		if got, want := test.r.ContainsPoint(test.p), test.want; got != want {
+			t.Errorf("%v.ContainsPoint(%v) was %v, want %v", test.r, test.p, got, want)
+		}
+	}
+}
+
+func TestRectIntersectsLatEdge(t *testing.T) {
+	tests := []struct {
+		a, b  Point
+		lat   s1.Angle
+		lngLo s1.Angle
+		lngHi s1.Angle
+		want  bool
+	}{
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			lat:   41 * s1.Degree,
+			lngLo: -87 * s1.Degree,
+			lngHi: -79 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			lat:   42 * s1.Degree,
+			lngLo: -87 * s1.Degree,
+			lngHi: -79 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, -1}},
+			b:     Point{r3.Vector{1, 1, 0}},
+			lat:   -3 * s1.Degree,
+			lngLo: -1 * s1.Degree,
+			lngHi: 23 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{1, 0, 1}},
+			b:     Point{r3.Vector{1, -1, 0}},
+			lat:   -28 * s1.Degree,
+			lngLo: 69 * s1.Degree,
+			lngHi: 115 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{0, 1, 0}},
+			b:     Point{r3.Vector{1, -1, -1}},
+			lat:   44 * s1.Degree,
+			lngLo: 60 * s1.Degree,
+			lngHi: 177 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{0, 1, 1}},
+			b:     Point{r3.Vector{0, 1, -1}},
+			lat:   -25 * s1.Degree,
+			lngLo: -74 * s1.Degree,
+			lngHi: -165 * s1.Degree,
+			want:  true,
+		},
+		{
+			a:     Point{r3.Vector{1, 0, 0}},
+			b:     Point{r3.Vector{0, 0, -1}},
+			lat:   -4 * s1.Degree,
+			lngLo: -152 * s1.Degree,
+			lngHi: 171 * s1.Degree,
+			want:  true,
+		},
+		// from a bug report
+		{
+			a:     Point{r3.Vector{-0.589375791872893683986945, 0.583248451588733285433364, 0.558978908075738245564423}},
+			b:     Point{r3.Vector{-0.587388131301997518107783, 0.581281455376392863776402, 0.563104832905072516524569}},
+			lat:   34.2572864 * s1.Degree,
+			lngLo: 2.3608609 * s1.Radian,
+			lngHi: 2.3614230 * s1.Radian,
+			want:  true,
+		},
+	}
+
+	for _, test := range tests {
+		if got := intersectsLatEdge(test.a, test.b, test.lat, s1.Interval{float64(test.lngLo), float64(test.lngHi)}); got != test.want {
+			t.Errorf("intersectsLatEdge(%v, %v, %v, {%v, %v}) = %t, want %t",
+				test.a, test.b, test.lat, test.lngLo, test.lngHi, got, test.want)
+		}
+	}
+}
+
+func TestRectIntersectsLngEdge(t *testing.T) {
+	tests := []struct {
+		a, b  Point
+		latLo s1.Angle
+		latHi s1.Angle
+		lng   s1.Angle
+		want  bool
+	}{
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			latLo: 41 * s1.Degree,
+			latHi: 42 * s1.Degree,
+			lng:   -79 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			latLo: 41 * s1.Degree,
+			latHi: 42 * s1.Degree,
+			lng:   -87 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			latLo: 42 * s1.Degree,
+			latHi: 41 * s1.Degree,
+			lng:   79 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, 1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			latLo: 41 * s1.Degree,
+			latHi: 42 * s1.Degree,
+			lng:   87 * s1.Degree,
+			want:  false,
+		},
+		{
+			a:     Point{r3.Vector{0, -1, -1}},
+			b:     Point{r3.Vector{-1, 0, -1}},
+			latLo: -87 * s1.Degree,
+			latHi: 13 * s1.Degree,
+			lng:   -143 * s1.Degree,
+			want:  true,
+		},
+		{
+			a:     Point{r3.Vector{1, 1, -1}},
+			b:     Point{r3.Vector{1, -1, 1}},
+			latLo: -64 * s1.Degree,
+			latHi: 13 * s1.Degree,
+			lng:   40 * s1.Degree,
+			want:  true,
+		},
+		{
+			a:     Point{r3.Vector{1, 1, 0}},
+			b:     Point{r3.Vector{-1, 0, -1}},
+			latLo: -64 * s1.Degree,
+			latHi: 56 * s1.Degree,
+			lng:   151 * s1.Degree,
+			want:  true,
+		},
+		{
+			a:     Point{r3.Vector{-1, -1, 0}},
+			b:     Point{r3.Vector{1, -1, -1}},
+			latLo: -50 * s1.Degree,
+			latHi: 18 * s1.Degree,
+			lng:   -84 * s1.Degree,
+			want:  true,
+		},
+	}
+
+	for _, test := range tests {
+		if got := intersectsLngEdge(test.a, test.b, r1.Interval{float64(test.latLo), float64(test.latHi)}, test.lng); got != test.want {
+			t.Errorf("intersectsLngEdge(%v, %v, {%v, %v}, %v) = %v, want %v",
+				test.a, test.b, test.latLo, test.latHi, test.lng, got, test.want)
+		}
+	}
+}

+ 2 - 1
vendor/github.com/golang/geo/s2/region.go

@@ -44,7 +44,8 @@ var (
 	_ Region = Cap{}
 	_ Region = Cell{}
 	_ Region = (*CellUnion)(nil)
+	_ Region = Point{}
 	//_ Region = (*Polygon)(nil)
-	_ Region = Polyline{}
+	_ Region = (*Polyline)(nil)
 	_ Region = Rect{}
 )

+ 5 - 3
vendor/github.com/golang/geo/s2/regioncoverer.go

@@ -194,6 +194,10 @@ func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int
 // otherwise expands its children and inserts it into the priority queue.
 // Passing an argument of nil does nothing.
 func (c *coverer) addCandidate(cand *candidate) {
+	if cand == nil {
+		return
+	}
+
 	if cand.terminal {
 		c.result = append(c.result, cand.cell.id)
 		return
@@ -274,9 +278,7 @@ func (c *coverer) initialCandidates() {
 	cells := temp.FastCovering(c.region.CapBound())
 	c.adjustCellLevels(&cells)
 	for _, ci := range cells {
-		if cand := c.newCandidate(CellFromCellID(ci)); cand != nil {
-			c.addCandidate(cand)
-		}
+		c.addCandidate(c.newCandidate(CellFromCellID(ci)))
 	}
 }
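
The nil guard added to addCandidate lets initialCandidates pass newCandidate's result through unconditionally. For context, a typical coverer call, sketched using only constructors and fields that appear in this diff (the region choice and cap radius are arbitrary):

package main

import (
	"fmt"

	"github.com/golang/geo/s1"
	"github.com/golang/geo/s2"
)

func main() {
	rc := &s2.RegionCoverer{MinLevel: 0, MaxLevel: 30, LevelMod: 1, MaxCells: 8}

	// Cover a 1-degree cap with at most 8 cells and print the cell tokens.
	region := s2.CapFromCenterAngle(
		s2.PointFromLatLng(s2.LatLngFromDegrees(48.86, 2.35)),
		s1.Angle(1)*s1.Degree,
	)
	for _, id := range rc.Covering(region) {
		fmt.Println(id.ToToken(), id.Level())
	}
}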
 

+ 151 - 0
vendor/github.com/golang/geo/s2/regioncoverer_test.go

@@ -0,0 +1,151 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"math/rand"
+	"reflect"
+	"testing"
+)
+
+func TestCovererRandomCells(t *testing.T) {
+	rc := &RegionCoverer{MinLevel: 0, MaxLevel: 30, LevelMod: 1, MaxCells: 1}
+
+	// Test random cell ids at all levels.
+	for i := 0; i < 10000; i++ {
+		id := CellID(randomUint64())
+		for !id.IsValid() {
+			id = CellID(randomUint64())
+		}
+		covering := rc.Covering(Region(CellFromCellID(id)))
+		if len(covering) != 1 {
+			t.Errorf("Iteration %d, cell ID token %s, got covering size = %d, want covering size = 1", i, id.ToToken(), len(covering))
+		}
+		if (covering)[0] != id {
+			t.Errorf("Iteration %d, cell ID token %s, got covering = %v, want covering = %v", i, id.ToToken(), covering, id)
+		}
+	}
+}
+
+// checkCovering reports whether covering is a valid cover for the region.
+func checkCovering(t *testing.T, rc *RegionCoverer, r Region, covering CellUnion, interior bool) {
+	// Keep track of how many cells have the same rc.MinLevel ancestor.
+	minLevelCells := map[CellID]int{}
+	var tempCover CellUnion
+	for _, ci := range covering {
+		level := ci.Level()
+		if level < rc.MinLevel {
+			t.Errorf("CellID(%s).Level() = %d, want >= %d", ci.ToToken(), level, rc.MinLevel)
+		}
+		if level > rc.MaxLevel {
+			t.Errorf("CellID(%s).Level() = %d, want <= %d", ci.ToToken(), level, rc.MaxLevel)
+		}
+		if rem := (level - rc.MinLevel) % rc.LevelMod; rem != 0 {
+			t.Errorf("(CellID(%s).Level() - MinLevel) mod LevelMod = %d, want = %d", ci.ToToken(), rem, 0)
+		}
+		tempCover = append(tempCover, ci)
+		minLevelCells[ci.Parent(rc.MinLevel)]++
+	}
+	if len(covering) > rc.MaxCells {
+		// If the covering has more than the requested number of cells, then check
+		// that the cell count cannot be reduced by using the parent of some cell.
+		for ci, count := range minLevelCells {
+			if count > 1 {
+				t.Errorf("Min level CellID %s, count = %d, want = %d", ci.ToToken(), count, 1)
+			}
+		}
+	}
+	if interior {
+		for _, ci := range covering {
+			if !r.ContainsCell(CellFromCellID(ci)) {
+				t.Errorf("Region(%v).ContainsCell(%v) = %t, want = %t", r, CellFromCellID(ci), false, true)
+			}
+		}
+	} else {
+		tempCover.Normalize()
+		checkCoveringTight(t, r, tempCover, true, 0)
+	}
+}
+
+// checkCoveringTight checks that "cover" completely covers the given region.
+// If "checkTight" is true, also checks that it does not contain any cells that
+// do not intersect the given region. ("id" is only used internally.)
+func checkCoveringTight(t *testing.T, r Region, cover CellUnion, checkTight bool, id CellID) {
+	if !id.IsValid() {
+		for f := 0; f < 6; f++ {
+			checkCoveringTight(t, r, cover, checkTight, CellIDFromFace(f))
+		}
+		return
+	}
+
+	if !r.IntersectsCell(CellFromCellID(id)) {
+		// If region does not intersect id, then neither should the covering.
+		if got := cover.IntersectsCellID(id); checkTight && got {
+			t.Errorf("CellUnion(%v).IntersectsCellID(%s) = %t; want = %t", cover, id.ToToken(), got, false)
+		}
+	} else if !cover.ContainsCellID(id) {
+		// The region may intersect id, but we can't assert that the covering
+		// intersects id because we may discover that the region does not actually
+		// intersect upon further subdivision.  (IntersectsCell is not exact.)
+		if got := r.ContainsCell(CellFromCellID(id)); got {
+			t.Errorf("Region(%v).ContainsCell(%v) = %t; want = %t", r, CellFromCellID(id), got, false)
+		}
+		if got := id.IsLeaf(); got {
+			t.Errorf("CellID(%s).IsLeaf() = %t; want = %t", id.ToToken(), got, false)
+		}
+
+		for child := id.ChildBegin(); child != id.ChildEnd(); child = child.Next() {
+			checkCoveringTight(t, r, cover, checkTight, child)
+		}
+	}
+}
+
+func TestCovererRandomCaps(t *testing.T) {
+	rc := &RegionCoverer{}
+	for i := 0; i < 1000; i++ {
+		rc.MinLevel = int(rand.Int31n(maxLevel + 1))
+		rc.MaxLevel = int(rand.Int31n(maxLevel + 1))
+		for rc.MinLevel > rc.MaxLevel {
+			rc.MinLevel = int(rand.Int31n(maxLevel + 1))
+			rc.MaxLevel = int(rand.Int31n(maxLevel + 1))
+		}
+		rc.LevelMod = int(1 + rand.Int31n(3))
+		rc.MaxCells = int(skewedInt(10))
+
+		maxArea := math.Min(4*math.Pi, float64(3*rc.MaxCells+1)*AvgAreaMetric.Value(rc.MinLevel))
+		r := Region(randomCap(0.1*AvgAreaMetric.Value(maxLevel), maxArea))
+
+		covering := rc.Covering(r)
+		checkCovering(t, rc, r, covering, false)
+		interior := rc.InteriorCovering(r)
+		checkCovering(t, rc, r, interior, true)
+
+		// Check that Covering is deterministic.
+		covering2 := rc.Covering(r)
+		if !reflect.DeepEqual(covering, covering2) {
+			t.Errorf("Iteration %d, got covering = %v, want covering = %v", i, covering2, covering)
+		}
+
+		// Also check Denormalize. The denormalized covering
+		// may still be different and smaller than "covering" because
+		// s2.RegionCoverer does not guarantee that it will not output all four
+		// children of the same parent.
+		covering.Denormalize(rc.MinLevel, rc.LevelMod)
+		checkCovering(t, rc, r, covering, false)
+	}
+}
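
For reference, the covering behavior exercised above can also be driven through the exported API. A minimal sketch, using only constructors that appear elsewhere in this change (PointFromLatLng, LatLngFromDegrees, CapFromCenterArea); the coordinates and limits are arbitrary example values:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// Cover a small cap (~0.0005 steradians) around a point with at most 8 cells.
	center := s2.PointFromLatLng(s2.LatLngFromDegrees(37.42, -122.08))
	c := s2.CapFromCenterArea(center, 0.0005)

	rc := &s2.RegionCoverer{MinLevel: 0, MaxLevel: 30, LevelMod: 1, MaxCells: 8}
	for _, id := range rc.Covering(c) {
		fmt.Println(id.ToToken(), "level", id.Level())
	}
}
```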

+ 414 - 0
vendor/github.com/golang/geo/s2/s2_test.go

@@ -0,0 +1,414 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"strconv"
+	"strings"
+
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+// float64Eq reports whether the two values are within the default epsilon.
+func float64Eq(x, y float64) bool { return float64Near(x, y, epsilon) }
+
+// float64Near reports whether the two values are within the given epsilon.
+func float64Near(x, y, ε float64) bool {
+	return math.Abs(x-y) <= ε
+}
+
+// TODO(roberts): Add in flag to allow specifying the random seed for repeatable tests.
+
+// kmToAngle converts a distance on the Earth's surface to an angle.
+func kmToAngle(km float64) s1.Angle {
+	// The Earth's mean radius in kilometers (according to NASA).
+	const earthRadiusKm = 6371.01
+	return s1.Angle(km / earthRadiusKm)
+}
+
+// randomBits returns a 64-bit random unsigned integer whose lowest "num" bits are random, and
+// whose other bits are zero.
+func randomBits(num uint32) uint64 {
+	// Make sure the request is for not more than 63 bits.
+	if num > 63 {
+		num = 63
+	}
+	return uint64(rand.Int63()) & ((1 << num) - 1)
+}
+
+// randomUint64 returns a uniformly distributed 64-bit unsigned integer.
+func randomUint64() uint64 {
+	return uint64(rand.Int63() | (rand.Int63() << 63))
+}
+
+// randomUint32 returns a uniformly distributed 32-bit unsigned integer.
+func randomUint32() uint32 {
+	return uint32(randomBits(32))
+}
+
+// randomFloat64 returns a uniformly distributed value in the range [0,1).
+// Note that the values returned are all multiples of 2**-53, which means that
+// not all possible values in this range are returned.
+func randomFloat64() float64 {
+	const randomFloatBits = 53
+	return math.Ldexp(float64(randomBits(randomFloatBits)), -randomFloatBits)
+}
+
+// randomUniformInt returns a uniformly distributed integer in the range [0,n).
+// NOTE: This is replicated here to stay in sync with how the C++ code generates
+// uniform randoms (instead of using Go's math/rand package directly).
+func randomUniformInt(n int) int {
+	return int(randomFloat64() * float64(n))
+}
+
+// randomUniformFloat64 returns a uniformly distributed value in the range [min, max).
+func randomUniformFloat64(min, max float64) float64 {
+	return min + randomFloat64()*(max-min)
+}
+
+// oneIn returns true with a probability of 1/n.
+func oneIn(n int) bool {
+	return randomUniformInt(n) == 0
+}
+
+// randomPoint returns a random unit-length vector.
+func randomPoint() Point {
+	return PointFromCoords(randomUniformFloat64(-1, 1),
+		randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1))
+}
+
+// randomFrame returns a right-handed coordinate frame (three orthonormal vectors) for
+// a randomly generated point.
+func randomFrame() *matrix3x3 {
+	return randomFrameAtPoint(randomPoint())
+}
+
+// randomFrameAtPoint returns a right-handed coordinate frame using the given
+// point as the z-axis. The x- and y-axes are computed such that (x,y,z) is a
+// right-handed coordinate frame (three orthonormal vectors).
+func randomFrameAtPoint(z Point) *matrix3x3 {
+	x := Point{z.Cross(randomPoint().Vector).Normalize()}
+	y := Point{z.Cross(x.Vector).Normalize()}
+
+	m := &matrix3x3{}
+	m.setCol(0, x)
+	m.setCol(1, y)
+	m.setCol(2, z)
+	return m
+}
+
+// randomCellIDForLevel returns a random CellID at the given level.
+// The distribution is uniform over the space of cell ids, but only
+// approximately uniform over the surface of the sphere.
+func randomCellIDForLevel(level int) CellID {
+	face := randomUniformInt(numFaces)
+	pos := randomUint64() & uint64((1<<posBits)-1)
+	return CellIDFromFacePosLevel(face, pos, level)
+}
+
+// randomCellID returns a random CellID at a randomly chosen
+// level. The distribution is uniform over the space of cell ids,
+// but only approximately uniform over the surface of the sphere.
+func randomCellID() CellID {
+	return randomCellIDForLevel(randomUniformInt(maxLevel + 1))
+}
+
+// parsePoint returns a Point from the latitude-longitude coordinate in degrees
+// in the given string, or the origin if the string was invalid.
+// e.g., "-20:150"
+func parsePoint(s string) Point {
+	p := parsePoints(s)
+	if len(p) > 0 {
+		return p[0]
+	}
+
+	return Point{r3.Vector{0, 0, 0}}
+}
+
+// parseRect returns the minimal bounding Rect that contains the one or more
+// latitude-longitude coordinates in degrees in the given string.
+// Examples of input:
+//   "-20:150"                     // one point
+//   "-20:150, -20:151, -19:150"   // three points
+func parseRect(s string) Rect {
+	var rect Rect
+	lls := parseLatLngs(s)
+	if len(lls) > 0 {
+		rect = RectFromLatLng(lls[0])
+	}
+
+	for _, ll := range lls[1:] {
+		rect = rect.AddPoint(ll)
+	}
+
+	return rect
+}
+
+// parseLatLngs splits up a string of lat:lng points and returns the list of parsed
+// entries.
+func parseLatLngs(s string) []LatLng {
+	pieces := strings.Split(s, ",")
+	var lls []LatLng
+	for _, piece := range pieces {
+		piece = strings.TrimSpace(piece)
+
+		// Skip empty strings.
+		if piece == "" {
+			continue
+		}
+
+		p := strings.Split(piece, ":")
+		if len(p) != 2 {
+			panic(fmt.Sprintf("invalid input string for parseLatLngs: %q", piece))
+		}
+
+		lat, err := strconv.ParseFloat(p[0], 64)
+		if err != nil {
+			panic(fmt.Sprintf("invalid float in parseLatLngs: %q, err: %v", p[0], err))
+		}
+
+		lng, err := strconv.ParseFloat(p[1], 64)
+		if err != nil {
+			panic(fmt.Sprintf("invalid float in parseLatLngs: %q, err: %v", p[1], err))
+		}
+
+		lls = append(lls, LatLngFromDegrees(lat, lng))
+	}
+	return lls
+}
+
+// parsePoints takes a string of lat:lng points and returns the set of Points it defines.
+func parsePoints(s string) []Point {
+	lls := parseLatLngs(s)
+	points := make([]Point, len(lls))
+	for i, ll := range lls {
+		points[i] = PointFromLatLng(ll)
+	}
+	return points
+}
+
+// makeLoop constructs a loop from a comma separated string of lat:lng
+// coordinates in degrees. Example of the input format:
+//   "-20:150, 10:-120, 0.123:-170.652"
+// The special strings "empty" or "full" create an empty or full loop respectively.
+func makeLoop(s string) *Loop {
+	if s == "full" {
+		return FullLoop()
+	}
+	if s == "empty" {
+		return EmptyLoop()
+	}
+
+	return LoopFromPoints(parsePoints(s))
+}
+
+// makePolygon constructs a polygon from the set of semicolon separated CSV
+// strings of lat:lng points defining each loop in the polygon. If the normalize
+// flag is set to true, loops are normalized by inverting them
+// if necessary so that they enclose at most half of the unit sphere.
+//
+// Examples of the input format:
+//     "10:20, 90:0, 20:30"                                  // one loop
+//     "10:20, 90:0, 20:30; 5.5:6.5, -90:-180, -15.2:20.3"   // two loops
+//     ""       // the empty polygon (consisting of no loops)
+//     "full"   // the full polygon (consisting of one full loop)
+//     "empty"  // **INVALID** (a polygon consisting of one empty loop)
+func makePolygon(s string, normalize bool) *Polygon {
+	strs := strings.Split(s, ";")
+	var loops []*Loop
+	for _, str := range strs {
+		if str == "" {
+			continue
+		}
+		loop := makeLoop(strings.TrimSpace(str))
+		if normalize {
+			// TODO(roberts): Uncomment once Normalize is implemented.
+			// loop.Normalize()
+		}
+		loops = append(loops, loop)
+	}
+	return PolygonFromLoops(loops)
+}
+
+// makePolyline constructs a Polyline from the given string of lat:lng values.
+func makePolyline(s string) *Polyline {
+	p := Polyline(parsePoints(s))
+	return &p
+}
+
+// concentricLoopsPolygon constructs a polygon with the specified center as a
+// number of concentric loops and vertices per loop.
+func concentricLoopsPolygon(center Point, numLoops, verticesPerLoop int) *Polygon {
+	var loops []*Loop
+	for li := 0; li < numLoops; li++ {
+		radius := s1.Angle(0.005 * float64(li+1) / float64(numLoops))
+		loops = append(loops, RegularLoop(center, radius, verticesPerLoop))
+	}
+	return PolygonFromLoops(loops)
+}
+
+// skewedInt returns a number in the range [0,2^max_log-1] with bias towards smaller numbers.
+func skewedInt(maxLog int) int {
+	base := uint32(rand.Int31n(int32(maxLog + 1)))
+	return int(randomBits(31) & ((1 << base) - 1))
+}
+
+// randomCap returns a cap with a random axis such that the log of its area is
+// uniformly distributed between the logs of the two given values. The log of
+// the cap angle is also approximately uniformly distributed.
+func randomCap(minArea, maxArea float64) Cap {
+	capArea := maxArea * math.Pow(minArea/maxArea, randomFloat64())
+	return CapFromCenterArea(randomPoint(), capArea)
+}
+
+// pointsApproxEquals reports whether the two points are within the given distance
+// of each other. This is the same as Point.ApproxEquals but permits specifying
+// the epsilon.
+func pointsApproxEquals(a, b Point, epsilon float64) bool {
+	return float64(a.Vector.Angle(b.Vector)) <= epsilon
+}
+
+var (
+	rectErrorLat = 10 * dblEpsilon
+	rectErrorLng = dblEpsilon
+)
+
+// r2PointsApproxEquals reports whether the two points are within the given epsilon.
+func r2PointsApproxEquals(a, b r2.Point, epsilon float64) bool {
+	return float64Near(a.X, b.X, epsilon) && float64Near(a.Y, b.Y, epsilon)
+}
+
+// rectsApproxEqual reports whether the two rects are within the given tolerances
+// at each corner from each other. The tolerances are specific to each axis.
+func rectsApproxEqual(a, b Rect, tolLat, tolLng float64) bool {
+	return math.Abs(a.Lat.Lo-b.Lat.Lo) < tolLat &&
+		math.Abs(a.Lat.Hi-b.Lat.Hi) < tolLat &&
+		math.Abs(a.Lng.Lo-b.Lng.Lo) < tolLng &&
+		math.Abs(a.Lng.Hi-b.Lng.Hi) < tolLng
+}
+
+// matricesApproxEqual reports whether all cells in both matrices are equal within
+// the default floating point epsilon.
+func matricesApproxEqual(m1, m2 *matrix3x3) bool {
+	return float64Eq(m1[0][0], m2[0][0]) &&
+		float64Eq(m1[0][1], m2[0][1]) &&
+		float64Eq(m1[0][2], m2[0][2]) &&
+
+		float64Eq(m1[1][0], m2[1][0]) &&
+		float64Eq(m1[1][1], m2[1][1]) &&
+		float64Eq(m1[1][2], m2[1][2]) &&
+
+		float64Eq(m1[2][0], m2[2][0]) &&
+		float64Eq(m1[2][1], m2[2][1]) &&
+		float64Eq(m1[2][2], m2[2][2])
+}
+
+// samplePointFromRect returns a point chosen uniformly at random (with respect
+// to area on the sphere) from the given rectangle.
+func samplePointFromRect(rect Rect) Point {
+	// First choose a latitude uniformly with respect to area on the sphere.
+	sinLo := math.Sin(rect.Lat.Lo)
+	sinHi := math.Sin(rect.Lat.Hi)
+	lat := math.Asin(randomUniformFloat64(sinLo, sinHi))
+
+	// Now choose longitude uniformly within the given range.
+	lng := rect.Lng.Lo + randomFloat64()*rect.Lng.Length()
+
+	return PointFromLatLng(LatLng{s1.Angle(lat), s1.Angle(lng)}.Normalized())
+}
+
+// samplePointFromCap returns a point chosen uniformly at random (with respect
+// to area) from the given cap.
+func samplePointFromCap(c Cap) Point {
+	// We consider the cap axis to be the "z" axis. We choose two other axes to
+	// complete the coordinate frame.
+	m := getFrame(c.Center())
+
+	// The surface area of a spherical cap is directly proportional to its
+	// height. First we choose a random height, and then we choose a random
+	// point along the circle at that height.
+	h := randomFloat64() * c.Height()
+	theta := 2 * math.Pi * randomFloat64()
+	r := math.Sqrt(h * (2 - h))
+
+	// The result should already be very close to unit-length, but we might as
+	// well make it as accurate as possible.
+	return Point{fromFrame(m, PointFromCoords(math.Cos(theta)*r, math.Sin(theta)*r, 1-h)).Normalize()}
+}
+
+// perturbATowardsB returns a point that has been shifted some distance towards the
+// second point based on a random number.
+func perturbATowardsB(a, b Point) Point {
+	choice := randomFloat64()
+	if choice < 0.1 {
+		return a
+	}
+	if choice < 0.3 {
+		// Return a point that is exactly proportional to A and that still
+		// satisfies IsUnitLength().
+		for {
+			b := Point{a.Mul(2 - a.Norm() + 5*(randomFloat64()-0.5)*dblEpsilon)}
+			if !b.ApproxEqual(a) && b.IsUnit() {
+				return b
+			}
+		}
+	}
+	if choice < 0.5 {
+		// Return a point such that the distance squared to A will underflow.
+		return InterpolateAtDistance(1e-300, a, b)
+	}
+	// Otherwise return a point whose distance from A is near dblEpsilon such
+	// that the log of the pdf is uniformly distributed.
+	distance := dblEpsilon * 1e-5 * math.Pow(1e6, randomFloat64())
+	return InterpolateAtDistance(s1.Angle(distance), a, b)
+}
+
+// perturbedCornerOrMidpoint returns a Point from a line segment whose endpoints are
+// difficult to handle correctly. Given two adjacent cube vertices P and Q,
+// it returns either an edge midpoint, face midpoint, or corner vertex that is
+// in the plane of PQ and that has been perturbed slightly. It also sometimes
+// returns a random point from anywhere on the sphere.
+func perturbedCornerOrMidpoint(p, q Point) Point {
+	a := p.Mul(float64(randomUniformInt(3) - 1)).Add(q.Mul(float64(randomUniformInt(3) - 1)))
+	if oneIn(10) {
+		// This perturbation often has no effect except on coordinates that are
+		// zero, in which case the perturbed value is so small that operations on
+		// it often result in underflow.
+		a = a.Add(randomPoint().Mul(math.Pow(1e-300, randomFloat64())))
+	} else if oneIn(2) {
+		// For coordinates near 1 (say > 0.5), this perturbation yields values
+		// that are only a few representable values away from the initial value.
+		a = a.Add(randomPoint().Mul(4 * dblEpsilon))
+	} else {
+		// A perturbation whose magnitude is in the range [1e-25, 1e-10].
+		a = a.Add(randomPoint().Mul(1e-10 * math.Pow(1e-15, randomFloat64())))
+	}
+
+	if a.Norm2() < math.SmallestNonzeroFloat64 {
+		// If a.Norm2() is denormalized, Normalize() loses too much precision.
+		return perturbedCornerOrMidpoint(p, q)
+	}
+	return Point{a}
+}
+
+// TODO:
+// Most of the other s2 testing methods.
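
For reference, samplePointFromCap above is uniform with respect to area because of Archimedes' hat-box theorem: on the unit sphere the area of a cap of height h is linear in h, so drawing h uniformly and placing the point on the circle at that height is area-uniform. In the cap's local frame:

    A(h) = 2\pi h, \qquad z = 1 - h, \qquad r = \sqrt{1 - z^2} = \sqrt{h(2 - h)}

which matches the h and r = math.Sqrt(h*(2-h)) used in the code.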

+ 196 - 0
vendor/github.com/golang/geo/s2/s2_test_test.go

@@ -0,0 +1,196 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+func TestKmToAngle(t *testing.T) {
+	const earthRadiusKm = 6371.01
+
+	tests := []struct {
+		have float64
+		want s1.Angle
+	}{
+		{0.0, 0.0},
+		{1.0, 0.00015696098420815537 * s1.Radian},
+		{earthRadiusKm, 1.0 * s1.Radian},
+		{-1.0, -0.00015696098420815537 * s1.Radian},
+		{-10000.0, -1.5696098420815536300 * s1.Radian},
+		{1e9, 156960.984208155363007 * s1.Radian},
+	}
+	for _, test := range tests {
+		if got := kmToAngle(test.have); !float64Eq(float64(got), float64(test.want)) {
+			t.Errorf("kmToAngle(%f) = %0.20f, want %0.20f", test.have, got, test.want)
+		}
+	}
+
+}
+
+func TestParsePoint(t *testing.T) {
+	tests := []struct {
+		have string
+		want Point
+	}{
+		{"0:0", Point{r3.Vector{1, 0, 0}}},
+		{"90:0", Point{r3.Vector{6.123233995736757e-17, 0, 1}}},
+		{"91:0", Point{r3.Vector{-0.017452406437283473, -0, 0.9998476951563913}}},
+		{"179.99:0", Point{r3.Vector{-0.9999999847691292, -0, 0.00017453292431344843}}},
+		{"180:0", Point{r3.Vector{-1, -0, 1.2246467991473515e-16}}},
+		{"181.0:0", Point{r3.Vector{-0.9998476951563913, -0, -0.017452406437283637}}},
+		{"-45:0", Point{r3.Vector{0.7071067811865476, 0, -0.7071067811865475}}},
+		{"0:0.01", Point{r3.Vector{0.9999999847691292, 0.00017453292431333684, 0}}},
+		{"0:30", Point{r3.Vector{0.8660254037844387, 0.49999999999999994, 0}}},
+		{"0:45", Point{r3.Vector{0.7071067811865476, 0.7071067811865475, 0}}},
+		{"0:90", Point{r3.Vector{6.123233995736757e-17, 1, 0}}},
+		{"30:30", Point{r3.Vector{0.7500000000000001, 0.4330127018922193, 0.49999999999999994}}},
+		{"-30:30", Point{r3.Vector{0.7500000000000001, 0.4330127018922193, -0.49999999999999994}}},
+		{"180:90", Point{r3.Vector{-6.123233995736757e-17, -1, 1.2246467991473515e-16}}},
+		{"37.4210:-122.0866, 37.4231:-122.0819", Point{r3.Vector{-0.4218751185559026, -0.6728760966593905, 0.6076669670863027}}},
+	}
+	for _, test := range tests {
+		if got := parsePoint(test.have); !got.ApproxEqual(test.want) {
+			t.Errorf("parsePoint(%s) = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestParseRect(t *testing.T) {
+	tests := []struct {
+		have string
+		want Rect
+	}{
+		{"0:0", Rect{}},
+		{
+			"1:1",
+			Rect{
+				r1.Interval{float64(s1.Degree), float64(s1.Degree)},
+				s1.Interval{float64(s1.Degree), float64(s1.Degree)},
+			},
+		},
+		{
+			"1:1, 2:2, 3:3",
+			Rect{
+				r1.Interval{float64(s1.Degree), 3 * float64(s1.Degree)},
+				s1.Interval{float64(s1.Degree), 3 * float64(s1.Degree)},
+			},
+		},
+		{
+			"-90:-180, 90:180",
+			Rect{
+				r1.Interval{-90 * float64(s1.Degree), 90 * float64(s1.Degree)},
+				s1.Interval{180 * float64(s1.Degree), -180 * float64(s1.Degree)},
+			},
+		},
+		{
+			"-89.99:0, 89.99:179.99",
+			Rect{
+				r1.Interval{-89.99 * float64(s1.Degree), 89.99 * float64(s1.Degree)},
+				s1.Interval{0, 179.99 * float64(s1.Degree)},
+			},
+		},
+		{
+			"-89.99:-179.99, 89.99:179.99",
+			Rect{
+				r1.Interval{-89.99 * float64(s1.Degree), 89.99 * float64(s1.Degree)},
+				s1.Interval{179.99 * float64(s1.Degree), -179.99 * float64(s1.Degree)},
+			},
+		},
+		{
+			"37.4210:-122.0866, 37.4231:-122.0819",
+			Rect{
+				r1.Interval{float64(s1.Degree * 37.4210), float64(s1.Degree * 37.4231)},
+				s1.Interval{float64(s1.Degree * -122.0866), float64(s1.Degree * -122.0819)},
+			},
+		},
+		{
+			"-876.54:-654.43, 963.84:2468.35",
+			Rect{
+				r1.Interval{-876.54 * float64(s1.Degree), -876.54 * float64(s1.Degree)},
+				s1.Interval{-654.43 * float64(s1.Degree), -654.43 * float64(s1.Degree)},
+			},
+		},
+	}
+	for _, test := range tests {
+		if got := parseRect(test.have); got != test.want {
+			t.Errorf("parseRect(%s) = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestParseLatLngs(t *testing.T) {
+	tests := []struct {
+		have string
+		want []LatLng
+	}{
+		{"0:0", []LatLng{{0, 0}}},
+		{
+			"37.4210:-122.0866, 37.4231:-122.0819",
+			[]LatLng{
+				{s1.Degree * 37.4210, s1.Degree * -122.0866},
+				{s1.Degree * 37.4231, s1.Degree * -122.0819},
+			},
+		},
+	}
+	for _, test := range tests {
+		got := parseLatLngs(test.have)
+		if !reflect.DeepEqual(got, test.want) {
+			t.Errorf("parseLatLngs(%s) = %v, want %v", test.have, got, test.want)
+		}
+	}
+}
+
+func TestParsePoints(t *testing.T) {
+	tests := []struct {
+		have string
+		want []Point
+	}{
+		{"0:0", []Point{{r3.Vector{1, 0, 0}}}},
+		{"      0:0,    ", []Point{{r3.Vector{1, 0, 0}}}},
+		{
+			"90:0,-90:0",
+			[]Point{
+				{r3.Vector{6.123233995736757e-17, 0, 1}},
+				{r3.Vector{6.123233995736757e-17, 0, -1}},
+			},
+		},
+		{
+			"90:0, 0:90, -90:0, 0:-90",
+			[]Point{
+				{r3.Vector{6.123233995736757e-17, 0, 1}},
+				{r3.Vector{6.123233995736757e-17, 1, 0}},
+				{r3.Vector{6.123233995736757e-17, 0, -1}},
+				{r3.Vector{6.123233995736757e-17, -1, 0}},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		got := parsePoints(test.have)
+		for i := range got { // assume we at least get the same number of points
+			if !got[i].ApproxEqual(test.want[i]) {
+				t.Errorf("parsePoints(%s): [%d]: got %v, want %v", test.have, i, got[i], test.want[i])
+			}
+		}
+	}
+}

+ 66 - 17
vendor/github.com/golang/geo/s2/shapeindex.go

@@ -20,16 +20,58 @@ import (
 	"github.com/golang/geo/r2"
 )
 
-// Shape defines an interface for any s2 type that needs to be indexable.
+// dimension defines the types of geometry dimensions that a Shape supports.
+type dimension int
+
+const (
+	pointGeometry dimension = iota
+	polylineGeometry
+	polygonGeometry
+)
+
+// Shape defines an interface for any S2 type that needs to be indexable. A shape
+// is a collection of edges that optionally defines an interior. It can be used to
+// represent a set of points, a set of polylines, or a set of polygons.
 type Shape interface {
 	// NumEdges returns the number of edges in this shape.
 	NumEdges() int
 
 	// Edge returns endpoints for the given edge index.
+	// Zero-length edges are allowed, and can be used to represent points.
 	Edge(i int) (a, b Point)
 
-	// HasInterior returns true if this shape has an interior.
-	// i.e. the Shape consists of one or more closed non-intersecting loops.
+	// numChains reports the number of contiguous edge chains in the shape.
+	// For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
+	// of two chains (AB,BC,CD and AE,EF). This method allows some algorithms
+	// to be optimized by skipping over edge chains that do not affect the output.
+	//
+	// Note that it is always acceptable to implement this method by returning
+	// NumEdges, i.e. every chain consists of a single edge.
+	numChains() int
+
+	// chainStart returns the id of the first edge in the i-th edge chain,
+	// and returns NumEdges when i == numChains. For example, if there are
+	// two chains AB,BC,CD and AE,EF, the chain starts would be [0, 3, 5].
+	//
+	// This requires the following:
+	// 0 <= i <= numChains()
+	// chainStart(0) == 0
+	// chainStart(i) < chainStart(i+1)
+	// chainStart(numChains()) == NumEdges()
+	chainStart(i int) int
+
+	// dimension returns the dimension of the geometry represented by this shape.
+	//
+	// Note that this method allows degenerate geometry of different dimensions
+	// to be distinguished, e.g. it allows a point to be distinguished from a
+	// polyline or polygon that has been simplified to a single point.
+	dimension() dimension
+
+	// HasInterior reports whether this shape has an interior. If so, it must be possible
+	// to assemble the edges into a collection of non-crossing loops.  Edges may
+	// be returned in any order, and edges may be oriented arbitrarily with
+	// respect to the shape interior.  (However, note that some Shape types
+	// may have stronger requirements.)
 	HasInterior() bool
 
 	// ContainsOrigin returns true if this shape contains s2.Origin.
@@ -39,8 +81,9 @@ type Shape interface {
 
 // A minimal check for types that should satisfy the Shape interface.
 var (
-	_ Shape = Loop{}
-	_ Shape = Polyline{}
+	_ Shape = &Loop{}
+	_ Shape = &Polygon{}
+	_ Shape = &Polyline{}
 )
 
 // CellRelation describes the possible relationships between a target cell
@@ -147,38 +190,44 @@ type clippedEdge struct {
 	bound    r2.Rect   // Bounding box for the clipped portion
 }
 
-// ShapeIndex indexes a set of Shapes, where a Shape is some collection of
-// edges. A shape can be as simple as a single edge, or as complex as a set of loops.
-// For Shapes that have interiors, the index makes it very fast to determine which
-// Shape(s) contain a given point or region.
+// ShapeIndex indexes a set of Shapes, where a Shape is some collection of edges
+// that optionally defines an interior. It can be used to represent a set of
+// points, a set of polylines, or a set of polygons. For Shapes that have
+// interiors, the index makes it very fast to determine which Shape(s) contain
+// a given point or region.
 type ShapeIndex struct {
-	// shapes maps all shapes to their index.
-	shapes map[Shape]int32
+	// shapes is a map of shape ID to shape.
+	shapes map[int]Shape
 
 	maxEdgesPerCell int
 
 	// nextID tracks the next ID to hand out. IDs are not reused when shapes
 	// are removed from the index.
-	nextID int32
+	nextID int
 }
 
 // NewShapeIndex creates a new ShapeIndex.
 func NewShapeIndex() *ShapeIndex {
 	return &ShapeIndex{
 		maxEdgesPerCell: 10,
-		shapes:          make(map[Shape]int32),
+		shapes:          make(map[int]Shape),
 	}
 }
 
 // Add adds the given shape to the index and assigns an ID to it.
 func (s *ShapeIndex) Add(shape Shape) {
-	s.shapes[shape] = s.nextID
+	s.shapes[s.nextID] = shape
 	s.nextID++
 }
 
 // Remove removes the given shape from the index.
 func (s *ShapeIndex) Remove(shape Shape) {
-	delete(s.shapes, shape)
+	for k, v := range s.shapes {
+		if v == shape {
+			delete(s.shapes, k)
+			return
+		}
+	}
 }
 
 // Len reports the number of Shapes in this index.
@@ -188,14 +237,14 @@ func (s *ShapeIndex) Len() int {
 
 // Reset clears the contents of the index and resets it to its original state.
 func (s *ShapeIndex) Reset() {
-	s.shapes = make(map[Shape]int32)
+	s.shapes = make(map[int]Shape)
 	s.nextID = 0
 }
 
 // NumEdges returns the number of edges in this index.
 func (s *ShapeIndex) NumEdges() int {
 	numEdges := 0
-	for shape := range s.shapes {
+	for _, shape := range s.shapes {
 		numEdges += shape.NumEdges()
 	}
 	return numEdges

+ 84 - 0
vendor/github.com/golang/geo/s2/shapeindex_test.go

@@ -0,0 +1,84 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"testing"
+)
+
+// testShape is a minimal implementation of the Shape interface for use in testing
+// until such time as there are other s2 types that implement it.
+type testShape struct {
+	a, b  Point
+	edges int
+}
+
+func newTestShape() *testShape                { return &testShape{} }
+func (s *testShape) NumEdges() int            { return s.edges }
+func (s *testShape) Edge(id int) (a, b Point) { return s.a, s.b }
+func (s *testShape) dimension() dimension     { return pointGeometry }
+func (s *testShape) numChains() int           { return 0 }
+func (s *testShape) chainStart(i int) int     { return 0 }
+func (s *testShape) HasInterior() bool        { return false }
+func (s *testShape) ContainsOrigin() bool     { return false }
+
+func TestShapeIndexBasics(t *testing.T) {
+	si := NewShapeIndex()
+	s := newTestShape()
+
+	if si.Len() != 0 {
+		t.Errorf("initial index should be empty after creation")
+	}
+	si.Add(s)
+
+	if si.Len() == 0 {
+		t.Errorf("index should not be empty after adding shape")
+	}
+
+	si.Reset()
+	if si.Len() != 0 {
+		t.Errorf("index should be empty after reset")
+	}
+}
+
+func TestShapeIndexCellBasics(t *testing.T) {
+	s := &shapeIndexCell{}
+
+	if len(s.shapes) != 0 {
+		t.Errorf("len(s.shapes) = %v, want %d", len(s.shapes), 0)
+	}
+
+	// create some clipped shapes to add.
+	c1 := &clippedShape{}
+	s.add(c1)
+
+	c2 := newClippedShape(7, 1)
+	s.add(c2)
+
+	c3 := &clippedShape{}
+	s.add(c3)
+
+	// look up the element at a given index
+	if got := s.shapes[1]; got != c2 {
+		t.Errorf("%v.shapes[%d] = %v, want %v", s, 1, got, c2)
+	}
+
+	// look for the clipped shape that is part of the given shape.
+	if got := s.findByID(7); got != c2 {
+		t.Errorf("%v.findByID(%v) = %v, want %v", s, 7, got, c2)
+	}
+}
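
Because numChains, chainStart and dimension are unexported, only types inside package s2 can satisfy Shape (hence testShape above lives in this package). External callers index the built-in geometry types instead; a hedged sketch, assuming the package's PolylineFromLatLngs constructor:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// *Polyline satisfies Shape (see the interface checks in shapeindex.go),
	// so it can be added to a ShapeIndex directly.
	line := s2.PolylineFromLatLngs([]s2.LatLng{
		s2.LatLngFromDegrees(37.4210, -122.0866),
		s2.LatLngFromDegrees(37.4231, -122.0819),
		s2.LatLngFromDegrees(37.4250, -122.0800),
	})

	index := s2.NewShapeIndex()
	index.Add(line)

	fmt.Println("shapes:", index.Len())     // 1
	fmt.Println("edges:", index.NumEdges()) // 2 edges for 3 vertices
}
```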

+ 321 - 0
vendor/github.com/golang/geo/s2/stuv_test.go

@@ -0,0 +1,321 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+	"math"
+	"testing"
+
+	"github.com/golang/geo/r3"
+)
+
+func TestSTUV(t *testing.T) {
+	if x := stToUV(uvToST(.125)); x != .125 {
+		t.Error("stToUV(uvToST(.125) == ", x)
+	}
+	if x := uvToST(stToUV(.125)); x != .125 {
+		t.Error("uvToST(stToUV(.125) == ", x)
+	}
+}
+
+func TestUVNorms(t *testing.T) {
+	step := 1 / 1024.0
+	for face := 0; face < 6; face++ {
+		for x := -1.0; x <= 1; x += step {
+			if !float64Eq(float64(faceUVToXYZ(face, x, -1).Cross(faceUVToXYZ(face, x, 1)).Angle(uNorm(face, x))), 0.0) {
+				t.Errorf("UNorm not orthogonal to the face(%d)", face)
+			}
+			if !float64Eq(float64(faceUVToXYZ(face, -1, x).Cross(faceUVToXYZ(face, 1, x)).Angle(vNorm(face, x))), 0.0) {
+				t.Errorf("VNorm not orthogonal to the face(%d)", face)
+			}
+		}
+	}
+}
+
+func TestFaceUVToXYZ(t *testing.T) {
+	// Check that each face appears exactly once.
+	var sum r3.Vector
+	for face := 0; face < 6; face++ {
+		center := faceUVToXYZ(face, 0, 0)
+		if !center.ApproxEqual(unitNorm(face).Vector) {
+			t.Errorf("faceUVToXYZ(%d, 0, 0) != unitNorm(%d), should be equal", face, face)
+		}
+		switch center.LargestComponent() {
+		case r3.XAxis:
+			if math.Abs(center.X) != 1 {
+				t.Errorf("%v.X = %v, want %v", center, math.Abs(center.X), 1)
+			}
+		case r3.YAxis:
+			if math.Abs(center.Y) != 1 {
+				t.Errorf("%v.Y = %v, want %v", center, math.Abs(center.Y), 1)
+			}
+		default:
+			if math.Abs(center.Z) != 1 {
+				t.Errorf("%v.Z = %v, want %v", center, math.Abs(center.Z), 1)
+			}
+		}
+		sum = sum.Add(center.Abs())
+
+		// Check that each face has a right-handed coordinate system.
+		if got := uAxis(face).Vector.Cross(vAxis(face).Vector).Dot(unitNorm(face).Vector); got != 1 {
+			t.Errorf("right-handed check failed. uAxis(%d).Cross(vAxis(%d)).Dot(unitNorm(%d)) = %v, want 1", face, face, face, got)
+		}
+
+		// Check that the Hilbert curves on each face combine to form a
+		// continuous curve over the entire cube.
+		// The Hilbert curve on each face starts at (-1,-1) and terminates
+		// at either (1,-1) (if axes not swapped) or (-1,1) (if swapped).
+		var sign float64 = 1
+		if face&swapMask == 1 {
+			sign = -1
+		}
+		if faceUVToXYZ(face, sign, -sign) != faceUVToXYZ((face+1)%6, -1, -1) {
+			t.Errorf("faceUVToXYZ(%v, %v, %v) != faceUVToXYZ(%v, -1, -1)", face, sign, -sign, (face+1)%6)
+		}
+	}
+
+	// Adding up the absolute values of all the face normals should equal 2 on each axis.
+	if !sum.ApproxEqual(r3.Vector{2, 2, 2}) {
+		t.Errorf("sum of the abs of the 6 face norms should = %v, got %v", r3.Vector{2, 2, 2}, sum)
+	}
+}
+
+func TestFaceXYZToUV(t *testing.T) {
+	var (
+		point    = Point{r3.Vector{1.1, 1.2, 1.3}}
+		pointNeg = Point{r3.Vector{-1.1, -1.2, -1.3}}
+	)
+
+	tests := []struct {
+		face  int
+		point Point
+		u     float64
+		v     float64
+		ok    bool
+	}{
+		{0, point, 1 + (1.0 / 11), 1 + (2.0 / 11), true},
+		{0, pointNeg, 0, 0, false},
+		{1, point, -11.0 / 12, 1 + (1.0 / 12), true},
+		{1, pointNeg, 0, 0, false},
+		{2, point, -11.0 / 13, -12.0 / 13, true},
+		{2, pointNeg, 0, 0, false},
+		{3, point, 0, 0, false},
+		{3, pointNeg, 1 + (2.0 / 11), 1 + (1.0 / 11), true},
+		{4, point, 0, 0, false},
+		{4, pointNeg, 1 + (1.0 / 12), -(11.0 / 12), true},
+		{5, point, 0, 0, false},
+		{5, pointNeg, -12.0 / 13, -11.0 / 13, true},
+	}
+
+	for _, test := range tests {
+		if u, v, ok := faceXYZToUV(test.face, test.point); !float64Eq(u, test.u) || !float64Eq(v, test.v) || ok != test.ok {
+			t.Errorf("faceXYZToUV(%d, %v) = %f, %f, %t, want %f, %f, %t", test.face, test.point, u, v, ok, test.u, test.v, test.ok)
+		}
+	}
+}
+
+func TestFaceXYZtoUVW(t *testing.T) {
+	var (
+		origin = Point{r3.Vector{0, 0, 0}}
+		posX   = Point{r3.Vector{1, 0, 0}}
+		negX   = Point{r3.Vector{-1, 0, 0}}
+		posY   = Point{r3.Vector{0, 1, 0}}
+		negY   = Point{r3.Vector{0, -1, 0}}
+		posZ   = Point{r3.Vector{0, 0, 1}}
+		negZ   = Point{r3.Vector{0, 0, -1}}
+	)
+
+	for face := 0; face < 6; face++ {
+		if got := faceXYZtoUVW(face, origin); got != origin {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, origin, got, origin)
+		}
+
+		if got := faceXYZtoUVW(face, uAxis(face)); got != posX {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, uAxis(face), got, posX)
+		}
+
+		if got := faceXYZtoUVW(face, Point{uAxis(face).Mul(-1)}); got != negX {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, uAxis(face).Mul(-1), got, negX)
+		}
+
+		if got := faceXYZtoUVW(face, vAxis(face)); got != posY {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, vAxis(face), got, posY)
+		}
+
+		if got := faceXYZtoUVW(face, Point{vAxis(face).Mul(-1)}); got != negY {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, vAxis(face).Mul(-1), got, negY)
+		}
+
+		if got := faceXYZtoUVW(face, unitNorm(face)); got != posZ {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, unitNorm(face), got, posZ)
+		}
+
+		if got := faceXYZtoUVW(face, Point{unitNorm(face).Mul(-1)}); got != negZ {
+			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, unitNorm(face).Mul(-1), got, negZ)
+		}
+	}
+}
+
+func TestUVWAxis(t *testing.T) {
+	for face := 0; face < 6; face++ {
+		// Check that the axes are consistent with faceUVtoXYZ.
+		if faceUVToXYZ(face, 1, 0).Sub(faceUVToXYZ(face, 0, 0)) != uAxis(face).Vector {
+			t.Errorf("face 1,0 - face 0,0 should equal uAxis")
+		}
+		if faceUVToXYZ(face, 0, 1).Sub(faceUVToXYZ(face, 0, 0)) != vAxis(face).Vector {
+			t.Errorf("faceUVToXYZ(%d, 0, 1).Sub(faceUVToXYZ(%d, 0, 0)) != vAxis(%d), should be equal.", face, face, face)
+		}
+		if faceUVToXYZ(face, 0, 0) != unitNorm(face).Vector {
+			t.Errorf("faceUVToXYZ(%d, 0, 0) != unitNorm(%d), should be equal", face, face)
+		}
+
+		// Check that every face coordinate frame is right-handed.
+		if got := uAxis(face).Vector.Cross(vAxis(face).Vector).Dot(unitNorm(face).Vector); got != 1 {
+			t.Errorf("right-handed check failed. got %v, want 1", got)
+		}
+
+		// Check that GetUVWAxis is consistent with GetUAxis, GetVAxis, GetNorm.
+		if uAxis(face) != uvwAxis(face, 0) {
+			t.Errorf("uAxis(%d) != uvwAxis(%d, 0), should be equal", face, face)
+		}
+		if vAxis(face) != uvwAxis(face, 1) {
+			t.Errorf("vAxis(%d) != uvwAxis(%d, 1), should be equal", face, face)
+		}
+		if unitNorm(face) != uvwAxis(face, 2) {
+			t.Errorf("unitNorm(%d) != uvwAxis(%d, 2), should be equal", face, face)
+		}
+	}
+}
+
+func TestSiTiSTRoundtrip(t *testing.T) {
+	// test int -> float -> int direction.
+	for i := 0; i < 1000; i++ {
+		si := uint64(randomUniformInt(maxSiTi))
+		if got := stToSiTi(siTiToST(si)); got != si {
+			t.Errorf("stToSiTi(siTiToST(%v)) = %v, want %v", si, got, si)
+		}
+	}
+	// test float -> int -> float direction.
+	for i := 0; i < 1000; i++ {
+		st := randomUniformFloat64(0, 1.0)
+		// this uses near not exact because there is some loss in precision
+		// when scaling down to the nearest 1/maxLevel and back.
+		if got := siTiToST(stToSiTi(st)); !float64Near(got, st, 1e-8) {
+			t.Errorf("siTiToST(stToSiTi(%v)) = %v, want %v", st, got, st)
+		}
+	}
+}
+
+func TestUVWFace(t *testing.T) {
+	// Check that uvwFace is consistent with uvwAxis.
+	for f := 0; f < 6; f++ {
+		for axis := 0; axis < 3; axis++ {
+			if got, want := face(uvwAxis(f, axis).Mul(-1)), uvwFace(f, axis, 0); got != want {
+				t.Errorf("face(%v) in positive direction = %v, want %v", uvwAxis(f, axis).Mul(-1), got, want)
+			}
+			if got, want := face(uvwAxis(f, axis).Vector), uvwFace(f, axis, 1); got != want {
+				t.Errorf("face(%v) in negative direction = %v, want %v", uvwAxis(f, axis), got, want)
+			}
+		}
+	}
+}
+
+func TestXYZToFaceSiTi(t *testing.T) {
+	for level := 0; level < maxLevel; level++ {
+		for i := 0; i < 1000; i++ {
+			ci := randomCellIDForLevel(level)
+			f, si, ti, gotLevel := xyzToFaceSiTi(ci.Point())
+			if gotLevel != level {
+				t.Errorf("level of CellID %v = %v, want %v", ci, gotLevel, level)
+			}
+			gotID := cellIDFromFaceIJ(f, int(si/2), int(ti/2)).Parent(level)
+			if gotID != ci {
+				t.Errorf("CellID = %b, want %b", gotID, ci)
+			}
+
+			// Test a point near the cell center but not equal to it.
+			pMoved := ci.Point().Add(r3.Vector{1e-13, 1e-13, 1e-13})
+			fMoved, siMoved, tiMoved, gotLevel := xyzToFaceSiTi(Point{pMoved})
+
+			if gotLevel != -1 {
+				t.Errorf("level of %v = %v, want %v", pMoved, gotLevel, -1)
+			}
+
+			if f != fMoved {
+				t.Errorf("face of %v = %v, want %v", pMoved, fMoved, f)
+			}
+
+			if si != siMoved {
+				t.Errorf("si of %v = %v, want %v", pMoved, siMoved, si)
+			}
+
+			if ti != tiMoved {
+				t.Errorf("ti of %v = %v, want %v", pMoved, tiMoved, ti)
+			}
+
+			// Finally, test some random (si,ti) values that may be at different
+			// levels, or not at a valid level at all (for example, si == 0).
+			faceRandom := randomUniformInt(numFaces)
+			mask := -1 << uint64(maxLevel-level)
+			siRandom := uint64(randomUint32() & uint32(mask))
+			tiRandom := uint64(randomUint32() & uint32(mask))
+			for siRandom > maxSiTi || tiRandom > maxSiTi {
+				siRandom = uint64(randomUint32() & uint32(mask))
+				tiRandom = uint64(randomUint32() & uint32(mask))
+			}
+
+			pRandom := faceSiTiToXYZ(faceRandom, siRandom, tiRandom)
+			f, si, ti, gotLevel = xyzToFaceSiTi(pRandom)
+
+			// The chosen point is on the edge of a top-level face cell.
+			if f != faceRandom {
+				if gotLevel != -1 {
+					t.Errorf("level of random CellID = %v, want %v", gotLevel, -1)
+				}
+				if !(si == 0 || si == maxSiTi || ti == 0 || ti == maxSiTi) {
+					t.Errorf("face %d, si = %v, ti = %v, want 0 or %v for both", f, si, ti, maxSiTi)
+				}
+				continue
+			}
+
+			if siRandom != si {
+				t.Errorf("xyzToFaceSiTi(%v).si = %v, want %v", pRandom, si, siRandom)
+			}
+			if tiRandom != ti {
+				t.Errorf("xyzToFaceSiTi(%v).ti = %v, want %v", pRandom, ti, tiRandom)
+			}
+			if gotLevel >= 0 {
+				if got := cellIDFromFaceIJ(f, int(si/2), int(ti/2)).Parent(gotLevel).Point(); !pRandom.ApproxEqual(got) {
+					t.Errorf("cellIDFromFaceIJ(%d, %d, %d).Parent(%d) = %v, want %v", f, si/2, ti/2, gotLevel, got, pRandom)
+				}
+			}
+		}
+	}
+}
+
+func TestXYZFaceSiTiRoundtrip(t *testing.T) {
+	for level := 0; level < maxLevel; level++ {
+		for i := 0; i < 1000; i++ {
+			ci := randomCellIDForLevel(level)
+			f, si, ti, _ := xyzToFaceSiTi(ci.Point())
+			op := faceSiTiToXYZ(f, si, ti)
+			if !ci.Point().ApproxEqual(op) {
+				t.Errorf("faceSiTiToXYZ(xyzToFaceSiTi(%v)) = %v, want %v", ci.Point(), op, ci.Point())
+			}
+		}
+	}
+}

+ 3 - 0
vendor/github.com/hpcloud/tail/.gitignore

@@ -0,0 +1,3 @@
+.test
+.go
+

+ 18 - 0
vendor/github.com/hpcloud/tail/.travis.yml

@@ -0,0 +1,18 @@
+language: go
+
+script:
+  - go test -race -v ./...
+
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get gopkg.in/fsnotify.v1
+  - go get gopkg.in/tomb.v1

+ 63 - 0
vendor/github.com/hpcloud/tail/CHANGES.md

@@ -0,0 +1,63 @@
+# API v1 (gopkg.in/hpcloud/tail.v1)
+
+## April, 2016
+
+* Migrated to godep, as depman is no longer supported
+* Introduced golang vendoring feature
+* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopening a deleted file
+
+## July, 2015
+
+* Fix inotify watcher leak; remove `Cleanup` (#51)
+
+# API v0 (gopkg.in/hpcloud/tail.v0)
+
+## June, 2015
+
+* Don't return partial lines (PR #40)
+* Use stable version of fsnotify (#46)
+
+## July, 2014
+
+* Fix tail for Windows (PR #36)
+
+## May, 2014
+
+* Improved rate limiting using leaky bucket (PR #29)
+* Fix odd line splitting (PR #30)
+
+## Apr, 2014
+
+* LimitRate now discards read buffer (PR #28)
+* allow reading of longer lines if MaxLineSize is unset (PR #24)
+* updated deps.json to latest fsnotify (441bbc86b1)
+
+## Feb, 2014
+
+* added `Config.Logger` to suppress library logging
+
+## Nov, 2013
+
+* add Cleanup to remove leaky inotify watches (PR #20)
+
+## Aug, 2013
+
+* redesigned Location field (PR #12)
+* add tail.Tell (PR #14)
+
+## July, 2013
+
+* Rate limiting (PR #10)
+
+## May, 2013
+
+* Detect file deletions/renames in polling file watcher (PR #1)
+* Detect file truncation
+* Fix potential race condition when reopening the file (issue 5)
+* Fix potential blocking of `tail.Stop` (issue 4)
+* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
+* Support Follow=false
+
+## Feb, 2013
+
+* Initial open source release

+ 19 - 0
vendor/github.com/hpcloud/tail/Dockerfile

@@ -0,0 +1,19 @@
+FROM golang
+
+RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
+ADD . $GOPATH/src/github.com/hpcloud/tail/
+
+# expecting to fetch dependencies successfully.
+RUN go get -v github.com/hpcloud/tail
+
+# expecting to run the test successfully.
+RUN go test -v github.com/hpcloud/tail
+
+# expecting to install successfully
+RUN go install -v github.com/hpcloud/tail
+RUN go install -v github.com/hpcloud/tail/cmd/gotail
+
+RUN $GOPATH/bin/gotail -h || true
+
+ENV PATH $GOPATH/bin:$PATH
+CMD ["gotail"]

+ 15 - 0
vendor/github.com/hpcloud/tail/Godeps/Godeps.json

@@ -0,0 +1,15 @@
+{
+	"ImportPath": "github.com/hpcloud/tail",
+	"GoVersion": "go1.5.1",
+	"Deps": [
+		{
+			"ImportPath": "gopkg.in/fsnotify.v1",
+			"Comment": "v1.2.1",
+			"Rev": "7be54206639f256967dd82fa767397ba5f8f48f5"
+		},
+		{
+			"ImportPath": "gopkg.in/tomb.v1",
+			"Rev": "c131134a1947e9afd9cecfe11f4c6dff0732ae58"
+		}
+	]
+}

+ 0 - 0
Godeps/Readme → vendor/github.com/hpcloud/tail/Godeps/Readme


+ 3 - 5
vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md → vendor/github.com/hpcloud/tail/LICENSE.txt

@@ -1,8 +1,7 @@
-objx - by Mat Ryer and Tyler Bunnell
+# The MIT License (MIT)
 
-The MIT License (MIT)
-
-Copyright (c) 2014 Stretchr, Inc.
+# © Copyright 2015 Hewlett Packard Enterprise Development LP
+Copyright (c) 2014 ActiveState
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -10,7 +9,6 @@ in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:
-
 The above copyright notice and this permission notice shall be included in all
 copies or substantial portions of the Software.
 

+ 11 - 0
vendor/github.com/hpcloud/tail/Makefile

@@ -0,0 +1,11 @@
+default:	test
+
+test:	*.go
+	go test -v -race ./...
+
+fmt:
+	gofmt -w .
+
+# Run the test in an isolated environment.
+fulltest:
+	docker build -t hpcloud/tail .

+ 28 - 0
vendor/github.com/hpcloud/tail/README.md

@@ -0,0 +1,28 @@
+[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
+[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) 
+
+# Go package for tail-ing files
+
+A Go package striving to emulate the features of the BSD `tail` program. 
+
+```Go
+t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
+for line := range t.Lines {
+    fmt.Println(line.Text)
+}
+```
+
+See [API documentation](http://godoc.org/github.com/hpcloud/tail).
+
+## Log rotation
+
+Tail comes with full support for truncation/move detection as it is
+designed to work with log rotation tools.
+
+## Installing
+
+    go get github.com/hpcloud/tail/...
+
+## Windows support
+
+This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
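
For reference, a minimal sketch of following a log across rotation using the Config fields exercised by gotail further down in this diff; the log path is only an example:

```go
package main

import (
	"fmt"

	"github.com/hpcloud/tail"
)

func main() {
	// ReOpen tracks rename/rotation; gotail enables Follow alongside it.
	t, err := tail.TailFile("/var/log/nginx.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```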

+ 11 - 0
vendor/github.com/hpcloud/tail/appveyor.yml

@@ -0,0 +1,11 @@
+version: 0.{build}
+skip_tags: true
+cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
+build_script:
+- SET GOPATH=c:\workspace
+- go test -v -race ./...
+test: off
+clone_folder: c:\workspace\src\github.com\hpcloud\tail
+branches:
+  only:
+  - master

+ 1 - 0
vendor/github.com/hpcloud/tail/cmd/gotail/.gitignore

@@ -0,0 +1 @@
+gotail

+ 4 - 0
vendor/github.com/hpcloud/tail/cmd/gotail/Makefile

@@ -0,0 +1,4 @@
+default:	gotail
+
+gotail:	*.go ../../*.go
+	go build

+ 66 - 0
vendor/github.com/hpcloud/tail/cmd/gotail/gotail.go

@@ -0,0 +1,66 @@
+// Copyright (c) 2015 HPE Software Inc. All rights reserved.
+// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+
+	"github.com/hpcloud/tail"
+)
+
+func args2config() (tail.Config, int64) {
+	config := tail.Config{Follow: true}
+	n := int64(0)
+	maxlinesize := int(0)
+	flag.Int64Var(&n, "n", 0, "tail from the last Nth location")
+	flag.IntVar(&maxlinesize, "max", 0, "max line size")
+	flag.BoolVar(&config.Follow, "f", false, "wait for additional data to be appended to the file")
+	flag.BoolVar(&config.ReOpen, "F", false, "follow, and track file rename/rotation")
+	flag.BoolVar(&config.Poll, "p", false, "use polling, instead of inotify")
+	flag.Parse()
+	if config.ReOpen {
+		config.Follow = true
+	}
+	config.MaxLineSize = maxlinesize
+	return config, n
+}
+
+func main() {
+	config, n := args2config()
+	if flag.NArg() < 1 {
+		fmt.Println("need one or more files as arguments")
+		os.Exit(1)
+	}
+
+	if n != 0 {
+		config.Location = &tail.SeekInfo{-n, os.SEEK_END}
+	}
+
+	done := make(chan bool)
+	for _, filename := range flag.Args() {
+		go tailFile(filename, config, done)
+	}
+
+	for _, _ = range flag.Args() {
+		<-done
+	}
+}
+
+func tailFile(filename string, config tail.Config, done chan bool) {
+	defer func() { done <- true }()
+	t, err := tail.TailFile(filename, config)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	for line := range t.Lines {
+		fmt.Println(line.Text)
+	}
+	err = t.Wait()
+	if err != nil {
+		fmt.Println(err)
+	}
+}

+ 7 - 0
vendor/github.com/hpcloud/tail/ratelimiter/Licence

@@ -0,0 +1,7 @@
+Copyright (C) 2013 99designs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 97 - 0
vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go

@@ -0,0 +1,97 @@
+// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
+package ratelimiter
+
+import (
+	"time"
+)
+
+type LeakyBucket struct {
+	Size         uint16
+	Fill         float64
+	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+	Lastupdate   time.Time
+	Now          func() time.Time
+}
+
+func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
+	bucket := LeakyBucket{
+		Size:         size,
+		Fill:         0,
+		LeakInterval: leakInterval,
+		Now:          time.Now,
+		Lastupdate:   time.Now(),
+	}
+
+	return &bucket
+}
+
+func (b *LeakyBucket) updateFill() {
+	now := b.Now()
+	if b.Fill > 0 {
+		elapsed := now.Sub(b.Lastupdate)
+
+		b.Fill -= float64(elapsed) / float64(b.LeakInterval)
+		if b.Fill < 0 {
+			b.Fill = 0
+		}
+	}
+	b.Lastupdate = now
+}
+
+func (b *LeakyBucket) Pour(amount uint16) bool {
+	b.updateFill()
+
+	var newfill float64 = b.Fill + float64(amount)
+
+	if newfill > float64(b.Size) {
+		return false
+	}
+
+	b.Fill = newfill
+
+	return true
+}
+
+// The time at which this bucket will be completely drained
+func (b *LeakyBucket) DrainedAt() time.Time {
+	return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
+}
+
+// The duration until this bucket is completely drained
+func (b *LeakyBucket) TimeToDrain() time.Duration {
+	return b.DrainedAt().Sub(b.Now())
+}
+
+func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
+	return b.Now().Sub(b.Lastupdate)
+}
+
+type LeakyBucketSer struct {
+	Size         uint16
+	Fill         float64
+	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
+	Lastupdate   time.Time
+}
+
+func (b *LeakyBucket) Serialise() *LeakyBucketSer {
+	bucket := LeakyBucketSer{
+		Size:         b.Size,
+		Fill:         b.Fill,
+		LeakInterval: b.LeakInterval,
+		Lastupdate:   b.Lastupdate,
+	}
+
+	return &bucket
+}
+
+func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
+	bucket := LeakyBucket{
+		Size:         b.Size,
+		Fill:         b.Fill,
+		LeakInterval: b.LeakInterval,
+		Lastupdate:   b.Lastupdate,
+		Now:          time.Now,
+	}
+
+	return &bucket
+}
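
A minimal usage sketch of the bucket above; the 100-unit size and 10ms leak interval are arbitrary example values (roughly a sustained 100 units per second, with bursts up to 100):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	bucket := ratelimiter.NewLeakyBucket(100, 10*time.Millisecond)

	for i := 0; i < 150; i++ {
		if !bucket.Pour(1) {
			fmt.Println("rate limited; bucket drains in", bucket.TimeToDrain())
			break
		}
	}
}
```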

+ 73 - 0
vendor/github.com/hpcloud/tail/ratelimiter/leakybucket_test.go

@@ -0,0 +1,73 @@
+package ratelimiter
+
+import (
+	"testing"
+	"time"
+)
+
+func TestPour(t *testing.T) {
+	bucket := NewLeakyBucket(60, time.Second)
+	bucket.Lastupdate = time.Unix(0, 0)
+
+	bucket.Now = func() time.Time { return time.Unix(1, 0) }
+
+	if bucket.Pour(61) {
+		t.Error("Expected false")
+	}
+
+	if !bucket.Pour(10) {
+		t.Error("Expected true")
+	}
+
+	if !bucket.Pour(49) {
+		t.Error("Expected true")
+	}
+
+	if bucket.Pour(2) {
+		t.Error("Expected false")
+	}
+
+	bucket.Now = func() time.Time { return time.Unix(61, 0) }
+	if !bucket.Pour(60) {
+		t.Error("Expected true")
+	}
+
+	if bucket.Pour(1) {
+		t.Error("Expected false")
+	}
+
+	bucket.Now = func() time.Time { return time.Unix(70, 0) }
+
+	if !bucket.Pour(1) {
+		t.Error("Expected true")
+	}
+
+}
+
+func TestTimeSinceLastUpdate(t *testing.T) {
+	bucket := NewLeakyBucket(60, time.Second)
+	bucket.Now = func() time.Time { return time.Unix(1, 0) }
+	bucket.Pour(1)
+	bucket.Now = func() time.Time { return time.Unix(2, 0) }
+
+	sinceLast := bucket.TimeSinceLastUpdate()
+	if sinceLast != time.Second*1 {
+		t.Errorf("Expected time since last update to be 1 second, got %v", sinceLast)
+	}
+}
+
+func TestTimeToDrain(t *testing.T) {
+	bucket := NewLeakyBucket(60, time.Second)
+	bucket.Now = func() time.Time { return time.Unix(1, 0) }
+	bucket.Pour(10)
+
+	if bucket.TimeToDrain() != time.Second*10 {
+		t.Error("Time to drain should be 10 seconds")
+	}
+
+	bucket.Now = func() time.Time { return time.Unix(2, 0) }
+
+	if bucket.TimeToDrain() != time.Second*9 {
+		t.Error("Time to drain should be 9 seconds")
+	}
+}

+ 58 - 0
vendor/github.com/hpcloud/tail/ratelimiter/memory.go

@@ -0,0 +1,58 @@
+package ratelimiter
+
+import (
+	"errors"
+	"time"
+)
+
+const GC_SIZE int = 100
+
+type Memory struct {
+	store           map[string]LeakyBucket
+	lastGCCollected time.Time
+}
+
+func NewMemory() *Memory {
+	m := new(Memory)
+	m.store = make(map[string]LeakyBucket)
+	m.lastGCCollected = time.Now()
+	return m
+}
+
+func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
+
+	bucket, ok := m.store[key]
+	if !ok {
+		return nil, errors.New("miss")
+	}
+
+	return &bucket, nil
+}
+
+func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
+
+	if len(m.store) > GC_SIZE {
+		m.GarbageCollect()
+	}
+
+	m.store[key] = bucket
+
+	return nil
+}
+
+func (m *Memory) GarbageCollect() {
+	now := time.Now()
+
+	// rate limit GC to once per minute
+	if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
+
+		for key, bucket := range m.store {
+			// if the bucket is drained, then GC
+			if bucket.DrainedAt().Unix() > now.Unix() {
+				delete(m.store, key)
+			}
+		}
+
+		m.lastGCCollected = now
+	}
+}

+ 6 - 0
vendor/github.com/hpcloud/tail/ratelimiter/storage.go

@@ -0,0 +1,6 @@
+package ratelimiter
+
+type Storage interface {
+	GetBucketFor(string) (*LeakyBucket, error)
+	SetBucketFor(string, LeakyBucket) error
+}
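
A hedged sketch of the get/modify/set pattern the Storage interface above implies, using the in-memory backend; the key name is only an example:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	var store ratelimiter.Storage = ratelimiter.NewMemory()

	// On a miss, start a fresh bucket; otherwise reuse the stored one.
	bucket, err := store.GetBucketFor("client-42")
	if err != nil {
		bucket = ratelimiter.NewLeakyBucket(60, time.Second)
	}

	allowed := bucket.Pour(1)
	if err := store.SetBucketFor("client-42", *bucket); err != nil {
		fmt.Println("store error:", err)
		return
	}
	fmt.Println("allowed:", allowed)
}
```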

Some files were not shown because too many files changed in this diff