Browse Source

Update dependencies

Ask Bjørn Hansen 7 years ago
parent
commit
d281ecbade
100 changed files with 9647 additions and 18172 deletions
  1. 33 24
      Gopkg.lock
  2. 6 76
      Gopkg.toml
  3. 0 37
      vendor/github.com/abh/errorutil/README.md
  4. 0 22
      vendor/github.com/davecgh/go-spew/.gitignore
  5. 0 14
      vendor/github.com/davecgh/go-spew/.travis.yml
  6. 0 205
      vendor/github.com/davecgh/go-spew/README.md
  7. 0 22
      vendor/github.com/davecgh/go-spew/cov_report.sh
  8. 0 298
      vendor/github.com/davecgh/go-spew/spew/common_test.go
  9. 0 1042
      vendor/github.com/davecgh/go-spew/spew/dump_test.go
  10. 0 99
      vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
  11. 0 26
      vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
  12. 0 226
      vendor/github.com/davecgh/go-spew/spew/example_test.go
  13. 0 1558
      vendor/github.com/davecgh/go-spew/spew/format_test.go
  14. 0 87
      vendor/github.com/davecgh/go-spew/spew/internal_test.go
  15. 0 102
      vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
  16. 0 320
      vendor/github.com/davecgh/go-spew/spew/spew_test.go
  17. 0 82
      vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
  18. 0 61
      vendor/github.com/davecgh/go-spew/test_coverage.txt
  19. 0 151
      vendor/github.com/golang/geo/README.md
  20. 13 15
      vendor/github.com/golang/geo/r1/doc.go
  21. 13 15
      vendor/github.com/golang/geo/r1/interval.go
  22. 0 349
      vendor/github.com/golang/geo/r1/interval_test.go
  23. 13 15
      vendor/github.com/golang/geo/r2/doc.go
  24. 15 17
      vendor/github.com/golang/geo/r2/rect.go
  25. 0 476
      vendor/github.com/golang/geo/r2/rect_test.go
  26. 13 15
      vendor/github.com/golang/geo/r3/doc.go
  27. 17 19
      vendor/github.com/golang/geo/r3/precisevector.go
  28. 0 477
      vendor/github.com/golang/geo/r3/precisevector_test.go
  29. 13 15
      vendor/github.com/golang/geo/r3/vector.go
  30. 0 339
      vendor/github.com/golang/geo/r3/vector_test.go
  31. 13 15
      vendor/github.com/golang/geo/s1/angle.go
  32. 0 169
      vendor/github.com/golang/geo/s1/angle_test.go
  33. 60 24
      vendor/github.com/golang/geo/s1/chordangle.go
  34. 0 226
      vendor/github.com/golang/geo/s1/chordangle_test.go
  35. 13 15
      vendor/github.com/golang/geo/s1/doc.go
  36. 13 15
      vendor/github.com/golang/geo/s1/interval.go
  37. 0 457
      vendor/github.com/golang/geo/s1/interval_test.go
  38. 53 0
      vendor/github.com/golang/geo/s2/bits_go18.go
  39. 39 0
      vendor/github.com/golang/geo/s2/bits_go19.go
  40. 66 15
      vendor/github.com/golang/geo/s2/cap.go
  41. 0 718
      vendor/github.com/golang/geo/s2/cap_test.go
  42. 235 25
      vendor/github.com/golang/geo/s2/cell.go
  43. 0 522
      vendor/github.com/golang/geo/s2/cell_test.go
  44. 103 71
      vendor/github.com/golang/geo/s2/cellid.go
  45. 0 1052
      vendor/github.com/golang/geo/s2/cellid_test.go
  46. 405 51
      vendor/github.com/golang/geo/s2/cellunion.go
  47. 0 723
      vendor/github.com/golang/geo/s2/cellunion_test.go
  48. 63 0
      vendor/github.com/golang/geo/s2/contains_vertex_query.go
  49. 410 0
      vendor/github.com/golang/geo/s2/crossing_edge_query.go
  50. 13 15
      vendor/github.com/golang/geo/s2/doc.go
  51. 672 0
      vendor/github.com/golang/geo/s2/edge_clipping.go
  52. 227 0
      vendor/github.com/golang/geo/s2/edge_crosser.go
  53. 394 0
      vendor/github.com/golang/geo/s2/edge_crossings.go
  54. 318 0
      vendor/github.com/golang/geo/s2/edge_distances.go
  55. 0 1293
      vendor/github.com/golang/geo/s2/edgeutil.go
  56. 0 1201
      vendor/github.com/golang/geo/s2/edgeutil_test.go
  57. 237 0
      vendor/github.com/golang/geo/s2/encode.go
  58. 143 0
      vendor/github.com/golang/geo/s2/interleave.go
  59. 13 15
      vendor/github.com/golang/geo/s2/latlng.go
  60. 0 155
      vendor/github.com/golang/geo/s2/latlng_test.go
  61. 1556 49
      vendor/github.com/golang/geo/s2/loop.go
  62. 0 533
      vendor/github.com/golang/geo/s2/loop_test.go
  63. 13 15
      vendor/github.com/golang/geo/s2/matrix3x3.go
  64. 0 494
      vendor/github.com/golang/geo/s2/matrix3x3_test.go
  65. 13 15
      vendor/github.com/golang/geo/s2/metric.go
  66. 0 109
      vendor/github.com/golang/geo/s2/metric_test.go
  67. 88 0
      vendor/github.com/golang/geo/s2/nthderivative.go
  68. 21 23
      vendor/github.com/golang/geo/s2/paddedcell.go
  69. 0 197
      vendor/github.com/golang/geo/s2/paddedcell_test.go
  70. 157 25
      vendor/github.com/golang/geo/s2/point.go
  71. 0 384
      vendor/github.com/golang/geo/s2/point_test.go
  72. 319 0
      vendor/github.com/golang/geo/s2/pointcompression.go
  73. 794 97
      vendor/github.com/golang/geo/s2/polygon.go
  74. 0 342
      vendor/github.com/golang/geo/s2/polygon_test.go
  75. 230 37
      vendor/github.com/golang/geo/s2/polyline.go
  76. 0 144
      vendor/github.com/golang/geo/s2/polyline_test.go
  77. 227 25
      vendor/github.com/golang/geo/s2/predicates.go
  78. 0 314
      vendor/github.com/golang/geo/s2/predicates_test.go
  79. 55 17
      vendor/github.com/golang/geo/s2/rect.go
  80. 352 0
      vendor/github.com/golang/geo/s2/rect_bounder.go
  81. 0 862
      vendor/github.com/golang/geo/s2/rect_test.go
  82. 37 17
      vendor/github.com/golang/geo/s2/region.go
  83. 23 57
      vendor/github.com/golang/geo/s2/regioncoverer.go
  84. 0 151
      vendor/github.com/golang/geo/s2/regioncoverer_test.go
  85. 0 414
      vendor/github.com/golang/geo/s2/s2_test.go
  86. 0 196
      vendor/github.com/golang/geo/s2/s2_test_test.go
  87. 194 0
      vendor/github.com/golang/geo/s2/shape.go
  88. 1338 108
      vendor/github.com/golang/geo/s2/shapeindex.go
  89. 0 84
      vendor/github.com/golang/geo/s2/shapeindex_test.go
  90. 227 0
      vendor/github.com/golang/geo/s2/shapeutil.go
  91. 155 38
      vendor/github.com/golang/geo/s2/stuv.go
  92. 0 321
      vendor/github.com/golang/geo/s2/stuv_test.go
  93. 125 0
      vendor/github.com/golang/geo/s2/util.go
  94. 97 0
      vendor/github.com/golang/geo/s2/wedge_relations.go
  95. 0 3
      vendor/github.com/hpcloud/tail/.gitignore
  96. 0 18
      vendor/github.com/hpcloud/tail/.travis.yml
  97. 0 63
      vendor/github.com/hpcloud/tail/CHANGES.md
  98. 0 19
      vendor/github.com/hpcloud/tail/Dockerfile
  99. 0 15
      vendor/github.com/hpcloud/tail/Godeps/Godeps.json
  100. 0 5
      vendor/github.com/hpcloud/tail/Godeps/Readme

+ 33 - 24
Gopkg.lock

@@ -2,33 +2,34 @@
 
 
 
 
 [[projects]]
 [[projects]]
+  branch = "master"
   name = "github.com/abh/errorutil"
   name = "github.com/abh/errorutil"
   packages = ["."]
   packages = ["."]
   revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
   revision = "f9bd360d00b902548fbb80837aef90dca2c8285e"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "github.com/davecgh/go-spew"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
   packages = ["spew"]
   revision = "346938d642f2ec3594ed81d874461961cd0faa76"
   revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "github.com/golang/geo"
   name = "github.com/golang/geo"
   packages = ["r1","r2","r3","s1","s2"]
   packages = ["r1","r2","r3","s1","s2"]
-  revision = "5747e9816367bd031622778e3e538f9737814005"
+  revision = "a8523298cefedcf7b70bbbf4eeef24cbb3258376"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "github.com/hpcloud/tail"
   name = "github.com/hpcloud/tail"
   packages = [".","ratelimiter","util","watch","winfile"]
   packages = [".","ratelimiter","util","watch","winfile"]
-  revision = "faf842bde7ed83bbc3c65a2c454fae39bc29a95f"
+  revision = "37f4271387456dd1bf82ab1ad9229f060cc45386"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "github.com/influxdata/influxdb"
   name = "github.com/influxdata/influxdb"
   packages = ["client/v2","models","pkg/escape"]
   packages = ["client/v2","models","pkg/escape"]
-  revision = "38735b24f67ed462a310d490cf23c1b3953e53e8"
+  revision = "b228fc5f0d0aa523be7c39f0ee28187084a1e271"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
@@ -43,27 +44,28 @@
   revision = "7cafcd837844e784b526369c9bce262804aebc60"
   revision = "7cafcd837844e784b526369c9bce262804aebc60"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "github.com/miekg/dns"
   name = "github.com/miekg/dns"
   packages = ["."]
   packages = ["."]
-  revision = "f282f80e243cc2bf8f6410c30d821b93b794e168"
+  revision = "5364553f1ee9cddc7ac8b62dce148309c386695b"
+  version = "v1.0.4"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "github.com/oschwald/geoip2-golang"
   name = "github.com/oschwald/geoip2-golang"
   packages = ["."]
   packages = ["."]
-  revision = "5b1dc16861f81d05d9836bb21c2d0d65282fc0b8"
+  revision = "b1581f42de7092eb285fa413a1310f90c4f328fb"
+  version = "v1.2.0"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "github.com/oschwald/maxminddb-golang"
   name = "github.com/oschwald/maxminddb-golang"
   packages = ["."]
   packages = ["."]
-  revision = "d19f6d453e836d12ee8fe895d0494421e93ef8c1"
+  revision = "8727e98aa1b91610eb184ed1ab615943b8d9deb0"
+  version = "v1.2.1"
 
 
 [[projects]]
 [[projects]]
   name = "github.com/pborman/uuid"
   name = "github.com/pborman/uuid"
   packages = ["."]
   packages = ["."]
-  revision = "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
+  revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
+  version = "v1.1"
 
 
 [[projects]]
 [[projects]]
   name = "github.com/pmezard/go-difflib"
   name = "github.com/pmezard/go-difflib"
@@ -72,45 +74,52 @@
   version = "v1.0.0"
   version = "v1.0.0"
 
 
 [[projects]]
 [[projects]]
+  branch = "master"
   name = "github.com/rcrowley/go-metrics"
   name = "github.com/rcrowley/go-metrics"
   packages = ["."]
   packages = ["."]
-  revision = "eeba7bd0dd01ace6e690fa833b3f22aaec29af43"
+  revision = "8732c616f52954686704c8645fe1a9d59e9df7c1"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "github.com/stretchr/testify"
   name = "github.com/stretchr/testify"
   packages = ["assert","require"]
   packages = ["assert","require"]
-  revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
+  revision = "be8372ae8ec5c6daaed3cc28ebf73c54b737c240"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["ed25519","ed25519/internal/edwards25519"]
+  revision = "432090b8f568c018896cd8a0fb0345872bbac6ce"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "golang.org/x/net"
   name = "golang.org/x/net"
-  packages = ["websocket"]
-  revision = "fcc8ed8e87ee07a511396864dad3960b9632e44f"
+  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6","websocket"]
+  revision = "cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb"
 
 
 [[projects]]
 [[projects]]
   branch = "master"
   branch = "master"
   name = "golang.org/x/sys"
   name = "golang.org/x/sys"
   packages = ["unix","windows"]
   packages = ["unix","windows"]
-  revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5"
+  revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "gopkg.in/fsnotify.v1"
   name = "gopkg.in/fsnotify.v1"
   packages = ["."]
   packages = ["."]
-  revision = "629574ca2a5df945712d3079857300b5e4da0236"
+  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
+  version = "v1.4.7"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "gopkg.in/gcfg.v1"
   name = "gopkg.in/gcfg.v1"
   packages = [".","scanner","token","types"]
   packages = [".","scanner","token","types"]
-  revision = "27e4946190b4a327b539185f2b5b1f7c84730728"
+  revision = "298b7a6a3838f79debfaee8bd3bfb2b8d779e756"
+  version = "v1.2.1"
 
 
 [[projects]]
 [[projects]]
   branch = "v2.0"
   branch = "v2.0"
   name = "gopkg.in/natefinch/lumberjack.v2"
   name = "gopkg.in/natefinch/lumberjack.v2"
   packages = ["."]
   packages = ["."]
-  revision = "514cbda263a734ae8caac038dadf05f8f3f9f738"
+  revision = "aee4629129445bbdfb69aa565537dcfa16544311"
 
 
 [[projects]]
 [[projects]]
   branch = "v1"
   branch = "v1"
@@ -119,14 +128,14 @@
   revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
   revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
 
 
 [[projects]]
 [[projects]]
-  branch = "master"
   name = "gopkg.in/warnings.v0"
   name = "gopkg.in/warnings.v0"
   packages = ["."]
   packages = ["."]
-  revision = "8a331561fe74dadba6edfc59f3be66c22c3b065d"
+  revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
+  version = "v0.1.2"
 
 
 [solve-meta]
 [solve-meta]
   analyzer-name = "dep"
   analyzer-name = "dep"
   analyzer-version = 1
   analyzer-version = 1
-  inputs-digest = "da057b40b78dea546d93310e141024db552a5a898a38576f408bbddb7297a5ae"
+  inputs-digest = "f0c6f2e8ea3e595a7d2b5733fcff121dda3c3022a45d5dee81b2b30b54e5b260"
   solver-name = "gps-cdcl"
   solver-name = "gps-cdcl"
   solver-version = 1
   solver-version = 1

+ 6 - 76
Gopkg.toml

@@ -1,70 +1,7 @@
-
-## Gopkg.toml example (these lines may be deleted)
-
-## "metadata" defines metadata about the project that could be used by other independent
-## systems. The metadata defined here will be ignored by dep.
-# [metadata]
-# key1 = "value that convey data to other systems"
-# system1-data = "value that is used by a system"
-# system2-data = "value that is used by another system"
-
-## "required" lists a set of packages (not projects) that must be included in
-## Gopkg.lock. This list is merged with the set of packages imported by the current
-## project. Use it when your project needs a package it doesn't explicitly import -
-## including "main" packages.
-# required = ["github.com/user/thing/cmd/thing"]
-
-## "ignored" lists a set of packages (not projects) that are ignored when
-## dep statically analyzes source code. Ignored packages can be in this project,
-## or in a dependency.
-# ignored = ["github.com/user/project/badpkg"]
-
-## Constraints are rules for how directly imported projects
-## may be incorporated into the depgraph. They are respected by
-## dep whether coming from the Gopkg.toml of the current project or a dependency.
-# [[constraint]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Recommended: the version constraint to enforce for the project.
-## Only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: an alternate location (URL or import path) for the project's source.
-# source = "https://github.com/myfork/package.git"
-#
-## "metadata" defines metadata about the dependency or override that could be used
-## by other independent systems. The metadata defined here will be ignored by dep.
-# [metadata]
-# key1 = "value that convey data to other systems"
-# system1-data = "value that is used by a system"
-# system2-data = "value that is used by another system"
-
-## Overrides have the same structure as [[constraint]], but supersede all
-## [[constraint]] declarations from all projects. Only [[override]] from
-## the current project's are applied.
-##
-## Overrides are a sledgehammer. Use them only as a last resort.
-# [[override]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Optional: specifying a version constraint override will cause all other
-## constraints on this project to be ignored; only the overridden constraint
-## need be satisfied.
-## Again, only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: specifying an alternate source location as an override will
-## enforce that the alternate location is used for that project, regardless of
-## what source location any dependent projects specify.
-# source = "https://github.com/myfork/package.git"
-
-
+[prune]
+  non-go = true
+  go-tests = true
+  unused-packages = true
 
 
 [[constraint]]
 [[constraint]]
   branch = "master"
   branch = "master"
@@ -90,10 +27,6 @@
   branch = "master"
   branch = "master"
   name = "github.com/oschwald/geoip2-golang"
   name = "github.com/oschwald/geoip2-golang"
 
 
-[[constraint]]
-  name = "github.com/pmezard/go-difflib"
-  version = "1.0.0"
-
 [[constraint]]
 [[constraint]]
   branch = "master"
   branch = "master"
   name = "github.com/stretchr/testify"
   name = "github.com/stretchr/testify"
@@ -103,17 +36,14 @@
   name = "golang.org/x/net"
   name = "golang.org/x/net"
 
 
 [[constraint]]
 [[constraint]]
-  branch = "master"
+  version = "v1.4.7"
   name = "gopkg.in/fsnotify.v1"
   name = "gopkg.in/fsnotify.v1"
 
 
 [[constraint]]
 [[constraint]]
-  branch = "master"
+  version = "v1.2.1"
   name = "gopkg.in/gcfg.v1"
   name = "gopkg.in/gcfg.v1"
 
 
 [[constraint]]
 [[constraint]]
   branch = "v2.0"
   branch = "v2.0"
   name = "gopkg.in/natefinch/lumberjack.v2"
   name = "gopkg.in/natefinch/lumberjack.v2"
 
 
-[[constraint]]
-  branch = "v1"
-  name = "gopkg.in/tomb.v1"

+ 0 - 37
vendor/github.com/abh/errorutil/README.md

@@ -1,37 +0,0 @@
-errorutil
-=========
-
-Errorutil is a small go package to help show syntax errors in for example JSON documents.
-
-It was forked from [Camlistore](http://camlistore.org) to make a smaller dependency.
-
-
-Example
--------
-
-An example of how to use the package to show errors when decoding with
-[encoding/json](http://golang.org/pkg/encoding/json/).
-
-    if err = decoder.Decode(&objmap); err != nil {
-            extra := ""
-
-            // if it's a syntax error, add more information
-            if serr, ok := err.(*json.SyntaxError); ok {
-                    if _, serr := fh.Seek(0, os.SEEK_SET); serr != nil {
-                            log.Fatalf("seek error: %v", serr)
-                    }
-                    line, col, highlight := errorutil.HighlightBytePosition(fh, serr.Offset)
-                    extra = fmt.Sprintf(":\nError at line %d, column %d (file offset %d):\n%s",
-                            line, col, serr.Offset, highlight)
-            }
-
-            return nil, fmt.Errorf("error parsing JSON object in config file %s%s\n%v",
-                    fh.Name(), extra, err)
-    }
-
-
-License
--------
-
-This package is licesed under the Apache License, version 2.0. It was developed
-by Brad Fitzpatrick as part of the Camlistore project.

+ 0 - 22
vendor/github.com/davecgh/go-spew/.gitignore

@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe

+ 0 - 14
vendor/github.com/davecgh/go-spew/.travis.yml

@@ -1,14 +0,0 @@
-language: go
-go:
-    - 1.5.4
-    - 1.6.3
-    - 1.7
-install:
-    - go get -v golang.org/x/tools/cmd/cover
-script:
-    - go test -v -tags=safe ./spew
-    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
-after_success:
-    - go get -v github.com/mattn/goveralls
-    - export PATH=$PATH:$HOME/gopath/bin
-    - goveralls -coverprofile=profile.cov -service=travis-ci

+ 0 - 205
vendor/github.com/davecgh/go-spew/README.md

@@ -1,205 +0,0 @@
-go-spew
-=======
-
-[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]
-(https://travis-ci.org/davecgh/go-spew) [![ISC License]
-(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
-(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
-(https://coveralls.io/r/davecgh/go-spew?branch=master)
-
-
-Go-spew implements a deep pretty printer for Go data structures to aid in
-debugging.  A comprehensive suite of tests with 100% test coverage is provided
-to ensure proper functionality.  See `test_coverage.txt` for the gocov coverage
-report.  Go-spew is licensed under the liberal ISC license, so it may be used in
-open source or commercial projects.
-
-If you're interested in reading about how this package came to life and some
-of the challenges involved in providing a deep pretty printer, there is a blog
-post about it
-[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
-
-## Documentation
-
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
-(http://godoc.org/github.com/davecgh/go-spew/spew)
-
-Full `go doc` style documentation for the project can be viewed online without
-installing this package by using the excellent GoDoc site here:
-http://godoc.org/github.com/davecgh/go-spew/spew
-
-You can also view the documentation locally once the package is installed with
-the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
-http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
-
-## Installation
-
-```bash
-$ go get -u github.com/davecgh/go-spew/spew
-```
-
-## Quick Start
-
-Add this import line to the file you're working in:
-
-```Go
-import "github.com/davecgh/go-spew/spew"
-```
-
-To dump a variable with full newlines, indentation, type, and pointer
-information use Dump, Fdump, or Sdump:
-
-```Go
-spew.Dump(myVar1, myVar2, ...)
-spew.Fdump(someWriter, myVar1, myVar2, ...)
-str := spew.Sdump(myVar1, myVar2, ...)
-```
-
-Alternatively, if you would prefer to use format strings with a compacted inline
-printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
-compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
-and pointer addresses): 
-
-```Go
-spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-```
-
-## Debugging a Web Application Example
-
-Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
-
-```Go
-package main
-
-import (
-    "fmt"
-    "html"
-    "net/http"
-
-    "github.com/davecgh/go-spew/spew"
-)
-
-func handler(w http.ResponseWriter, r *http.Request) {
-    w.Header().Set("Content-Type", "text/html")
-    fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
-    fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
-}
-
-func main() {
-    http.HandleFunc("/", handler)
-    http.ListenAndServe(":8080", nil)
-}
-```
-
-## Sample Dump Output
-
-```
-(main.Foo) {
- unexportedField: (*main.Bar)(0xf84002e210)({
-  flag: (main.Flag) flagTwo,
-  data: (uintptr) <nil>
- }),
- ExportedField: (map[interface {}]interface {}) {
-  (string) "one": (bool) true
- }
-}
-([]uint8) {
- 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
- 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
- 00000020  31 32                                             |12|
-}
-```
-
-## Sample Formatter Output
-
-Double pointer to a uint8:
-```
-	  %v: <**>5
-	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
-	 %#v: (**uint8)5
-	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-```
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
-```
-	  %v: <*>{1 <*><shown>}
-	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
-	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
-	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-```
-
-## Configuration Options
-
-Configuration of spew is handled by fields in the ConfigState type. For
-convenience, all of the top-level functions use a global state available via the
-spew.Config global.
-
-It is also possible to create a ConfigState instance that provides methods
-equivalent to the top-level functions. This allows concurrent configuration
-options. See the ConfigState documentation for more details.
-
-```
-* Indent
-	String to use for each indentation level for Dump functions.
-	It is a single space by default.  A popular alternative is "\t".
-
-* MaxDepth
-	Maximum number of levels to descend into nested data structures.
-	There is no limit by default.
-
-* DisableMethods
-	Disables invocation of error and Stringer interface methods.
-	Method invocation is enabled by default.
-
-* DisablePointerMethods
-	Disables invocation of error and Stringer interface methods on types
-	which only accept pointer receivers from non-pointer variables.  This option
-	relies on access to the unsafe package, so it will not have any effect when
-	running in environments without access to the unsafe package such as Google
-	App Engine or with the "safe" build tag specified.
-	Pointer method invocation is enabled by default.
-
-* DisablePointerAddresses
-	DisablePointerAddresses specifies whether to disable the printing of
-	pointer addresses. This is useful when diffing data structures in tests.
-
-* DisableCapacities
-	DisableCapacities specifies whether to disable the printing of capacities
-	for arrays, slices, maps and channels. This is useful when diffing data
-	structures in tests.
-
-* ContinueOnMethod
-	Enables recursion into types after invoking error and Stringer interface
-	methods. Recursion after method invocation is disabled by default.
-
-* SortKeys
-	Specifies map keys should be sorted before being printed. Use
-	this to have a more deterministic, diffable output.  Note that
-	only native types (bool, int, uint, floats, uintptr and string)
-	and types which implement error or Stringer interfaces are supported,
-	with other types sorted according to the reflect.Value.String() output
-	which guarantees display stability.  Natural map order is used by
-	default.
-
-* SpewKeys
-	SpewKeys specifies that, as a last resort attempt, map keys should be
-	spewed to strings and sorted by those strings.  This is only considered
-	if SortKeys is true.
-
-```
-
-## Unsafe Package Dependency
-
-This package relies on the unsafe package to perform some of the more advanced
-features, however it also supports a "limited" mode which allows it to work in
-environments where the unsafe package is not available.  By default, it will
-operate in this mode on Google App Engine and when compiled with GopherJS.  The
-"safe" build tag may also be specified to force the package to build without
-using the unsafe package.
-
-## License
-
-Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.

+ 0 - 22
vendor/github.com/davecgh/go-spew/cov_report.sh

@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# This script uses gocov to generate a test coverage report.
-# The gocov tool my be obtained with the following command:
-#   go get github.com/axw/gocov/gocov
-#
-# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
-
-# Check for gocov.
-if ! type gocov >/dev/null 2>&1; then
-	echo >&2 "This script requires the gocov tool."
-	echo >&2 "You may obtain it with the following command:"
-	echo >&2 "go get github.com/axw/gocov/gocov"
-	exit 1
-fi
-
-# Only run the cgo tests if gcc is installed.
-if type gcc >/dev/null 2>&1; then
-	(cd spew && gocov test -tags testcgo | gocov report)
-else
-	(cd spew && gocov test | gocov report)
-fi

+ 0 - 298
vendor/github.com/davecgh/go-spew/spew/common_test.go

@@ -1,298 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew_test
-
-import (
-	"fmt"
-	"reflect"
-	"testing"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-// custom type to test Stinger interface on non-pointer receiver.
-type stringer string
-
-// String implements the Stringer interface for testing invocation of custom
-// stringers on types with non-pointer receivers.
-func (s stringer) String() string {
-	return "stringer " + string(s)
-}
-
-// custom type to test Stinger interface on pointer receiver.
-type pstringer string
-
-// String implements the Stringer interface for testing invocation of custom
-// stringers on types with only pointer receivers.
-func (s *pstringer) String() string {
-	return "stringer " + string(*s)
-}
-
-// xref1 and xref2 are cross referencing structs for testing circular reference
-// detection.
-type xref1 struct {
-	ps2 *xref2
-}
-type xref2 struct {
-	ps1 *xref1
-}
-
-// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
-// reference for testing detection.
-type indirCir1 struct {
-	ps2 *indirCir2
-}
-type indirCir2 struct {
-	ps3 *indirCir3
-}
-type indirCir3 struct {
-	ps1 *indirCir1
-}
-
-// embed is used to test embedded structures.
-type embed struct {
-	a string
-}
-
-// embedwrap is used to test embedded structures.
-type embedwrap struct {
-	*embed
-	e *embed
-}
-
-// panicer is used to intentionally cause a panic for testing spew properly
-// handles them
-type panicer int
-
-func (p panicer) String() string {
-	panic("test panic")
-}
-
-// customError is used to test custom error interface invocation.
-type customError int
-
-func (e customError) Error() string {
-	return fmt.Sprintf("error: %d", int(e))
-}
-
-// stringizeWants converts a slice of wanted test output into a format suitable
-// for a test error message.
-func stringizeWants(wants []string) string {
-	s := ""
-	for i, want := range wants {
-		if i > 0 {
-			s += fmt.Sprintf("want%d: %s", i+1, want)
-		} else {
-			s += "want: " + want
-		}
-	}
-	return s
-}
-
-// testFailed returns whether or not a test failed by checking if the result
-// of the test is in the slice of wanted strings.
-func testFailed(result string, wants []string) bool {
-	for _, want := range wants {
-		if result == want {
-			return false
-		}
-	}
-	return true
-}
-
-type sortableStruct struct {
-	x int
-}
-
-func (ss sortableStruct) String() string {
-	return fmt.Sprintf("ss.%d", ss.x)
-}
-
-type unsortableStruct struct {
-	x int
-}
-
-type sortTestCase struct {
-	input    []reflect.Value
-	expected []reflect.Value
-}
-
-func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
-	getInterfaces := func(values []reflect.Value) []interface{} {
-		interfaces := []interface{}{}
-		for _, v := range values {
-			interfaces = append(interfaces, v.Interface())
-		}
-		return interfaces
-	}
-
-	for _, test := range tests {
-		spew.SortValues(test.input, cs)
-		// reflect.DeepEqual cannot really make sense of reflect.Value,
-		// probably because of all the pointer tricks. For instance,
-		// v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
-		// instead.
-		input := getInterfaces(test.input)
-		expected := getInterfaces(test.expected)
-		if !reflect.DeepEqual(input, expected) {
-			t.Errorf("Sort mismatch:\n %v != %v", input, expected)
-		}
-	}
-}
-
-// TestSortValues ensures the sort functionality for relect.Value based sorting
-// works as intended.
-func TestSortValues(t *testing.T) {
-	v := reflect.ValueOf
-
-	a := v("a")
-	b := v("b")
-	c := v("c")
-	embedA := v(embed{"a"})
-	embedB := v(embed{"b"})
-	embedC := v(embed{"c"})
-	tests := []sortTestCase{
-		// No values.
-		{
-			[]reflect.Value{},
-			[]reflect.Value{},
-		},
-		// Bools.
-		{
-			[]reflect.Value{v(false), v(true), v(false)},
-			[]reflect.Value{v(false), v(false), v(true)},
-		},
-		// Ints.
-		{
-			[]reflect.Value{v(2), v(1), v(3)},
-			[]reflect.Value{v(1), v(2), v(3)},
-		},
-		// Uints.
-		{
-			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
-			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
-		},
-		// Floats.
-		{
-			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
-			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
-		},
-		// Strings.
-		{
-			[]reflect.Value{b, a, c},
-			[]reflect.Value{a, b, c},
-		},
-		// Array
-		{
-			[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
-			[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
-		},
-		// Uintptrs.
-		{
-			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
-			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
-		},
-		// SortableStructs.
-		{
-			// Note: not sorted - DisableMethods is set.
-			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
-			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
-		},
-		// UnsortableStructs.
-		{
-			// Note: not sorted - SpewKeys is false.
-			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
-			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
-		},
-		// Invalid.
-		{
-			[]reflect.Value{embedB, embedA, embedC},
-			[]reflect.Value{embedB, embedA, embedC},
-		},
-	}
-	cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
-	helpTestSortValues(tests, &cs, t)
-}
-
-// TestSortValuesWithMethods ensures the sort functionality for relect.Value
-// based sorting works as intended when using string methods.
-func TestSortValuesWithMethods(t *testing.T) {
-	v := reflect.ValueOf
-
-	a := v("a")
-	b := v("b")
-	c := v("c")
-	tests := []sortTestCase{
-		// Ints.
-		{
-			[]reflect.Value{v(2), v(1), v(3)},
-			[]reflect.Value{v(1), v(2), v(3)},
-		},
-		// Strings.
-		{
-			[]reflect.Value{b, a, c},
-			[]reflect.Value{a, b, c},
-		},
-		// SortableStructs.
-		{
-			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
-			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
-		},
-		// UnsortableStructs.
-		{
-			// Note: not sorted - SpewKeys is false.
-			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
-			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
-		},
-	}
-	cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
-	helpTestSortValues(tests, &cs, t)
-}
-
-// TestSortValuesWithSpew ensures the sort functionality for relect.Value
-// based sorting works as intended when using spew to stringify keys.
-func TestSortValuesWithSpew(t *testing.T) {
-	v := reflect.ValueOf
-
-	a := v("a")
-	b := v("b")
-	c := v("c")
-	tests := []sortTestCase{
-		// Ints.
-		{
-			[]reflect.Value{v(2), v(1), v(3)},
-			[]reflect.Value{v(1), v(2), v(3)},
-		},
-		// Strings.
-		{
-			[]reflect.Value{b, a, c},
-			[]reflect.Value{a, b, c},
-		},
-		// SortableStructs.
-		{
-			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
-			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
-		},
-		// UnsortableStructs.
-		{
-			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
-			[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
-		},
-	}
-	cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
-	helpTestSortValues(tests, &cs, t)
-}

+ 0 - 1042
vendor/github.com/davecgh/go-spew/spew/dump_test.go

@@ -1,1042 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Test Summary:
-NOTE: For each test, a nil pointer, a single pointer and double pointer to the
-base test element are also tested to ensure proper indirection across all types.
-
-- Max int8, int16, int32, int64, int
-- Max uint8, uint16, uint32, uint64, uint
-- Boolean true and false
-- Standard complex64 and complex128
-- Array containing standard ints
-- Array containing type with custom formatter on pointer receiver only
-- Array containing interfaces
-- Array containing bytes
-- Slice containing standard float32 values
-- Slice containing type with custom formatter on pointer receiver only
-- Slice containing interfaces
-- Slice containing bytes
-- Nil slice
-- Standard string
-- Nil interface
-- Sub-interface
-- Map with string keys and int vals
-- Map with custom formatter type on pointer receiver only keys and vals
-- Map with interface keys and values
-- Map with nil interface value
-- Struct with primitives
-- Struct that contains another struct
-- Struct that contains custom type with Stringer pointer interface via both
-  exported and unexported fields
-- Struct that contains embedded struct and field to same struct
-- Uintptr to 0 (null pointer)
-- Uintptr address of real variable
-- Unsafe.Pointer to 0 (null pointer)
-- Unsafe.Pointer to address of real variable
-- Nil channel
-- Standard int channel
-- Function with no params and no returns
-- Function with param and no returns
-- Function with multiple params and multiple returns
-- Struct that is circular through self referencing
-- Structs that are circular through cross referencing
-- Structs that are indirectly circular
-- Type that panics in its Stringer interface
-*/
-
-package spew_test
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-	"unsafe"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-// dumpTest is used to describe a test to be performed against the Dump method.
-type dumpTest struct {
-	in    interface{}
-	wants []string
-}
-
-// dumpTests houses all of the tests to be performed against the Dump method.
-var dumpTests = make([]dumpTest, 0)
-
-// addDumpTest is a helper method to append the passed input and desired result
-// to dumpTests
-func addDumpTest(in interface{}, wants ...string) {
-	test := dumpTest{in, wants}
-	dumpTests = append(dumpTests, test)
-}
-
-func addIntDumpTests() {
-	// Max int8.
-	v := int8(127)
-	nv := (*int8)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "int8"
-	vs := "127"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Max int16.
-	v2 := int16(32767)
-	nv2 := (*int16)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "int16"
-	v2s := "32767"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-
-	// Max int32.
-	v3 := int32(2147483647)
-	nv3 := (*int32)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "int32"
-	v3s := "2147483647"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-
-	// Max int64.
-	v4 := int64(9223372036854775807)
-	nv4 := (*int64)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "int64"
-	v4s := "9223372036854775807"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
-	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
-	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
-
-	// Max int.
-	v5 := int(2147483647)
-	nv5 := (*int)(nil)
-	pv5 := &v5
-	v5Addr := fmt.Sprintf("%p", pv5)
-	pv5Addr := fmt.Sprintf("%p", &pv5)
-	v5t := "int"
-	v5s := "2147483647"
-	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
-	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
-	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
-	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
-}
-
-func addUintDumpTests() {
-	// Max uint8.
-	v := uint8(255)
-	nv := (*uint8)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "uint8"
-	vs := "255"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Max uint16.
-	v2 := uint16(65535)
-	nv2 := (*uint16)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uint16"
-	v2s := "65535"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-
-	// Max uint32.
-	v3 := uint32(4294967295)
-	nv3 := (*uint32)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "uint32"
-	v3s := "4294967295"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-
-	// Max uint64.
-	v4 := uint64(18446744073709551615)
-	nv4 := (*uint64)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "uint64"
-	v4s := "18446744073709551615"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
-	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
-	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
-
-	// Max uint.
-	v5 := uint(4294967295)
-	nv5 := (*uint)(nil)
-	pv5 := &v5
-	v5Addr := fmt.Sprintf("%p", pv5)
-	pv5Addr := fmt.Sprintf("%p", &pv5)
-	v5t := "uint"
-	v5s := "4294967295"
-	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
-	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
-	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
-	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
-}
-
-func addBoolDumpTests() {
-	// Boolean true.
-	v := bool(true)
-	nv := (*bool)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "bool"
-	vs := "true"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Boolean false.
-	v2 := bool(false)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "bool"
-	v2s := "false"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-}
-
-func addFloatDumpTests() {
-	// Standard float32.
-	v := float32(3.1415)
-	nv := (*float32)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "float32"
-	vs := "3.1415"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Standard float64.
-	v2 := float64(3.1415926)
-	nv2 := (*float64)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "float64"
-	v2s := "3.1415926"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-}
-
-func addComplexDumpTests() {
-	// Standard complex64.
-	v := complex(float32(6), -2)
-	nv := (*complex64)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "complex64"
-	vs := "(6-2i)"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Standard complex128.
-	v2 := complex(float64(-6), 2)
-	nv2 := (*complex128)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "complex128"
-	v2s := "(-6+2i)"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-}
-
-func addArrayDumpTests() {
-	// Array containing standard ints.
-	v := [3]int{1, 2, 3}
-	vLen := fmt.Sprintf("%d", len(v))
-	vCap := fmt.Sprintf("%d", cap(v))
-	nv := (*[3]int)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "int"
-	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
-		vt + ") 2,\n (" + vt + ") 3\n}"
-	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
-
-	// Array containing type with custom formatter on pointer receiver only.
-	v2i0 := pstringer("1")
-	v2i1 := pstringer("2")
-	v2i2 := pstringer("3")
-	v2 := [3]pstringer{v2i0, v2i1, v2i2}
-	v2i0Len := fmt.Sprintf("%d", len(v2i0))
-	v2i1Len := fmt.Sprintf("%d", len(v2i1))
-	v2i2Len := fmt.Sprintf("%d", len(v2i2))
-	v2Len := fmt.Sprintf("%d", len(v2))
-	v2Cap := fmt.Sprintf("%d", cap(v2))
-	nv2 := (*[3]pstringer)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.pstringer"
-	v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
-		") (len=" + v2i0Len + ") stringer 1,\n (" + v2t +
-		") (len=" + v2i1Len + ") stringer 2,\n (" + v2t +
-		") (len=" + v2i2Len + ") " + "stringer 3\n}"
-	v2s := v2sp
-	if spew.UnsafeDisabled {
-		v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
-			") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" +
-			v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len +
-			") " + "\"3\"\n}"
-	}
-	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n")
-	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n")
-	addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
-
-	// Array containing interfaces.
-	v3i0 := "one"
-	v3 := [3]interface{}{v3i0, int(2), uint(3)}
-	v3i0Len := fmt.Sprintf("%d", len(v3i0))
-	v3Len := fmt.Sprintf("%d", len(v3))
-	v3Cap := fmt.Sprintf("%d", cap(v3))
-	nv3 := (*[3]interface{})(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "[3]interface {}"
-	v3t2 := "string"
-	v3t3 := "int"
-	v3t4 := "uint"
-	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
-		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
-		v3t4 + ") 3\n}"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-
-	// Array containing bytes.
-	v4 := [34]byte{
-		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
-		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
-		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
-		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
-		0x31, 0x32,
-	}
-	v4Len := fmt.Sprintf("%d", len(v4))
-	v4Cap := fmt.Sprintf("%d", cap(v4))
-	nv4 := (*[34]byte)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "[34]uint8"
-	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
-		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
-		"  |............... |\n" +
-		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
-		"  |!\"#$%&'()*+,-./0|\n" +
-		" 00000020  31 32                                           " +
-		"  |12|\n}"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
-	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
-	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
-}
-
-func addSliceDumpTests() {
-	// Slice containing standard float32 values.
-	v := []float32{3.14, 6.28, 12.56}
-	vLen := fmt.Sprintf("%d", len(v))
-	vCap := fmt.Sprintf("%d", cap(v))
-	nv := (*[]float32)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "float32"
-	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
-		vt + ") 6.28,\n (" + vt + ") 12.56\n}"
-	addDumpTest(v, "([]"+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
-
-	// Slice containing type with custom formatter on pointer receiver only.
-	v2i0 := pstringer("1")
-	v2i1 := pstringer("2")
-	v2i2 := pstringer("3")
-	v2 := []pstringer{v2i0, v2i1, v2i2}
-	v2i0Len := fmt.Sprintf("%d", len(v2i0))
-	v2i1Len := fmt.Sprintf("%d", len(v2i1))
-	v2i2Len := fmt.Sprintf("%d", len(v2i2))
-	v2Len := fmt.Sprintf("%d", len(v2))
-	v2Cap := fmt.Sprintf("%d", cap(v2))
-	nv2 := (*[]pstringer)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.pstringer"
-	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
-		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
-		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
-		"stringer 3\n}"
-	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
-
-	// Slice containing interfaces.
-	v3i0 := "one"
-	v3 := []interface{}{v3i0, int(2), uint(3), nil}
-	v3i0Len := fmt.Sprintf("%d", len(v3i0))
-	v3Len := fmt.Sprintf("%d", len(v3))
-	v3Cap := fmt.Sprintf("%d", cap(v3))
-	nv3 := (*[]interface{})(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "[]interface {}"
-	v3t2 := "string"
-	v3t3 := "int"
-	v3t4 := "uint"
-	v3t5 := "interface {}"
-	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
-		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
-		v3t4 + ") 3,\n (" + v3t5 + ") <nil>\n}"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-
-	// Slice containing bytes.
-	v4 := []byte{
-		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
-		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
-		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
-		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
-		0x31, 0x32,
-	}
-	v4Len := fmt.Sprintf("%d", len(v4))
-	v4Cap := fmt.Sprintf("%d", cap(v4))
-	nv4 := (*[]byte)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "[]uint8"
-	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
-		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
-		"  |............... |\n" +
-		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
-		"  |!\"#$%&'()*+,-./0|\n" +
-		" 00000020  31 32                                           " +
-		"  |12|\n}"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
-	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
-	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
-
-	// Nil slice.
-	v5 := []int(nil)
-	nv5 := (*[]int)(nil)
-	pv5 := &v5
-	v5Addr := fmt.Sprintf("%p", pv5)
-	pv5Addr := fmt.Sprintf("%p", &pv5)
-	v5t := "[]int"
-	v5s := "<nil>"
-	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
-	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
-	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
-	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
-}
-
-func addStringDumpTests() {
-	// Standard string.
-	v := "test"
-	vLen := fmt.Sprintf("%d", len(v))
-	nv := (*string)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "string"
-	vs := "(len=" + vLen + ") \"test\""
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-}
-
-func addInterfaceDumpTests() {
-	// Nil interface.
-	var v interface{}
-	nv := (*interface{})(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "interface {}"
-	vs := "<nil>"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Sub-interface.
-	v2 := interface{}(uint16(65535))
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uint16"
-	v2s := "65535"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-}
-
-func addMapDumpTests() {
-	// Map with string keys and int vals.
-	k := "one"
-	kk := "two"
-	m := map[string]int{k: 1, kk: 2}
-	klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
-	kkLen := fmt.Sprintf("%d", len(kk))
-	mLen := fmt.Sprintf("%d", len(m))
-	nilMap := map[string]int(nil)
-	nm := (*map[string]int)(nil)
-	pm := &m
-	mAddr := fmt.Sprintf("%p", pm)
-	pmAddr := fmt.Sprintf("%p", &pm)
-	mt := "map[string]int"
-	mt1 := "string"
-	mt2 := "int"
-	ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
-		"\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
-		") \"two\": (" + mt2 + ") 2\n}"
-	ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
-		"\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
-		") \"one\": (" + mt2 + ") 1\n}"
-	addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
-	addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
-		"(*"+mt+")("+mAddr+")("+ms2+")\n")
-	addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
-		"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
-	addDumpTest(nm, "(*"+mt+")(<nil>)\n")
-	addDumpTest(nilMap, "("+mt+") <nil>\n")
-
-	// Map with custom formatter type on pointer receiver only keys and vals.
-	k2 := pstringer("one")
-	v2 := pstringer("1")
-	m2 := map[pstringer]pstringer{k2: v2}
-	k2Len := fmt.Sprintf("%d", len(k2))
-	v2Len := fmt.Sprintf("%d", len(v2))
-	m2Len := fmt.Sprintf("%d", len(m2))
-	nilMap2 := map[pstringer]pstringer(nil)
-	nm2 := (*map[pstringer]pstringer)(nil)
-	pm2 := &m2
-	m2Addr := fmt.Sprintf("%p", pm2)
-	pm2Addr := fmt.Sprintf("%p", &pm2)
-	m2t := "map[spew_test.pstringer]spew_test.pstringer"
-	m2t1 := "spew_test.pstringer"
-	m2t2 := "spew_test.pstringer"
-	m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
-		"stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
-	if spew.UnsafeDisabled {
-		m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len +
-			") " + "\"one\": (" + m2t2 + ") (len=" + v2Len +
-			") \"1\"\n}"
-	}
-	addDumpTest(m2, "("+m2t+") "+m2s+"\n")
-	addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
-	addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
-	addDumpTest(nm2, "(*"+m2t+")(<nil>)\n")
-	addDumpTest(nilMap2, "("+m2t+") <nil>\n")
-
-	// Map with interface keys and values.
-	k3 := "one"
-	k3Len := fmt.Sprintf("%d", len(k3))
-	m3 := map[interface{}]interface{}{k3: 1}
-	m3Len := fmt.Sprintf("%d", len(m3))
-	nilMap3 := map[interface{}]interface{}(nil)
-	nm3 := (*map[interface{}]interface{})(nil)
-	pm3 := &m3
-	m3Addr := fmt.Sprintf("%p", pm3)
-	pm3Addr := fmt.Sprintf("%p", &pm3)
-	m3t := "map[interface {}]interface {}"
-	m3t1 := "string"
-	m3t2 := "int"
-	m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
-		"\"one\": (" + m3t2 + ") 1\n}"
-	addDumpTest(m3, "("+m3t+") "+m3s+"\n")
-	addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
-	addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
-	addDumpTest(nm3, "(*"+m3t+")(<nil>)\n")
-	addDumpTest(nilMap3, "("+m3t+") <nil>\n")
-
-	// Map with nil interface value.
-	k4 := "nil"
-	k4Len := fmt.Sprintf("%d", len(k4))
-	m4 := map[string]interface{}{k4: nil}
-	m4Len := fmt.Sprintf("%d", len(m4))
-	nilMap4 := map[string]interface{}(nil)
-	nm4 := (*map[string]interface{})(nil)
-	pm4 := &m4
-	m4Addr := fmt.Sprintf("%p", pm4)
-	pm4Addr := fmt.Sprintf("%p", &pm4)
-	m4t := "map[string]interface {}"
-	m4t1 := "string"
-	m4t2 := "interface {}"
-	m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
-		" \"nil\": (" + m4t2 + ") <nil>\n}"
-	addDumpTest(m4, "("+m4t+") "+m4s+"\n")
-	addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
-	addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
-	addDumpTest(nm4, "(*"+m4t+")(<nil>)\n")
-	addDumpTest(nilMap4, "("+m4t+") <nil>\n")
-}
-
-func addStructDumpTests() {
-	// Struct with primitives.
-	type s1 struct {
-		a int8
-		b uint8
-	}
-	v := s1{127, 255}
-	nv := (*s1)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.s1"
-	vt2 := "int8"
-	vt3 := "uint8"
-	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Struct that contains another struct.
-	type s2 struct {
-		s1 s1
-		b  bool
-	}
-	v2 := s2{s1{127, 255}, true}
-	nv2 := (*s2)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.s2"
-	v2t2 := "spew_test.s1"
-	v2t3 := "int8"
-	v2t4 := "uint8"
-	v2t5 := "bool"
-	v2s := "{\n s1: (" + v2t2 + ") {\n  a: (" + v2t3 + ") 127,\n  b: (" +
-		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-
-	// Struct that contains custom type with Stringer pointer interface via both
-	// exported and unexported fields.
-	type s3 struct {
-		s pstringer
-		S pstringer
-	}
-	v3 := s3{"test", "test2"}
-	nv3 := (*s3)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "spew_test.s3"
-	v3t2 := "spew_test.pstringer"
-	v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
-		") (len=5) stringer test2\n}"
-	v3sp := v3s
-	if spew.UnsafeDisabled {
-		v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
-			v3t2 + ") (len=5) \"test2\"\n}"
-		v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
-			v3t2 + ") (len=5) stringer test2\n}"
-	}
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-
-	// Struct that contains embedded struct and field to same struct.
-	e := embed{"embedstr"}
-	eLen := fmt.Sprintf("%d", len("embedstr"))
-	v4 := embedwrap{embed: &e, e: &e}
-	nv4 := (*embedwrap)(nil)
-	pv4 := &v4
-	eAddr := fmt.Sprintf("%p", &e)
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "spew_test.embedwrap"
-	v4t2 := "spew_test.embed"
-	v4t3 := "string"
-	v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n  a: (" + v4t3 +
-		") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
-		")(" + eAddr + ")({\n  a: (" + v4t3 + ") (len=" + eLen + ")" +
-		" \"embedstr\"\n })\n}"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
-	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
-	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
-}
-
-func addUintptrDumpTests() {
-	// Null pointer.
-	v := uintptr(0)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "uintptr"
-	vs := "<nil>"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-
-	// Address of real variable.
-	i := 1
-	v2 := uintptr(unsafe.Pointer(&i))
-	nv2 := (*uintptr)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uintptr"
-	v2s := fmt.Sprintf("%p", &i)
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-}
-
-func addUnsafePointerDumpTests() {
-	// Null pointer.
-	v := unsafe.Pointer(uintptr(0))
-	nv := (*unsafe.Pointer)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "unsafe.Pointer"
-	vs := "<nil>"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Address of real variable.
-	i := 1
-	v2 := unsafe.Pointer(&i)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "unsafe.Pointer"
-	v2s := fmt.Sprintf("%p", &i)
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-}
-
-func addChanDumpTests() {
-	// Nil channel.
-	var v chan int
-	pv := &v
-	nv := (*chan int)(nil)
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "chan int"
-	vs := "<nil>"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Real channel.
-	v2 := make(chan int)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "chan int"
-	v2s := fmt.Sprintf("%p", v2)
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-}
-
-func addFuncDumpTests() {
-	// Function with no params and no returns.
-	v := addIntDumpTests
-	nv := (*func())(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "func()"
-	vs := fmt.Sprintf("%p", v)
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-
-	// Function with param and no returns.
-	v2 := TestDump
-	nv2 := (*func(*testing.T))(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "func(*testing.T)"
-	v2s := fmt.Sprintf("%p", v2)
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
-	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
-
-	// Function with multiple params and multiple returns.
-	var v3 = func(i int, s string) (b bool, err error) {
-		return true, nil
-	}
-	nv3 := (*func(int, string) (bool, error))(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "func(int, string) (bool, error)"
-	v3s := fmt.Sprintf("%p", v3)
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
-	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
-}
-
-func addCircularDumpTests() {
-	// Struct that is circular through self referencing.
-	type circular struct {
-		c *circular
-	}
-	v := circular{nil}
-	v.c = &v
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.circular"
-	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n  c: (*" + vt + ")(" +
-		vAddr + ")(<already shown>)\n })\n}"
-	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
-
-	// Structs that are circular through cross referencing.
-	v2 := xref1{nil}
-	ts2 := xref2{&v2}
-	v2.ps2 = &ts2
-	pv2 := &v2
-	ts2Addr := fmt.Sprintf("%p", &ts2)
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.xref1"
-	v2t2 := "spew_test.xref2"
-	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
-		")(" + v2Addr + ")({\n   ps2: (*" + v2t2 + ")(" + ts2Addr +
-		")(<already shown>)\n  })\n })\n}"
-	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
-		")(" + v2Addr + ")(<already shown>)\n })\n}"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
-	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
-
-	// Structs that are indirectly circular.
-	v3 := indirCir1{nil}
-	tic2 := indirCir2{nil}
-	tic3 := indirCir3{&v3}
-	tic2.ps3 = &tic3
-	v3.ps2 = &tic2
-	pv3 := &v3
-	tic2Addr := fmt.Sprintf("%p", &tic2)
-	tic3Addr := fmt.Sprintf("%p", &tic3)
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "spew_test.indirCir1"
-	v3t2 := "spew_test.indirCir2"
-	v3t3 := "spew_test.indirCir3"
-	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
-		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
-		")({\n    ps2: (*" + v3t2 + ")(" + tic2Addr +
-		")(<already shown>)\n   })\n  })\n })\n}"
-	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
-		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
-		")(<already shown>)\n  })\n })\n}"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
-	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
-	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
-}
-
-func addPanicDumpTests() {
-	// Type that panics in its Stringer interface.
-	v := panicer(127)
-	nv := (*panicer)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.panicer"
-	vs := "(PANIC=test panic)127"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-}
-
-func addErrorDumpTests() {
-	// Type that has a custom Error interface.
-	v := customError(127)
-	nv := (*customError)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.customError"
-	vs := "error: 127"
-	addDumpTest(v, "("+vt+") "+vs+"\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
-	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
-}
-
-// TestDump executes all of the tests described by dumpTests.
-func TestDump(t *testing.T) {
-	// Setup tests.
-	addIntDumpTests()
-	addUintDumpTests()
-	addBoolDumpTests()
-	addFloatDumpTests()
-	addComplexDumpTests()
-	addArrayDumpTests()
-	addSliceDumpTests()
-	addStringDumpTests()
-	addInterfaceDumpTests()
-	addMapDumpTests()
-	addStructDumpTests()
-	addUintptrDumpTests()
-	addUnsafePointerDumpTests()
-	addChanDumpTests()
-	addFuncDumpTests()
-	addCircularDumpTests()
-	addPanicDumpTests()
-	addErrorDumpTests()
-	addCgoDumpTests()
-
-	t.Logf("Running %d tests", len(dumpTests))
-	for i, test := range dumpTests {
-		buf := new(bytes.Buffer)
-		spew.Fdump(buf, test.in)
-		s := buf.String()
-		if testFailed(s, test.wants) {
-			t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
-			continue
-		}
-	}
-}
-
-func TestDumpSortedKeys(t *testing.T) {
-	cfg := spew.ConfigState{SortKeys: true}
-	s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
-	expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " +
-		"\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " +
-		"(len=1) \"3\"\n" +
-		"}\n"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2})
-	expected = "(map[spew_test.stringer]int) (len=3) {\n" +
-		"(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" +
-		"(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" +
-		"(spew_test.stringer) (len=1) stringer 3: (int) 3\n" +
-		"}\n"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
-	expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
-		"(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" +
-		"(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" +
-		"(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" +
-		"}\n"
-	if spew.UnsafeDisabled {
-		expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
-			"(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" +
-			"(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" +
-			"(spew_test.pstringer) (len=1) \"3\": (int) 3\n" +
-			"}\n"
-	}
-	if s != expected {
-		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
-	expected = "(map[spew_test.customError]int) (len=3) {\n" +
-		"(spew_test.customError) error: 1: (int) 1,\n" +
-		"(spew_test.customError) error: 2: (int) 2,\n" +
-		"(spew_test.customError) error: 3: (int) 3\n" +
-		"}\n"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
-	}
-
-}

+ 0 - 99
vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go

@@ -1,99 +0,0 @@
-// Copyright (c) 2013-2016 Dave Collins <[email protected]>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when both cgo is supported and "-tags testcgo" is added to the go test
-// command line.  This means the cgo tests are only added (and hence run) when
-// specifially requested.  This configuration is used because spew itself
-// does not require cgo to run even though it does handle certain cgo types
-// specially.  Rather than forcing all clients to require cgo and an external
-// C compiler just to run the tests, this scheme makes them optional.
-// +build cgo,testcgo
-
-package spew_test
-
-import (
-	"fmt"
-
-	"github.com/davecgh/go-spew/spew/testdata"
-)
-
-func addCgoDumpTests() {
-	// C char pointer.
-	v := testdata.GetCgoCharPointer()
-	nv := testdata.GetCgoNullCharPointer()
-	pv := &v
-	vcAddr := fmt.Sprintf("%p", v)
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "*testdata._Ctype_char"
-	vs := "116"
-	addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
-	addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
-	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
-	addDumpTest(nv, "("+vt+")(<nil>)\n")
-
-	// C char array.
-	v2, v2l, v2c := testdata.GetCgoCharArray()
-	v2Len := fmt.Sprintf("%d", v2l)
-	v2Cap := fmt.Sprintf("%d", v2c)
-	v2t := "[6]testdata._Ctype_char"
-	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
-		"{\n 00000000  74 65 73 74 32 00                               " +
-		"  |test2.|\n}"
-	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
-
-	// C unsigned char array.
-	v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
-	v3Len := fmt.Sprintf("%d", v3l)
-	v3Cap := fmt.Sprintf("%d", v3c)
-	v3t := "[6]testdata._Ctype_unsignedchar"
-	v3t2 := "[6]testdata._Ctype_uchar"
-	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
-		"{\n 00000000  74 65 73 74 33 00                               " +
-		"  |test3.|\n}"
-	addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
-
-	// C signed char array.
-	v4, v4l, v4c := testdata.GetCgoSignedCharArray()
-	v4Len := fmt.Sprintf("%d", v4l)
-	v4Cap := fmt.Sprintf("%d", v4c)
-	v4t := "[6]testdata._Ctype_schar"
-	v4t2 := "testdata._Ctype_schar"
-	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
-		"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
-		") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
-		") 0\n}"
-	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
-
-	// C uint8_t array.
-	v5, v5l, v5c := testdata.GetCgoUint8tArray()
-	v5Len := fmt.Sprintf("%d", v5l)
-	v5Cap := fmt.Sprintf("%d", v5c)
-	v5t := "[6]testdata._Ctype_uint8_t"
-	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
-		"{\n 00000000  74 65 73 74 35 00                               " +
-		"  |test5.|\n}"
-	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
-
-	// C typedefed unsigned char array.
-	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
-	v6Len := fmt.Sprintf("%d", v6l)
-	v6Cap := fmt.Sprintf("%d", v6c)
-	v6t := "[6]testdata._Ctype_custom_uchar_t"
-	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
-		"{\n 00000000  74 65 73 74 36 00                               " +
-		"  |test6.|\n}"
-	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
-}

+ 0 - 26
vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go

@@ -1,26 +0,0 @@
-// Copyright (c) 2013 Dave Collins <[email protected]>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when either cgo is not supported or "-tags testcgo" is not added to the go
-// test command line.  This file intentionally does not setup any cgo tests in
-// this scenario.
-// +build !cgo !testcgo
-
-package spew_test
-
-func addCgoDumpTests() {
-	// Don't add any tests for cgo since this file is only compiled when
-	// there should not be any cgo tests.
-}

+ 0 - 226
vendor/github.com/davecgh/go-spew/spew/example_test.go

@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew_test
-
-import (
-	"fmt"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-type Flag int
-
-const (
-	flagOne Flag = iota
-	flagTwo
-)
-
-var flagStrings = map[Flag]string{
-	flagOne: "flagOne",
-	flagTwo: "flagTwo",
-}
-
-func (f Flag) String() string {
-	if s, ok := flagStrings[f]; ok {
-		return s
-	}
-	return fmt.Sprintf("Unknown flag (%d)", int(f))
-}
-
-type Bar struct {
-	data uintptr
-}
-
-type Foo struct {
-	unexportedField Bar
-	ExportedField   map[interface{}]interface{}
-}
-
-// This example demonstrates how to use Dump to dump variables to stdout.
-func ExampleDump() {
-	// The following package level declarations are assumed for this example:
-	/*
-		type Flag int
-
-		const (
-			flagOne Flag = iota
-			flagTwo
-		)
-
-		var flagStrings = map[Flag]string{
-			flagOne: "flagOne",
-			flagTwo: "flagTwo",
-		}
-
-		func (f Flag) String() string {
-			if s, ok := flagStrings[f]; ok {
-				return s
-			}
-			return fmt.Sprintf("Unknown flag (%d)", int(f))
-		}
-
-		type Bar struct {
-			data uintptr
-		}
-
-		type Foo struct {
-			unexportedField Bar
-			ExportedField   map[interface{}]interface{}
-		}
-	*/
-
-	// Setup some sample data structures for the example.
-	bar := Bar{uintptr(0)}
-	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
-	f := Flag(5)
-	b := []byte{
-		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
-		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
-		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
-		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
-		0x31, 0x32,
-	}
-
-	// Dump!
-	spew.Dump(s1, f, b)
-
-	// Output:
-	// (spew_test.Foo) {
-	//  unexportedField: (spew_test.Bar) {
-	//   data: (uintptr) <nil>
-	//  },
-	//  ExportedField: (map[interface {}]interface {}) (len=1) {
-	//   (string) (len=3) "one": (bool) true
-	//  }
-	// }
-	// (spew_test.Flag) Unknown flag (5)
-	// ([]uint8) (len=34 cap=34) {
-	//  00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
-	//  00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
-	//  00000020  31 32                                             |12|
-	// }
-	//
-}
-
-// This example demonstrates how to use Printf to display a variable with a
-// format string and inline formatting.
-func ExamplePrintf() {
-	// Create a double pointer to a uint 8.
-	ui8 := uint8(5)
-	pui8 := &ui8
-	ppui8 := &pui8
-
-	// Create a circular data type.
-	type circular struct {
-		ui8 uint8
-		c   *circular
-	}
-	c := circular{ui8: 1}
-	c.c = &c
-
-	// Print!
-	spew.Printf("ppui8: %v\n", ppui8)
-	spew.Printf("circular: %v\n", c)
-
-	// Output:
-	// ppui8: <**>5
-	// circular: {1 <*>{1 <*><shown>}}
-}
-
-// This example demonstrates how to use a ConfigState.
-func ExampleConfigState() {
-	// Modify the indent level of the ConfigState only.  The global
-	// configuration is not modified.
-	scs := spew.ConfigState{Indent: "\t"}
-
-	// Output using the ConfigState instance.
-	v := map[string]int{"one": 1}
-	scs.Printf("v: %v\n", v)
-	scs.Dump(v)
-
-	// Output:
-	// v: map[one:1]
-	// (map[string]int) (len=1) {
-	// 	(string) (len=3) "one": (int) 1
-	// }
-}
-
-// This example demonstrates how to use ConfigState.Dump to dump variables to
-// stdout
-func ExampleConfigState_Dump() {
-	// See the top-level Dump example for details on the types used in this
-	// example.
-
-	// Create two ConfigState instances with different indentation.
-	scs := spew.ConfigState{Indent: "\t"}
-	scs2 := spew.ConfigState{Indent: " "}
-
-	// Setup some sample data structures for the example.
-	bar := Bar{uintptr(0)}
-	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
-
-	// Dump using the ConfigState instances.
-	scs.Dump(s1)
-	scs2.Dump(s1)
-
-	// Output:
-	// (spew_test.Foo) {
-	// 	unexportedField: (spew_test.Bar) {
-	// 		data: (uintptr) <nil>
-	// 	},
-	// 	ExportedField: (map[interface {}]interface {}) (len=1) {
-	//		(string) (len=3) "one": (bool) true
-	// 	}
-	// }
-	// (spew_test.Foo) {
-	//  unexportedField: (spew_test.Bar) {
-	//   data: (uintptr) <nil>
-	//  },
-	//  ExportedField: (map[interface {}]interface {}) (len=1) {
-	//   (string) (len=3) "one": (bool) true
-	//  }
-	// }
-	//
-}
-
-// This example demonstrates how to use ConfigState.Printf to display a variable
-// with a format string and inline formatting.
-func ExampleConfigState_Printf() {
-	// See the top-level Dump example for details on the types used in this
-	// example.
-
-	// Create two ConfigState instances and modify the method handling of the
-	// first ConfigState only.
-	scs := spew.NewDefaultConfig()
-	scs2 := spew.NewDefaultConfig()
-	scs.DisableMethods = true
-
-	// Alternatively
-	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
-	// scs2 := spew.ConfigState{Indent: " "}
-
-	// This is of type Flag which implements a Stringer and has raw value 1.
-	f := flagTwo
-
-	// Dump using the ConfigState instances.
-	scs.Printf("f: %v\n", f)
-	scs2.Printf("f: %v\n", f)
-
-	// Output:
-	// f: 1
-	// f: flagTwo
-}

+ 0 - 1558
vendor/github.com/davecgh/go-spew/spew/format_test.go

@@ -1,1558 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Test Summary:
-NOTE: For each test, a nil pointer, a single pointer and double pointer to the
-base test element are also tested to ensure proper indirection across all types.
-
-- Max int8, int16, int32, int64, int
-- Max uint8, uint16, uint32, uint64, uint
-- Boolean true and false
-- Standard complex64 and complex128
-- Array containing standard ints
-- Array containing type with custom formatter on pointer receiver only
-- Array containing interfaces
-- Slice containing standard float32 values
-- Slice containing type with custom formatter on pointer receiver only
-- Slice containing interfaces
-- Nil slice
-- Standard string
-- Nil interface
-- Sub-interface
-- Map with string keys and int vals
-- Map with custom formatter type on pointer receiver only keys and vals
-- Map with interface keys and values
-- Map with nil interface value
-- Struct with primitives
-- Struct that contains another struct
-- Struct that contains custom type with Stringer pointer interface via both
-  exported and unexported fields
-- Struct that contains embedded struct and field to same struct
-- Uintptr to 0 (null pointer)
-- Uintptr address of real variable
-- Unsafe.Pointer to 0 (null pointer)
-- Unsafe.Pointer to address of real variable
-- Nil channel
-- Standard int channel
-- Function with no params and no returns
-- Function with param and no returns
-- Function with multiple params and multiple returns
-- Struct that is circular through self referencing
-- Structs that are circular through cross referencing
-- Structs that are indirectly circular
-- Type that panics in its Stringer interface
-- Type that has a custom Error interface
-- %x passthrough with uint
-- %#x passthrough with uint
-- %f passthrough with precision
-- %f passthrough with width and precision
-- %d passthrough with width
-- %q passthrough with string
-*/
-
-package spew_test
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-	"unsafe"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-// formatterTest is used to describe a test to be performed against NewFormatter.
-type formatterTest struct {
-	format string
-	in     interface{}
-	wants  []string
-}
-
-// formatterTests houses all of the tests to be performed against NewFormatter.
-var formatterTests = make([]formatterTest, 0)
-
-// addFormatterTest is a helper method to append the passed input and desired
-// result to formatterTests.
-func addFormatterTest(format string, in interface{}, wants ...string) {
-	test := formatterTest{format, in, wants}
-	formatterTests = append(formatterTests, test)
-}
-
-func addIntFormatterTests() {
-	// Max int8.
-	v := int8(127)
-	nv := (*int8)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "int8"
-	vs := "127"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Max int16.
-	v2 := int16(32767)
-	nv2 := (*int16)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "int16"
-	v2s := "32767"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Max int32.
-	v3 := int32(2147483647)
-	nv3 := (*int32)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "int32"
-	v3s := "2147483647"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-
-	// Max int64.
-	v4 := int64(9223372036854775807)
-	nv4 := (*int64)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "int64"
-	v4s := "9223372036854775807"
-	addFormatterTest("%v", v4, v4s)
-	addFormatterTest("%v", pv4, "<*>"+v4s)
-	addFormatterTest("%v", &pv4, "<**>"+v4s)
-	addFormatterTest("%v", nv4, "<nil>")
-	addFormatterTest("%+v", v4, v4s)
-	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
-	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
-	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
-	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
-	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
-
-	// Max int.
-	v5 := int(2147483647)
-	nv5 := (*int)(nil)
-	pv5 := &v5
-	v5Addr := fmt.Sprintf("%p", pv5)
-	pv5Addr := fmt.Sprintf("%p", &pv5)
-	v5t := "int"
-	v5s := "2147483647"
-	addFormatterTest("%v", v5, v5s)
-	addFormatterTest("%v", pv5, "<*>"+v5s)
-	addFormatterTest("%v", &pv5, "<**>"+v5s)
-	addFormatterTest("%v", nv5, "<nil>")
-	addFormatterTest("%+v", v5, v5s)
-	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
-	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
-	addFormatterTest("%+v", nv5, "<nil>")
-	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
-	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
-	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
-	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
-	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
-	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
-	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
-	addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
-}
-
-func addUintFormatterTests() {
-	// Max uint8.
-	v := uint8(255)
-	nv := (*uint8)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "uint8"
-	vs := "255"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Max uint16.
-	v2 := uint16(65535)
-	nv2 := (*uint16)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uint16"
-	v2s := "65535"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Max uint32.
-	v3 := uint32(4294967295)
-	nv3 := (*uint32)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "uint32"
-	v3s := "4294967295"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-
-	// Max uint64.
-	v4 := uint64(18446744073709551615)
-	nv4 := (*uint64)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "uint64"
-	v4s := "18446744073709551615"
-	addFormatterTest("%v", v4, v4s)
-	addFormatterTest("%v", pv4, "<*>"+v4s)
-	addFormatterTest("%v", &pv4, "<**>"+v4s)
-	addFormatterTest("%v", nv4, "<nil>")
-	addFormatterTest("%+v", v4, v4s)
-	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
-	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
-	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
-	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
-	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
-
-	// Max uint.
-	v5 := uint(4294967295)
-	nv5 := (*uint)(nil)
-	pv5 := &v5
-	v5Addr := fmt.Sprintf("%p", pv5)
-	pv5Addr := fmt.Sprintf("%p", &pv5)
-	v5t := "uint"
-	v5s := "4294967295"
-	addFormatterTest("%v", v5, v5s)
-	addFormatterTest("%v", pv5, "<*>"+v5s)
-	addFormatterTest("%v", &pv5, "<**>"+v5s)
-	addFormatterTest("%v", nv5, "<nil>")
-	addFormatterTest("%+v", v5, v5s)
-	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
-	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
-	addFormatterTest("%+v", nv5, "<nil>")
-	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
-	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
-	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
-	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
-	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
-	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
-	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
-	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
-}
-
-func addBoolFormatterTests() {
-	// Boolean true.
-	v := bool(true)
-	nv := (*bool)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "bool"
-	vs := "true"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Boolean false.
-	v2 := bool(false)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "bool"
-	v2s := "false"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-}
-
-func addFloatFormatterTests() {
-	// Standard float32.
-	v := float32(3.1415)
-	nv := (*float32)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "float32"
-	vs := "3.1415"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Standard float64.
-	v2 := float64(3.1415926)
-	nv2 := (*float64)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "float64"
-	v2s := "3.1415926"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-}
-
-func addComplexFormatterTests() {
-	// Standard complex64.
-	v := complex(float32(6), -2)
-	nv := (*complex64)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "complex64"
-	vs := "(6-2i)"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Standard complex128.
-	v2 := complex(float64(-6), 2)
-	nv2 := (*complex128)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "complex128"
-	v2s := "(-6+2i)"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-}
-
-func addArrayFormatterTests() {
-	// Array containing standard ints.
-	v := [3]int{1, 2, 3}
-	nv := (*[3]int)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "[3]int"
-	vs := "[1 2 3]"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Array containing type with custom formatter on pointer receiver only.
-	v2 := [3]pstringer{"1", "2", "3"}
-	nv2 := (*[3]pstringer)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "[3]spew_test.pstringer"
-	v2sp := "[stringer 1 stringer 2 stringer 3]"
-	v2s := v2sp
-	if spew.UnsafeDisabled {
-		v2s = "[1 2 3]"
-	}
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2sp)
-	addFormatterTest("%v", &pv2, "<**>"+v2sp)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Array containing interfaces.
-	v3 := [3]interface{}{"one", int(2), uint(3)}
-	nv3 := (*[3]interface{})(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "[3]interface {}"
-	v3t2 := "string"
-	v3t3 := "int"
-	v3t4 := "uint"
-	v3s := "[one 2 3]"
-	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
-}
-
-func addSliceFormatterTests() {
-	// Slice containing standard float32 values.
-	v := []float32{3.14, 6.28, 12.56}
-	nv := (*[]float32)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "[]float32"
-	vs := "[3.14 6.28 12.56]"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Slice containing type with custom formatter on pointer receiver only.
-	v2 := []pstringer{"1", "2", "3"}
-	nv2 := (*[]pstringer)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "[]spew_test.pstringer"
-	v2s := "[stringer 1 stringer 2 stringer 3]"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Slice containing interfaces.
-	v3 := []interface{}{"one", int(2), uint(3), nil}
-	nv3 := (*[]interface{})(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "[]interface {}"
-	v3t2 := "string"
-	v3t3 := "int"
-	v3t4 := "uint"
-	v3t5 := "interface {}"
-	v3s := "[one 2 3 <nil>]"
-	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
-		")<nil>]"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
-
-	// Nil slice.
-	var v4 []int
-	nv4 := (*[]int)(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "[]int"
-	v4s := "<nil>"
-	addFormatterTest("%v", v4, v4s)
-	addFormatterTest("%v", pv4, "<*>"+v4s)
-	addFormatterTest("%v", &pv4, "<**>"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%+v", v4, v4s)
-	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
-	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
-	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
-	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
-	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
-	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
-}
-
-func addStringFormatterTests() {
-	// Standard string.
-	v := "test"
-	nv := (*string)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "string"
-	vs := "test"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-}
-
-func addInterfaceFormatterTests() {
-	// Nil interface.
-	var v interface{}
-	nv := (*interface{})(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "interface {}"
-	vs := "<nil>"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Sub-interface.
-	v2 := interface{}(uint16(65535))
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uint16"
-	v2s := "65535"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-}
-
-func addMapFormatterTests() {
-	// Map with string keys and int vals.
-	v := map[string]int{"one": 1, "two": 2}
-	nilMap := map[string]int(nil)
-	nv := (*map[string]int)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "map[string]int"
-	vs := "map[one:1 two:2]"
-	vs2 := "map[two:2 one:1]"
-	addFormatterTest("%v", v, vs, vs2)
-	addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
-	addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
-	addFormatterTest("%+v", nilMap, "<nil>")
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs, vs2)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
-		"<**>("+pvAddr+"->"+vAddr+")"+vs2)
-	addFormatterTest("%+v", nilMap, "<nil>")
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
-	addFormatterTest("%#v", nilMap, "("+vt+")"+"<nil>")
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
-		"(*"+vt+")("+vAddr+")"+vs2)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
-		"(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
-	addFormatterTest("%#+v", nilMap, "("+vt+")"+"<nil>")
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Map with custom formatter type on pointer receiver only keys and vals.
-	v2 := map[pstringer]pstringer{"one": "1"}
-	nv2 := (*map[pstringer]pstringer)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "map[spew_test.pstringer]spew_test.pstringer"
-	v2s := "map[stringer one:stringer 1]"
-	if spew.UnsafeDisabled {
-		v2s = "map[one:1]"
-	}
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Map with interface keys and values.
-	v3 := map[interface{}]interface{}{"one": 1}
-	nv3 := (*map[interface{}]interface{})(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "map[interface {}]interface {}"
-	v3t1 := "string"
-	v3t2 := "int"
-	v3s := "map[one:1]"
-	v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
-	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
-
-	// Map with nil interface value
-	v4 := map[string]interface{}{"nil": nil}
-	nv4 := (*map[string]interface{})(nil)
-	pv4 := &v4
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "map[string]interface {}"
-	v4t1 := "interface {}"
-	v4s := "map[nil:<nil>]"
-	v4s2 := "map[nil:(" + v4t1 + ")<nil>]"
-	addFormatterTest("%v", v4, v4s)
-	addFormatterTest("%v", pv4, "<*>"+v4s)
-	addFormatterTest("%v", &pv4, "<**>"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%+v", v4, v4s)
-	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
-	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
-	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
-	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
-	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
-	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
-	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
-	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
-	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
-}
-
-func addStructFormatterTests() {
-	// Struct with primitives.
-	type s1 struct {
-		a int8
-		b uint8
-	}
-	v := s1{127, 255}
-	nv := (*s1)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.s1"
-	vt2 := "int8"
-	vt3 := "uint8"
-	vs := "{127 255}"
-	vs2 := "{a:127 b:255}"
-	vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs2)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs3)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs3)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Struct that contains another struct.
-	type s2 struct {
-		s1 s1
-		b  bool
-	}
-	v2 := s2{s1{127, 255}, true}
-	nv2 := (*s2)(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.s2"
-	v2t2 := "spew_test.s1"
-	v2t3 := "int8"
-	v2t4 := "uint8"
-	v2t5 := "bool"
-	v2s := "{{127 255} true}"
-	v2s2 := "{s1:{a:127 b:255} b:true}"
-	v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
-		v2t5 + ")true}"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s2)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Struct that contains custom type with Stringer pointer interface via both
-	// exported and unexported fields.
-	type s3 struct {
-		s pstringer
-		S pstringer
-	}
-	v3 := s3{"test", "test2"}
-	nv3 := (*s3)(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "spew_test.s3"
-	v3t2 := "spew_test.pstringer"
-	v3s := "{stringer test stringer test2}"
-	v3sp := v3s
-	v3s2 := "{s:stringer test S:stringer test2}"
-	v3s2p := v3s2
-	v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
-	v3s3p := v3s3
-	if spew.UnsafeDisabled {
-		v3s = "{test test2}"
-		v3sp = "{test stringer test2}"
-		v3s2 = "{s:test S:test2}"
-		v3s2p = "{s:test S:stringer test2}"
-		v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}"
-		v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}"
-	}
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3sp)
-	addFormatterTest("%v", &pv3, "<**>"+v3sp)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s2)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s3)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p)
-	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
-
-	// Struct that contains embedded struct and field to same struct.
-	e := embed{"embedstr"}
-	v4 := embedwrap{embed: &e, e: &e}
-	nv4 := (*embedwrap)(nil)
-	pv4 := &v4
-	eAddr := fmt.Sprintf("%p", &e)
-	v4Addr := fmt.Sprintf("%p", pv4)
-	pv4Addr := fmt.Sprintf("%p", &pv4)
-	v4t := "spew_test.embedwrap"
-	v4t2 := "spew_test.embed"
-	v4t3 := "string"
-	v4s := "{<*>{embedstr} <*>{embedstr}}"
-	v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr +
-		"){a:embedstr}}"
-	v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 +
-		"){a:(" + v4t3 + ")embedstr}}"
-	v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 +
-		")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}"
-	addFormatterTest("%v", v4, v4s)
-	addFormatterTest("%v", pv4, "<*>"+v4s)
-	addFormatterTest("%v", &pv4, "<**>"+v4s)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%+v", v4, v4s2)
-	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2)
-	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2)
-	addFormatterTest("%+v", nv4, "<nil>")
-	addFormatterTest("%#v", v4, "("+v4t+")"+v4s3)
-	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3)
-	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3)
-	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
-	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4)
-	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4)
-	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4)
-	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
-}
-
-func addUintptrFormatterTests() {
-	// Null pointer.
-	v := uintptr(0)
-	nv := (*uintptr)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "uintptr"
-	vs := "<nil>"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Address of real variable.
-	i := 1
-	v2 := uintptr(unsafe.Pointer(&i))
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "uintptr"
-	v2s := fmt.Sprintf("%p", &i)
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-}
-
-func addUnsafePointerFormatterTests() {
-	// Null pointer.
-	v := unsafe.Pointer(uintptr(0))
-	nv := (*unsafe.Pointer)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "unsafe.Pointer"
-	vs := "<nil>"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Address of real variable.
-	i := 1
-	v2 := unsafe.Pointer(&i)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "unsafe.Pointer"
-	v2s := fmt.Sprintf("%p", &i)
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-}
-
-func addChanFormatterTests() {
-	// Nil channel.
-	var v chan int
-	pv := &v
-	nv := (*chan int)(nil)
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "chan int"
-	vs := "<nil>"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Real channel.
-	v2 := make(chan int)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "chan int"
-	v2s := fmt.Sprintf("%p", v2)
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-}
-
-func addFuncFormatterTests() {
-	// Function with no params and no returns.
-	v := addIntFormatterTests
-	nv := (*func())(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "func()"
-	vs := fmt.Sprintf("%p", v)
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-
-	// Function with param and no returns.
-	v2 := TestFormatter
-	nv2 := (*func(*testing.T))(nil)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "func(*testing.T)"
-	v2s := fmt.Sprintf("%p", v2)
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s)
-	addFormatterTest("%v", &pv2, "<**>"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%+v", v2, v2s)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%+v", nv2, "<nil>")
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
-	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
-	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
-
-	// Function with multiple params and multiple returns.
-	var v3 = func(i int, s string) (b bool, err error) {
-		return true, nil
-	}
-	nv3 := (*func(int, string) (bool, error))(nil)
-	pv3 := &v3
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "func(int, string) (bool, error)"
-	v3s := fmt.Sprintf("%p", v3)
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s)
-	addFormatterTest("%v", &pv3, "<**>"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%+v", v3, v3s)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%+v", nv3, "<nil>")
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
-	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
-	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
-}
-
-func addCircularFormatterTests() {
-	// Struct that is circular through self referencing.
-	type circular struct {
-		c *circular
-	}
-	v := circular{nil}
-	v.c = &v
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.circular"
-	vs := "{<*>{<*><shown>}}"
-	vs2 := "{<*><shown>}"
-	vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")<shown>}}"
-	vs4 := "{c:<*>(" + vAddr + ")<shown>}"
-	vs5 := "{c:(*" + vt + "){c:(*" + vt + ")<shown>}}"
-	vs6 := "{c:(*" + vt + ")<shown>}"
-	vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr +
-		")<shown>}}"
-	vs8 := "{c:(*" + vt + ")(" + vAddr + ")<shown>}"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs2)
-	addFormatterTest("%v", &pv, "<**>"+vs2)
-	addFormatterTest("%+v", v, vs3)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4)
-	addFormatterTest("%#v", v, "("+vt+")"+vs5)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs6)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6)
-	addFormatterTest("%#+v", v, "("+vt+")"+vs7)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8)
-
-	// Structs that are circular through cross referencing.
-	v2 := xref1{nil}
-	ts2 := xref2{&v2}
-	v2.ps2 = &ts2
-	pv2 := &v2
-	ts2Addr := fmt.Sprintf("%p", &ts2)
-	v2Addr := fmt.Sprintf("%p", pv2)
-	pv2Addr := fmt.Sprintf("%p", &pv2)
-	v2t := "spew_test.xref1"
-	v2t2 := "spew_test.xref2"
-	v2s := "{<*>{<*>{<*><shown>}}}"
-	v2s2 := "{<*>{<*><shown>}}"
-	v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" +
-		ts2Addr + ")<shown>}}}"
-	v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")<shown>}}"
-	v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 +
-		")<shown>}}}"
-	v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")<shown>}}"
-	v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
-		")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr +
-		")<shown>}}}"
-	v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
-		")(" + v2Addr + ")<shown>}}"
-	addFormatterTest("%v", v2, v2s)
-	addFormatterTest("%v", pv2, "<*>"+v2s2)
-	addFormatterTest("%v", &pv2, "<**>"+v2s2)
-	addFormatterTest("%+v", v2, v2s3)
-	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4)
-	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4)
-	addFormatterTest("%#v", v2, "("+v2t+")"+v2s5)
-	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6)
-	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6)
-	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7)
-	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8)
-	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8)
-
-	// Structs that are indirectly circular.
-	v3 := indirCir1{nil}
-	tic2 := indirCir2{nil}
-	tic3 := indirCir3{&v3}
-	tic2.ps3 = &tic3
-	v3.ps2 = &tic2
-	pv3 := &v3
-	tic2Addr := fmt.Sprintf("%p", &tic2)
-	tic3Addr := fmt.Sprintf("%p", &tic3)
-	v3Addr := fmt.Sprintf("%p", pv3)
-	pv3Addr := fmt.Sprintf("%p", &pv3)
-	v3t := "spew_test.indirCir1"
-	v3t2 := "spew_test.indirCir2"
-	v3t3 := "spew_test.indirCir3"
-	v3s := "{<*>{<*>{<*>{<*><shown>}}}}"
-	v3s2 := "{<*>{<*>{<*><shown>}}}"
-	v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
-		v3Addr + "){ps2:<*>(" + tic2Addr + ")<shown>}}}}"
-	v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
-		v3Addr + ")<shown>}}}"
-	v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
-		"){ps2:(*" + v3t2 + ")<shown>}}}}"
-	v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
-		")<shown>}}}"
-	v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
-		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 +
-		")(" + tic2Addr + ")<shown>}}}}"
-	v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
-		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")<shown>}}}"
-	addFormatterTest("%v", v3, v3s)
-	addFormatterTest("%v", pv3, "<*>"+v3s2)
-	addFormatterTest("%v", &pv3, "<**>"+v3s2)
-	addFormatterTest("%+v", v3, v3s3)
-	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4)
-	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4)
-	addFormatterTest("%#v", v3, "("+v3t+")"+v3s5)
-	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6)
-	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6)
-	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7)
-	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8)
-	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8)
-}
-
-func addPanicFormatterTests() {
-	// Type that panics in its Stringer interface.
-	v := panicer(127)
-	nv := (*panicer)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.panicer"
-	vs := "(PANIC=test panic)127"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-}
-
-func addErrorFormatterTests() {
-	// Type that has a custom Error interface.
-	v := customError(127)
-	nv := (*customError)(nil)
-	pv := &v
-	vAddr := fmt.Sprintf("%p", pv)
-	pvAddr := fmt.Sprintf("%p", &pv)
-	vt := "spew_test.customError"
-	vs := "error: 127"
-	addFormatterTest("%v", v, vs)
-	addFormatterTest("%v", pv, "<*>"+vs)
-	addFormatterTest("%v", &pv, "<**>"+vs)
-	addFormatterTest("%v", nv, "<nil>")
-	addFormatterTest("%+v", v, vs)
-	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
-	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%+v", nv, "<nil>")
-	addFormatterTest("%#v", v, "("+vt+")"+vs)
-	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
-	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
-	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
-	addFormatterTest("%#+v", v, "("+vt+")"+vs)
-	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
-	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
-	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
-}
-
-func addPassthroughFormatterTests() {
-	// %x passthrough with uint.
-	v := uint(4294967295)
-	pv := &v
-	vAddr := fmt.Sprintf("%x", pv)
-	pvAddr := fmt.Sprintf("%x", &pv)
-	vs := "ffffffff"
-	addFormatterTest("%x", v, vs)
-	addFormatterTest("%x", pv, vAddr)
-	addFormatterTest("%x", &pv, pvAddr)
-
-	// %#x passthrough with uint.
-	v2 := int(2147483647)
-	pv2 := &v2
-	v2Addr := fmt.Sprintf("%#x", pv2)
-	pv2Addr := fmt.Sprintf("%#x", &pv2)
-	v2s := "0x7fffffff"
-	addFormatterTest("%#x", v2, v2s)
-	addFormatterTest("%#x", pv2, v2Addr)
-	addFormatterTest("%#x", &pv2, pv2Addr)
-
-	// %f passthrough with precision.
-	addFormatterTest("%.2f", 3.1415, "3.14")
-	addFormatterTest("%.3f", 3.1415, "3.142")
-	addFormatterTest("%.4f", 3.1415, "3.1415")
-
-	// %f passthrough with width and precision.
-	addFormatterTest("%5.2f", 3.1415, " 3.14")
-	addFormatterTest("%6.3f", 3.1415, " 3.142")
-	addFormatterTest("%7.4f", 3.1415, " 3.1415")
-
-	// %d passthrough with width.
-	addFormatterTest("%3d", 127, "127")
-	addFormatterTest("%4d", 127, " 127")
-	addFormatterTest("%5d", 127, "  127")
-
-	// %q passthrough with string.
-	addFormatterTest("%q", "test", "\"test\"")
-}
-
-// TestFormatter executes all of the tests described by formatterTests.
-func TestFormatter(t *testing.T) {
-	// Setup tests.
-	addIntFormatterTests()
-	addUintFormatterTests()
-	addBoolFormatterTests()
-	addFloatFormatterTests()
-	addComplexFormatterTests()
-	addArrayFormatterTests()
-	addSliceFormatterTests()
-	addStringFormatterTests()
-	addInterfaceFormatterTests()
-	addMapFormatterTests()
-	addStructFormatterTests()
-	addUintptrFormatterTests()
-	addUnsafePointerFormatterTests()
-	addChanFormatterTests()
-	addFuncFormatterTests()
-	addCircularFormatterTests()
-	addPanicFormatterTests()
-	addErrorFormatterTests()
-	addPassthroughFormatterTests()
-
-	t.Logf("Running %d tests", len(formatterTests))
-	for i, test := range formatterTests {
-		buf := new(bytes.Buffer)
-		spew.Fprintf(buf, test.format, test.in)
-		s := buf.String()
-		if testFailed(s, test.wants) {
-			t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s,
-				stringizeWants(test.wants))
-			continue
-		}
-	}
-}
-
-type testStruct struct {
-	x int
-}
-
-func (ts testStruct) String() string {
-	return fmt.Sprintf("ts.%d", ts.x)
-}
-
-type testStructP struct {
-	x int
-}
-
-func (ts *testStructP) String() string {
-	return fmt.Sprintf("ts.%d", ts.x)
-}
-
-func TestPrintSortedKeys(t *testing.T) {
-	cfg := spew.ConfigState{SortKeys: true}
-	s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"})
-	expected := "map[1:1 2:2 3:3]"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch 1:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2})
-	expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch 2:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
-	expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
-	if spew.UnsafeDisabled {
-		expected = "map[1:1 2:2 3:3]"
-	}
-	if s != expected {
-		t.Errorf("Sorted keys mismatch 3:\n  %v %v", s, expected)
-	}
-
-	s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
-	expected = "map[ts.1:1 ts.2:2 ts.3:3]"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch 4:\n  %v %v", s, expected)
-	}
-
-	if !spew.UnsafeDisabled {
-		s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
-		expected = "map[ts.1:1 ts.2:2 ts.3:3]"
-		if s != expected {
-			t.Errorf("Sorted keys mismatch 5:\n  %v %v", s, expected)
-		}
-	}
-
-	s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
-	expected = "map[error: 1:1 error: 2:2 error: 3:3]"
-	if s != expected {
-		t.Errorf("Sorted keys mismatch 6:\n  %v %v", s, expected)
-	}
-}

+ 0 - 87
vendor/github.com/davecgh/go-spew/spew/internal_test.go

@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-This test file is part of the spew package rather than than the spew_test
-package because it needs access to internals to properly test certain cases
-which are not possible via the public interface since they should never happen.
-*/
-
-package spew
-
-import (
-	"bytes"
-	"reflect"
-	"testing"
-)
-
-// dummyFmtState implements a fake fmt.State to use for testing invalid
-// reflect.Value handling.  This is necessary because the fmt package catches
-// invalid values before invoking the formatter on them.
-type dummyFmtState struct {
-	bytes.Buffer
-}
-
-func (dfs *dummyFmtState) Flag(f int) bool {
-	if f == int('+') {
-		return true
-	}
-	return false
-}
-
-func (dfs *dummyFmtState) Precision() (int, bool) {
-	return 0, false
-}
-
-func (dfs *dummyFmtState) Width() (int, bool) {
-	return 0, false
-}
-
-// TestInvalidReflectValue ensures the dump and formatter code handles an
-// invalid reflect value properly.  This needs access to internal state since it
-// should never happen in real code and therefore can't be tested via the public
-// API.
-func TestInvalidReflectValue(t *testing.T) {
-	i := 1
-
-	// Dump invalid reflect value.
-	v := new(reflect.Value)
-	buf := new(bytes.Buffer)
-	d := dumpState{w: buf, cs: &Config}
-	d.dump(*v)
-	s := buf.String()
-	want := "<invalid>"
-	if s != want {
-		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
-	}
-	i++
-
-	// Formatter invalid reflect value.
-	buf2 := new(dummyFmtState)
-	f := formatState{value: *v, cs: &Config, fs: buf2}
-	f.format(*v)
-	s = buf2.String()
-	want = "<invalid>"
-	if s != want {
-		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
-	}
-}
-
-// SortValues makes the internal sortValues function available to the test
-// package.
-func SortValues(values []reflect.Value, cs *ConfigState) {
-	sortValues(values, cs)
-}

+ 0 - 102
vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go

@@ -1,102 +0,0 @@
-// Copyright (c) 2013-2016 Dave Collins <[email protected]>
-
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line.  The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
-
-/*
-This test file is part of the spew package rather than than the spew_test
-package because it needs access to internals to properly test certain cases
-which are not possible via the public interface since they should never happen.
-*/
-
-package spew
-
-import (
-	"bytes"
-	"reflect"
-	"testing"
-	"unsafe"
-)
-
-// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
-// the maximum kind value which does not exist.  This is needed to test the
-// fallback code which punts to the standard fmt library for new types that
-// might get added to the language.
-func changeKind(v *reflect.Value, readOnly bool) {
-	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
-	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
-	if readOnly {
-		*rvf |= flagRO
-	} else {
-		*rvf &= ^uintptr(flagRO)
-	}
-}
-
-// TestAddedReflectValue tests functionaly of the dump and formatter code which
-// falls back to the standard fmt library for new types that might get added to
-// the language.
-func TestAddedReflectValue(t *testing.T) {
-	i := 1
-
-	// Dump using a reflect.Value that is exported.
-	v := reflect.ValueOf(int8(5))
-	changeKind(&v, false)
-	buf := new(bytes.Buffer)
-	d := dumpState{w: buf, cs: &Config}
-	d.dump(v)
-	s := buf.String()
-	want := "(int8) 5"
-	if s != want {
-		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
-	}
-	i++
-
-	// Dump using a reflect.Value that is not exported.
-	changeKind(&v, true)
-	buf.Reset()
-	d.dump(v)
-	s = buf.String()
-	want = "(int8) <int8 Value>"
-	if s != want {
-		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
-	}
-	i++
-
-	// Formatter using a reflect.Value that is exported.
-	changeKind(&v, false)
-	buf2 := new(dummyFmtState)
-	f := formatState{value: v, cs: &Config, fs: buf2}
-	f.format(v)
-	s = buf2.String()
-	want = "5"
-	if s != want {
-		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
-	}
-	i++
-
-	// Formatter using a reflect.Value that is not exported.
-	changeKind(&v, true)
-	buf2.Reset()
-	f = formatState{value: v, cs: &Config, fs: buf2}
-	f.format(v)
-	s = buf2.String()
-	want = "<int8 Value>"
-	if s != want {
-		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
-	}
-}

+ 0 - 320
vendor/github.com/davecgh/go-spew/spew/spew_test.go

@@ -1,320 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew_test
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-// spewFunc is used to identify which public function of the spew package or
-// ConfigState a test applies to.
-type spewFunc int
-
-const (
-	fCSFdump spewFunc = iota
-	fCSFprint
-	fCSFprintf
-	fCSFprintln
-	fCSPrint
-	fCSPrintln
-	fCSSdump
-	fCSSprint
-	fCSSprintf
-	fCSSprintln
-	fCSErrorf
-	fCSNewFormatter
-	fErrorf
-	fFprint
-	fFprintln
-	fPrint
-	fPrintln
-	fSdump
-	fSprint
-	fSprintf
-	fSprintln
-)
-
-// Map of spewFunc values to names for pretty printing.
-var spewFuncStrings = map[spewFunc]string{
-	fCSFdump:        "ConfigState.Fdump",
-	fCSFprint:       "ConfigState.Fprint",
-	fCSFprintf:      "ConfigState.Fprintf",
-	fCSFprintln:     "ConfigState.Fprintln",
-	fCSSdump:        "ConfigState.Sdump",
-	fCSPrint:        "ConfigState.Print",
-	fCSPrintln:      "ConfigState.Println",
-	fCSSprint:       "ConfigState.Sprint",
-	fCSSprintf:      "ConfigState.Sprintf",
-	fCSSprintln:     "ConfigState.Sprintln",
-	fCSErrorf:       "ConfigState.Errorf",
-	fCSNewFormatter: "ConfigState.NewFormatter",
-	fErrorf:         "spew.Errorf",
-	fFprint:         "spew.Fprint",
-	fFprintln:       "spew.Fprintln",
-	fPrint:          "spew.Print",
-	fPrintln:        "spew.Println",
-	fSdump:          "spew.Sdump",
-	fSprint:         "spew.Sprint",
-	fSprintf:        "spew.Sprintf",
-	fSprintln:       "spew.Sprintln",
-}
-
-func (f spewFunc) String() string {
-	if s, ok := spewFuncStrings[f]; ok {
-		return s
-	}
-	return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
-}
-
-// spewTest is used to describe a test to be performed against the public
-// functions of the spew package or ConfigState.
-type spewTest struct {
-	cs     *spew.ConfigState
-	f      spewFunc
-	format string
-	in     interface{}
-	want   string
-}
-
-// spewTests houses the tests to be performed against the public functions of
-// the spew package and ConfigState.
-//
-// These tests are only intended to ensure the public functions are exercised
-// and are intentionally not exhaustive of types.  The exhaustive type
-// tests are handled in the dump and format tests.
-var spewTests []spewTest
-
-// redirStdout is a helper function to return the standard output from f as a
-// byte slice.
-func redirStdout(f func()) ([]byte, error) {
-	tempFile, err := ioutil.TempFile("", "ss-test")
-	if err != nil {
-		return nil, err
-	}
-	fileName := tempFile.Name()
-	defer os.Remove(fileName) // Ignore error
-
-	origStdout := os.Stdout
-	os.Stdout = tempFile
-	f()
-	os.Stdout = origStdout
-	tempFile.Close()
-
-	return ioutil.ReadFile(fileName)
-}
-
-func initSpewTests() {
-	// Config states with various settings.
-	scsDefault := spew.NewDefaultConfig()
-	scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
-	scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
-	scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
-	scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
-	scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
-	scsNoCap := &spew.ConfigState{DisableCapacities: true}
-
-	// Variables for tests on types which implement Stringer interface with and
-	// without a pointer receiver.
-	ts := stringer("test")
-	tps := pstringer("test")
-
-	type ptrTester struct {
-		s *struct{}
-	}
-	tptr := &ptrTester{s: &struct{}{}}
-
-	// depthTester is used to test max depth handling for structs, array, slices
-	// and maps.
-	type depthTester struct {
-		ic    indirCir1
-		arr   [1]string
-		slice []string
-		m     map[string]int
-	}
-	dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
-		map[string]int{"one": 1}}
-
-	// Variable for tests on types which implement error interface.
-	te := customError(10)
-
-	spewTests = []spewTest{
-		{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
-		{scsDefault, fCSFprint, "", int16(32767), "32767"},
-		{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
-		{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
-		{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
-		{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
-		{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
-		{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
-		{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
-		{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
-		{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
-		{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
-		{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
-		{scsDefault, fFprint, "", float32(3.14), "3.14"},
-		{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
-		{scsDefault, fPrint, "", true, "true"},
-		{scsDefault, fPrintln, "", false, "false\n"},
-		{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
-		{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
-		{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
-		{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
-		{scsNoMethods, fCSFprint, "", ts, "test"},
-		{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
-		{scsNoMethods, fCSFprint, "", tps, "test"},
-		{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
-		{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
-		{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
-		{scsNoPmethods, fCSFprint, "", tps, "test"},
-		{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
-		{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
-		{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
-			" ic: (spew_test.indirCir1) {\n  <max depth reached>\n },\n" +
-			" arr: ([1]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
-			" slice: ([]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
-			" m: (map[string]int) (len=1) {\n  <max depth reached>\n }\n}\n"},
-		{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
-		{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
-			"(len=4) (stringer test) \"test\"\n"},
-		{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
-		{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
-			"(error: 10) 10\n"},
-		{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
-		{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
-		{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
-		{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
-	}
-}
-
-// TestSpew executes all of the tests described by spewTests.
-func TestSpew(t *testing.T) {
-	initSpewTests()
-
-	t.Logf("Running %d tests", len(spewTests))
-	for i, test := range spewTests {
-		buf := new(bytes.Buffer)
-		switch test.f {
-		case fCSFdump:
-			test.cs.Fdump(buf, test.in)
-
-		case fCSFprint:
-			test.cs.Fprint(buf, test.in)
-
-		case fCSFprintf:
-			test.cs.Fprintf(buf, test.format, test.in)
-
-		case fCSFprintln:
-			test.cs.Fprintln(buf, test.in)
-
-		case fCSPrint:
-			b, err := redirStdout(func() { test.cs.Print(test.in) })
-			if err != nil {
-				t.Errorf("%v #%d %v", test.f, i, err)
-				continue
-			}
-			buf.Write(b)
-
-		case fCSPrintln:
-			b, err := redirStdout(func() { test.cs.Println(test.in) })
-			if err != nil {
-				t.Errorf("%v #%d %v", test.f, i, err)
-				continue
-			}
-			buf.Write(b)
-
-		case fCSSdump:
-			str := test.cs.Sdump(test.in)
-			buf.WriteString(str)
-
-		case fCSSprint:
-			str := test.cs.Sprint(test.in)
-			buf.WriteString(str)
-
-		case fCSSprintf:
-			str := test.cs.Sprintf(test.format, test.in)
-			buf.WriteString(str)
-
-		case fCSSprintln:
-			str := test.cs.Sprintln(test.in)
-			buf.WriteString(str)
-
-		case fCSErrorf:
-			err := test.cs.Errorf(test.format, test.in)
-			buf.WriteString(err.Error())
-
-		case fCSNewFormatter:
-			fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
-
-		case fErrorf:
-			err := spew.Errorf(test.format, test.in)
-			buf.WriteString(err.Error())
-
-		case fFprint:
-			spew.Fprint(buf, test.in)
-
-		case fFprintln:
-			spew.Fprintln(buf, test.in)
-
-		case fPrint:
-			b, err := redirStdout(func() { spew.Print(test.in) })
-			if err != nil {
-				t.Errorf("%v #%d %v", test.f, i, err)
-				continue
-			}
-			buf.Write(b)
-
-		case fPrintln:
-			b, err := redirStdout(func() { spew.Println(test.in) })
-			if err != nil {
-				t.Errorf("%v #%d %v", test.f, i, err)
-				continue
-			}
-			buf.Write(b)
-
-		case fSdump:
-			str := spew.Sdump(test.in)
-			buf.WriteString(str)
-
-		case fSprint:
-			str := spew.Sprint(test.in)
-			buf.WriteString(str)
-
-		case fSprintf:
-			str := spew.Sprintf(test.format, test.in)
-			buf.WriteString(str)
-
-		case fSprintln:
-			str := spew.Sprintln(test.in)
-			buf.WriteString(str)
-
-		default:
-			t.Errorf("%v #%d unrecognized function", test.f, i)
-			continue
-		}
-		s := buf.String()
-		if test.want != s {
-			t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
-			continue
-		}
-	}
-}

+ 0 - 82
vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go

@@ -1,82 +0,0 @@
-// Copyright (c) 2013 Dave Collins <[email protected]>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when both cgo is supported and "-tags testcgo" is added to the go test
-// command line.  This code should really only be in the dumpcgo_test.go file,
-// but unfortunately Go will not allow cgo in test files, so this is a
-// workaround to allow cgo types to be tested.  This configuration is used
-// because spew itself does not require cgo to run even though it does handle
-// certain cgo types specially.  Rather than forcing all clients to require cgo
-// and an external C compiler just to run the tests, this scheme makes them
-// optional.
-// +build cgo,testcgo
-
-package testdata
-
-/*
-#include <stdint.h>
-typedef unsigned char custom_uchar_t;
-
-char            *ncp = 0;
-char            *cp = "test";
-char             ca[6] = {'t', 'e', 's', 't', '2', '\0'};
-unsigned char    uca[6] = {'t', 'e', 's', 't', '3', '\0'};
-signed char      sca[6] = {'t', 'e', 's', 't', '4', '\0'};
-uint8_t          ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
-custom_uchar_t   tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
-*/
-import "C"
-
-// GetCgoNullCharPointer returns a null char pointer via cgo.  This is only
-// used for tests.
-func GetCgoNullCharPointer() interface{} {
-	return C.ncp
-}
-
-// GetCgoCharPointer returns a char pointer via cgo.  This is only used for
-// tests.
-func GetCgoCharPointer() interface{} {
-	return C.cp
-}
-
-// GetCgoCharArray returns a char array via cgo and the array's len and cap.
-// This is only used for tests.
-func GetCgoCharArray() (interface{}, int, int) {
-	return C.ca, len(C.ca), cap(C.ca)
-}
-
-// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
-// array's len and cap.  This is only used for tests.
-func GetCgoUnsignedCharArray() (interface{}, int, int) {
-	return C.uca, len(C.uca), cap(C.uca)
-}
-
-// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
-// and cap.  This is only used for tests.
-func GetCgoSignedCharArray() (interface{}, int, int) {
-	return C.sca, len(C.sca), cap(C.sca)
-}
-
-// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
-// cap.  This is only used for tests.
-func GetCgoUint8tArray() (interface{}, int, int) {
-	return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
-}
-
-// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
-// cgo and the array's len and cap.  This is only used for tests.
-func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
-	return C.tuca, len(C.tuca), cap(C.tuca)
-}

+ 0 - 61
vendor/github.com/davecgh/go-spew/test_coverage.txt

@@ -1,61 +0,0 @@
-
-github.com/davecgh/go-spew/spew/dump.go		 dumpState.dump			 100.00% (88/88)
-github.com/davecgh/go-spew/spew/format.go	 formatState.format		 100.00% (82/82)
-github.com/davecgh/go-spew/spew/format.go	 formatState.formatPtr		 100.00% (52/52)
-github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpPtr		 100.00% (44/44)
-github.com/davecgh/go-spew/spew/dump.go		 dumpState.dumpSlice		 100.00% (39/39)
-github.com/davecgh/go-spew/spew/common.go	 handleMethods			 100.00% (30/30)
-github.com/davecgh/go-spew/spew/common.go	 printHexPtr			 100.00% (18/18)
-github.com/davecgh/go-spew/spew/common.go	 unsafeReflectValue		 100.00% (13/13)
-github.com/davecgh/go-spew/spew/format.go	 formatState.constructOrigFormat 100.00% (12/12)
-github.com/davecgh/go-spew/spew/dump.go		 fdump				 100.00% (11/11)
-github.com/davecgh/go-spew/spew/format.go	 formatState.Format		 100.00% (11/11)
-github.com/davecgh/go-spew/spew/common.go	 init				 100.00% (10/10)
-github.com/davecgh/go-spew/spew/common.go	 printComplex			 100.00% (9/9)
-github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Less		 100.00% (8/8)
-github.com/davecgh/go-spew/spew/format.go	 formatState.buildDefaultFormat	 100.00% (7/7)
-github.com/davecgh/go-spew/spew/format.go	 formatState.unpackValue	 100.00% (5/5)
-github.com/davecgh/go-spew/spew/dump.go		 dumpState.indent		 100.00% (4/4)
-github.com/davecgh/go-spew/spew/common.go	 catchPanic			 100.00% (4/4)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.convertArgs	 100.00% (4/4)
-github.com/davecgh/go-spew/spew/spew.go		 convertArgs			 100.00% (4/4)
-github.com/davecgh/go-spew/spew/format.go	 newFormatter			 100.00% (3/3)
-github.com/davecgh/go-spew/spew/dump.go		 Sdump				 100.00% (3/3)
-github.com/davecgh/go-spew/spew/common.go	 printBool			 100.00% (3/3)
-github.com/davecgh/go-spew/spew/common.go	 sortValues			 100.00% (3/3)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sdump		 100.00% (3/3)
-github.com/davecgh/go-spew/spew/dump.go		 dumpState.unpackValue		 100.00% (3/3)
-github.com/davecgh/go-spew/spew/spew.go		 Printf				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Println			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Sprint				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Sprintf			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Sprintln			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/common.go	 printFloat			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 NewDefaultConfig		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/common.go	 printInt			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/common.go	 printUint			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Len		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/common.go	 valuesSorter.Swap		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Errorf		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprint		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintf		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fprintln		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Print		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Printf		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Println		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprint		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintf		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Sprintln		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.NewFormatter	 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Fdump		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/config.go	 ConfigState.Dump		 100.00% (1/1)
-github.com/davecgh/go-spew/spew/dump.go		 Fdump				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/dump.go		 Dump				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Fprintln			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/format.go	 NewFormatter			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Errorf				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Fprint				 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Fprintf			 100.00% (1/1)
-github.com/davecgh/go-spew/spew/spew.go		 Print				 100.00% (1/1)
-github.com/davecgh/go-spew/spew			 ------------------------------- 100.00% (505/505)
-

+ 0 - 151
vendor/github.com/golang/geo/README.md

@@ -1,151 +0,0 @@
-# Overview
-
-This is a library for manipulating geometric shapes. Unlike many geometry
-libraries, S2 is primarily designed to work with _spherical geometry_, i.e.,
-shapes drawn on a sphere rather than on a planar 2D map. (In fact, the name S2
-is derived from the mathematical notation for the unit sphere.) This makes it
-especially suitable for working with geographic data.
-
-The library consists of:
-
-*   Basic representations of angles, intervals, latitude-longitude points, unit
-    3D vectors, and conversions among them.
-
-*   Various shapes over the unit sphere, such as spherical caps ("discs"),
-    latitude-longitude rectangles, polylines, and polygons. These are
-    collectively known as "regions".
-
-*   Support for spatial indexing of collections of geometry, and algorithms for
-    testing containment, finding nearby objects, finding intersections, etc.
-
-*   A hierarchical decomposition of the sphere into regions called "cells". The
-    hierarchy starts with the six faces of a projected cube and recursively
-    subdivides them in a quadtree-like fashion.
-
-*   The ability to approximate arbitrary regions as a collection of cells. This
-    is useful for building inverted indexes that allow queries over arbitrarily
-    shaped regions.
-
-The implementations attempt to be precise both in terms of mathematical
-definitions (e.g. whether regions include their boundaries, representations of
-empty and full regions) and numerical accuracy (e.g. avoiding cancellation
-error).
-
-Note that the intent of this library is to represent geometry as a mathematical
-abstraction. For example, although the unit sphere is obviously a useful
-approximation for the Earth's surface, functions that are specifically related
-to geography are not part of the core library (e.g. easting/northing
-conversions, ellipsoid approximations, geodetic vs. geocentric coordinates,
-etc).
-
-See http://godoc.org/github.com/golang/geo for specific package documentation.
-
-For an analogous library in C++, see
-https://code.google.com/archive/p/s2-geometry-library/, and in Java, see
-https://github.com/google/s2-geometry-library-java
-
-# Status of the Go Library
-
-This library is principally a port of [the C++ S2
-library](https://code.google.com/archive/p/s2-geometry-library), adapting to Go
-idioms where it makes sense. We detail the progress of this port below relative
-to that C++ library.
-
-## [ℝ¹](https://godoc.org/github.com/golang/geo/r1) - One-dimensional Cartesian coordinates
-
-Full parity with C++.
-
-## [ℝ²](https://godoc.org/github.com/golang/geo/r2) - Two-dimensional Cartesian coordinates
-
-Full parity with C++.
-
-## [ℝ³](https://godoc.org/github.com/golang/geo/r3) - Three-dimensional Cartesian coordinates
-
-Full parity with C++.
-
-## [S¹](https://godoc.org/github.com/golang/geo/s1) - Circular Geometry
-
-**Complete**
-
-*   ChordAngle
-
-**Mostly complete**
-
-*   Angle - Missing Arithmetic methods, Trigonometric methods, Conversion
-    to/from s2.Point, s2.LatLng, convenience methods from E5/E6/E7
-*   Interval - Missing ClampPoint, Complement, ComplementCenter,
-    HaussdorfDistance
-
-## [S²](https://godoc.org/github.com/golang/geo/s2) - Spherical Geometry
-
-Approximately ~40% complete.
-
-**Complete** These files have full parity with the C++ implementation.
-
-*   Cap
-*   CellID
-*   LatLng
-*   matrix3x3
-*   Metric
-*   PaddedCell
-*   Region
-*   s2stuv.go (s2coords.h in C++) - This file is a collection of helper and
-    conversion methods to and from ST-space, UV-space, and XYZ-space.
-
-**Mostly Complete** Files that have almost all of the features of the original
-C++ code, and are reasonably complete enough to use in live code. Up to date
-listing of the incomplete methods are documented at the end of each file.
-
-*   Cell - Missing Subdivide, BoundUV, DistanceToEdge, ChordDistance.
-*   CellUnion - Missing Union, Intersection, etc.
-*   Edgeutil - Missing Distance methods, LongitudePruner, FaceSegments,
-    ClosestPair.
-*   Point - Missing TurningAngle, Rotate, some Area methods.
-*   Polyline - Missing Projection, Intersects, Interpolate, etc.
-*   Rect (AKA s2latlngrect in C++) - Missing Centroid, Distance,
-    InteriorContains.
-*   RegionCoverer - Missing FloodFill and SimpleCovering.
-*   s2_test.go (AKA s2testing and s2textformat in C++) - Missing
-    ConcentricLoopsPolygon and Fractal test shape generation. This file is a
-    collection of testing helper methods.
-
-**In Progress** Files that have some work done, but are probably not complete
-enough for general use in production code.
-
-*   Loop - Loop has basic skelton complete and some tendons, but missing most
-    things. Normalize, Invert, Area, Centroid, Projection, Distance, Contains,
-    Intersects, Union, etc. A significant portion of these are under review now.
-*   Polygon - Polygon is at the partial skeleton phase, the fields all exist,
-    and some basic methods are implemented, but it's missing almost everything.
-    Init with multiple loops, Area, Centroid, Distance, Projection,
-    Intersection, Union, Contains, Normalized, etc.
-*   PolylineSimplifier - Initial work has begun on this.
-*   s2predicates.go - This file is a collection of helper methods used by other
-    parts of the library.
-*   ShapeIndex - Currently has only the minimal skeleton pieces submitted, but
-    changes are out for review for the remainder of it.
-
-**Not Started Yet.** These files (and their associated unit tests) have
-dependencies on most of the In Progress files before they can begin to be
-started.
-
-*   BoundaryOperation
-*   Builder - This is a robust tool for creating the various Shape types from
-    collection of simpler S2 types.
-*   BuilderGraph
-*   BuilderLayers
-*   BuilderSnapFunctions
-*   ClosestEdgeQuery
-*   ClosestPointQuery
-*   ConvexHullQuery
-*   CrossingEdgeQuery
-*   EdgeTesselator
-*   PointCompression
-*   PointIndex
-*   PolygonBuilder
-*   RegionIntersection
-*   RegionUnion
-*   Projections
-*   ShapeUtil - Most of this will end up in s2_test.
-*   lexicon
-*   priorityqueuesequence

+ 13 - 15
vendor/github.com/golang/geo/r1/doc.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 /*
 /*
 Package r1 implements types and functions for working with geometry in ℝ¹.
 Package r1 implements types and functions for working with geometry in ℝ¹.

+ 13 - 15
vendor/github.com/golang/geo/r1/interval.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package r1
 package r1
 
 

+ 0 - 349
vendor/github.com/golang/geo/r1/interval_test.go

@@ -1,349 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package r1
-
-import (
-	"testing"
-)
-
-// Some standard intervals for use throughout the tests.
-var (
-	unit    = Interval{0, 1}
-	negunit = Interval{-1, 0}
-	half    = Interval{0.5, 0.5}
-	empty   = EmptyInterval()
-)
-
-func TestIsEmpty(t *testing.T) {
-	var zero Interval
-	if unit.IsEmpty() {
-		t.Errorf("%v should not be empty", unit)
-	}
-	if half.IsEmpty() {
-		t.Errorf("%v should not be empty", half)
-	}
-	if !empty.IsEmpty() {
-		t.Errorf("%v should be empty", empty)
-	}
-	if zero.IsEmpty() {
-		t.Errorf("zero Interval %v should not be empty", zero)
-	}
-}
-
-func TestCenter(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		want     float64
-	}{
-		{unit, 0.5},
-		{negunit, -0.5},
-		{half, 0.5},
-	}
-	for _, test := range tests {
-		got := test.interval.Center()
-		if got != test.want {
-			t.Errorf("%v.Center() = %v, want %v", test.interval, got, test.want)
-		}
-	}
-}
-
-func TestLength(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		want     float64
-	}{
-		{unit, 1},
-		{negunit, 1},
-		{half, 0},
-	}
-	for _, test := range tests {
-		if l := test.interval.Length(); l != test.want {
-			t.Errorf("%v.Length() = %v, want %v", test.interval, l, test.want)
-		}
-	}
-	if l := empty.Length(); l >= 0 {
-		t.Errorf("empty interval has non-negative length")
-	}
-}
-
-func TestIntervalContains(t *testing.T) {
-	tests := []struct {
-		interval         Interval
-		p                float64
-		contains         bool
-		interiorContains bool
-	}{
-		{
-			interval:         unit,
-			p:                0.5,
-			contains:         true,
-			interiorContains: true,
-		},
-		{
-			interval:         unit,
-			p:                0,
-			contains:         true,
-			interiorContains: false,
-		},
-		{
-			interval:         unit,
-			p:                1,
-			contains:         true,
-			interiorContains: false,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.interval.Contains(test.p); got != test.contains {
-			t.Errorf("%v.Contains(%v) = %t, want %t", test.interval, test.p, got, test.contains)
-		}
-		if got := test.interval.InteriorContains(test.p); got != test.interiorContains {
-			t.Errorf("%v.InteriorContains(%v) = %t, want %t", test.interval, test.p, got, test.interiorContains)
-		}
-	}
-}
-
-func TestIntervalOperations(t *testing.T) {
-	tests := []struct {
-		have               Interval
-		other              Interval
-		contains           bool
-		interiorContains   bool
-		intersects         bool
-		interiorIntersects bool
-	}{
-		{
-			have:               empty,
-			other:              empty,
-			contains:           true,
-			interiorContains:   true,
-			intersects:         false,
-			interiorIntersects: false,
-		},
-		{
-			have:               empty,
-			other:              unit,
-			contains:           false,
-			interiorContains:   false,
-			intersects:         false,
-			interiorIntersects: false,
-		},
-		{
-			have:               unit,
-			other:              half,
-			contains:           true,
-			interiorContains:   true,
-			intersects:         true,
-			interiorIntersects: true,
-		},
-		{
-			have:               unit,
-			other:              unit,
-			contains:           true,
-			interiorContains:   false,
-			intersects:         true,
-			interiorIntersects: true,
-		},
-		{
-			have:               unit,
-			other:              empty,
-			contains:           true,
-			interiorContains:   true,
-			intersects:         false,
-			interiorIntersects: false,
-		},
-		{
-			have:               unit,
-			other:              negunit,
-			contains:           false,
-			interiorContains:   false,
-			intersects:         true,
-			interiorIntersects: false,
-		},
-		{
-			have:               unit,
-			other:              Interval{0, 0.5},
-			contains:           true,
-			interiorContains:   false,
-			intersects:         true,
-			interiorIntersects: true,
-		},
-		{
-			have:               half,
-			other:              Interval{0, 0.5},
-			contains:           false,
-			interiorContains:   false,
-			intersects:         true,
-			interiorIntersects: false,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.ContainsInterval(test.other); got != test.contains {
-			t.Errorf("%v.ContainsInterval(%v) = %t, want %t", test.have, test.other, got, test.contains)
-		}
-		if got := test.have.InteriorContainsInterval(test.other); got != test.interiorContains {
-			t.Errorf("%v.InteriorContainsInterval(%v) = %t, want %t", test.have, test.other, got, test.interiorContains)
-		}
-		if got := test.have.Intersects(test.other); got != test.intersects {
-			t.Errorf("%v.Intersects(%v) = %t, want %t", test.have, test.other, got, test.intersects)
-		}
-		if got := test.have.InteriorIntersects(test.other); got != test.interiorIntersects {
-			t.Errorf("%v.InteriorIntersects(%v) = %t, want %t", test.have, test.other, got, test.interiorIntersects)
-		}
-	}
-}
-
-func TestIntersection(t *testing.T) {
-	tests := []struct {
-		x, y Interval
-		want Interval
-	}{
-		{unit, half, half},
-		{unit, negunit, Interval{0, 0}},
-		{negunit, half, empty},
-		{unit, empty, empty},
-		{empty, unit, empty},
-	}
-	for _, test := range tests {
-		if got := test.x.Intersection(test.y); !got.Equal(test.want) {
-			t.Errorf("%v.Intersection(%v) = %v, want equal to %v", test.x, test.y, got, test.want)
-		}
-	}
-}
-
-func TestUnion(t *testing.T) {
-	tests := []struct {
-		x, y Interval
-		want Interval
-	}{
-		{Interval{99, 100}, empty, Interval{99, 100}},
-		{empty, Interval{99, 100}, Interval{99, 100}},
-		{Interval{5, 3}, Interval{0, -2}, empty},
-		{Interval{0, -2}, Interval{5, 3}, empty},
-		{unit, unit, unit},
-		{unit, negunit, Interval{-1, 1}},
-		{negunit, unit, Interval{-1, 1}},
-		{half, unit, unit},
-	}
-	for _, test := range tests {
-		if got := test.x.Union(test.y); !got.Equal(test.want) {
-			t.Errorf("%v.Union(%v) = %v, want equal to %v", test.x, test.y, got, test.want)
-		}
-	}
-}
-
-func TestAddPoint(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		point    float64
-		want     Interval
-	}{
-		{empty, 5, Interval{5, 5}},
-		{Interval{5, 5}, -1, Interval{-1, 5}},
-		{Interval{-1, 5}, 0, Interval{-1, 5}},
-		{Interval{-1, 5}, 6, Interval{-1, 6}},
-	}
-	for _, test := range tests {
-		if got := test.interval.AddPoint(test.point); !got.Equal(test.want) {
-			t.Errorf("%v.AddPoint(%v) = %v, want equal to %v", test.interval, test.point, got, test.want)
-		}
-	}
-}
-
-func TestClampPoint(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		clamp    float64
-		want     float64
-	}{
-		{Interval{0.1, 0.4}, 0.3, 0.3},
-		{Interval{0.1, 0.4}, -7.0, 0.1},
-		{Interval{0.1, 0.4}, 0.6, 0.4},
-	}
-	for _, test := range tests {
-		if got := test.interval.ClampPoint(test.clamp); got != test.want {
-			t.Errorf("%v.ClampPoint(%v) = %v, want equal to %v", test.interval, test.clamp, got, test.want)
-		}
-	}
-}
-
-func TestExpanded(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		margin   float64
-		want     Interval
-	}{
-		{empty, 0.45, empty},
-		{unit, 0.5, Interval{-0.5, 1.5}},
-		{unit, -0.5, Interval{0.5, 0.5}},
-		{unit, -0.51, empty},
-	}
-	for _, test := range tests {
-		if got := test.interval.Expanded(test.margin); !got.Equal(test.want) {
-			t.Errorf("%v.Expanded(%v) = %v, want equal to %v", test.interval, test.margin, got, test.want)
-		}
-	}
-}
-
-func TestIntervalString(t *testing.T) {
-	i := Interval{2, 4.5}
-	if s, exp := i.String(), "[2.0000000, 4.5000000]"; s != exp {
-		t.Errorf("i.String() = %q, want %q", s, exp)
-	}
-}
-
-func TestApproxEqual(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		other    Interval
-		want     bool
-	}{
-		// Empty intervals.
-		{EmptyInterval(), EmptyInterval(), true},
-		{Interval{0, 0}, EmptyInterval(), true},
-		{EmptyInterval(), Interval{0, 0}, true},
-		{Interval{1, 1}, EmptyInterval(), true},
-		{EmptyInterval(), Interval{1, 1}, true},
-		{EmptyInterval(), Interval{0, 1}, false},
-		{EmptyInterval(), Interval{1, 1 + 2*epsilon}, true},
-
-		// Singleton intervals.
-		{Interval{1, 1}, Interval{1, 1}, true},
-		{Interval{1, 1}, Interval{1 - epsilon, 1 - epsilon}, true},
-		{Interval{1, 1}, Interval{1 + epsilon, 1 + epsilon}, true},
-		{Interval{1, 1}, Interval{1 - 3*epsilon, 1}, false},
-		{Interval{1, 1}, Interval{1, 1 + 3*epsilon}, false},
-		{Interval{1, 1}, Interval{1 - epsilon, 1 + epsilon}, true},
-		{Interval{0, 0}, Interval{1, 1}, false},
-
-		// Other intervals.
-		{Interval{1 - epsilon, 2 + epsilon}, Interval{1, 2}, false},
-		{Interval{1 + epsilon, 2 - epsilon}, Interval{1, 2}, true},
-		{Interval{1 - 3*epsilon, 2 + epsilon}, Interval{1, 2}, false},
-		{Interval{1 + 3*epsilon, 2 - epsilon}, Interval{1, 2}, false},
-		{Interval{1 - epsilon, 2 + 3*epsilon}, Interval{1, 2}, false},
-		{Interval{1 + epsilon, 2 - 3*epsilon}, Interval{1, 2}, false},
-	}
-
-	for _, test := range tests {
-		if got := test.interval.ApproxEqual(test.other); got != test.want {
-			t.Errorf("%v.ApproxEqual(%v) = %t, want %t",
-				test.interval, test.other, got, test.want)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/r2/doc.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 /*
 /*
 Package r2 implements types and functions for working with geometry in ℝ².
 Package r2 implements types and functions for working with geometry in ℝ².

+ 15 - 17
vendor/github.com/golang/geo/r2/rect.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package r2
 package r2
 
 
@@ -248,9 +246,9 @@ func (r Rect) Intersection(other Rect) Rect {
 	return Rect{xx, yy}
 	return Rect{xx, yy}
 }
 }
 
 
-// ApproxEquals returns true if the x- and y-intervals of the two rectangles are
+// ApproxEqual returns true if the x- and y-intervals of the two rectangles are
 // the same up to the given tolerance.
 // the same up to the given tolerance.
-func (r Rect) ApproxEquals(r2 Rect) bool {
+func (r Rect) ApproxEqual(r2 Rect) bool {
 	return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
 	return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
 }
 }
 
 

+ 0 - 476
vendor/github.com/golang/geo/r2/rect_test.go

@@ -1,476 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Most of the Rect methods have trivial implementations in terms of the
-// Interval class, so most of the testing is done in that unit test.
-
-package r2
-
-import (
-	"math"
-	"reflect"
-	"testing"
-
-	"github.com/golang/geo/r1"
-)
-
-var (
-	sw = Point{0, 0.25}
-	se = Point{0.5, 0.25}
-	ne = Point{0.5, 0.75}
-	nw = Point{0, 0.75}
-
-	empty   = EmptyRect()
-	rect    = RectFromPoints(sw, ne)
-	rectMid = RectFromPoints(Point{0.25, 0.5}, Point{0.25, 0.5})
-	rectSW  = RectFromPoints(sw, sw)
-	rectNE  = RectFromPoints(ne, ne)
-)
-
-func float64Eq(x, y float64) bool { return math.Abs(x-y) < 1e-14 }
-
-func pointsApproxEqual(a, b Point) bool {
-	return float64Eq(a.X, b.X) && float64Eq(a.Y, b.Y)
-}
-
-func TestOrtho(t *testing.T) {
-	tests := []struct {
-		p    Point
-		want Point
-	}{
-		{Point{0, 0}, Point{0, 0}},
-		{Point{0, 1}, Point{-1, 0}},
-		{Point{1, 1}, Point{-1, 1}},
-		{Point{-4, 7}, Point{-7, -4}},
-		{Point{1, math.Sqrt(3)}, Point{-math.Sqrt(3), 1}},
-	}
-	for _, test := range tests {
-		if got := test.p.Ortho(); !pointsApproxEqual(got, test.want) {
-			t.Errorf("%v.Ortho() = %v, want %v", test.p, got, test.want)
-		}
-	}
-}
-
-func TestDot(t *testing.T) {
-	tests := []struct {
-		p    Point
-		op   Point
-		want float64
-	}{
-		{Point{0, 0}, Point{0, 0}, 0},
-		{Point{0, 1}, Point{0, 0}, 0},
-		{Point{1, 1}, Point{4, 3}, 7},
-		{Point{-4, 7}, Point{1, 5}, 31},
-	}
-	for _, test := range tests {
-		if got := test.p.Dot(test.op); !float64Eq(got, test.want) {
-			t.Errorf("%v.Dot(%v) = %v, want %v", test.p, test.op, got, test.want)
-		}
-	}
-}
-
-func TestCross(t *testing.T) {
-	tests := []struct {
-		p    Point
-		op   Point
-		want float64
-	}{
-		{Point{0, 0}, Point{0, 0}, 0},
-		{Point{0, 1}, Point{0, 0}, 0},
-		{Point{1, 1}, Point{-1, -1}, 0},
-		{Point{1, 1}, Point{4, 3}, -1},
-		{Point{1, 5}, Point{-2, 3}, 13},
-	}
-
-	for _, test := range tests {
-		if got := test.p.Cross(test.op); !float64Eq(got, test.want) {
-			t.Errorf("%v.Cross(%v) = %v, want %v", test.p, test.op, got, test.want)
-		}
-	}
-}
-
-func TestNorm(t *testing.T) {
-	tests := []struct {
-		p    Point
-		want float64
-	}{
-		{Point{0, 0}, 0},
-		{Point{0, 1}, 1},
-		{Point{-1, 0}, 1},
-		{Point{3, 4}, 5},
-		{Point{3, -4}, 5},
-		{Point{2, 2}, 2 * math.Sqrt(2)},
-		{Point{1, math.Sqrt(3)}, 2},
-		{Point{29, 29 * math.Sqrt(3)}, 29 * 2},
-		{Point{1, 1e15}, 1e15},
-		{Point{1e14, math.MaxFloat32 - 1}, math.MaxFloat32},
-	}
-
-	for _, test := range tests {
-		if !float64Eq(test.p.Norm(), test.want) {
-			t.Errorf("%v.Norm() = %v, want %v", test.p, test.p.Norm(), test.want)
-		}
-	}
-}
-
-func TestNormalize(t *testing.T) {
-	tests := []struct {
-		have Point
-		want Point
-	}{
-		{Point{}, Point{}},
-		{Point{0, 0}, Point{0, 0}},
-		{Point{0, 1}, Point{0, 1}},
-		{Point{-1, 0}, Point{-1, 0}},
-		{Point{3, 4}, Point{0.6, 0.8}},
-		{Point{3, -4}, Point{0.6, -0.8}},
-		{Point{2, 2}, Point{math.Sqrt(2) / 2, math.Sqrt(2) / 2}},
-		{Point{7, 7 * math.Sqrt(3)}, Point{0.5, math.Sqrt(3) / 2}},
-		{Point{1e21, 1e21 * math.Sqrt(3)}, Point{0.5, math.Sqrt(3) / 2}},
-		{Point{1, 1e16}, Point{0, 1}},
-		{Point{1e4, math.MaxFloat32 - 1}, Point{0, 1}},
-	}
-
-	for _, test := range tests {
-		if got := test.have.Normalize(); !pointsApproxEqual(got, test.want) {
-			t.Errorf("%v.Normalize() = %v, want %v", test.have, got, test.want)
-		}
-	}
-
-}
-
-func TestEmptyRect(t *testing.T) {
-	if !empty.IsValid() {
-		t.Errorf("empty Rect should be valid: %v", empty)
-	}
-	if !empty.IsEmpty() {
-		t.Errorf("empty Rect should be empty: %v", empty)
-	}
-}
-
-func TestFromVariousTypes(t *testing.T) {
-	d1 := RectFromPoints(Point{0.1, 0}, Point{0.25, 1})
-	tests := []struct {
-		r1, r2 Rect
-	}{
-		{
-			RectFromCenterSize(Point{0.3, 0.5}, Point{0.2, 0.4}),
-			RectFromPoints(Point{0.2, 0.3}, Point{0.4, 0.7}),
-		},
-		{
-			RectFromCenterSize(Point{1, 0.1}, Point{0, 2}),
-			RectFromPoints(Point{1, -0.9}, Point{1, 1.1}),
-		},
-		{
-			d1,
-			Rect{d1.X, d1.Y},
-		},
-		{
-			RectFromPoints(Point{0.15, 0.3}, Point{0.35, 0.9}),
-			RectFromPoints(Point{0.15, 0.9}, Point{0.35, 0.3}),
-		},
-		{
-			RectFromPoints(Point{0.12, 0}, Point{0.83, 0.5}),
-			RectFromPoints(Point{0.83, 0}, Point{0.12, 0.5}),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.r1.ApproxEquals(test.r2); !got {
-			t.Errorf("%v.ApproxEquals(%v); got %v want true", test.r1, test.r2, got)
-		}
-	}
-}
-
-func TestCenter(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		want Point
-	}{
-		{empty, Point{0.5, 0.5}},
-		{rect, Point{0.25, 0.5}},
-	}
-	for _, test := range tests {
-		if got := test.rect.Center(); got != test.want {
-			t.Errorf("%v.Center(); got %v want %v", test.rect, got, test.want)
-		}
-	}
-}
-
-func TestVertices(t *testing.T) {
-	want := [4]Point{sw, se, ne, nw}
-	got := rect.Vertices()
-	if !reflect.DeepEqual(got, want) {
-		t.Errorf("%v.Vertices(); got %v want %v", rect, got, want)
-	}
-}
-
-func TestContainsPoint(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		p    Point
-		want bool
-	}{
-		{rect, Point{0.2, 0.4}, true},
-		{rect, Point{0.2, 0.8}, false},
-		{rect, Point{-0.1, 0.4}, false},
-		{rect, Point{0.6, 0.1}, false},
-		{rect, Point{rect.X.Lo, rect.Y.Lo}, true},
-		{rect, Point{rect.X.Hi, rect.Y.Hi}, true},
-	}
-	for _, test := range tests {
-		if got := test.rect.ContainsPoint(test.p); got != test.want {
-			t.Errorf("%v.ContainsPoint(%v); got %v want %v", test.rect, test.p, got, test.want)
-		}
-	}
-}
-
-func TestInteriorContainsPoint(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		p    Point
-		want bool
-	}{
-		// Check corners are not contained.
-		{rect, sw, false},
-		{rect, ne, false},
-		// Check a point on the border is not contained.
-		{rect, Point{0, 0.5}, false},
-		{rect, Point{0.25, 0.25}, false},
-		{rect, Point{0.5, 0.5}, false},
-		// Check points inside are contained.
-		{rect, Point{0.125, 0.6}, true},
-	}
-	for _, test := range tests {
-		if got := test.rect.InteriorContainsPoint(test.p); got != test.want {
-			t.Errorf("%v.InteriorContainsPoint(%v); got %v want %v",
-				test.rect, test.p, got, test.want)
-		}
-	}
-}
-
-func TestIntervalOps(t *testing.T) {
-	tests := []struct {
-		r1, r2                                           Rect
-		contains, intContains, intersects, intIntersects bool
-		wantUnion, wantIntersection                      Rect
-	}{
-		{
-			rect, rectMid,
-			true, true, true, true,
-			rect, rectMid,
-		},
-		{
-			rect, rectSW,
-			true, false, true, false,
-			rect, rectSW,
-		},
-		{
-			rect, rectNE,
-			true, false, true, false,
-			rect, rectNE,
-		},
-		{
-			rect,
-			RectFromPoints(Point{0.45, 0.1}, Point{0.75, 0.3}),
-			false, false, true, true,
-			RectFromPoints(Point{0, 0.1}, Point{0.75, 0.75}),
-			RectFromPoints(Point{0.45, 0.25}, Point{0.5, 0.3}),
-		},
-		{
-			rect,
-			RectFromPoints(Point{0.5, 0.1}, Point{0.7, 0.3}),
-			false, false, true, false,
-			RectFromPoints(Point{0, 0.1}, Point{0.7, 0.75}),
-			RectFromPoints(Point{0.5, 0.25}, Point{0.5, 0.3}),
-		},
-		{
-			rect,
-			RectFromPoints(Point{0.45, 0.1}, Point{0.7, 0.25}),
-			false, false, true, false,
-			RectFromPoints(Point{0, 0.1}, Point{0.7, 0.75}),
-			RectFromPoints(Point{0.45, 0.25}, Point{0.5, 0.25}),
-		},
-		{
-			RectFromPoints(Point{0.1, 0.2}, Point{0.1, 0.3}),
-			RectFromPoints(Point{0.15, 0.7}, Point{0.2, 0.8}),
-			false, false, false, false,
-			RectFromPoints(Point{0.1, 0.2}, Point{0.2, 0.8}),
-			EmptyRect(),
-		},
-		// Check that the intersection of two rectangles that overlap in x but not y
-		// is valid, and vice versa.
-		{
-			RectFromPoints(Point{0.1, 0.2}, Point{0.4, 0.5}),
-			RectFromPoints(Point{0, 0}, Point{0.2, 0.1}),
-			false, false, false, false,
-			RectFromPoints(Point{0, 0}, Point{0.4, 0.5}),
-			EmptyRect(),
-		},
-		{
-			RectFromPoints(Point{0, 0}, Point{0.1, 0.3}),
-			RectFromPoints(Point{0.2, 0.1}, Point{0.3, 0.4}),
-			false, false, false, false,
-			RectFromPoints(Point{0, 0}, Point{0.3, 0.4}),
-			EmptyRect(),
-		},
-	}
-	for _, test := range tests {
-		if got := test.r1.Contains(test.r2); got != test.contains {
-			t.Errorf("%v.Contains(%v); got %v want %v",
-				test.r1, test.r2, got, test.contains)
-		}
-
-		if got := test.r1.InteriorContains(test.r2); got != test.intContains {
-			t.Errorf("%v.InteriorContains(%v); got %v want %v",
-				test.r1, test.r2, got, test.contains)
-		}
-
-		if got := test.r1.Intersects(test.r2); got != test.intersects {
-			t.Errorf("%v.Intersects(%v); got %v want %v",
-				test.r1, test.r2, got, test.intersects)
-		}
-
-		if got := test.r1.InteriorIntersects(test.r2); got != test.intIntersects {
-			t.Errorf("%v.InteriorIntersects(%v); got %v want %v",
-				test.r1, test.r2, got, test.intIntersects)
-		}
-
-		tCon := test.r1.Contains(test.r2)
-		if got := test.r1.Union(test.r2).ApproxEquals(test.r1); got != tCon {
-			t.Errorf("%v.Union(%v) == %v.Contains(%v); got %v want %v",
-				test.r1, test.r2, test.r1, test.r2, got, tCon)
-		}
-
-		tInter := test.r1.Intersects(test.r2)
-		if got := !test.r1.Intersection(test.r2).IsEmpty(); got != tInter {
-			t.Errorf("%v.Intersection(%v).IsEmpty() == %v.Intersects(%v); got %v want %v",
-				test.r1, test.r2, test.r1, test.r2, got, tInter)
-		}
-
-		if got := test.r1.Union(test.r2); got != test.wantUnion {
-			t.Errorf("%v.Union(%v); got %v want %v",
-				test.r1, test.r2, got, test.wantUnion)
-		}
-
-		if got := test.r1.Intersection(test.r2); got != test.wantIntersection {
-			t.Errorf("%v.Intersection(%v); got %v want %v",
-				test.r1, test.r2, got, test.wantIntersection)
-		}
-
-		r := test.r1.AddRect(test.r2)
-
-		if r != test.wantUnion {
-			t.Errorf("%v.AddRect(%v); got %v want %v", test.r1, test.r2, r, test.wantUnion)
-		}
-	}
-}
-
-func TestAddPoint(t *testing.T) {
-	r1 := rect
-	r2 := EmptyRect()
-
-	r2 = r2.AddPoint(sw)
-	r2 = r2.AddPoint(se)
-	r2 = r2.AddPoint(nw)
-	r2 = r2.AddPoint(Point{0.1, 0.4})
-
-	if !r1.ApproxEquals(r2) {
-		t.Errorf("%v.AddPoint(%v); got false want true", r1, r2)
-	}
-}
-
-func TestClampPoint(t *testing.T) {
-	r := Rect{r1.Interval{Lo: 0, Hi: 0.5}, r1.Interval{Lo: 0.25, Hi: 0.75}}
-	tests := []struct {
-		p    Point
-		want Point
-	}{
-		{Point{-0.01, 0.24}, Point{0, 0.25}},
-		{Point{-5.0, 0.48}, Point{0, 0.48}},
-		{Point{-5.0, 2.48}, Point{0, 0.75}},
-		{Point{0.19, 2.48}, Point{0.19, 0.75}},
-
-		{Point{6.19, 2.48}, Point{0.5, 0.75}},
-		{Point{6.19, 0.53}, Point{0.5, 0.53}},
-		{Point{6.19, -2.53}, Point{0.5, 0.25}},
-		{Point{0.33, -2.53}, Point{0.33, 0.25}},
-		{Point{0.33, 0.37}, Point{0.33, 0.37}},
-	}
-	for _, test := range tests {
-		if got := r.ClampPoint(test.p); got != test.want {
-			t.Errorf("%v.ClampPoint(%v); got %v want %v", r, test.p, got, test.want)
-		}
-	}
-}
-
-func TestExpandedEmpty(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		p    Point
-	}{
-		{
-			EmptyRect(),
-			Point{0.1, 0.3},
-		},
-		{
-			EmptyRect(),
-			Point{-0.1, -0.3},
-		},
-		{
-			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
-			Point{-0.1, 0.3},
-		},
-		{
-			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
-			Point{0.1, -0.2},
-		},
-	}
-	for _, test := range tests {
-		if got := test.rect.Expanded(test.p); !got.IsEmpty() {
-			t.Errorf("%v.Expanded(%v); got %v want true", test.rect, test.p, got.IsEmpty())
-		}
-	}
-}
-
-func TestExpandedEquals(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		p    Point
-		want Rect
-	}{
-		{
-			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
-			Point{0.1, 0.3},
-			RectFromPoints(Point{0.1, 0.1}, Point{0.4, 1.0}),
-		},
-		{
-			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
-			Point{0.1, -0.1},
-			RectFromPoints(Point{0.1, 0.5}, Point{0.4, 0.6}),
-		},
-		{
-			RectFromPoints(Point{0.2, 0.4}, Point{0.3, 0.7}),
-			Point{0.1, 0.1},
-			RectFromPoints(Point{0.1, 0.3}, Point{0.4, 0.8}),
-		},
-	}
-	for _, test := range tests {
-		if got := test.rect.Expanded(test.p); !got.ApproxEquals(test.want) {
-			t.Errorf("%v.Expanded(%v); got %v want %v", test.rect, test.p, got, test.want)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/r3/doc.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 /*
 /*
 Package r3 implements types and functions for working with geometry in ℝ³.
 Package r3 implements types and functions for working with geometry in ℝ³.

+ 17 - 19
vendor/github.com/golang/geo/r3/precisevector.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package r3
 package r3
 
 
@@ -92,16 +90,16 @@ func (v PreciseVector) Vector() Vector {
 	x, _ := v.X.Float64()
 	x, _ := v.X.Float64()
 	y, _ := v.Y.Float64()
 	y, _ := v.Y.Float64()
 	z, _ := v.Z.Float64()
 	z, _ := v.Z.Float64()
-	return Vector{x, y, z}
+	return Vector{x, y, z}.Normalize()
 }
 }
 
 
-// Equals reports whether v and ov are equal.
-func (v PreciseVector) Equals(ov PreciseVector) bool {
+// Equal reports whether v and ov are equal.
+func (v PreciseVector) Equal(ov PreciseVector) bool {
 	return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
 	return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
 }
 }
 
 
 func (v PreciseVector) String() string {
 func (v PreciseVector) String() string {
-	return fmt.Sprintf("(%v, %v, %v)", v.X, v.Y, v.Z)
+	return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z)
 }
 }
 
 
 // Norm2 returns the square of the norm.
 // Norm2 returns the square of the norm.

+ 0 - 477
vendor/github.com/golang/geo/r3/precisevector_test.go

@@ -1,477 +0,0 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package r3
-
-import (
-	"math/big"
-	"testing"
-)
-
-// preciseEq compares two big.Floats and checks if the are the same.
-func preciseEq(a, b *big.Float) bool {
-	return a.SetPrec(prec).Cmp(b.SetPrec(prec)) == 0
-}
-
-func TestPreciseRoundtrip(t *testing.T) {
-	tests := []struct {
-		v Vector
-	}{
-		{Vector{0, 0, 0}},
-		{Vector{1, 2, 3}},
-		{Vector{3, -4, 12}},
-		{Vector{1, 1e-16, 1e-32}},
-	}
-
-	for _, test := range tests {
-		if got := PreciseVectorFromVector(test.v).Vector(); !got.ApproxEqual(test.v) {
-			t.Errorf("PreciseVectorFromVector(%v).Vector() = %v, want %v", test.v, got, test.v)
-		}
-	}
-}
-
-func TestPreciseIsUnit(t *testing.T) {
-	const epsilon = 1e-14
-	tests := []struct {
-		v    PreciseVector
-		want bool
-	}{
-		{
-			v:    NewPreciseVector(0, 0, 0),
-			want: false,
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			want: true,
-		},
-		{
-			v:    NewPreciseVector(0, 1, 0),
-			want: true,
-		},
-		{
-			v:    NewPreciseVector(0, 0, 1),
-			want: true,
-		},
-		{
-			v:    NewPreciseVector(1+2*epsilon, 0, 0),
-			want: false,
-		},
-		{
-			v:    NewPreciseVector(0*(1+epsilon), 0, 0),
-			want: false,
-		},
-		{
-			v:    NewPreciseVector(1, 1, 1),
-			want: false,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v.IsUnit(); got != test.want {
-			t.Errorf("%v.IsUnit() = %v, want %v", test.v, got, test.want)
-		}
-	}
-}
-
-func TestPreciseNorm2(t *testing.T) {
-	tests := []struct {
-		v    PreciseVector
-		want *big.Float
-	}{
-		{
-			v:    NewPreciseVector(0, 0, 0),
-			want: precise0,
-		},
-		{
-			v:    NewPreciseVector(0, 1, 0),
-			want: precise1,
-		},
-		{
-			v:    NewPreciseVector(1, 1, 1),
-			want: precStr("3"),
-		},
-		{
-			v:    NewPreciseVector(1, 2, 3),
-			want: precStr("14"),
-		},
-		{
-			v:    NewPreciseVector(3, -4, 12),
-			want: precStr("169"),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v.Norm2(); !preciseEq(got, test.want) {
-			t.Errorf("%v.Norm2() = %v, want %v", test.v, test.v.Norm2(), test.want)
-		}
-	}
-}
-
-func TestPreciseAdd(t *testing.T) {
-	tests := []struct {
-		v1, v2, want PreciseVector
-	}{
-		{
-			v1:   NewPreciseVector(0, 0, 0),
-			v2:   NewPreciseVector(0, 0, 0),
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(0, 0, 0),
-			want: NewPreciseVector(1, 0, 0),
-		},
-		{
-			v1:   NewPreciseVector(1, 2, 3),
-			v2:   NewPreciseVector(4, 5, 7),
-			want: NewPreciseVector(5, 7, 10),
-		},
-		{
-			v1:   NewPreciseVector(1, -3, 5),
-			v2:   NewPreciseVector(1, -6, -6),
-			want: NewPreciseVector(2, -9, -1),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v1.Add(test.v2); !got.Equals(test.want) {
-			t.Errorf("%v + %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestPreciseSub(t *testing.T) {
-	tests := []struct {
-		v1, v2, want PreciseVector
-	}{
-		{
-			v1:   NewPreciseVector(0, 0, 0),
-			v2:   NewPreciseVector(0, 0, 0),
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(0, 0, 0),
-			want: NewPreciseVector(1, 0, 0),
-		},
-		{
-			v1:   NewPreciseVector(1, 2, 3),
-			v2:   NewPreciseVector(4, 5, 7),
-			want: NewPreciseVector(-3, -3, -4),
-		},
-		{
-			v1:   NewPreciseVector(1, -3, 5),
-			v2:   NewPreciseVector(1, -6, -6),
-			want: NewPreciseVector(0, 3, 11),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v1.Sub(test.v2); !got.Equals(test.want) {
-			t.Errorf("%v - %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestPreciseMul(t *testing.T) {
-	tests := []struct {
-		v    PreciseVector
-		f    *big.Float
-		want PreciseVector
-	}{
-		{
-			v:    NewPreciseVector(0, 0, 0),
-			f:    precFloat(3),
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    precFloat(1),
-			want: NewPreciseVector(1, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    precFloat(0),
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    precFloat(3),
-			want: NewPreciseVector(3, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, -3, 5),
-			f:    precFloat(-1),
-			want: NewPreciseVector(-1, 3, -5),
-		},
-		{
-			v:    NewPreciseVector(1, -3, 5),
-			f:    precFloat(2),
-			want: NewPreciseVector(2, -6, 10),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v.Mul(test.f); !got.Equals(test.want) {
-			t.Errorf("%v.Mul(%v) = %v, want %v", test.v, test.f, got, test.want)
-		}
-	}
-}
-
-func TestPreciseMulByFloat64(t *testing.T) {
-	tests := []struct {
-		v    PreciseVector
-		f    float64
-		want PreciseVector
-	}{
-		{
-			v:    NewPreciseVector(0, 0, 0),
-			f:    3,
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    1,
-			want: NewPreciseVector(1, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    0,
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, 0, 0),
-			f:    3,
-			want: NewPreciseVector(3, 0, 0),
-		},
-		{
-			v:    NewPreciseVector(1, -3, 5),
-			f:    -1,
-			want: NewPreciseVector(-1, 3, -5),
-		},
-		{
-			v:    NewPreciseVector(1, -3, 5),
-			f:    2,
-			want: NewPreciseVector(2, -6, 10),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v.MulByFloat64(test.f); !got.Equals(test.want) {
-			t.Errorf("%v.MulByFloat64(%v) = %v, want %v", test.v, test.f, got, test.want)
-		}
-	}
-}
-
-func TestPreciseDot(t *testing.T) {
-	tests := []struct {
-		v1, v2 PreciseVector
-		want   *big.Float
-	}{
-		{
-			// Dot with self should be 1.
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(1, 0, 0),
-			want: precise1,
-		},
-		{
-			// Dot with self should be 1.
-			v1:   NewPreciseVector(0, 1, 0),
-			v2:   NewPreciseVector(0, 1, 0),
-			want: precise1,
-		},
-		{
-			// Dot with self should be 1.
-			v1:   NewPreciseVector(0, 0, 1),
-			v2:   NewPreciseVector(0, 0, 1),
-			want: precise1,
-		},
-		{
-			// Perpendicular should be 0.
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(0, 1, 0),
-			want: precise0,
-		},
-		{
-			// Perpendicular should be 0.
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(0, 1, 1),
-			want: precise0,
-		},
-		{
-			v1:   NewPreciseVector(1, 1, 1),
-			v2:   NewPreciseVector(-1, -1, -1),
-			want: precStr("-3"),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v1.Dot(test.v2); !preciseEq(got, test.want) {
-			t.Errorf("%v · %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-		if got := test.v2.Dot(test.v1); !preciseEq(got, test.want) {
-			t.Errorf("%v · %v = %v, want %v", test.v2, test.v1, got, test.want)
-		}
-	}
-}
-
-func TestPreciseCross(t *testing.T) {
-	tests := []struct {
-		v1, v2, want PreciseVector
-	}{
-		{
-			// Cross with self should be 0.
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(1, 0, 0),
-			want: NewPreciseVector(0, 0, 0),
-		},
-		{
-			// Cross with perpendicular should give the remaining axis.
-			v1:   NewPreciseVector(1, 0, 0),
-			v2:   NewPreciseVector(0, 1, 0),
-			want: NewPreciseVector(0, 0, 1),
-		},
-		{
-			// Cross with perpendicular should give the remaining axis.
-			v1:   NewPreciseVector(0, 1, 0),
-			v2:   NewPreciseVector(0, 0, 1),
-			want: NewPreciseVector(1, 0, 0),
-		},
-		{
-			// Cross with perpendicular should give the remaining axis.
-			v1:   NewPreciseVector(0, 0, 1),
-			v2:   NewPreciseVector(1, 0, 0),
-			want: NewPreciseVector(0, 1, 0),
-		},
-		{
-			v1:   NewPreciseVector(0, 1, 0),
-			v2:   NewPreciseVector(1, 0, 0),
-			want: NewPreciseVector(0, 0, -1),
-		},
-		{
-			v1:   NewPreciseVector(1, 2, 3),
-			v2:   NewPreciseVector(-4, 5, -6),
-			want: NewPreciseVector(-27, -6, 13),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v1.Cross(test.v2); !got.Equals(test.want) {
-			t.Errorf("%v ⨯ %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestPreciseIdentities(t *testing.T) {
-	tests := []struct {
-		v1, v2 PreciseVector
-	}{
-		{
-			v1: NewPreciseVector(0, 0, 0),
-			v2: NewPreciseVector(0, 0, 0),
-		},
-		{
-			v1: NewPreciseVector(0, 0, 0),
-			v2: NewPreciseVector(0, 1, 2),
-		},
-		{
-			v1: NewPreciseVector(1, 0, 0),
-			v2: NewPreciseVector(0, 1, 0),
-		},
-		{
-			v1: NewPreciseVector(1, 0, 0),
-			v2: NewPreciseVector(0, 1, 1),
-		},
-		{
-			v1: NewPreciseVector(1, 1, 1),
-			v2: NewPreciseVector(-1, -1, -1),
-		},
-		{
-			v1: NewPreciseVector(1, 2, 2),
-			v2: NewPreciseVector(-0.3, 0.4, -1.2),
-		},
-	}
-
-	for _, test := range tests {
-		c1 := test.v1.Cross(test.v2)
-		c2 := test.v2.Cross(test.v1)
-		d1 := test.v1.Dot(test.v2)
-		d2 := test.v2.Dot(test.v1)
-
-		// Dot commutes
-		if !preciseEq(d1, d2) {
-			t.Errorf("%v = %v · %v != %v · %v = %v", d1, test.v1, test.v2, test.v2, test.v1, d2)
-		}
-		// Cross anti-commutes
-		if !c1.Equals(c2.MulByFloat64(-1.0)) {
-			t.Errorf("%v = %v ⨯ %v != -(%v ⨯ %v) = -%v", c1, test.v1, test.v2, test.v2, test.v1, c2)
-		}
-		// Cross is orthogonal to original vectors
-		if got := test.v1.Dot(c1); !preciseEq(got, precise0) {
-			t.Errorf("%v · (%v ⨯ %v) = %v, want %v", test.v1, test.v1, test.v2, got, precise0)
-		}
-		if got := test.v2.Dot(c1); !preciseEq(got, precise0) {
-			t.Errorf("%v · (%v ⨯ %v) = %v, want %v", test.v2, test.v1, test.v2, got, precise0)
-		}
-	}
-}
-
-func TestPreciseLargestSmallestComponents(t *testing.T) {
-	tests := []struct {
-		v                 PreciseVector
-		largest, smallest Axis
-	}{
-		{
-			v:        NewPreciseVector(0, 0, 0),
-			largest:  ZAxis,
-			smallest: ZAxis,
-		},
-		{
-			v:        NewPreciseVector(1, 0, 0),
-			largest:  XAxis,
-			smallest: ZAxis,
-		},
-		{
-			v:        NewPreciseVector(1, -1, 0),
-			largest:  YAxis,
-			smallest: ZAxis,
-		},
-		{
-			v:        NewPreciseVector(-1, -1.1, -1.1),
-			largest:  ZAxis,
-			smallest: XAxis,
-		},
-		{
-			v:        NewPreciseVector(0.5, -0.4, -0.5),
-			largest:  ZAxis,
-			smallest: YAxis,
-		},
-		{
-			v:        NewPreciseVector(1e-15, 1e-14, 1e-13),
-			largest:  ZAxis,
-			smallest: XAxis,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.v.LargestComponent(); got != test.largest {
-			t.Errorf("%v.LargestComponent() = %v, want %v", test.v, got, test.largest)
-		}
-		if got := test.v.SmallestComponent(); got != test.smallest {
-			t.Errorf("%v.SmallestComponent() = %v, want %v", test.v, got, test.smallest)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/r3/vector.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package r3
 package r3
 
 

+ 0 - 339
vendor/github.com/golang/geo/r3/vector_test.go

@@ -1,339 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package r3
-
-import (
-	"math"
-	"testing"
-)
-
-func float64Eq(x, y float64) bool { return math.Abs(x-y) < 1e-14 }
-
-func TestVectorNorm(t *testing.T) {
-	tests := []struct {
-		v    Vector
-		want float64
-	}{
-		{Vector{0, 0, 0}, 0},
-		{Vector{0, 1, 0}, 1},
-		{Vector{3, -4, 12}, 13},
-		{Vector{1, 1e-16, 1e-32}, 1},
-	}
-	for _, test := range tests {
-		if !float64Eq(test.v.Norm(), test.want) {
-			t.Errorf("%v.Norm() = %v, want %v", test.v, test.v.Norm(), test.want)
-		}
-	}
-}
-
-func TestVectorNorm2(t *testing.T) {
-	tests := []struct {
-		v    Vector
-		want float64
-	}{
-		{Vector{0, 0, 0}, 0},
-		{Vector{0, 1, 0}, 1},
-		{Vector{1, 1, 1}, 3},
-		{Vector{1, 2, 3}, 14},
-		{Vector{3, -4, 12}, 169},
-		{Vector{1, 1e-16, 1e-32}, 1},
-	}
-	for _, test := range tests {
-		if !float64Eq(test.v.Norm2(), test.want) {
-			t.Errorf("%v.Norm2() = %v, want %v", test.v, test.v.Norm2(), test.want)
-		}
-	}
-}
-
-func TestVectorNormalize(t *testing.T) {
-	vectors := []Vector{
-		{1, 0, 0},
-		{0, 1, 0},
-		{0, 0, 1},
-		{1, 1, 1},
-		{1, 1e-16, 1e-32},
-		{12.34, 56.78, 91.01},
-	}
-	for _, v := range vectors {
-		nv := v.Normalize()
-		if !float64Eq(v.X*nv.Y, v.Y*nv.X) || !float64Eq(v.X*nv.Z, v.Z*nv.X) {
-			t.Errorf("%v.Normalize() did not preserve direction", v)
-		}
-		if !float64Eq(nv.Norm(), 1.0) {
-			t.Errorf("|%v| = %v, want 1", v, v.Norm())
-		}
-	}
-}
-
-func TestVectorIsUnit(t *testing.T) {
-	const epsilon = 1e-14
-	tests := []struct {
-		v    Vector
-		want bool
-	}{
-		{Vector{0, 0, 0}, false},
-		{Vector{0, 1, 0}, true},
-		{Vector{1 + 2*epsilon, 0, 0}, true},
-		{Vector{1 * (1 + epsilon), 0, 0}, true},
-		{Vector{1, 1, 1}, false},
-		{Vector{1, 1e-16, 1e-32}, true},
-	}
-	for _, test := range tests {
-		if got := test.v.IsUnit(); got != test.want {
-			t.Errorf("%v.IsUnit() = %v, want %v", test.v, got, test.want)
-		}
-	}
-}
-func TestVectorDot(t *testing.T) {
-	tests := []struct {
-		v1, v2 Vector
-		want   float64
-	}{
-		{Vector{1, 0, 0}, Vector{1, 0, 0}, 1},
-		{Vector{1, 0, 0}, Vector{0, 1, 0}, 0},
-		{Vector{1, 0, 0}, Vector{0, 1, 1}, 0},
-		{Vector{1, 1, 1}, Vector{-1, -1, -1}, -3},
-		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}, -1.9},
-	}
-	for _, test := range tests {
-		v1 := Vector{test.v1.X, test.v1.Y, test.v1.Z}
-		v2 := Vector{test.v2.X, test.v2.Y, test.v2.Z}
-		if !float64Eq(v1.Dot(v2), test.want) {
-			t.Errorf("%v · %v = %v, want %v", v1, v2, v1.Dot(v2), test.want)
-		}
-		if !float64Eq(v2.Dot(v1), test.want) {
-			t.Errorf("%v · %v = %v, want %v", v2, v1, v2.Dot(v1), test.want)
-		}
-	}
-}
-
-func TestVectorCross(t *testing.T) {
-	tests := []struct {
-		v1, v2, want Vector
-	}{
-		{Vector{1, 0, 0}, Vector{1, 0, 0}, Vector{0, 0, 0}},
-		{Vector{1, 0, 0}, Vector{0, 1, 0}, Vector{0, 0, 1}},
-		{Vector{0, 1, 0}, Vector{1, 0, 0}, Vector{0, 0, -1}},
-		{Vector{1, 2, 3}, Vector{-4, 5, -6}, Vector{-27, -6, 13}},
-	}
-	for _, test := range tests {
-		if got := test.v1.Cross(test.v2); !got.ApproxEqual(test.want) {
-			t.Errorf("%v ⨯ %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestVectorAdd(t *testing.T) {
-	tests := []struct {
-		v1, v2, want Vector
-	}{
-		{Vector{0, 0, 0}, Vector{0, 0, 0}, Vector{0, 0, 0}},
-		{Vector{1, 0, 0}, Vector{0, 0, 0}, Vector{1, 0, 0}},
-		{Vector{1, 2, 3}, Vector{4, 5, 7}, Vector{5, 7, 10}},
-		{Vector{1, -3, 5}, Vector{1, -6, -6}, Vector{2, -9, -1}},
-	}
-	for _, test := range tests {
-		if got := test.v1.Add(test.v2); !got.ApproxEqual(test.want) {
-			t.Errorf("%v + %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestVectorSub(t *testing.T) {
-	tests := []struct {
-		v1, v2, want Vector
-	}{
-		{Vector{0, 0, 0}, Vector{0, 0, 0}, Vector{0, 0, 0}},
-		{Vector{1, 0, 0}, Vector{0, 0, 0}, Vector{1, 0, 0}},
-		{Vector{1, 2, 3}, Vector{4, 5, 7}, Vector{-3, -3, -4}},
-		{Vector{1, -3, 5}, Vector{1, -6, -6}, Vector{0, 3, 11}},
-	}
-	for _, test := range tests {
-		if got := test.v1.Sub(test.v2); !got.ApproxEqual(test.want) {
-			t.Errorf("%v - %v = %v, want %v", test.v1, test.v2, got, test.want)
-		}
-	}
-}
-
-func TestVectorDistance(t *testing.T) {
-	tests := []struct {
-		v1, v2 Vector
-		want   float64
-	}{
-		{Vector{1, 0, 0}, Vector{1, 0, 0}, 0},
-		{Vector{1, 0, 0}, Vector{0, 1, 0}, 1.41421356237310},
-		{Vector{1, 0, 0}, Vector{0, 1, 1}, 1.73205080756888},
-		{Vector{1, 1, 1}, Vector{-1, -1, -1}, 3.46410161513775},
-		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}, 3.80657326213486},
-	}
-	for _, test := range tests {
-		v1 := Vector{test.v1.X, test.v1.Y, test.v1.Z}
-		v2 := Vector{test.v2.X, test.v2.Y, test.v2.Z}
-		if got, want := v1.Distance(v2), test.want; !float64Eq(got, want) {
-			t.Errorf("%v.Distance(%v) = %v, want %v", v1, v2, got, want)
-		}
-		if got, want := v2.Distance(v1), test.want; !float64Eq(got, want) {
-			t.Errorf("%v.Distance(%v) = %v, want %v", v2, v1, got, want)
-		}
-	}
-}
-
-func TestVectorMul(t *testing.T) {
-	tests := []struct {
-		v    Vector
-		m    float64
-		want Vector
-	}{
-		{Vector{0, 0, 0}, 3, Vector{0, 0, 0}},
-		{Vector{1, 0, 0}, 1, Vector{1, 0, 0}},
-		{Vector{1, 0, 0}, 0, Vector{0, 0, 0}},
-		{Vector{1, 0, 0}, 3, Vector{3, 0, 0}},
-		{Vector{1, -3, 5}, -1, Vector{-1, 3, -5}},
-		{Vector{1, -3, 5}, 2, Vector{2, -6, 10}},
-	}
-	for _, test := range tests {
-		if !test.v.Mul(test.m).ApproxEqual(test.want) {
-			t.Errorf("%v%v = %v, want %v", test.m, test.v, test.v.Mul(test.m), test.want)
-		}
-	}
-}
-
-func TestVectorAngle(t *testing.T) {
-	tests := []struct {
-		v1, v2 Vector
-		want   float64 // radians
-	}{
-		{Vector{1, 0, 0}, Vector{1, 0, 0}, 0},
-		{Vector{1, 0, 0}, Vector{0, 1, 0}, math.Pi / 2},
-		{Vector{1, 0, 0}, Vector{0, 1, 1}, math.Pi / 2},
-		{Vector{1, 0, 0}, Vector{-1, 0, 0}, math.Pi},
-		{Vector{1, 2, 3}, Vector{2, 3, -1}, 1.2055891055045298},
-	}
-	for _, test := range tests {
-		if a := test.v1.Angle(test.v2).Radians(); !float64Eq(a, test.want) {
-			t.Errorf("%v.Angle(%v) = %v, want %v", test.v1, test.v2, a, test.want)
-		}
-		if a := test.v2.Angle(test.v1).Radians(); !float64Eq(a, test.want) {
-			t.Errorf("%v.Angle(%v) = %v, want %v", test.v2, test.v1, a, test.want)
-		}
-	}
-}
-
-func TestVectorOrtho(t *testing.T) {
-	vectors := []Vector{
-		{1, 0, 0},
-		{1, 1, 0},
-		{1, 2, 3},
-		{1, -2, -5},
-		{0.012, 0.0053, 0.00457},
-		{-0.012, -1, -0.00457},
-	}
-	for _, v := range vectors {
-		if !float64Eq(v.Dot(v.Ortho()), 0) {
-			t.Errorf("%v = not orthogonal to %v.Ortho()", v, v)
-		}
-		if !float64Eq(v.Ortho().Norm(), 1) {
-			t.Errorf("|%v.Ortho()| = %v, want 1", v, v.Ortho().Norm())
-		}
-	}
-}
-
-func TestVectorIdentities(t *testing.T) {
-	tests := []struct {
-		v1, v2 Vector
-	}{
-		{Vector{0, 0, 0}, Vector{0, 0, 0}},
-		{Vector{0, 0, 0}, Vector{0, 1, 2}},
-		{Vector{1, 0, 0}, Vector{0, 1, 0}},
-		{Vector{1, 0, 0}, Vector{0, 1, 1}},
-		{Vector{1, 1, 1}, Vector{-1, -1, -1}},
-		{Vector{1, 2, 2}, Vector{-0.3, 0.4, -1.2}},
-	}
-	for _, test := range tests {
-		a1 := test.v1.Angle(test.v2).Radians()
-		a2 := test.v2.Angle(test.v1).Radians()
-		c1 := test.v1.Cross(test.v2)
-		c2 := test.v2.Cross(test.v1)
-		d1 := test.v1.Dot(test.v2)
-		d2 := test.v2.Dot(test.v1)
-		// Angle commutes
-		if !float64Eq(a1, a2) {
-			t.Errorf("%v = %v.Angle(%v) != %v.Angle(%v) = %v", a1, test.v1, test.v2, test.v2, test.v1, a2)
-		}
-		// Dot commutes
-		if !float64Eq(d1, d2) {
-			t.Errorf("%v = %v · %v != %v · %v = %v", d1, test.v1, test.v2, test.v2, test.v1, d2)
-		}
-		// Cross anti-commutes
-		if !c1.ApproxEqual(c2.Mul(-1.0)) {
-			t.Errorf("%v = %v ⨯ %v != -(%v ⨯ %v) = -%v", c1, test.v1, test.v2, test.v2, test.v1, c2)
-		}
-		// Cross is orthogonal to original vectors
-		if !float64Eq(test.v1.Dot(c1), 0.0) {
-			t.Errorf("%v · (%v ⨯ %v) = %v != 0", test.v1, test.v1, test.v2, test.v1.Dot(c1))
-		}
-		if !float64Eq(test.v2.Dot(c1), 0.0) {
-			t.Errorf("%v · (%v ⨯ %v) = %v != 0", test.v2, test.v1, test.v2, test.v2.Dot(c1))
-		}
-	}
-}
-
-func TestVectorLargestSmallestComponents(t *testing.T) {
-	tests := []struct {
-		v                 Vector
-		largest, smallest Axis
-	}{
-		{Vector{0, 0, 0}, ZAxis, ZAxis},
-		{Vector{1, 0, 0}, XAxis, ZAxis},
-		{Vector{1, -1, 0}, YAxis, ZAxis},
-		{Vector{-1, -1.1, -1.1}, ZAxis, XAxis},
-		{Vector{0.5, -0.4, -0.5}, ZAxis, YAxis},
-		{Vector{1e-15, 1e-14, 1e-13}, ZAxis, XAxis},
-	}
-
-	for _, test := range tests {
-		if got := test.v.LargestComponent(); got != test.largest {
-			t.Errorf("%v.LargestComponent() = %v, want %v", test.v, got, test.largest)
-		}
-		if got := test.v.SmallestComponent(); got != test.smallest {
-			t.Errorf("%v.SmallestComponent() = %v, want %v", test.v, got, test.smallest)
-		}
-	}
-}
-
-func TestVectorCmp(t *testing.T) {
-	tests := []struct {
-		a, b Vector
-		want int
-	}{
-		{Vector{0, 0, 0}, Vector{0, 0, 0}, 0},
-		{Vector{0, 0, 0}, Vector{1, 0, 0}, -1},
-		{Vector{0, 1, 0}, Vector{0, 0, 0}, 1},
-		{Vector{1, 2, 3}, Vector{3, 2, 1}, -1},
-		{Vector{-1, 0, 0}, Vector{0, 0, -1}, -1},
-		{Vector{8, 6, 4}, Vector{7, 5, 3}, 1},
-		{Vector{-1, -0.5, 0}, Vector{0, 0, 0.1}, -1},
-		{Vector{1, 2, 3}, Vector{2, 3, 4}, -1},
-		{Vector{1.23, 4.56, 7.89}, Vector{1.23, 4.56, 7.89}, 0},
-	}
-
-	for _, test := range tests {
-		if got := test.a.Cmp(test.b); got != test.want {
-			t.Errorf("%v.Cmp(%v) = %d, want %d", test.a, test.b, got, test.want)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/s1/angle.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s1
 package s1
 
 

+ 0 - 169
vendor/github.com/golang/geo/s1/angle_test.go

@@ -1,169 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s1
-
-import (
-	"math"
-	"testing"
-)
-
-// float64Eq reports whether the two values are within the default epsilon.
-func float64Eq(x, y float64) bool {
-	return float64Near(x, y, epsilon)
-}
-
-// float64Near reports whether the two values are within the specified epsilon.
-func float64Near(x, y, eps float64) bool {
-	return math.Abs(x-y) <= eps
-}
-
-func TestEmptyValue(t *testing.T) {
-	var a Angle
-	if rad := a.Radians(); rad != 0 {
-		t.Errorf("Empty value of Angle was %v, want 0", rad)
-	}
-}
-
-func TestPiRadiansExactly180Degrees(t *testing.T) {
-	if rad := (math.Pi * Radian).Radians(); rad != math.Pi {
-		t.Errorf("(π * Radian).Radians() was %v, want π", rad)
-	}
-	if deg := (math.Pi * Radian).Degrees(); deg != 180 {
-		t.Errorf("(π * Radian).Degrees() was %v, want 180", deg)
-	}
-	if rad := (180 * Degree).Radians(); rad != math.Pi {
-		t.Errorf("(180 * Degree).Radians() was %v, want π", rad)
-	}
-	if deg := (180 * Degree).Degrees(); deg != 180 {
-		t.Errorf("(180 * Degree).Degrees() was %v, want 180", deg)
-	}
-
-	if deg := (math.Pi / 2 * Radian).Degrees(); deg != 90 {
-		t.Errorf("(π/2 * Radian).Degrees() was %v, want 90", deg)
-	}
-
-	// Check negative angles.
-	if deg := (-math.Pi / 2 * Radian).Degrees(); deg != -90 {
-		t.Errorf("(-π/2 * Radian).Degrees() was %v, want -90", deg)
-	}
-	if rad := (-45 * Degree).Radians(); rad != -math.Pi/4 {
-		t.Errorf("(-45 * Degree).Radians() was %v, want -π/4", rad)
-	}
-}
-
-func TestE5E6E7Representation(t *testing.T) {
-	// NOTE(dsymonds): This first test gives a variance in the 16th decimal place. I should track that down.
-	exp, act := (-45 * Degree).Radians(), (-4500000 * E5).Radians()
-	if math.Abs(exp-act) > 1e-15 {
-		t.Errorf("(-4500000 * E5).Radians() was %v, want %v", act, exp)
-	}
-	if exp, act := (-60 * Degree).Radians(), (-60000000 * E6).Radians(); exp != act {
-		t.Errorf("(-60000000 * E6).Radians() was %v, want %v", act, exp)
-	}
-	if exp, act := (75 * Degree).Radians(), (750000000 * E7).Radians(); exp != act {
-		t.Errorf("(-750000000 * E7).Radians() was %v, want %v", act, exp)
-	}
-
-	if exp, act := int32(-17256123), (-172.56123 * Degree).E5(); exp != act {
-		t.Errorf("(-172.56123°).E5() was %v, want %v", act, exp)
-	}
-	if exp, act := int32(12345678), (12.345678 * Degree).E6(); exp != act {
-		t.Errorf("(12.345678°).E6() was %v, want %v", act, exp)
-	}
-	if exp, act := int32(-123456789), (-12.3456789 * Degree).E7(); exp != act {
-		t.Errorf("(-12.3456789°).E7() was %v, want %v", act, exp)
-	}
-
-	roundingTests := []struct {
-		have Angle
-		want int32
-	}{
-		{0.500000001, 1},
-		{-0.500000001, -1},
-		{0.499999999, 0},
-		{-0.499999999, 0},
-	}
-	for _, test := range roundingTests {
-		if act := (test.have * 1e-5 * Degree).E5(); test.want != act {
-			t.Errorf("(%v°).E5() was %v, want %v", test.have, act, test.want)
-		}
-		if act := (test.have * 1e-6 * Degree).E6(); test.want != act {
-			t.Errorf("(%v°).E6() was %v, want %v", test.have, act, test.want)
-		}
-		if act := (test.have * 1e-7 * Degree).E7(); test.want != act {
-			t.Errorf("(%v°).E7() was %v, want %v", test.have, act, test.want)
-		}
-	}
-}
-
-func TestNormalizeCorrectlyCanonicalizesAngles(t *testing.T) {
-	tests := []struct {
-		in, want float64 // both in degrees
-	}{
-		{360, 0},
-		{-180, 180},
-		{180, 180},
-		{540, 180},
-		{-270, 90},
-	}
-	for _, test := range tests {
-		deg := (Angle(test.in) * Degree).Normalized().Degrees()
-		if deg != test.want {
-			t.Errorf("Normalized %.0f° = %v, want %v", test.in, deg, test.want)
-		}
-	}
-}
-
-func TestAngleString(t *testing.T) {
-	if s, exp := (180 * Degree).String(), "180.0000000"; s != exp {
-		t.Errorf("(180°).String() = %q, want %q", s, exp)
-	}
-}
-
-func TestDegreesVsRadians(t *testing.T) {
-	// This test tests the exactness of specific values between degrees and radians.
-	for k := -8; k <= 8; k++ {
-		if got, want := Angle(45*k)*Degree, Angle((float64(k)*math.Pi)/4)*Radian; got != want {
-			t.Errorf("45°*%d != (%d*π)/4 radians (%f vs %f)", k, k, got, want)
-		}
-
-		if got, want := (Angle(45*k) * Degree).Degrees(), float64(45*k); got != want {
-			t.Errorf("Angle(45°*%d).Degrees() != 45*%d, (%f vs %f)", k, k, got, want)
-		}
-	}
-
-	for k := uint64(0); k < 30; k++ {
-		m := 1 << k
-		n := float64(m)
-		for _, test := range []struct{ deg, rad float64 }{
-			{180, 1},
-			{60, 3},
-			{36, 5},
-			{20, 9},
-			{4, 45},
-		} {
-			if got, want := Angle(test.deg/n)*Degree, Angle(math.Pi/(test.rad*n))*Radian; got != want {
-				t.Errorf("%v°/%d != π/%v*%d rad (%f vs %f)", test.deg, m, test.rad, m, got, want)
-			}
-		}
-	}
-
-	// We also spot check a non-identity.
-	if got := (60 * Degree).Degrees(); float64Eq(got, 60) {
-		t.Errorf("Angle(60).Degrees() == 60, but should not (%f vs %f)", got, 60.0)
-	}
-}

+ 60 - 24
vendor/github.com/golang/geo/s1/chordangle.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s1
 package s1
 
 
@@ -40,8 +38,8 @@ type ChordAngle float64
 
 
 const (
 const (
 	// NegativeChordAngle represents a chord angle smaller than the zero angle.
 	// NegativeChordAngle represents a chord angle smaller than the zero angle.
-	// The only valid operations on a NegativeChordAngle are comparisons and
-	// Angle conversions.
+	// The only valid operations on a NegativeChordAngle are comparisons,
+	// Angle conversions, and Successor/Predecessor.
 	NegativeChordAngle = ChordAngle(-1)
 	NegativeChordAngle = ChordAngle(-1)
 
 
 	// RightChordAngle represents a chord angle of 90 degrees (a "right angle").
 	// RightChordAngle represents a chord angle of 90 degrees (a "right angle").
@@ -50,6 +48,9 @@ const (
 	// StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
 	// StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
 	// This is the maximum finite chord angle.
 	// This is the maximum finite chord angle.
 	StraightChordAngle = ChordAngle(4)
 	StraightChordAngle = ChordAngle(4)
+
+	// maxLength2 is the square of the maximum length allowed in a ChordAngle.
+	maxLength2 = 4.0
 )
 )
 
 
 // ChordAngleFromAngle returns a ChordAngle from the given Angle.
 // ChordAngleFromAngle returns a ChordAngle from the given Angle.
@@ -65,10 +66,10 @@ func ChordAngleFromAngle(a Angle) ChordAngle {
 }
 }
 
 
 // ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
 // ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
-// Note that the argument is automatically clamped to a maximum of 4.0 to
+// Note that the argument is automatically clamped to a maximum of 4 to
 // handle possible roundoff errors. The argument must be non-negative.
 // handle possible roundoff errors. The argument must be non-negative.
 func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
 func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
-	if length2 > 4 {
+	if length2 > maxLength2 {
 		return StraightChordAngle
 		return StraightChordAngle
 	}
 	}
 	return ChordAngle(length2)
 	return ChordAngle(length2)
@@ -84,7 +85,7 @@ func (c ChordAngle) Expanded(e float64) ChordAngle {
 	if c.isSpecial() {
 	if c.isSpecial() {
 		return c
 		return c
 	}
 	}
-	return ChordAngle(math.Max(0.0, math.Min(4.0, float64(c)+e)))
+	return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e)))
 }
 }
 
 
 // Angle converts this ChordAngle to an Angle.
 // Angle converts this ChordAngle to an Angle.
@@ -99,7 +100,8 @@ func (c ChordAngle) Angle() Angle {
 }
 }
 
 
 // InfChordAngle returns a chord angle larger than any finite chord angle.
 // InfChordAngle returns a chord angle larger than any finite chord angle.
-// The only valid operations on an InfChordAngle are comparisons and Angle conversions.
+// The only valid operations on an InfChordAngle are comparisons, Angle
+// conversions, and Successor/Predecessor.
 func InfChordAngle() ChordAngle {
 func InfChordAngle() ChordAngle {
 	return ChordAngle(math.Inf(1))
 	return ChordAngle(math.Inf(1))
 }
 }
@@ -116,7 +118,41 @@ func (c ChordAngle) isSpecial() bool {
 
 
 // isValid reports whether this ChordAngle is valid or not.
 // isValid reports whether this ChordAngle is valid or not.
 func (c ChordAngle) isValid() bool {
 func (c ChordAngle) isValid() bool {
-	return (c >= 0 && c <= 4) || c.isSpecial()
+	return (c >= 0 && c <= maxLength2) || c.isSpecial()
+}
+
+// Successor returns the smallest representable ChordAngle larger than this one.
+// This can be used to convert a "<" comparison to a "<=" comparison.
+//
+// Note the following special cases:
+//   NegativeChordAngle.Successor == 0
+//   StraightChordAngle.Successor == InfChordAngle
+//   InfChordAngle.Successor == InfChordAngle
+func (c ChordAngle) Successor() ChordAngle {
+	if c >= maxLength2 {
+		return InfChordAngle()
+	}
+	if c < 0 {
+		return 0
+	}
+	return ChordAngle(math.Nextafter(float64(c), 10.0))
+}
+
+// Predecessor returns the largest representable ChordAngle less than this one.
+//
+// Note the following special cases:
+//   InfChordAngle.Predecessor == StraightChordAngle
+//   ChordAngle(0).Predecessor == NegativeChordAngle
+//   NegativeChordAngle.Predecessor == NegativeChordAngle
+func (c ChordAngle) Predecessor() ChordAngle {
+	if c <= 0 {
+		return NegativeChordAngle
+	}
+	if c > maxLength2 {
+		return StraightChordAngle
+	}
+
+	return ChordAngle(math.Nextafter(float64(c), -10.0))
 }
 }
 
 
 // MaxPointError returns the maximum error size for a ChordAngle constructed
 // MaxPointError returns the maximum error size for a ChordAngle constructed
@@ -150,7 +186,7 @@ func (c ChordAngle) Add(other ChordAngle) ChordAngle {
 	}
 	}
 
 
 	// Clamp the angle sum to at most 180 degrees.
 	// Clamp the angle sum to at most 180 degrees.
-	if c+other >= 4 {
+	if c+other >= maxLength2 {
 		return StraightChordAngle
 		return StraightChordAngle
 	}
 	}
 
 
@@ -161,7 +197,7 @@ func (c ChordAngle) Add(other ChordAngle) ChordAngle {
 	//                 cos(X) = sqrt(1 - sin^2(X))
 	//                 cos(X) = sqrt(1 - sin^2(X))
 	x := float64(c * (1 - 0.25*other))
 	x := float64(c * (1 - 0.25*other))
 	y := float64(other * (1 - 0.25*c))
 	y := float64(other * (1 - 0.25*c))
-	return ChordAngle(math.Min(4.0, x+y+2*math.Sqrt(x*y)))
+	return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y)))
 }
 }
 
 
 // Sub subtracts the other ChordAngle from this one and returns the resulting
 // Sub subtracts the other ChordAngle from this one and returns the resulting

+ 0 - 226
vendor/github.com/golang/geo/s1/chordangle_test.go

@@ -1,226 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s1
-
-import (
-	"math"
-	"testing"
-)
-
-func TestChordAngleBasics(t *testing.T) {
-	var zeroChord ChordAngle
-	tests := []struct {
-		a, b     ChordAngle
-		lessThan bool
-		equal    bool
-	}{
-		{NegativeChordAngle, NegativeChordAngle, false, true},
-		{NegativeChordAngle, zeroChord, true, false},
-		{NegativeChordAngle, StraightChordAngle, true, false},
-		{NegativeChordAngle, InfChordAngle(), true, false},
-
-		{zeroChord, zeroChord, false, true},
-		{zeroChord, StraightChordAngle, true, false},
-		{zeroChord, InfChordAngle(), true, false},
-
-		{StraightChordAngle, StraightChordAngle, false, true},
-		{StraightChordAngle, InfChordAngle(), true, false},
-
-		{InfChordAngle(), InfChordAngle(), false, true},
-		{InfChordAngle(), InfChordAngle(), false, true},
-	}
-
-	for _, test := range tests {
-		if got := test.a < test.b; got != test.lessThan {
-			t.Errorf("%v should be less than %v", test.a, test.b)
-		}
-		if got := test.a == test.b; got != test.equal {
-			t.Errorf("%v should be equal to %v", test.a, test.b)
-		}
-	}
-}
-
-func TestChordAngleIsFunctions(t *testing.T) {
-	var zeroChord ChordAngle
-	tests := []struct {
-		have       ChordAngle
-		isNegative bool
-		isZero     bool
-		isInf      bool
-		isSpecial  bool
-	}{
-		{zeroChord, false, true, false, false},
-		{NegativeChordAngle, true, false, false, true},
-		{zeroChord, false, true, false, false},
-		{StraightChordAngle, false, false, false, false},
-		{InfChordAngle(), false, false, true, true},
-	}
-
-	for _, test := range tests {
-		if got := test.have < 0; got != test.isNegative {
-			t.Errorf("%v.isNegative() = %t, want %t", test.have, got, test.isNegative)
-		}
-		if got := test.have == 0; got != test.isZero {
-			t.Errorf("%v.isZero() = %t, want %t", test.have, got, test.isZero)
-		}
-		if got := test.have.isInf(); got != test.isInf {
-			t.Errorf("%v.isInf() = %t, want %t", test.have, got, test.isInf)
-		}
-		if got := test.have.isSpecial(); got != test.isSpecial {
-			t.Errorf("%v.isSpecial() = %t, want %t", test.have, got, test.isSpecial)
-		}
-	}
-}
-
-func TestChordAngleFromAngle(t *testing.T) {
-	for _, angle := range []float64{0, 1, -1, math.Pi} {
-		if got := ChordAngleFromAngle(Angle(angle)).Angle().Radians(); got != angle {
-			t.Errorf("ChordAngleFromAngle(Angle(%v)) = %v, want %v", angle, got, angle)
-		}
-	}
-
-	if got := ChordAngleFromAngle(Angle(math.Pi)); got != StraightChordAngle {
-		t.Errorf("a ChordAngle from an Angle of π = %v, want %v", got, StraightChordAngle)
-	}
-
-	if InfAngle() != ChordAngleFromAngle(InfAngle()).Angle() {
-		t.Errorf("converting infinite Angle to ChordAngle should yield infinite Angle")
-	}
-}
-
-func TestChordAngleArithmetic(t *testing.T) {
-	var (
-		zero      ChordAngle
-		degree30  = ChordAngleFromAngle(30 * Degree)
-		degree60  = ChordAngleFromAngle(60 * Degree)
-		degree90  = ChordAngleFromAngle(90 * Degree)
-		degree120 = ChordAngleFromAngle(120 * Degree)
-		degree180 = StraightChordAngle
-	)
-
-	addTests := []struct {
-		a, b ChordAngle
-		want ChordAngle
-	}{
-		{zero, zero, zero},
-		{degree60, zero, degree60},
-		{zero, degree60, degree60},
-		{degree30, degree60, degree90},
-		{degree60, degree30, degree90},
-		{degree180, zero, degree180},
-		{degree60, degree30, degree90},
-		{degree90, degree90, degree180},
-		{degree120, degree90, degree180},
-		{degree120, degree120, degree180},
-		{degree30, degree180, degree180},
-		{degree180, degree180, degree180},
-	}
-
-	subTests := []struct {
-		a, b ChordAngle
-		want ChordAngle
-	}{
-		{zero, zero, zero},
-		{degree60, degree60, zero},
-		{degree180, degree180, zero},
-		{zero, degree60, zero},
-		{degree30, degree90, zero},
-		{degree90, degree30, degree60},
-		{degree90, degree60, degree30},
-		{degree180, zero, degree180},
-	}
-
-	for _, test := range addTests {
-		if got := float64(test.a.Add(test.b)); !float64Eq(got, float64(test.want)) {
-			t.Errorf("%v.Add(%v) = %0.24f, want %0.24f", test.a.Angle().Degrees(), test.b.Angle().Degrees(), got, test.want)
-		}
-	}
-	for _, test := range subTests {
-		if got := float64(test.a.Sub(test.b)); !float64Eq(got, float64(test.want)) {
-			t.Errorf("%v.Sub(%v) = %0.24f, want %0.24f", test.a.Angle().Degrees(), test.b.Angle().Degrees(), got, test.want)
-		}
-	}
-}
-
-func TestChordAngleTrigonometry(t *testing.T) {
-	// Because of the way the math works out, the 9/10th's case has slightly more
-	// difference than all the other computations, so this gets a more generous
-	// epsilon to deal with that.
-	const epsilon = 1e-14
-	const iters = 40
-	for iter := 0; iter <= iters; iter++ {
-		radians := math.Pi * float64(iter) / float64(iters)
-		angle := ChordAngleFromAngle(Angle(radians))
-		if !float64Near(math.Sin(radians), angle.Sin(), epsilon) {
-			t.Errorf("(%d/%d)*π. %v.Sin() = %v, want %v", iter, iters, angle, angle.Sin(), math.Sin(radians))
-		}
-		if !float64Near(math.Cos(radians), angle.Cos(), epsilon) {
-			t.Errorf("(%d/%d)*π. %v.Cos() = %v, want %v", iter, iters, angle, angle.Cos(), math.Cos(radians))
-		}
-		// Since tan(x) is unbounded near pi/4, we map the result back to an
-		// angle before comparing. The assertion is that the result is equal to
-		// the tangent of a nearby angle.
-		if !float64Near(math.Atan(math.Tan(radians)), math.Atan(angle.Tan()), 1e-14) {
-			t.Errorf("(%d/%d)*π. %v.Tan() = %v, want %v", iter, iters, angle, angle.Tan(), math.Tan(radians))
-		}
-	}
-
-	// Unlike Angle, ChordAngle can represent 90 and 180 degrees exactly.
-	angle90 := ChordAngleFromSquaredLength(2)
-	angle180 := ChordAngleFromSquaredLength(4)
-	if !float64Eq(1, angle90.Sin()) {
-		t.Errorf("%v.Sin() = %v, want 1", angle90, angle90.Sin())
-	}
-	if !float64Eq(0, angle90.Cos()) {
-		t.Errorf("%v.Cos() = %v, want 0", angle90, angle90.Cos())
-	}
-	if !math.IsInf(angle90.Tan(), 0) {
-		t.Errorf("%v.Tan() should be infinite, but was not.", angle90)
-	}
-	if !float64Eq(0, angle180.Sin()) {
-		t.Errorf("%v.Sin() = %v, want 0", angle180, angle180.Sin())
-	}
-	if !float64Eq(-1, angle180.Cos()) {
-		t.Errorf("%v.Cos() = %v, want -1", angle180, angle180.Cos())
-	}
-	if !float64Eq(0, angle180.Tan()) {
-		t.Errorf("%v.Tan() = %v, want 0", angle180, angle180.Tan())
-	}
-}
-
-func TestChordAngleExpanded(t *testing.T) {
-	var zero ChordAngle
-
-	tests := []struct {
-		have ChordAngle
-		add  float64
-		want ChordAngle
-	}{
-		{NegativeChordAngle, 5, NegativeChordAngle.Expanded(5)},
-		{InfChordAngle(), -5, InfChordAngle()},
-		{StraightChordAngle, 5, ChordAngleFromSquaredLength(5)},
-		{zero, -5, zero},
-		{ChordAngleFromSquaredLength(1.25), 0.25, ChordAngleFromSquaredLength(1.5)},
-		{ChordAngleFromSquaredLength(0.75), 0.25, ChordAngleFromSquaredLength(1)},
-	}
-
-	for _, test := range tests {
-		if got := test.have.Expanded(test.add); got != test.want {
-			t.Errorf("%v.Expanded(%v) = %v, want %v", test.have, test.add, got, test.want)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/s1/doc.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 /*
 /*
 Package s1 implements types and functions for working with geometry in S¹ (circular geometry).
 Package s1 implements types and functions for working with geometry in S¹ (circular geometry).

+ 13 - 15
vendor/github.com/golang/geo/s1/interval.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s1
 package s1
 
 

+ 0 - 457
vendor/github.com/golang/geo/s1/interval_test.go

@@ -1,457 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s1
-
-import (
-	"math"
-	"testing"
-)
-
-// Some standard intervals for use throughout the tests.
-var (
-	empty = EmptyInterval()
-	full  = FullInterval()
-	// Single-point intervals:
-	zero  = IntervalFromEndpoints(0, 0)
-	pi2   = IntervalFromEndpoints(math.Pi/2, math.Pi/2)
-	pi    = IntervalFromEndpoints(math.Pi, math.Pi)
-	mipi  = IntervalFromEndpoints(-math.Pi, -math.Pi) // same as pi after normalization
-	mipi2 = IntervalFromEndpoints(-math.Pi/2, -math.Pi/2)
-	// Single quadrants:
-	quad1 = IntervalFromEndpoints(0, math.Pi/2)
-	quad2 = IntervalFromEndpoints(math.Pi/2, -math.Pi) // equivalent to (pi/2, pi)
-	quad3 = IntervalFromEndpoints(math.Pi, -math.Pi/2)
-	quad4 = IntervalFromEndpoints(-math.Pi/2, 0)
-	// Quadrant pairs:
-	quad12 = IntervalFromEndpoints(0, -math.Pi)
-	quad23 = IntervalFromEndpoints(math.Pi/2, -math.Pi/2)
-	quad34 = IntervalFromEndpoints(-math.Pi, 0)
-	quad41 = IntervalFromEndpoints(-math.Pi/2, math.Pi/2)
-	// Quadrant triples:
-	quad123 = IntervalFromEndpoints(0, -math.Pi/2)
-	quad234 = IntervalFromEndpoints(math.Pi/2, 0)
-	quad341 = IntervalFromEndpoints(math.Pi, math.Pi/2)
-	quad412 = IntervalFromEndpoints(-math.Pi/2, -math.Pi)
-	// Small intervals around the midpoints between quadrants,
-	// such that the center of each interval is offset slightly CCW from the midpoint.
-	mid12 = IntervalFromEndpoints(math.Pi/2-0.01, math.Pi/2+0.02)
-	mid23 = IntervalFromEndpoints(math.Pi-0.01, -math.Pi+0.02)
-	mid34 = IntervalFromEndpoints(-math.Pi/2-0.01, -math.Pi/2+0.02)
-	mid41 = IntervalFromEndpoints(-0.01, 0.02)
-)
-
-func TestConstructors(t *testing.T) {
-	// Check that [-π,-π] is normalized to [π,π].
-	if mipi.Lo != math.Pi {
-		t.Errorf("mipi.Lo = %v, want π", mipi.Lo)
-	}
-	if mipi.Hi != math.Pi {
-		t.Errorf("mipi.Hi = %v, want π", mipi.Lo)
-	}
-
-	var i Interval
-	if !i.IsValid() {
-		t.Errorf("Zero value Interval is not valid")
-	}
-}
-
-func TestIntervalFromPointPair(t *testing.T) {
-	tests := []struct {
-		a, b float64
-		want Interval
-	}{
-		{-math.Pi, math.Pi, pi},
-		{math.Pi, -math.Pi, pi},
-		{mid34.Hi, mid34.Lo, mid34},
-		{mid23.Lo, mid23.Hi, mid23},
-	}
-	for _, test := range tests {
-		got := IntervalFromPointPair(test.a, test.b)
-		if got != test.want {
-			t.Errorf("IntervalFromPointPair(%f, %f) = %v, want %v", test.a, test.b, got, test.want)
-		}
-	}
-}
-
-func TestSimplePredicates(t *testing.T) {
-	if !zero.IsValid() || zero.IsEmpty() || zero.IsFull() {
-		t.Errorf("Zero interval is invalid or empty or full")
-	}
-	if !empty.IsValid() || !empty.IsEmpty() || empty.IsFull() {
-		t.Errorf("Empty interval is invalid or not empty or full")
-	}
-	if !empty.IsInverted() {
-		t.Errorf("Empty interval is not inverted")
-	}
-	if !full.IsValid() || full.IsEmpty() || !full.IsFull() {
-		t.Errorf("Full interval is invalid or empty or not full")
-	}
-	if !pi.IsValid() || pi.IsEmpty() || pi.IsInverted() {
-		t.Errorf("pi is invalid or empty or inverted")
-	}
-	if !mipi.IsValid() || mipi.IsEmpty() || mipi.IsInverted() {
-		t.Errorf("mipi is invalid or empty or inverted")
-	}
-}
-
-func TestAlmostFullOrEmpty(t *testing.T) {
-	// Test that rounding errors don't cause intervals that are almost empty or
-	// full to be considered empty or full.  The following value is the greatest
-	// representable value less than Pi.
-	almostPi := math.Pi - 2*dblEpsilon
-
-	i := Interval{-almostPi, math.Pi}
-	if i.IsFull() {
-		t.Errorf("%v.IsFull should not be true", i)
-	}
-
-	i = Interval{-math.Pi, almostPi}
-	if i.IsFull() {
-		t.Errorf("%v.IsFull should not be true", i)
-	}
-
-	i = Interval{math.Pi, -almostPi}
-	if i.IsEmpty() {
-		t.Errorf("%v.IsEmpty should not be true", i)
-	}
-
-	i = Interval{almostPi, -math.Pi}
-	if i.IsEmpty() {
-		t.Errorf("%v.IsEmpty should not be true", i)
-	}
-}
-
-func TestCenter(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		want     float64
-	}{
-		{quad12, math.Pi / 2},
-		{IntervalFromEndpoints(3.1, 2.9), 3 - math.Pi},
-		{IntervalFromEndpoints(-2.9, -3.1), math.Pi - 3},
-		{IntervalFromEndpoints(2.1, -2.1), math.Pi},
-		{pi, math.Pi},
-		{mipi, math.Pi},
-		// TODO(dsymonds): The C++ test for quad23 uses fabs. Why?
-		{quad23, math.Pi},
-		// TODO(dsymonds): The C++ test for quad123 uses EXPECT_DOUBLE_EQ. Why?
-		{quad123, 0.75 * math.Pi},
-	}
-	for _, test := range tests {
-		got := test.interval.Center()
-		// TODO(dsymonds): Some are inaccurate in the 16th decimal place. Track it down.
-		if math.Abs(got-test.want) > 1e-15 {
-			t.Errorf("%v.Center() = %v, want %v", test.interval, got, test.want)
-		}
-	}
-}
-
-func TestLength(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		want     float64
-	}{
-		{quad12, math.Pi},
-		{pi, 0},
-		{mipi, 0},
-		// TODO(dsymonds): The C++ test for quad123 uses DOUBLE_EQ. Why?
-		{quad123, 1.5 * math.Pi},
-		// TODO(dsymonds): The C++ test for quad23 uses fabs. Why?
-		{quad23, math.Pi},
-		{full, 2 * math.Pi},
-	}
-	for _, test := range tests {
-		if l := test.interval.Length(); l != test.want {
-			t.Errorf("%v.Length() got %v, want %v", test.interval, l, test.want)
-		}
-	}
-	if l := empty.Length(); l >= 0 {
-		t.Errorf("empty interval has non-negative length %v", l)
-	}
-}
-
-func TestContains(t *testing.T) {
-	tests := []struct {
-		interval  Interval
-		in, out   []float64 // points that should be inside/outside the interval
-		iIn, iOut []float64 // points that should be inside/outside the interior
-	}{
-		{empty, nil, []float64{0, math.Pi, -math.Pi}, nil, []float64{math.Pi, -math.Pi}},
-		{full, []float64{0, math.Pi, -math.Pi}, nil, []float64{math.Pi, -math.Pi}, nil},
-		{quad12, []float64{0, math.Pi, -math.Pi}, nil,
-			[]float64{math.Pi / 2}, []float64{0, math.Pi, -math.Pi}},
-		{quad23, []float64{math.Pi / 2, -math.Pi / 2, math.Pi, -math.Pi}, []float64{0},
-			[]float64{math.Pi, -math.Pi}, []float64{math.Pi / 2, -math.Pi / 2, 0}},
-		{pi, []float64{math.Pi, -math.Pi}, []float64{0}, nil, []float64{math.Pi, -math.Pi}},
-		{mipi, []float64{math.Pi, -math.Pi}, []float64{0}, nil, []float64{math.Pi, -math.Pi}},
-		{zero, []float64{0}, nil, nil, []float64{0}},
-	}
-	for _, test := range tests {
-		for _, p := range test.in {
-			if !test.interval.Contains(p) {
-				t.Errorf("%v should contain %v", test.interval, p)
-			}
-		}
-		for _, p := range test.out {
-			if test.interval.Contains(p) {
-				t.Errorf("%v should not contain %v", test.interval, p)
-			}
-		}
-		for _, p := range test.iIn {
-			if !test.interval.InteriorContains(p) {
-				t.Errorf("interior of %v should contain %v", test.interval, p)
-			}
-		}
-		for _, p := range test.iOut {
-			if test.interval.InteriorContains(p) {
-				t.Errorf("interior %v should not contain %v", test.interval, p)
-			}
-		}
-	}
-}
-
-func TestIntervalOperations(t *testing.T) {
-	quad12eps := IntervalFromEndpoints(quad12.Lo, mid23.Hi)
-	quad2hi := IntervalFromEndpoints(mid23.Lo, quad12.Hi)
-	quad412eps := IntervalFromEndpoints(mid34.Lo, quad12.Hi)
-	quadeps12 := IntervalFromEndpoints(mid41.Lo, quad12.Hi)
-	quad1lo := IntervalFromEndpoints(quad12.Lo, mid41.Hi)
-	quad2lo := IntervalFromEndpoints(quad23.Lo, mid12.Hi)
-	quad3hi := IntervalFromEndpoints(mid34.Lo, quad23.Hi)
-	quadeps23 := IntervalFromEndpoints(mid12.Lo, quad23.Hi)
-	quad23eps := IntervalFromEndpoints(quad23.Lo, mid34.Hi)
-	quadeps123 := IntervalFromEndpoints(mid41.Lo, quad23.Hi)
-
-	// This massive list of test cases is ported directly from the C++ test case.
-	tests := []struct {
-		x, y                               Interval
-		xContainsY, xInteriorContainsY     bool
-		xIntersectsY, xInteriorIntersectsY bool
-		wantUnion, wantIntersection        Interval
-	}{
-		// 0
-		{empty, empty, true, true, false, false, empty, empty},
-		{empty, full, false, false, false, false, full, empty},
-		{empty, zero, false, false, false, false, zero, empty},
-		{empty, pi, false, false, false, false, pi, empty},
-		{empty, mipi, false, false, false, false, mipi, empty},
-
-		// 5
-		{full, empty, true, true, false, false, full, empty},
-		{full, full, true, true, true, true, full, full},
-		{full, zero, true, true, true, true, full, zero},
-		{full, pi, true, true, true, true, full, pi},
-		{full, mipi, true, true, true, true, full, mipi},
-		{full, quad12, true, true, true, true, full, quad12},
-		{full, quad23, true, true, true, true, full, quad23},
-
-		// 12
-		{zero, empty, true, true, false, false, zero, empty},
-		{zero, full, false, false, true, false, full, zero},
-		{zero, zero, true, false, true, false, zero, zero},
-		{zero, pi, false, false, false, false, IntervalFromEndpoints(0, math.Pi), empty},
-		{zero, pi2, false, false, false, false, quad1, empty},
-		{zero, mipi, false, false, false, false, quad12, empty},
-		{zero, mipi2, false, false, false, false, quad4, empty},
-		{zero, quad12, false, false, true, false, quad12, zero},
-		{zero, quad23, false, false, false, false, quad123, empty},
-
-		// 21
-		{pi2, empty, true, true, false, false, pi2, empty},
-		{pi2, full, false, false, true, false, full, pi2},
-		{pi2, zero, false, false, false, false, quad1, empty},
-		{pi2, pi, false, false, false, false, IntervalFromEndpoints(math.Pi/2, math.Pi), empty},
-		{pi2, pi2, true, false, true, false, pi2, pi2},
-		{pi2, mipi, false, false, false, false, quad2, empty},
-		{pi2, mipi2, false, false, false, false, quad23, empty},
-		{pi2, quad12, false, false, true, false, quad12, pi2},
-		{pi2, quad23, false, false, true, false, quad23, pi2},
-
-		// 30
-		{pi, empty, true, true, false, false, pi, empty},
-		{pi, full, false, false, true, false, full, pi},
-		{pi, zero, false, false, false, false, IntervalFromEndpoints(math.Pi, 0), empty},
-		{pi, pi, true, false, true, false, pi, pi},
-		{pi, pi2, false, false, false, false, IntervalFromEndpoints(math.Pi/2, math.Pi), empty},
-		{pi, mipi, true, false, true, false, pi, pi},
-		{pi, mipi2, false, false, false, false, quad3, empty},
-		{pi, quad12, false, false, true, false, IntervalFromEndpoints(0, math.Pi), pi},
-		{pi, quad23, false, false, true, false, quad23, pi},
-
-		// 39
-		{mipi, empty, true, true, false, false, mipi, empty},
-		{mipi, full, false, false, true, false, full, mipi},
-		{mipi, zero, false, false, false, false, quad34, empty},
-		{mipi, pi, true, false, true, false, mipi, mipi},
-		{mipi, pi2, false, false, false, false, quad2, empty},
-		{mipi, mipi, true, false, true, false, mipi, mipi},
-		{mipi, mipi2, false, false, false, false, IntervalFromEndpoints(-math.Pi, -math.Pi/2), empty},
-		{mipi, quad12, false, false, true, false, quad12, mipi},
-		{mipi, quad23, false, false, true, false, quad23, mipi},
-
-		// 48
-		{quad12, empty, true, true, false, false, quad12, empty},
-		{quad12, full, false, false, true, true, full, quad12},
-		{quad12, zero, true, false, true, false, quad12, zero},
-		{quad12, pi, true, false, true, false, quad12, pi},
-		{quad12, mipi, true, false, true, false, quad12, mipi},
-		{quad12, quad12, true, false, true, true, quad12, quad12},
-		{quad12, quad23, false, false, true, true, quad123, quad2},
-		{quad12, quad34, false, false, true, false, full, quad12},
-
-		// 56
-		{quad23, empty, true, true, false, false, quad23, empty},
-		{quad23, full, false, false, true, true, full, quad23},
-		{quad23, zero, false, false, false, false, quad234, empty},
-		{quad23, pi, true, true, true, true, quad23, pi},
-		{quad23, mipi, true, true, true, true, quad23, mipi},
-		{quad23, quad12, false, false, true, true, quad123, quad2},
-		{quad23, quad23, true, false, true, true, quad23, quad23},
-		{quad23, quad34, false, false, true, true, quad234, IntervalFromEndpoints(-math.Pi, -math.Pi/2)},
-
-		// 64
-		{quad1, quad23, false, false, true, false, quad123, IntervalFromEndpoints(math.Pi/2, math.Pi/2)},
-		{quad2, quad3, false, false, true, false, quad23, mipi},
-		{quad3, quad2, false, false, true, false, quad23, pi},
-		{quad2, pi, true, false, true, false, quad2, pi},
-		{quad2, mipi, true, false, true, false, quad2, mipi},
-		{quad3, pi, true, false, true, false, quad3, pi},
-		{quad3, mipi, true, false, true, false, quad3, mipi},
-
-		// 71
-		{quad12, mid12, true, true, true, true, quad12, mid12},
-		{mid12, quad12, false, false, true, true, quad12, mid12},
-
-		// 73
-		{quad12, mid23, false, false, true, true, quad12eps, quad2hi},
-		{mid23, quad12, false, false, true, true, quad12eps, quad2hi},
-
-		// This test checks that the union of two disjoint intervals is the smallest
-		// interval that contains both of them.  Note that the center of "mid34"
-		// slightly CCW of -Pi/2 so that there is no ambiguity about the result.
-		// 75
-		{quad12, mid34, false, false, false, false, quad412eps, empty},
-		{mid34, quad12, false, false, false, false, quad412eps, empty},
-
-		// 77
-		{quad12, mid41, false, false, true, true, quadeps12, quad1lo},
-		{mid41, quad12, false, false, true, true, quadeps12, quad1lo},
-
-		// 79
-		{quad23, mid12, false, false, true, true, quadeps23, quad2lo},
-		{mid12, quad23, false, false, true, true, quadeps23, quad2lo},
-		{quad23, mid23, true, true, true, true, quad23, mid23},
-		{mid23, quad23, false, false, true, true, quad23, mid23},
-		{quad23, mid34, false, false, true, true, quad23eps, quad3hi},
-		{mid34, quad23, false, false, true, true, quad23eps, quad3hi},
-		{quad23, mid41, false, false, false, false, quadeps123, empty},
-		{mid41, quad23, false, false, false, false, quadeps123, empty},
-	}
-	should := func(b bool) string {
-		if b {
-			return "should"
-		}
-		return "should not"
-	}
-	for _, test := range tests {
-		if test.x.ContainsInterval(test.y) != test.xContainsY {
-			t.Errorf("%v %s contain %v", test.x, should(test.xContainsY), test.y)
-		}
-		if test.x.InteriorContainsInterval(test.y) != test.xInteriorContainsY {
-			t.Errorf("interior of %v %s contain %v", test.x, should(test.xInteriorContainsY), test.y)
-		}
-		if test.x.Intersects(test.y) != test.xIntersectsY {
-			t.Errorf("%v %s intersect %v", test.x, should(test.xIntersectsY), test.y)
-		}
-		if test.x.InteriorIntersects(test.y) != test.xInteriorIntersectsY {
-			t.Errorf("interior of %v %s intersect %v", test.x, should(test.xInteriorIntersectsY), test.y)
-		}
-		if u := test.x.Union(test.y); u != test.wantUnion {
-			t.Errorf("%v ∪ %v was %v, want %v", test.x, test.y, u, test.wantUnion)
-		}
-		if u := test.x.Intersection(test.y); u != test.wantIntersection {
-			t.Errorf("%v ∩ %v was %v, want %v", test.x, test.y, u, test.wantIntersection)
-		}
-	}
-}
-
-func TestAddPoint(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		points   []float64
-		want     Interval
-	}{
-		{empty, []float64{0}, zero},
-		{empty, []float64{math.Pi}, pi},
-		{empty, []float64{-math.Pi}, mipi},
-		{empty, []float64{math.Pi, -math.Pi}, pi},
-		{empty, []float64{-math.Pi, math.Pi}, mipi},
-		{empty, []float64{mid12.Lo, mid12.Hi}, mid12},
-		{empty, []float64{mid23.Lo, mid23.Hi}, mid23},
-
-		{quad1, []float64{-0.9 * math.Pi, -math.Pi / 2}, quad123},
-		{full, []float64{0}, full},
-		{full, []float64{math.Pi}, full},
-		{full, []float64{-math.Pi}, full},
-	}
-	for _, test := range tests {
-		got := test.interval
-		for _, point := range test.points {
-			got = got.AddPoint(point)
-		}
-		want := test.want
-		if math.Abs(got.Lo-want.Lo) > 1e-15 || math.Abs(got.Hi-want.Hi) > 1e-15 {
-			t.Errorf("%v.AddPoint(%v) = %v, want %v", test.interval, test.points, got, want)
-		}
-	}
-}
-
-func TestExpanded(t *testing.T) {
-	tests := []struct {
-		interval Interval
-		margin   float64
-		want     Interval
-	}{
-		{empty, 1, empty},
-		{full, 1, full},
-		{zero, 1, Interval{-1, 1}},
-		{mipi, 0.01, Interval{math.Pi - 0.01, -math.Pi + 0.01}},
-		{pi, 27, full},
-		{pi, math.Pi / 2, quad23},
-		{pi2, math.Pi / 2, quad12},
-		{mipi2, math.Pi / 2, quad34},
-
-		{empty, -1, empty},
-		{full, -1, full},
-		{quad123, -27, empty},
-		{quad234, -27, empty},
-		{quad123, -math.Pi / 2, quad2},
-		{quad341, -math.Pi / 2, quad4},
-		{quad412, -math.Pi / 2, quad1},
-	}
-	for _, test := range tests {
-		if got, want := test.interval.Expanded(test.margin), test.want; math.Abs(got.Lo-want.Lo) > 1e-15 || math.Abs(got.Hi-want.Hi) > 1e-15 {
-			t.Errorf("%v.Expanded(%v) = %v, want %v", test.interval, test.margin, got, want)
-		}
-	}
-}
-
-func TestIntervalString(t *testing.T) {
-	if s, exp := pi.String(), "[3.1415927, 3.1415927]"; s != exp {
-		t.Errorf("pi.String() = %q, want %q", s, exp)
-	}
-}

+ 53 - 0
vendor/github.com/golang/geo/s2/bits_go18.go

@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package s2
+
+// This file is for the bit manipulation code pre-Go 1.9.
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+	val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
+	shift := []uint64{1, 2, 4, 8, 16, 32}
+	var msbPos uint64
+	for i := 5; i >= 0; i-- {
+		if x&val[i] != 0 {
+			x >>= shift[i]
+			msbPos |= shift[i]
+		}
+	}
+	return int(msbPos)
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+const digitMask = uint64(1<<64 - 1)
+
+var deBruijn64Lookup = []byte{
+	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+//
+// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
+// which references (Knuth, volume 4, section 7.3.1).
+func findLSBSetNonZero64(x uint64) int {
+	return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
+}

+ 39 - 0
vendor/github.com/golang/geo/s2/bits_go19.go

@@ -0,0 +1,39 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package s2
+
+// This file is for the bit manipulation code post-Go 1.9.
+
+import "math/bits"
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function return zero.
+func findMSBSetNonZero64(x uint64) int {
+	if x == 0 {
+		return 0
+	}
+	return 63 - bits.LeadingZeros64(x)
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function return zero.
+func findLSBSetNonZero64(x uint64) int {
+	if x == 0 {
+		return 0
+	}
+	return bits.TrailingZeros64(x)
+}

+ 66 - 15
vendor/github.com/golang/geo/s2/cap.go

@@ -1,23 +1,22 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"io"
 	"math"
 	"math"
 
 
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r1"
@@ -416,6 +415,30 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
 	return false
 	return false
 }
 }
 
 
+// CellUnionBound computes a covering of the Cap. In general the covering
+// consists of at most 4 cells except for very large caps, which may need
+// up to 6 cells. The output is not sorted.
+func (c Cap) CellUnionBound() []CellID {
+	// TODO(roberts): The covering could be made quite a bit tighter by mapping
+	// the cap to a rectangle in (i,j)-space and finding a covering for that.
+
+	// Find the maximum level such that the cap contains at most one cell vertex
+	// and such that CellID.AppendVertexNeighbors() can be called.
+	level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1
+
+	// If level < 0, more than three face cells are required.
+	if level < 0 {
+		cellIDs := make([]CellID, 6)
+		for face := 0; face < 6; face++ {
+			cellIDs[face] = CellIDFromFace(face)
+		}
+		return cellIDs
+	}
+	// The covering consists of the 4 cells at the given level that share the
+	// cell vertex that is closest to the cap center.
+	return cellIDFromPoint(c.center).VertexNeighbors(level)
+}
+
 // Centroid returns the true centroid of the cap multiplied by its surface area
 // Centroid returns the true centroid of the cap multiplied by its surface area
 // The result lies on the ray from the origin through the cap's center, but it
 // The result lies on the ray from the origin through the cap's center, but it
 // is not unit length. Note that if you just want the "surface centroid", i.e.
 // is not unit length. Note that if you just want the "surface centroid", i.e.
@@ -466,3 +489,31 @@ func (c Cap) Union(other Cap) Cap {
 	resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
 	resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
 	return CapFromCenterAngle(resCenter, resRadius)
 	return CapFromCenterAngle(resCenter, resRadius)
 }
 }
+
+// Encode encodes the Cap.
+func (c Cap) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	c.encode(e)
+	return e.err
+}
+
+func (c Cap) encode(e *encoder) {
+	e.writeFloat64(c.center.X)
+	e.writeFloat64(c.center.Y)
+	e.writeFloat64(c.center.Z)
+	e.writeFloat64(float64(c.radius))
+}
+
+// Decode decodes the Cap.
+func (c *Cap) Decode(r io.Reader) error {
+	d := &decoder{r: asByteReader(r)}
+	c.decode(d)
+	return d.err
+}
+
+func (c *Cap) decode(d *decoder) {
+	c.center.X = d.readFloat64()
+	c.center.Y = d.readFloat64()
+	c.center.Z = d.readFloat64()
+	c.radius = s1.ChordAngle(d.readFloat64())
+}

+ 0 - 718
vendor/github.com/golang/geo/s2/cap_test.go

@@ -1,718 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	tinyRad = 1e-10
-)
-
-var (
-	emptyCap   = EmptyCap()
-	fullCap    = FullCap()
-	defaultCap = EmptyCap()
-
-	zeroHeight  = 0.0
-	fullHeight  = 2.0
-	emptyHeight = -1.0
-
-	xAxisPt = Point{r3.Vector{1, 0, 0}}
-	yAxisPt = Point{r3.Vector{0, 1, 0}}
-
-	xAxis = CapFromPoint(xAxisPt)
-	yAxis = CapFromPoint(yAxisPt)
-	xComp = xAxis.Complement()
-
-	hemi    = CapFromCenterHeight(PointFromCoords(1, 0, 1), 1)
-	concave = CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(80, 10)), s1.Angle(150.0)*s1.Degree)
-	tiny    = CapFromCenterAngle(PointFromCoords(1, 2, 3), s1.Angle(tinyRad))
-)
-
-func TestCapBasicEmptyFullValid(t *testing.T) {
-	tests := []struct {
-		got                Cap
-		empty, full, valid bool
-	}{
-		{Cap{}, false, false, false},
-
-		{emptyCap, true, false, true},
-		{emptyCap.Complement(), false, true, true},
-		{fullCap, false, true, true},
-		{fullCap.Complement(), true, false, true},
-		{defaultCap, true, false, true},
-
-		{xComp, false, true, true},
-		{xComp.Complement(), true, false, true},
-
-		{tiny, false, false, true},
-		{concave, false, false, true},
-		{hemi, false, false, true},
-		{tiny, false, false, true},
-	}
-	for _, test := range tests {
-		if e := test.got.IsEmpty(); e != test.empty {
-			t.Errorf("%v.IsEmpty() = %t; want %t", test.got, e, test.empty)
-		}
-		if f := test.got.IsFull(); f != test.full {
-			t.Errorf("%v.IsFull() = %t; want %t", test.got, f, test.full)
-		}
-		if v := test.got.IsValid(); v != test.valid {
-			t.Errorf("%v.IsValid() = %t; want %t", test.got, v, test.valid)
-		}
-	}
-}
-
-func TestCapCenterHeightRadius(t *testing.T) {
-	if xAxis == xAxis.Complement().Complement() {
-		t.Errorf("the complement of the complement is not the original. %v == %v",
-			xAxis, xAxis.Complement().Complement())
-	}
-
-	if fullCap.Height() != fullHeight {
-		t.Error("full Caps should be full height")
-	}
-	if fullCap.Radius().Degrees() != 180.0 {
-		t.Error("radius of x-axis cap should be 180 degrees")
-	}
-
-	if emptyCap.center != defaultCap.center {
-		t.Error("empty Caps should be have the same center as the default")
-	}
-	if emptyCap.Height() != defaultCap.Height() {
-		t.Error("empty Caps should be have the same height as the default")
-	}
-
-	if yAxis.Height() != zeroHeight {
-		t.Error("y-axis cap should not be empty height")
-	}
-
-	if xAxis.Height() != zeroHeight {
-		t.Error("x-axis cap should not be empty height")
-	}
-	if xAxis.Radius().Radians() != zeroHeight {
-		t.Errorf("radius of x-axis cap got %f want %f", xAxis.Radius().Radians(), emptyHeight)
-	}
-
-	hc := Point{hemi.center.Mul(-1.0)}
-	if hc != hemi.Complement().center {
-		t.Error("hemi center and its complement should have the same center")
-	}
-	if hemi.Height() != 1.0 {
-		t.Error("hemi cap should be 1.0 in height")
-	}
-}
-
-func TestCapContains(t *testing.T) {
-	tests := []struct {
-		c1, c2 Cap
-		want   bool
-	}{
-		{emptyCap, emptyCap, true},
-		{fullCap, emptyCap, true},
-		{fullCap, fullCap, true},
-		{emptyCap, xAxis, false},
-		{fullCap, xAxis, true},
-		{xAxis, fullCap, false},
-		{xAxis, xAxis, true},
-		{xAxis, emptyCap, true},
-		{hemi, tiny, true},
-		{hemi, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/4-epsilon)), true},
-		{hemi, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/4+epsilon)), false},
-		{concave, hemi, true},
-		{concave, CapFromCenterHeight(Point{concave.center.Mul(-1.0)}, 0.1), false},
-	}
-	for _, test := range tests {
-		if got := test.c1.Contains(test.c2); got != test.want {
-			t.Errorf("%v.Contains(%v) = %t; want %t", test.c1, test.c2, got, test.want)
-		}
-	}
-}
-
-func TestCapContainsPoint(t *testing.T) {
-	// We don't use the standard epsilon in this test due different compiler
-	// math optimizations that are permissible (FMA vs no FMA) that yield
-	// slightly different floating point results between gccgo and gc.
-	const epsilon = 1e-14
-	tangent := tiny.center.Cross(r3.Vector{3, 2, 1}).Normalize()
-	tests := []struct {
-		c    Cap
-		p    Point
-		want bool
-	}{
-		{xAxis, xAxisPt, true},
-		{xAxis, Point{r3.Vector{1, 1e-20, 0}}, false},
-		{yAxis, xAxis.center, false},
-		{xComp, xAxis.center, true},
-		{xComp.Complement(), xAxis.center, false},
-		{tiny, Point{tiny.center.Add(tangent.Mul(tinyRad * 0.99))}, true},
-		{tiny, Point{tiny.center.Add(tangent.Mul(tinyRad * 1.01))}, false},
-		{hemi, PointFromCoords(1, 0, -(1 - epsilon)), true},
-		{hemi, xAxisPt, true},
-		{hemi.Complement(), xAxisPt, false},
-		{concave, PointFromLatLng(LatLngFromDegrees(-70*(1-epsilon), 10)), true},
-		{concave, PointFromLatLng(LatLngFromDegrees(-70*(1+epsilon), 10)), false},
-		// This test case is the one where the floating point values end up
-		// different in the 15th place and beyond.
-		{concave, PointFromLatLng(LatLngFromDegrees(-50*(1-epsilon), -170)), true},
-		{concave, PointFromLatLng(LatLngFromDegrees(-50*(1+epsilon), -170)), false},
-	}
-	for _, test := range tests {
-		if got := test.c.ContainsPoint(test.p); got != test.want {
-			t.Errorf("%v.ContainsPoint(%v) = %t, want %t", test.c, test.p, got, test.want)
-		}
-	}
-}
-
-func TestCapInteriorIntersects(t *testing.T) {
-	tests := []struct {
-		c1, c2 Cap
-		want   bool
-	}{
-		{emptyCap, emptyCap, false},
-		{emptyCap, xAxis, false},
-		{fullCap, emptyCap, false},
-		{fullCap, fullCap, true},
-		{fullCap, xAxis, true},
-		{xAxis, fullCap, false},
-		{xAxis, xAxis, false},
-		{xAxis, emptyCap, false},
-		{concave, hemi.Complement(), true},
-	}
-	for _, test := range tests {
-		if got := test.c1.InteriorIntersects(test.c2); got != test.want {
-			t.Errorf("%v.InteriorIntersects(%v); got %t want %t", test.c1, test.c2, got, test.want)
-		}
-	}
-}
-
-func TestCapInteriorContains(t *testing.T) {
-	if hemi.InteriorContainsPoint(Point{r3.Vector{1, 0, -(1 + epsilon)}}) {
-		t.Errorf("hemi (%v) should not contain point just past half way(%v)", hemi,
-			Point{r3.Vector{1, 0, -(1 + epsilon)}})
-	}
-}
-
-func TestCapExpanded(t *testing.T) {
-	cap50 := CapFromCenterAngle(xAxisPt, 50.0*s1.Degree)
-	cap51 := CapFromCenterAngle(xAxisPt, 51.0*s1.Degree)
-
-	if !emptyCap.Expanded(s1.Angle(fullHeight)).IsEmpty() {
-		t.Error("Expanding empty cap should return an empty cap")
-	}
-	if !fullCap.Expanded(s1.Angle(fullHeight)).IsFull() {
-		t.Error("Expanding a full cap should return an full cap")
-	}
-
-	if !cap50.Expanded(0).ApproxEqual(cap50) {
-		t.Error("Expanding a cap by 0° should be equal to the original")
-	}
-	if !cap50.Expanded(1 * s1.Degree).ApproxEqual(cap51) {
-		t.Error("Expanding 50° by 1° should equal the 51° cap")
-	}
-
-	if cap50.Expanded(129.99 * s1.Degree).IsFull() {
-		t.Error("Expanding 50° by 129.99° should not give a full cap")
-	}
-	if !cap50.Expanded(130.01 * s1.Degree).IsFull() {
-		t.Error("Expanding 50° by 130.01° should give a full cap")
-	}
-}
-
-func TestCapRadiusToHeight(t *testing.T) {
-	tests := []struct {
-		got  s1.Angle
-		want float64
-	}{
-		// Above/below boundary checks.
-		{s1.Angle(-0.5), emptyHeight},
-		{s1.Angle(0), 0},
-		{s1.Angle(math.Pi), fullHeight},
-		{s1.Angle(2 * math.Pi), fullHeight},
-		// Degree tests.
-		{-7.0 * s1.Degree, emptyHeight},
-		{-0.0 * s1.Degree, 0},
-		{0.0 * s1.Degree, 0},
-		{12.0 * s1.Degree, 0.0218523992661943},
-		{30.0 * s1.Degree, 0.1339745962155613},
-		{45.0 * s1.Degree, 0.2928932188134525},
-		{90.0 * s1.Degree, 1.0},
-		{179.99 * s1.Degree, 1.9999999847691292},
-		{180.0 * s1.Degree, fullHeight},
-		{270.0 * s1.Degree, fullHeight},
-		// Radians tests.
-		{-1.0 * s1.Radian, emptyHeight},
-		{-0.0 * s1.Radian, 0},
-		{0.0 * s1.Radian, 0},
-		{1.0 * s1.Radian, 0.45969769413186},
-		{math.Pi / 2.0 * s1.Radian, 1.0},
-		{2.0 * s1.Radian, 1.4161468365471424},
-		{3.0 * s1.Radian, 1.9899924966004454},
-		{math.Pi * s1.Radian, fullHeight},
-		{4.0 * s1.Radian, fullHeight},
-	}
-	for _, test := range tests {
-		// float64Eq comes from s2latlng_test.go
-		if got := radiusToHeight(test.got); !float64Eq(got, test.want) {
-			t.Errorf("radiusToHeight(%v) = %v; want %v", test.got, got, test.want)
-		}
-	}
-}
-
-func TestCapRectBounds(t *testing.T) {
-	const epsilon = 1e-13
-	var tests = []struct {
-		desc     string
-		have     Cap
-		latLoDeg float64
-		latHiDeg float64
-		lngLoDeg float64
-		lngHiDeg float64
-		isFull   bool
-	}{
-		{
-			"Cap that includes South Pole.",
-			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(-45, 57)), s1.Degree*50),
-			-90, 5, -180, 180, true,
-		},
-		{
-			"Cap that is tangent to the North Pole.",
-			CapFromCenterAngle(PointFromCoords(1, 0, 1), s1.Radian*(math.Pi/4.0+1e-16)),
-			0, 90, -180, 180, true,
-		},
-		{
-			"Cap that at 45 degree center that goes from equator to the pole.",
-			CapFromCenterAngle(PointFromCoords(1, 0, 1), s1.Degree*(45+5e-15)),
-			0, 90, -180, 180, true,
-		},
-		{
-			"The eastern hemisphere.",
-			CapFromCenterAngle(Point{r3.Vector{0, 1, 0}}, s1.Radian*(math.Pi/2+2e-16)),
-			-90, 90, -180, 180, true,
-		},
-		{
-			"A cap centered on the equator.",
-			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(0, 50)), s1.Degree*20),
-			-20, 20, 30, 70, false,
-		},
-		{
-			"A cap centered on the North Pole.",
-			CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(90, 123)), s1.Degree*10),
-			80, 90, -180, 180, true,
-		},
-	}
-
-	for _, test := range tests {
-		r := test.have.RectBound()
-		if !float64Near(s1.Angle(r.Lat.Lo).Degrees(), test.latLoDeg, epsilon) {
-			t.Errorf("%s: %v.RectBound(), Lat.Lo not close enough, got %0.20f, want %0.20f",
-				test.desc, test.have, s1.Angle(r.Lat.Lo).Degrees(), test.latLoDeg)
-		}
-		if !float64Near(s1.Angle(r.Lat.Hi).Degrees(), test.latHiDeg, epsilon) {
-			t.Errorf("%s: %v.RectBound(), Lat.Hi not close enough, got %0.20f, want %0.20f",
-				test.desc, test.have, s1.Angle(r.Lat.Hi).Degrees(), test.latHiDeg)
-		}
-		if !float64Near(s1.Angle(r.Lng.Lo).Degrees(), test.lngLoDeg, epsilon) {
-			t.Errorf("%s: %v.RectBound(), Lng.Lo not close enough, got %0.20f, want %0.20f",
-				test.desc, test.have, s1.Angle(r.Lng.Lo).Degrees(), test.lngLoDeg)
-		}
-		if !float64Near(s1.Angle(r.Lng.Hi).Degrees(), test.lngHiDeg, epsilon) {
-			t.Errorf("%s: %v.RectBound(), Lng.Hi not close enough, got %0.20f, want %0.20f",
-				test.desc, test.have, s1.Angle(r.Lng.Hi).Degrees(), test.lngHiDeg)
-		}
-		if got := r.Lng.IsFull(); got != test.isFull {
-			t.Errorf("%s: RectBound(%v).isFull() = %t, want %t", test.desc, test.have, got, test.isFull)
-		}
-	}
-
-	// Empty and full caps.
-	if !EmptyCap().RectBound().IsEmpty() {
-		t.Errorf("RectBound() on EmptyCap should be empty.")
-	}
-
-	if !FullCap().RectBound().IsFull() {
-		t.Errorf("RectBound() on FullCap should be full.")
-	}
-}
-
-func TestCapAddPoint(t *testing.T) {
-	const epsilon = 1e-14
-	tests := []struct {
-		have Cap
-		p    Point
-		want Cap
-	}{
-		// Cap plus its center equals itself.
-		{xAxis, xAxisPt, xAxis},
-		{yAxis, yAxisPt, yAxis},
-
-		// Cap plus opposite point equals full.
-		{xAxis, Point{r3.Vector{-1, 0, 0}}, fullCap},
-		{yAxis, Point{r3.Vector{0, -1, 0}}, fullCap},
-
-		// Cap plus orthogonal axis equals half cap.
-		{xAxis, Point{r3.Vector{0, 0, 1}}, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
-		{xAxis, Point{r3.Vector{0, 0, -1}}, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
-
-		// The 45 degree angled hemisphere plus some points.
-		{
-			hemi,
-			PointFromCoords(0, 1, -1),
-			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
-				s1.Angle(120.0)*s1.Degree),
-		},
-		{
-			hemi,
-			PointFromCoords(0, -1, -1),
-			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
-				s1.Angle(120.0)*s1.Degree),
-		},
-		{
-			hemi,
-			PointFromCoords(-1, -1, -1),
-			CapFromCenterAngle(Point{r3.Vector{1, 0, 1}},
-				s1.Angle(math.Acos(-math.Sqrt(2.0/3.0)))),
-		},
-		{hemi, Point{r3.Vector{0, 1, 1}}, hemi},
-		{hemi, Point{r3.Vector{1, 0, 0}}, hemi},
-	}
-
-	for _, test := range tests {
-		got := test.have.AddPoint(test.p)
-		if !got.ApproxEqual(test.want) {
-			t.Errorf("%v.AddPoint(%v) = %v, want %v", test.have, test.p, got, test.want)
-		}
-
-		if !got.ContainsPoint(test.p) {
-			t.Errorf("%v.AddPoint(%v) did not contain added point", test.have, test.p)
-		}
-	}
-}
-
-func TestCapAddCap(t *testing.T) {
-	tests := []struct {
-		have  Cap
-		other Cap
-		want  Cap
-	}{
-		// Identity cases.
-		{emptyCap, emptyCap, emptyCap},
-		{fullCap, fullCap, fullCap},
-
-		// Anything plus empty equals itself.
-		{fullCap, emptyCap, fullCap},
-		{emptyCap, fullCap, fullCap},
-		{xAxis, emptyCap, xAxis},
-		{emptyCap, xAxis, xAxis},
-		{yAxis, emptyCap, yAxis},
-		{emptyCap, yAxis, yAxis},
-
-		// Two halves make a whole.
-		{xAxis, xComp, fullCap},
-
-		// Two zero-height orthogonal axis caps make a half-cap.
-		{xAxis, yAxis, CapFromCenterAngle(xAxisPt, s1.Angle(math.Pi/2.0))},
-	}
-
-	for _, test := range tests {
-		got := test.have.AddCap(test.other)
-		if !got.ApproxEqual(test.want) {
-			t.Errorf("%v.AddCap(%v) = %v, want %v", test.have, test.other, got, test.want)
-		}
-	}
-}
-
-func TestCapContainsCell(t *testing.T) {
-	faceRadius := math.Atan(math.Sqrt2)
-	for face := 0; face < 6; face++ {
-		// The cell consisting of the entire face.
-		rootCell := CellFromCellID(CellIDFromFace(face))
-
-		// A leaf cell at the midpoint of the v=1 edge.
-		edgeCell := CellFromPoint(Point{faceUVToXYZ(face, 0, 1-epsilon)})
-
-		// A leaf cell at the u=1, v=1 corner
-		cornerCell := CellFromPoint(Point{faceUVToXYZ(face, 1-epsilon, 1-epsilon)})
-
-		// Quick check for full and empty caps.
-		if !fullCap.ContainsCell(rootCell) {
-			t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", fullCap, rootCell, false, true)
-		}
-
-		// Check intersections with the bounding caps of the leaf cells that are adjacent to
-		// cornerCell along the Hilbert curve.  Because this corner is at (u=1,v=1), the curve
-		// stays locally within the same cube face.
-		first := cornerCell.id.Advance(-3)
-		last := cornerCell.id.Advance(4)
-		for id := first; id < last; id = id.Next() {
-			c := CellFromCellID(id).CapBound()
-			if got, want := c.ContainsCell(cornerCell), id == cornerCell.id; got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", c, cornerCell, got, want)
-			}
-		}
-
-		for capFace := 0; capFace < 6; capFace++ {
-			// A cap that barely contains all of capFace.
-			center := unitNorm(capFace)
-			covering := CapFromCenterAngle(center, s1.Angle(faceRadius+epsilon))
-			if got, want := covering.ContainsCell(rootCell), capFace == face; got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, rootCell, got, want)
-			}
-			if got, want := covering.ContainsCell(edgeCell), center.Vector.Dot(edgeCell.id.Point().Vector) > 0.1; got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
-			}
-			if got, want := covering.ContainsCell(edgeCell), covering.IntersectsCell(edgeCell); got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
-			}
-			if got, want := covering.ContainsCell(cornerCell), capFace == face; got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", covering, cornerCell, got, want)
-			}
-
-			// A cap that barely intersects the edges of capFace.
-			bulging := CapFromCenterAngle(center, s1.Angle(math.Pi/4+epsilon))
-			if bulging.ContainsCell(rootCell) {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, rootCell, true, false)
-			}
-			if got, want := bulging.ContainsCell(edgeCell), capFace == face; got != want {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, edgeCell, got, want)
-			}
-			if bulging.ContainsCell(cornerCell) {
-				t.Errorf("Cap(%v).ContainsCell(%v) = %t; want = %t", bulging, cornerCell, true, false)
-			}
-		}
-	}
-}
-
-func TestCapIntersectsCell(t *testing.T) {
-	faceRadius := math.Atan(math.Sqrt2)
-	for face := 0; face < 6; face++ {
-		// The cell consisting of the entire face.
-		rootCell := CellFromCellID(CellIDFromFace(face))
-
-		// A leaf cell at the midpoint of the v=1 edge.
-		edgeCell := CellFromPoint(Point{faceUVToXYZ(face, 0, 1-epsilon)})
-
-		// A leaf cell at the u=1, v=1 corner
-		cornerCell := CellFromPoint(Point{faceUVToXYZ(face, 1-epsilon, 1-epsilon)})
-
-		// Quick check for full and empty caps.
-		if emptyCap.IntersectsCell(rootCell) {
-			t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", emptyCap, rootCell, true, false)
-		}
-
-		// Check intersections with the bounding caps of the leaf cells that are adjacent to
-		// cornerCell along the Hilbert curve.  Because this corner is at (u=1,v=1), the curve
-		// stays locally within the same cube face.
-		first := cornerCell.id.Advance(-3)
-		last := cornerCell.id.Advance(4)
-		for id := first; id < last; id = id.Next() {
-			c := CellFromCellID(id).CapBound()
-			if got, want := c.IntersectsCell(cornerCell), id.immediateParent().Contains(cornerCell.id); got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", c, cornerCell, got, want)
-			}
-		}
-
-		antiFace := (face + 3) % 6
-		for capFace := 0; capFace < 6; capFace++ {
-			// A cap that barely contains all of capFace.
-			center := unitNorm(capFace)
-			covering := CapFromCenterAngle(center, s1.Angle(faceRadius+epsilon))
-			if got, want := covering.IntersectsCell(rootCell), capFace != antiFace; got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, rootCell, got, want)
-			}
-			if got, want := covering.IntersectsCell(edgeCell), covering.ContainsCell(edgeCell); got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, edgeCell, got, want)
-			}
-			if got, want := covering.IntersectsCell(cornerCell), center.Vector.Dot(cornerCell.id.Point().Vector) > 0; got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", covering, cornerCell, got, want)
-			}
-
-			// A cap that barely intersects the edges of capFace.
-			bulging := CapFromCenterAngle(center, s1.Angle(math.Pi/4+epsilon))
-			if got, want := bulging.IntersectsCell(rootCell), capFace != antiFace; got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, rootCell, got, want)
-			}
-			if got, want := bulging.IntersectsCell(edgeCell), center.Vector.Dot(edgeCell.id.Point().Vector) > 0.1; got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, edgeCell, got, want)
-			}
-			if bulging.IntersectsCell(cornerCell) {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", bulging, cornerCell, true, false)
-			}
-
-			// A singleton cap.
-			singleton := CapFromCenterAngle(center, 0)
-			if got, want := singleton.IntersectsCell(rootCell), capFace == face; got != want {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, rootCell, got, want)
-			}
-			if singleton.IntersectsCell(edgeCell) {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, edgeCell, true, false)
-			}
-			if singleton.IntersectsCell(cornerCell) {
-				t.Errorf("Cap(%v).IntersectsCell(%v) = %t; want = %t", singleton, cornerCell, true, false)
-			}
-		}
-	}
-}
-
-func TestCapCentroid(t *testing.T) {
-	// Empty and full caps.
-	if got, want := EmptyCap().Centroid(), (Point{}); !got.ApproxEqual(want) {
-		t.Errorf("Centroid of EmptyCap should be zero point, got %v", want)
-	}
-	if got, want := FullCap().Centroid().Norm(), 1e-15; got > want {
-		t.Errorf("Centroid of FullCap should have a Norm of 0, got %v", want)
-	}
-
-	// Random caps.
-	for i := 0; i < 100; i++ {
-		center := randomPoint()
-		height := randomUniformFloat64(0.0, 2.0)
-		c := CapFromCenterHeight(center, height)
-		got := c.Centroid()
-		want := center.Mul((1.0 - height/2.0) * c.Area())
-		if delta := got.Sub(want).Norm(); delta > 1e-15 {
-			t.Errorf("%v.Sub(%v).Norm() = %v, want %v", got, want, delta, 1e-15)
-		}
-	}
-}
-
-func TestCapUnion(t *testing.T) {
-	// Two caps which have the same center but one has a larger radius.
-	a := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.0, 10.0)), s1.Degree*0.2)
-	b := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.0, 10.0)), s1.Degree*0.3)
-	if !b.Contains(a) {
-		t.Errorf("%v.Contains(%v) = false, want true", b, a)
-	}
-	if got := b.ApproxEqual(a.Union(b)); !got {
-		t.Errorf("%v.ApproxEqual(%v) = %v, want true", b, a.Union(b), got)
-	}
-
-	// Two caps where one is the full cap.
-	if got := a.Union(FullCap()); !got.IsFull() {
-		t.Errorf("%v.Union(%v).IsFull() = %v, want true", a, got, got.IsFull())
-	}
-
-	// Two caps where one is the empty cap.
-	if got := a.Union(EmptyCap()); !a.ApproxEqual(got) {
-		t.Errorf("%v.Union(EmptyCap) = %v, want %v", a, got, a)
-	}
-
-	// Two caps which have different centers, one entirely encompasses the other.
-	c := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(51.0, 11.0)), s1.Degree*1.5)
-	if !c.Contains(a) {
-		t.Errorf("%v.Contains(%v) = false, want true", c, a)
-	}
-	if got := a.Union(c).center; !got.ApproxEqual(c.center) {
-		t.Errorf("%v.Union(%v).center = %v, want %v", a, c, got, c.center)
-	}
-	if got := a.Union(c); !float64Eq(float64(got.Radius()), float64(c.Radius())) {
-		t.Errorf("%v.Union(%v).Radius = %v, want %v", a, c, got.Radius(), c.Radius())
-	}
-
-	// Two entirely disjoint caps.
-	d := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(51.0, 11.0)), s1.Degree*0.1)
-	if d.Contains(a) {
-		t.Errorf("%v.Contains(%v) = true, want false", d, a)
-	}
-	if d.Intersects(a) {
-		t.Errorf("%v.Intersects(%v) = true, want false", d, a)
-	}
-
-	// Check union and reverse direction are the same.
-	aUnionD := a.Union(d)
-	if !aUnionD.ApproxEqual(d.Union(a)) {
-		t.Errorf("%v.Union(%v).ApproxEqual(%v.Union(%v)) = false, want true", a, d, d, a)
-	}
-	if got, want := LatLngFromPoint(aUnionD.center).Lat.Degrees(), 50.4588; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Center.Lat = %v, want %v", aUnionD, got, want)
-	}
-	if got, want := LatLngFromPoint(aUnionD.center).Lng.Degrees(), 10.4525; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Center.Lng = %v, want %v", aUnionD, got, want)
-	}
-	if got, want := aUnionD.Radius().Degrees(), 0.7425; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Radius = %v, want %v", aUnionD, got, want)
-	}
-
-	// Two partially overlapping caps.
-	e := CapFromCenterAngle(PointFromLatLng(LatLngFromDegrees(50.3, 10.3)), s1.Degree*0.2)
-	aUnionE := a.Union(e)
-	if e.Contains(a) {
-		t.Errorf("%v.Contains(%v) = false, want true", e, a)
-	}
-	if !e.Intersects(a) {
-		t.Errorf("%v.Intersects(%v) = false, want true", e, a)
-	}
-	if !aUnionE.ApproxEqual(e.Union(a)) {
-		t.Errorf("%v.Union(%v).ApproxEqual(%v.Union(%v)) = false, want true", a, e, e, a)
-	}
-	if got, want := LatLngFromPoint(aUnionE.center).Lat.Degrees(), 50.1500; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Center.Lat = %v, want %v", aUnionE, got, want)
-	}
-	if got, want := LatLngFromPoint(aUnionE.center).Lng.Degrees(), 10.1495; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Center.Lng = %v, want %v", aUnionE, got, want)
-	}
-	if got, want := aUnionE.Radius().Degrees(), 0.3781; !float64Near(got, want, 0.001) {
-		t.Errorf("%v.Radius = %v, want %v", aUnionE, got, want)
-	}
-
-	p1 := Point{r3.Vector{0, 0, 1}}
-	p2 := Point{r3.Vector{0, 1, 0}}
-	// Two very large caps, whose radius sums to in excess of 180 degrees, and
-	// whose centers are not antipodal.
-	f := CapFromCenterAngle(p1, s1.Degree*150)
-	g := CapFromCenterAngle(p2, s1.Degree*150)
-	if !f.Union(g).IsFull() {
-		t.Errorf("%v.Union(%v).IsFull() = false, want true", f, g)
-	}
-
-	// Two non-overlapping hemisphere caps with antipodal centers.
-	hemi := CapFromCenterHeight(p1, 1)
-	if !hemi.Union(hemi.Complement()).IsFull() {
-		t.Errorf("%v.Union(%v).Complement().IsFull() = false, want true", hemi, hemi.Complement())
-	}
-}
-
-func TestCapEqual(t *testing.T) {
-	tests := []struct {
-		a, b Cap
-		want bool
-	}{
-		{EmptyCap(), EmptyCap(), true},
-		{EmptyCap(), FullCap(), false},
-		{FullCap(), FullCap(), true},
-		{
-			CapFromCenterAngle(PointFromCoords(0, 0, 1), s1.Degree*150),
-			CapFromCenterAngle(PointFromCoords(0, 0, 1), s1.Degree*151),
-			false,
-		},
-		{xAxis, xAxis, true},
-		{xAxis, yAxis, false},
-		{xComp, xAxis.Complement(), true},
-	}
-
-	for _, test := range tests {
-		if got := test.a.Equal(test.b); got != test.want {
-			t.Errorf("%v.Equal(%v) = %t, want %t", test.a, test.b, got, test.want)
-		}
-	}
-}

+ 235 - 25
vendor/github.com/golang/geo/s2/cell.go

@@ -1,26 +1,26 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
+	"io"
 	"math"
 	"math"
 
 
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r2"
 	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
 	"github.com/golang/geo/s1"
 	"github.com/golang/geo/s1"
 )
 )
 
 
@@ -77,11 +77,16 @@ func (c Cell) IsLeaf() bool {
 	return c.level == maxLevel
 	return c.level == maxLevel
 }
 }
 
 
-// SizeIJ returns the CellID value for the cells level.
+// SizeIJ returns the edge length of this cell in (i,j)-space.
 func (c Cell) SizeIJ() int {
 func (c Cell) SizeIJ() int {
 	return sizeIJ(int(c.level))
 	return sizeIJ(int(c.level))
 }
 }
 
 
+// SizeST returns the edge length of this cell in (s,t)-space.
+func (c Cell) SizeST() float64 {
+	return c.id.sizeST(int(c.level))
+}
+
 // Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
 // Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
 // (lower left, lower right, upper right, upper left in the UV plane).
 // (lower left, lower right, upper right, upper left in the UV plane).
 func (c Cell) Vertex(k int) Point {
 func (c Cell) Vertex(k int) Point {
@@ -212,7 +217,13 @@ func (c Cell) ContainsCell(oc Cell) bool {
 	return c.id.Contains(oc.id)
 	return c.id.Contains(oc.id)
 }
 }
 
 
-// latitude returns the latitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
+// CellUnionBound computes a covering of the Cell.
+func (c Cell) CellUnionBound() []CellID {
+	return c.CapBound().CellUnionBound()
+}
+
+// latitude returns the latitude of the cell vertex in radians given by (i,j),
+// where i and j indicate the Hi (1) or Lo (0) corner.
 func (c Cell) latitude(i, j int) float64 {
 func (c Cell) latitude(i, j int) float64 {
 	var u, v float64
 	var u, v float64
 	switch {
 	switch {
@@ -229,12 +240,13 @@ func (c Cell) latitude(i, j int) float64 {
 		u = c.uv.X.Hi
 		u = c.uv.X.Hi
 		v = c.uv.Y.Hi
 		v = c.uv.Y.Hi
 	default:
 	default:
-		panic("i and/or j is out of bound")
+		panic("i and/or j is out of bounds")
 	}
 	}
 	return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
 	return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
 }
 }
 
 
-// longitude returns the longitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
+// longitude returns the longitude of the cell vertex in radians given by (i,j),
+// where i and j indicate the Hi (1) or Lo (0) corner.
 func (c Cell) longitude(i, j int) float64 {
 func (c Cell) longitude(i, j int) float64 {
 	var u, v float64
 	var u, v float64
 	switch {
 	switch {
@@ -251,7 +263,7 @@ func (c Cell) longitude(i, j int) float64 {
 		u = c.uv.X.Hi
 		u = c.uv.X.Hi
 		v = c.uv.Y.Hi
 		v = c.uv.Y.Hi
 	default:
 	default:
-		panic("i and/or j is out of bound")
+		panic("i and/or j is out of bounds")
 	}
 	}
 	return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
 	return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
 }
 }
@@ -378,8 +390,206 @@ func (c Cell) ContainsPoint(p Point) bool {
 	return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
 	return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
 }
 }
 
 
-// BUG(roberts): Differences from C++:
-// Subdivide
-// BoundUV
-// Distance/DistanceToEdge
-// VertexChordDistance
+// Encode encodes the Cell.
+func (c Cell) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	c.encode(e)
+	return e.err
+}
+
+func (c Cell) encode(e *encoder) {
+	c.id.encode(e)
+}
+
+// Decode decodes the Cell.
+func (c *Cell) Decode(r io.Reader) error {
+	d := &decoder{r: asByteReader(r)}
+	c.decode(d)
+	return d.err
+}
+
+func (c *Cell) decode(d *decoder) {
+	c.id.decode(d)
+	*c = CellFromCellID(c.id)
+}
+
+// vertexChordDist2 returns the squared chord distance from point P to the
+// given corner vertex specified by the Hi or Lo values of each.
+func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) float64 {
+	x := c.uv.X.Lo
+	y := c.uv.Y.Lo
+	if xHi {
+		x = c.uv.X.Hi
+	}
+	if yHi {
+		y = c.uv.Y.Hi
+	}
+
+	return p.Sub(PointFromCoords(x, y, 1).Vector).Norm2()
+}
+
+// uEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the lower or upper edge of the Cell) or to the endpoints.
+func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
+	u0 := c.uv.X.Lo
+	u1 := c.uv.X.Hi
+	v := c.uv.Y.Lo
+	if vHi {
+		v = c.uv.Y.Hi
+	}
+	// These are the normals to the planes that are perpendicular to the edge
+	// and pass through one of its two endpoints.
+	dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
+	dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
+	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// vEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the right or left edge of the Cell) or to the endpoints.
+func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
+	v0 := c.uv.Y.Lo
+	v1 := c.uv.Y.Hi
+	u := c.uv.X.Lo
+	if uHi {
+		u = c.uv.X.Hi
+	}
+	dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
+	dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
+	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// edgeDistance reports the distance from a Point P to a given Cell edge. The point
+// P is given by its dot product, and the uv edge by its normal in the
+// given coordinate value.
+func edgeDistance(ij, uv float64) s1.ChordAngle {
+	// Let P by the target point and let R be the closest point on the given
+	// edge AB.  The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
+	// where Q is the point P projected onto the plane through the great circle
+	// through AB.  We can compute the distance PQ^2 perpendicular to the plane
+	// from "dirIJ" (the dot product of the target point P with the edge
+	// normal) and the squared length the edge normal (1 + uv**2).
+	pq2 := (ij * ij) / (1 + uv*uv)
+
+	// We can compute the distance QR as (1 - OQ) where O is the sphere origin,
+	// and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
+	// (This calculation loses accuracy as angle POQ approaches Pi/2.)
+	qr := 1 - math.Sqrt(1-pq2)
+	return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
+}
+
+// distanceInternal reports the distance from the given point to the interior of
+// the cell if toInterior is true or to the boundary of the cell otherwise.
+func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
+	// All calculations are done in the (u,v,w) coordinates of this cell's face.
+	target := faceXYZtoUVW(int(c.face), targetXYZ)
+
+	// Compute dot products with all four upward or rightward-facing edge
+	// normals. dirIJ is the dot product for the edge corresponding to axis
+	// I, endpoint J. For example, dir01 is the right edge of the Cell
+	// (corresponding to the upper endpoint of the u-axis).
+	dir00 := target.X - target.Z*c.uv.X.Lo
+	dir01 := target.X - target.Z*c.uv.X.Hi
+	dir10 := target.Y - target.Z*c.uv.Y.Lo
+	dir11 := target.Y - target.Z*c.uv.Y.Hi
+	inside := true
+	if dir00 < 0 {
+		inside = false // Target is to the left of the cell
+		if c.vEdgeIsClosest(target, false) {
+			return edgeDistance(-dir00, c.uv.X.Lo)
+		}
+	}
+	if dir01 > 0 {
+		inside = false // Target is to the right of the cell
+		if c.vEdgeIsClosest(target, true) {
+			return edgeDistance(dir01, c.uv.X.Hi)
+		}
+	}
+	if dir10 < 0 {
+		inside = false // Target is below the cell
+		if c.uEdgeIsClosest(target, false) {
+			return edgeDistance(-dir10, c.uv.Y.Lo)
+		}
+	}
+	if dir11 > 0 {
+		inside = false // Target is above the cell
+		if c.uEdgeIsClosest(target, true) {
+			return edgeDistance(dir11, c.uv.Y.Hi)
+		}
+	}
+	if inside {
+		if toInterior {
+			return s1.ChordAngle(0)
+		}
+		// Although you might think of Cells as rectangles, they are actually
+		// arbitrary quadrilaterals after they are projected onto the sphere.
+		// Therefore the simplest approach is just to find the minimum distance to
+		// any of the four edges.
+		return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo),
+			edgeDistance(dir01, c.uv.X.Hi),
+			edgeDistance(-dir10, c.uv.Y.Lo),
+			edgeDistance(dir11, c.uv.Y.Hi))
+	}
+
+	// Otherwise, the closest point is one of the four cell vertices. Note that
+	// it is *not* trivial to narrow down the candidates based on the edge sign
+	// tests above, because (1) the edges don't meet at right angles and (2)
+	// there are points on the far side of the sphere that are both above *and*
+	// below the cell, etc.
+	chordDist2 := minFloat64(c.vertexChordDist2(target, false, false),
+		c.vertexChordDist2(target, true, false),
+		c.vertexChordDist2(target, false, true),
+		c.vertexChordDist2(target, true, true))
+	return s1.ChordAngleFromSquaredLength(chordDist2)
+}
+
+// Distance reports the distance from the cell to the given point. Returns zero if
+// the point is inside the cell.
+func (c Cell) Distance(target Point) s1.ChordAngle {
+	return c.distanceInternal(target, true)
+}
+
+// BoundaryDistance reports the distance from the cell boundary to the given point.
+func (c Cell) BoundaryDistance(target Point) s1.ChordAngle {
+	return c.distanceInternal(target, false)
+}
+
+// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns
+// zero if the edge intersects the cell interior.
+func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle {
+	// Possible optimizations:
+	//  - Currently the (cell vertex, edge endpoint) distances are computed
+	//    twice each, and the length of AB is computed 4 times.
+	//  - To fix this, refactor GetDistance(target) so that it skips calculating
+	//    the distance to each cell vertex. Instead, compute the cell vertices
+	//    and distances in this function, and add a low-level UpdateMinDistance
+	//    that allows the XA, XB, and AB distances to be passed in.
+	//  - It might also be more efficient to do all calculations in UVW-space,
+	//    since this would involve transforming 2 points rather than 4.
+
+	// First, check the minimum distance to the edge endpoints A and B.
+	// (This also detects whether either endpoint is inside the cell.)
+	minDist := minChordAngle(c.Distance(a), c.Distance(b))
+	if minDist == 0 {
+		return minDist
+	}
+
+	// Otherwise, check whether the edge crosses the cell boundary.
+	crosser := NewChainEdgeCrosser(a, b, c.Vertex(3))
+	for i := 0; i < 4; i++ {
+		if crosser.ChainCrossingSign(c.Vertex(i)) >= 0 {
+			return 0
+		}
+	}
+
+	// Finally, check whether the minimum distance occurs between a cell vertex
+	// and the interior of the edge AB. (Some of this work is redundant, since
+	// it also checks the distance to the endpoints A and B again.)
+	//
+	// Note that we don't need to check the distance from the interior of AB to
+	// the interior of a cell edge, because the only way that this distance can
+	// be minimal is if the two edges cross (already checked above).
+	for i := 0; i < 4; i++ {
+		minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist)
+	}
+	return minDist
+}

+ 0 - 522
vendor/github.com/golang/geo/s2/cell_test.go

@@ -1,522 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-	"unsafe"
-
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/s1"
-)
-
-// maxCellSize is the upper bounds on the number of bytes we want the Cell object to ever be.
-const maxCellSize = 48
-
-func TestCellObjectSize(t *testing.T) {
-	if sz := unsafe.Sizeof(Cell{}); sz > maxCellSize {
-		t.Errorf("Cell struct too big: %d bytes > %d bytes", sz, maxCellSize)
-	}
-}
-
-func TestCellFaces(t *testing.T) {
-	edgeCounts := make(map[Point]int)
-	vertexCounts := make(map[Point]int)
-
-	for face := 0; face < 6; face++ {
-		id := CellIDFromFace(face)
-		cell := CellFromCellID(id)
-
-		if cell.id != id {
-			t.Errorf("cell.id != id; %v != %v", cell.id, id)
-		}
-
-		if cell.face != int8(face) {
-			t.Errorf("cell.face != face: %v != %v", cell.face, face)
-		}
-
-		if cell.level != 0 {
-			t.Errorf("cell.level != 0: %v != 0", cell.level)
-		}
-
-		// Top-level faces have alternating orientations to get RHS coordinates.
-		if cell.orientation != int8(face&swapMask) {
-			t.Errorf("cell.orientation != orientation: %v != %v", cell.orientation, face&swapMask)
-		}
-
-		if cell.IsLeaf() {
-			t.Errorf("cell should not be a leaf: IsLeaf = %v", cell.IsLeaf())
-		}
-		for k := 0; k < 4; k++ {
-			edgeCounts[cell.Edge(k)]++
-			vertexCounts[cell.Vertex(k)]++
-			if d := cell.Vertex(k).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
-				t.Errorf("dot product of vertex and edge failed, got %v, want 0", d)
-			}
-			if d := cell.Vertex((k + 1) & 3).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
-				t.Errorf("dot product for edge and next vertex failed, got %v, want 0", d)
-			}
-			if d := cell.Vertex(k).Vector.Cross(cell.Vertex((k + 1) & 3).Vector).Normalize().Dot(cell.Edge(k).Vector); !float64Eq(1.0, d) {
-				t.Errorf("dot product of cross product for vertices failed, got %v, want 1.0", d)
-			}
-		}
-	}
-
-	// Check that edges have multiplicity 2 and vertices have multiplicity 3.
-	for k, v := range edgeCounts {
-		if v != 2 {
-			t.Errorf("edge %v counts wrong, got %d, want 2", k, v)
-		}
-	}
-	for k, v := range vertexCounts {
-		if v != 3 {
-			t.Errorf("vertex %v counts wrong, got %d, want 3", k, v)
-		}
-	}
-}
-
-func TestCellChildren(t *testing.T) {
-	testCellChildren(t, CellFromCellID(CellIDFromFace(0)))
-	testCellChildren(t, CellFromCellID(CellIDFromFace(3)))
-	testCellChildren(t, CellFromCellID(CellIDFromFace(5)))
-}
-
-func testCellChildren(t *testing.T, cell Cell) {
-	children, ok := cell.Children()
-	if cell.IsLeaf() && !ok {
-		return
-	}
-	if cell.IsLeaf() && ok {
-		t.Errorf("leaf cells should not be able to return children. cell %v", cell)
-	}
-
-	if !ok {
-		t.Errorf("unable to get Children for %v", cell)
-		return
-	}
-
-	childID := cell.id.ChildBegin()
-	for i, ci := range children {
-		// Check that the child geometry is consistent with its cell ID.
-		if childID != ci.id {
-			t.Errorf("%v.child[%d].id = %v, want %v", cell, i, ci.id, childID)
-		}
-
-		direct := CellFromCellID(childID)
-		if !ci.Center().ApproxEqual(childID.Point()) {
-			t.Errorf("%v.Center() = %v, want %v", ci, ci.Center(), childID.Point())
-		}
-		if ci.face != direct.face {
-			t.Errorf("%v.face = %v, want %v", ci, ci.face, direct.face)
-		}
-		if ci.level != direct.level {
-			t.Errorf("%v.level = %v, want %v", ci, ci.level, direct.level)
-		}
-		if ci.orientation != direct.orientation {
-			t.Errorf("%v.orientation = %v, want %v", ci, ci.orientation, direct.orientation)
-		}
-		if !ci.Center().ApproxEqual(direct.Center()) {
-			t.Errorf("%v.Center() = %v, want %v", ci, ci.Center(), direct.Center())
-		}
-
-		for k := 0; k < 4; k++ {
-			if !direct.Vertex(k).ApproxEqual(ci.Vertex(k)) {
-				t.Errorf("child %d %v.Vertex(%d) = %v, want %v", i, ci, k, ci.Vertex(k), direct.Vertex(k))
-			}
-			if direct.Edge(k) != ci.Edge(k) {
-				t.Errorf("child %d %v.Edge(%d) = %v, want %v", i, ci, k, ci.Edge(k), direct.Edge(k))
-			}
-		}
-
-		// Test ContainsCell() and IntersectsCell().
-		if !cell.ContainsCell(ci) {
-			t.Errorf("%v.ContainsCell(%v) = false, want true", cell, ci)
-		}
-		if !cell.IntersectsCell(ci) {
-			t.Errorf("%v.IntersectsCell(%v) = false, want true", cell, ci)
-		}
-		if ci.ContainsCell(cell) {
-			t.Errorf("%v.ContainsCell(%v) = true, want false", ci, cell)
-		}
-		if !cell.ContainsPoint(ci.Center()) {
-			t.Errorf("%v.ContainsPoint(%v) = false, want true", cell, ci.Center())
-		}
-		for j := 0; j < 4; j++ {
-			if !cell.ContainsPoint(ci.Vertex(j)) {
-				t.Errorf("%v.ContainsPoint(%v.Vertex(%d)) = false, want true", cell, ci, j)
-			}
-			if j != i {
-				if ci.ContainsPoint(children[j].Center()) {
-					t.Errorf("%v.ContainsPoint(%v[%d].Center()) = true, want false", ci, children, j)
-				}
-				if ci.IntersectsCell(children[j]) {
-					t.Errorf("%v.IntersectsCell(%v[%d]) = true, want false", ci, children, j)
-				}
-			}
-		}
-
-		// Test CapBound and RectBound.
-		parentCap := cell.CapBound()
-		parentRect := cell.RectBound()
-		if cell.ContainsPoint(PointFromCoords(0, 0, 1)) || cell.ContainsPoint(PointFromCoords(0, 0, -1)) {
-			if !parentRect.Lng.IsFull() {
-				t.Errorf("%v.Lng.IsFull() = false, want true", parentRect)
-			}
-		}
-		childCap := ci.CapBound()
-		childRect := ci.RectBound()
-		if !childCap.ContainsPoint(ci.Center()) {
-			t.Errorf("childCap %v.ContainsPoint(%v.Center()) = false, want true", childCap, ci)
-		}
-		if !childRect.ContainsPoint(ci.Center()) {
-			t.Errorf("childRect %v.ContainsPoint(%v.Center()) = false, want true", childRect, ci)
-		}
-		if !parentCap.ContainsPoint(ci.Center()) {
-			t.Errorf("parentCap %v.ContainsPoint(%v.Center()) = false, want true", parentCap, ci)
-		}
-		if !parentRect.ContainsPoint(ci.Center()) {
-			t.Errorf("parentRect %v.ContainsPoint(%v.Center()) = false, want true", parentRect, ci)
-		}
-		for j := 0; j < 4; j++ {
-			if !childCap.ContainsPoint(ci.Vertex(j)) {
-				t.Errorf("childCap %v.ContainsPoint(%v.Vertex(%d)) = false, want true", childCap, ci, j)
-			}
-			if !childRect.ContainsPoint(ci.Vertex(j)) {
-				t.Errorf("childRect %v.ContainsPoint(%v.Vertex(%d)) = false, want true", childRect, ci, j)
-			}
-			if !parentCap.ContainsPoint(ci.Vertex(j)) {
-				t.Errorf("parentCap %v.ContainsPoint(%v.Vertex(%d)) = false, want true", parentCap, ci, j)
-			}
-			if !parentRect.ContainsPoint(ci.Vertex(j)) {
-				t.Errorf("parentRect %v.ContainsPoint(%v.Vertex(%d)) = false, want true", parentRect, ci, j)
-			}
-			if j != i {
-				// The bounding caps and rectangles should be tight enough so that
-				// they exclude at least two vertices of each adjacent cell.
-				capCount := 0
-				rectCount := 0
-				for k := 0; k < 4; k++ {
-					if childCap.ContainsPoint(children[j].Vertex(k)) {
-						capCount++
-					}
-					if childRect.ContainsPoint(children[j].Vertex(k)) {
-						rectCount++
-					}
-				}
-				if capCount > 2 {
-					t.Errorf("childs bounding cap should contain no more than 2 points, got %d", capCount)
-				}
-				if childRect.Lat.Lo > -math.Pi/2 && childRect.Lat.Hi < math.Pi/2 {
-					// Bounding rectangles may be too large at the poles
-					// because the pole itself has an arbitrary longitude.
-					if rectCount > 2 {
-						t.Errorf("childs bounding rect should contain no more than 2 points, got %d", rectCount)
-					}
-				}
-			}
-		}
-
-		// Check all children for the first few levels, and then sample randomly.
-		// We also always subdivide the cells containing a few chosen points so
-		// that we have a better chance of sampling the minimum and maximum metric
-		// values.  kMaxSizeUV is the absolute value of the u- and v-coordinate
-		// where the cell size at a given level is maximal.
-		maxSizeUV := 0.3964182625366691
-		specialUV := []r2.Point{
-			r2.Point{dblEpsilon, dblEpsilon}, // Face center
-			r2.Point{dblEpsilon, 1},          // Edge midpoint
-			r2.Point{1, 1},                   // Face corner
-			r2.Point{maxSizeUV, maxSizeUV},   // Largest cell area
-			r2.Point{dblEpsilon, maxSizeUV},  // Longest edge/diagonal
-		}
-		forceSubdivide := false
-		for _, uv := range specialUV {
-			if ci.BoundUV().ContainsPoint(uv) {
-				forceSubdivide = true
-			}
-		}
-
-		// For a more in depth test, add an "|| oneIn(n)" to this condition
-		// to cause more children to be tested beyond the ones to level 5.
-		if forceSubdivide || cell.level < 5 {
-			testCellChildren(t, ci)
-		}
-
-		childID = childID.Next()
-	}
-}
-
-func TestCellAreas(t *testing.T) {
-	// relative error bounds for each type of area computation
-	var exactError = math.Log(1 + 1e-6)
-	var approxError = math.Log(1.03)
-	var avgError = math.Log(1 + 1e-15)
-
-	// Test 1. Check the area of a top level cell.
-	const level1Cell = CellID(0x1000000000000000)
-	const wantArea = 4 * math.Pi / 6
-	if area := CellFromCellID(level1Cell).ExactArea(); !float64Eq(area, wantArea) {
-		t.Fatalf("Area of a top-level cell %v = %f, want %f", level1Cell, area, wantArea)
-	}
-
-	// Test 2. Iterate inwards from this cell, checking at every level that
-	// the sum of the areas of the children is equal to the area of the parent.
-	childIndex := 1
-	for cell := CellID(0x1000000000000000); cell.Level() < 21; cell = cell.Children()[childIndex] {
-		var exactArea, approxArea, avgArea float64
-		for _, child := range cell.Children() {
-			exactArea += CellFromCellID(child).ExactArea()
-			approxArea += CellFromCellID(child).ApproxArea()
-			avgArea += CellFromCellID(child).AverageArea()
-		}
-
-		if area := CellFromCellID(cell).ExactArea(); !float64Eq(exactArea, area) {
-			t.Fatalf("Areas of children of a level-%d cell %v don't add up to parent's area. "+
-				"This cell: %e, sum of children: %e",
-				cell.Level(), cell, area, exactArea)
-		}
-
-		childIndex = (childIndex + 1) % 4
-
-		// For ExactArea(), the best relative error we can expect is about 1e-6
-		// because the precision of the unit vector coordinates is only about 1e-15
-		// and the edge length of a leaf cell is about 1e-9.
-		if logExact := math.Abs(math.Log(exactArea / CellFromCellID(cell).ExactArea())); logExact > exactError {
-			t.Errorf("The relative error of ExactArea for children of a level-%d "+
-				"cell %v should be less than %e, got %e. This cell: %e, children area: %e",
-				cell.Level(), cell, exactError, logExact,
-				CellFromCellID(cell).ExactArea(), exactArea)
-		}
-		// For ApproxArea(), the areas are accurate to within a few percent.
-		if logApprox := math.Abs(math.Log(approxArea / CellFromCellID(cell).ApproxArea())); logApprox > approxError {
-			t.Errorf("The relative error of ApproxArea for children of a level-%d "+
-				"cell %v should be within %e%%, got %e. This cell: %e, sum of children: %e",
-				cell.Level(), cell, approxError, logApprox,
-				CellFromCellID(cell).ExactArea(), exactArea)
-		}
-		// For AverageArea(), the areas themselves are not very accurate, but
-		// the average area of a parent is exactly 4 times the area of a child.
-		if logAvg := math.Abs(math.Log(avgArea / CellFromCellID(cell).AverageArea())); logAvg > avgError {
-			t.Errorf("The relative error of AverageArea for children of a level-%d "+
-				"cell %v should be less than %e, got %e. This cell: %e, sum of children: %e",
-				cell.Level(), cell, avgError, logAvg,
-				CellFromCellID(cell).AverageArea(), avgArea)
-		}
-	}
-}
-
-func TestCellIntersectsCell(t *testing.T) {
-	tests := []struct {
-		c    Cell
-		oc   Cell
-		want bool
-	}{
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
-			false,
-		},
-	}
-	for _, test := range tests {
-		if got := test.c.IntersectsCell(test.oc); got != test.want {
-			t.Errorf("Cell(%v).IntersectsCell(%v) = %t; want %t", test.c, test.oc, got, test.want)
-		}
-	}
-}
-
-func TestCellContainsCell(t *testing.T) {
-	tests := []struct {
-		c    Cell
-		oc   Cell
-		want bool
-	}{
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			false,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			false,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next()),
-			false,
-		},
-	}
-	for _, test := range tests {
-		if got := test.c.ContainsCell(test.oc); got != test.want {
-			t.Errorf("Cell(%v).ContainsCell(%v) = %t; want %t", test.c, test.oc, got, test.want)
-		}
-	}
-}
-
-func TestCellRectBound(t *testing.T) {
-	tests := []struct {
-		lat float64
-		lng float64
-	}{
-		{50, 50},
-		{-50, 50},
-		{50, -50},
-		{-50, -50},
-		{0, 0},
-		{0, 180},
-		{0, -179},
-	}
-	for _, test := range tests {
-		c := CellFromLatLng(LatLngFromDegrees(test.lat, test.lng))
-		rect := c.RectBound()
-		for i := 0; i < 4; i++ {
-			if !rect.ContainsLatLng(LatLngFromPoint(c.Vertex(i))) {
-				t.Errorf("%v should contain %v", rect, c.Vertex(i))
-			}
-		}
-	}
-}
-
-func TestCellRectBoundAroundPoleMinLat(t *testing.T) {
-	tests := []struct {
-		cellID       CellID
-		latLng       LatLng
-		wantContains bool
-	}{
-		{
-			cellID:       CellIDFromFacePosLevel(2, 0, 0),
-			latLng:       LatLngFromDegrees(3, 0),
-			wantContains: false,
-		},
-		{
-			cellID:       CellIDFromFacePosLevel(2, 0, 0),
-			latLng:       LatLngFromDegrees(50, 0),
-			wantContains: true,
-		},
-		{
-			cellID:       CellIDFromFacePosLevel(5, 0, 0),
-			latLng:       LatLngFromDegrees(-3, 0),
-			wantContains: false,
-		},
-		{
-			cellID:       CellIDFromFacePosLevel(5, 0, 0),
-			latLng:       LatLngFromDegrees(-50, 0),
-			wantContains: true,
-		},
-	}
-	for _, test := range tests {
-		if got := CellFromCellID(test.cellID).RectBound().ContainsLatLng(test.latLng); got != test.wantContains {
-			t.Errorf("CellID(%v) contains %v: got %t, want %t", test.cellID, test.latLng, got, test.wantContains)
-		}
-	}
-}
-
-func TestCellCapBound(t *testing.T) {
-	c := CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(20))
-	s2Cap := c.CapBound()
-	for i := 0; i < 4; i++ {
-		if !s2Cap.ContainsPoint(c.Vertex(i)) {
-			t.Errorf("%v should contain %v", s2Cap, c.Vertex(i))
-		}
-	}
-}
-
-func TestCellContainsPoint(t *testing.T) {
-	tests := []struct {
-		c    Cell
-		p    Point
-		want bool
-	}{
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)).Vertex(1),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2)).Vertex(1),
-			true,
-		},
-		{
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(5)),
-			CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(2).Next().ChildBeginAtLevel(5)).Vertex(1),
-			false,
-		},
-	}
-	for _, test := range tests {
-		if got := test.c.ContainsPoint(test.p); got != test.want {
-			t.Errorf("Cell(%v).ContainsPoint(%v) = %t; want %t", test.c, test.p, got, test.want)
-		}
-	}
-}
-
-func TestCellContainsPointConsistentWithS2CellIDFromPoint(t *testing.T) {
-	// Construct many points that are nearly on a Cell edge, and verify that
-	// CellFromCellID(cellIDFromPoint(p)).Contains(p) is always true.
-	for iter := 0; iter < 1000; iter++ {
-		cell := CellFromCellID(randomCellID())
-		i1 := randomUniformInt(4)
-		i2 := (i1 + 1) & 3
-		v1 := cell.Vertex(i1)
-		v2 := samplePointFromCap(CapFromCenterAngle(cell.Vertex(i2), s1.Angle(epsilon)))
-		p := Interpolate(randomFloat64(), v1, v2)
-		if !CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) {
-			t.Errorf("For p=%v, CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) was false", p)
-		}
-	}
-}
-
-func TestCellContainsPointContainsAmbiguousPoint(t *testing.T) {
-	// This tests a case where S2CellId returns the "wrong" cell for a point
-	// that is very close to the cell edge. (ConsistentWithS2CellIdFromPoint
-	// generates more examples like this.)
-	//
-	// The Point below should have x = 0, but conversion from LatLng to
-	// (x,y,z) gives x = ~6.1e-17. When xyz is converted to uv, this gives
-	// u = -6.1e-17. However when converting to st, which has a range of [0,1],
-	// the low precision bits of u are lost and we wind up with s = 0.5.
-	// cellIDFromPoint then chooses an arbitrary neighboring cell.
-	//
-	// This tests that Cell.ContainsPoint() expands the cell bounds sufficiently
-	// so that the returned cell is still considered to contain p.
-	p := PointFromLatLng(LatLngFromDegrees(-2, 90))
-	cell := CellFromCellID(cellIDFromPoint(p).Parent(1))
-	if !cell.ContainsPoint(p) {
-		t.Errorf("For p=%v, CellFromCellID(cellIDFromPoint(p)).ContainsPoint(p) was false", p)
-	}
-}

+ 103 - 71
vendor/github.com/golang/geo/s2/cellid.go

@@ -1,25 +1,25 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
+	"io"
 	"math"
 	"math"
+	"sort"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 
 
@@ -55,16 +55,41 @@ import (
 // discrete point, it is better to use Cells.
 // discrete point, it is better to use Cells.
 type CellID uint64
 type CellID uint64
 
 
+// SentinelCellID is an invalid cell ID guaranteed to be larger than any
+// valid cell ID. It is used primarily by ShapeIndex. The value is also used
+// by some S2 types when encoding data.
+// Note that the sentinel's RangeMin == RangeMax == itself.
+const SentinelCellID = CellID(^uint64(0))
+
+// sortCellIDs sorts the slice of CellIDs in place.
+func sortCellIDs(ci []CellID) {
+	sort.Sort(cellIDs(ci))
+}
+
+// cellIDs implements the Sort interface for slices of CellIDs.
+type cellIDs []CellID
+
+func (c cellIDs) Len() int           { return len(c) }
+func (c cellIDs) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
+
 // TODO(dsymonds): Some of these constants should probably be exported.
 // TODO(dsymonds): Some of these constants should probably be exported.
 const (
 const (
 	faceBits = 3
 	faceBits = 3
 	numFaces = 6
 	numFaces = 6
+
+	// This is the number of levels needed to specify a leaf cell.
 	maxLevel = 30
 	maxLevel = 30
+
 	// The extra position bit (61 rather than 60) lets us encode each cell as its
 	// The extra position bit (61 rather than 60) lets us encode each cell as its
 	// Hilbert curve position at the cell center (which is halfway along the
 	// Hilbert curve position at the cell center (which is halfway along the
 	// portion of the Hilbert curve that fills that cell).
 	// portion of the Hilbert curve that fills that cell).
-	posBits    = 2*maxLevel + 1
-	maxSize    = 1 << maxLevel
+	posBits = 2*maxLevel + 1
+
+	// The maximum index of a valid leaf cell plus one. The range of valid leaf
+	// cell indices is [0..maxSize-1].
+	maxSize = 1 << maxLevel
+
 	wrapOffset = uint64(numFaces) << posBits
 	wrapOffset = uint64(numFaces) << posBits
 )
 )
 
 
@@ -424,11 +449,33 @@ func (ci CellID) AdvanceWrap(steps int64) CellID {
 	return CellID(uint64(ci) + (uint64(steps) << shift))
 	return CellID(uint64(ci) + (uint64(steps) << shift))
 }
 }
 
 
+// Encode encodes the CellID.
+func (ci CellID) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	ci.encode(e)
+	return e.err
+}
+
+func (ci CellID) encode(e *encoder) {
+	e.writeUint64(uint64(ci))
+}
+
+// Decode encodes the CellID.
+func (ci *CellID) Decode(r io.Reader) error {
+	d := &decoder{r: asByteReader(r)}
+	ci.decode(d)
+	return d.err
+}
+
+func (ci *CellID) decode(d *decoder) {
+	*ci = CellID(d.readUint64())
+}
+
 // TODO: the methods below are not exported yet.  Settle on the entire API design
 // TODO: the methods below are not exported yet.  Settle on the entire API design
 // before doing this.  Do we want to mirror the C++ one as closely as possible?
 // before doing this.  Do we want to mirror the C++ one as closely as possible?
 
 
 // distanceFromBegin returns the number of steps that this cell is from the first
 // distanceFromBegin returns the number of steps that this cell is from the first
-// node in the S2 heirarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())).
+// node in the S2 hierarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())).
 // The return value is always non-negative.
 // The return value is always non-negative.
 func (ci CellID) distanceFromBegin() int64 {
 func (ci CellID) distanceFromBegin() int64 {
 	return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
 	return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
@@ -442,7 +489,7 @@ func (ci CellID) rawPoint() r3.Vector {
 }
 }
 
 
 // faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
 // faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
-func (ci CellID) faceSiTi() (face, si, ti int) {
+func (ci CellID) faceSiTi() (face int, si, ti uint32) {
 	face, i, j, _ := ci.faceIJOrientation()
 	face, i, j, _ := ci.faceIJOrientation()
 	delta := 0
 	delta := 0
 	if ci.IsLeaf() {
 	if ci.IsLeaf() {
@@ -452,7 +499,7 @@ func (ci CellID) faceSiTi() (face, si, ti int) {
 			delta = 2
 			delta = 2
 		}
 		}
 	}
 	}
-	return face, 2*i + delta, 2*j + delta
+	return face, uint32(2*i + delta), uint32(2*j + delta)
 }
 }
 
 
 // faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
 // faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
@@ -461,8 +508,16 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
 	orientation = f & swapMask
 	orientation = f & swapMask
 	nbits := maxLevel - 7*lookupBits // first iteration
 	nbits := maxLevel - 7*lookupBits // first iteration
 
 
+	// Each iteration maps 8 bits of the Hilbert curve position into
+	// 4 bits of "i" and "j". The lookup table transforms a key of the
+	// form "ppppppppoo" to a value of the form "iiiijjjjoo", where the
+	// letters [ijpo] represents bits of "i", "j", the Hilbert curve
+	// position, and the Hilbert curve orientation respectively.
+	//
+	// On the first iteration we need to be careful to clear out the bits
+	// representing the cube face.
 	for k := 7; k >= 0; k-- {
 	for k := 7; k >= 0; k-- {
-		orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint((2 * nbits))) - 1)) << 2
+		orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2
 		orientation = lookupIJ[orientation]
 		orientation = lookupIJ[orientation]
 		i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
 		i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
 		j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
 		j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
@@ -470,6 +525,13 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
 		nbits = lookupBits // following iterations
 		nbits = lookupBits // following iterations
 	}
 	}
 
 
+	// The position of a non-leaf cell at level "n" consists of a prefix of
+	// 2*n bits that identifies the cell, followed by a suffix of
+	// 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is
+	// just "1" and has no effect. Otherwise, it consists of "10", followed
+	// by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has
+	// no effect, while each occurrence of "00" has the effect of reversing
+	// the swapMask bit.
 	if ci.lsb()&0x1111111111111110 != 0 {
 	if ci.lsb()&0x1111111111111110 != 0 {
 		orientation ^= swapMask
 		orientation ^= swapMask
 	}
 	}
@@ -506,8 +568,8 @@ func cellIDFromFaceIJWrap(f, i, j int) CellID {
 	// Convert i and j to the coordinates of a leaf cell just beyond the
 	// Convert i and j to the coordinates of a leaf cell just beyond the
 	// boundary of this face.  This prevents 32-bit overflow in the case
 	// boundary of this face.  This prevents 32-bit overflow in the case
 	// of finding the neighbors of a face cell.
 	// of finding the neighbors of a face cell.
-	i = clamp(i, -1, maxSize)
-	j = clamp(j, -1, maxSize)
+	i = clampInt(i, -1, maxSize)
+	j = clampInt(j, -1, maxSize)
 
 
 	// We want to wrap these coordinates onto the appropriate adjacent face.
 	// We want to wrap these coordinates onto the appropriate adjacent face.
 	// The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
 	// The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
@@ -540,17 +602,6 @@ func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
 	return cellIDFromFaceIJWrap(f, i, j)
 	return cellIDFromFaceIJWrap(f, i, j)
 }
 }
 
 
-// clamp returns number closest to x within the range min..max.
-func clamp(x, min, max int) int {
-	if x < min {
-		return min
-	}
-	if x > max {
-		return max
-	}
-	return x
-}
-
 // ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
 // ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
 // s- or t-value contained by that cell. The argument must be in the range
 // s- or t-value contained by that cell. The argument must be in the range
 // [0..2**30], i.e. up to one position beyond the normal range of valid leaf
 // [0..2**30], i.e. up to one position beyond the normal range of valid leaf
@@ -561,7 +612,7 @@ func ijToSTMin(i int) float64 {
 
 
 // stToIJ converts value in ST coordinates to a value in IJ coordinates.
 // stToIJ converts value in ST coordinates to a value in IJ coordinates.
 func stToIJ(s float64) int {
 func stToIJ(s float64) int {
-	return clamp(int(math.Floor(maxSize*s)), 0, maxSize-1)
+	return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1)
 }
 }
 
 
 // cellIDFromPoint returns a leaf cell containing point p. Usually there is
 // cellIDFromPoint returns a leaf cell containing point p. Usually there is
@@ -606,6 +657,21 @@ const (
 	invertMask = 0x02
 	invertMask = 0x02
 )
 )
 
 
+// The following lookup tables are used to convert efficiently between an
+// (i,j) cell index and the corresponding position along the Hilbert curve.
+//
+// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the
+// orientation of the current cell into 8 bits representing the order in which
+// that subcell is visited by the Hilbert curve, plus 2 bits indicating the
+// new orientation of the Hilbert curve within that subcell. (Cell
+// orientations are represented as combination of swapMask and invertMask.)
+//
+// lookupIJ is an inverted table used for mapping in the opposite
+// direction.
+//
+// We also experimented with looking up 16 bits at a time (14 bits of position
+// plus 2 of orientation) but found that smaller lookup tables gave better
+// performance. (2KB fits easily in the primary cache.)
 var (
 var (
 	ijToPos = [4][4]int{
 	ijToPos = [4][4]int{
 		{0, 1, 3, 2}, // canonical order
 		{0, 1, 3, 2}, // canonical order
@@ -668,40 +734,6 @@ func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
 	return (60 - msbPos) >> 1, true
 	return (60 - msbPos) >> 1, true
 }
 }
 
 
-// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
-// significant set bit. Passing zero to this function has undefined behavior.
-func findMSBSetNonZero64(bits uint64) int {
-	val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
-	shift := []uint64{1, 2, 4, 8, 16, 32}
-	var msbPos uint64
-	for i := 5; i >= 0; i-- {
-		if bits&val[i] != 0 {
-			bits >>= shift[i]
-			msbPos |= shift[i]
-		}
-	}
-	return int(msbPos)
-}
-
-const deBruijn64 = 0x03f79d71b4ca8b09
-const digitMask = uint64(1<<64 - 1)
-
-var deBruijn64Lookup = []byte{
-	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
-	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
-	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
-	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
-}
-
-// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
-// significant set bit. Passing zero to this function has undefined behavior.
-//
-// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
-// which references (Knuth, volume 4, section 7.3.1).
-func findLSBSetNonZero64(bits uint64) int {
-	return int(deBruijn64Lookup[((bits&-bits)*(deBruijn64&digitMask))>>58])
-}
-
 // Advance advances or retreats the indicated number of steps along the
 // Advance advances or retreats the indicated number of steps along the
 // Hilbert curve at the current level, and returns the new position. The
 // Hilbert curve at the current level, and returns the new position. The
 // position is never advanced past End() or before Begin().
 // position is never advanced past End() or before Begin().
@@ -731,7 +763,7 @@ func (ci CellID) Advance(steps int64) CellID {
 // centerST return the center of the CellID in (s,t)-space.
 // centerST return the center of the CellID in (s,t)-space.
 func (ci CellID) centerST() r2.Point {
 func (ci CellID) centerST() r2.Point {
 	_, si, ti := ci.faceSiTi()
 	_, si, ti := ci.faceSiTi()
-	return r2.Point{siTiToST(uint64(si)), siTiToST(uint64(ti))}
+	return r2.Point{siTiToST(si), siTiToST(ti)}
 }
 }
 
 
 // sizeST returns the edge length of this CellID in (s,t)-space at the given level.
 // sizeST returns the edge length of this CellID in (s,t)-space at the given level.
@@ -751,7 +783,7 @@ func (ci CellID) boundST() r2.Rect {
 // the (u,v) rectangle covered by the cell.
 // the (u,v) rectangle covered by the cell.
 func (ci CellID) centerUV() r2.Point {
 func (ci CellID) centerUV() r2.Point {
 	_, si, ti := ci.faceSiTi()
 	_, si, ti := ci.faceSiTi()
-	return r2.Point{stToUV(siTiToST(uint64(si))), stToUV(siTiToST(uint64(ti)))}
+	return r2.Point{stToUV(siTiToST(si)), stToUV(siTiToST(ti))}
 }
 }
 
 
 // boundUV returns the bound of this CellID in (u,v)-space.
 // boundUV returns the bound of this CellID in (u,v)-space.

+ 0 - 1052
vendor/github.com/golang/geo/s2/cellid_test.go

@@ -1,1052 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"reflect"
-	"sort"
-	"testing"
-
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/s1"
-)
-
-func TestCellIDFromFace(t *testing.T) {
-	for face := 0; face < 6; face++ {
-		fpl := CellIDFromFacePosLevel(face, 0, 0)
-		f := CellIDFromFace(face)
-		if fpl != f {
-			t.Errorf("CellIDFromFacePosLevel(%d, 0, 0) != CellIDFromFace(%d), got %v wanted %v", face, face, f, fpl)
-		}
-	}
-}
-
-func TestCellIDParentChildRelationships(t *testing.T) {
-	ci := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
-
-	if !ci.IsValid() {
-		t.Errorf("CellID %v should be valid", ci)
-	}
-	if f := ci.Face(); f != 3 {
-		t.Errorf("ci.Face() is %v, want 3", f)
-	}
-	if p := ci.Pos(); p != 0x12345700 {
-		t.Errorf("ci.Pos() is 0x%X, want 0x12345700", p)
-	}
-	if l := ci.Level(); l != 26 { // 26 is maxLevel - 4
-		t.Errorf("ci.Level() is %v, want 26", l)
-	}
-	if ci.IsLeaf() {
-		t.Errorf("CellID %v should not be a leaf", ci)
-	}
-
-	if kid2 := ci.ChildBeginAtLevel(ci.Level() + 2).Pos(); kid2 != 0x12345610 {
-		t.Errorf("child two levels down is 0x%X, want 0x12345610", kid2)
-	}
-	if kid0 := ci.ChildBegin().Pos(); kid0 != 0x12345640 {
-		t.Errorf("first child is 0x%X, want 0x12345640", kid0)
-	}
-	if kid0 := ci.Children()[0].Pos(); kid0 != 0x12345640 {
-		t.Errorf("first child is 0x%X, want 0x12345640", kid0)
-	}
-	if parent := ci.immediateParent().Pos(); parent != 0x12345400 {
-		t.Errorf("ci.immediateParent().Pos() = 0x%X, want 0x12345400", parent)
-	}
-	if parent := ci.Parent(ci.Level() - 2).Pos(); parent != 0x12345000 {
-		t.Errorf("ci.Parent(l-2).Pos() = 0x%X, want 0x12345000", parent)
-	}
-
-	if uint64(ci.ChildBegin()) >= uint64(ci) {
-		t.Errorf("ci.ChildBegin() is 0x%X, want < 0x%X", ci.ChildBegin(), ci)
-	}
-	if uint64(ci.ChildEnd()) <= uint64(ci) {
-		t.Errorf("ci.ChildEnd() is 0x%X, want > 0x%X", ci.ChildEnd(), ci)
-	}
-	if ci.ChildEnd() != ci.ChildBegin().Next().Next().Next().Next() {
-		t.Errorf("ci.ChildEnd() is 0x%X, want 0x%X", ci.ChildEnd(), ci.ChildBegin().Next().Next().Next().Next())
-	}
-	if ci.RangeMin() != ci.ChildBeginAtLevel(maxLevel) {
-		t.Errorf("ci.RangeMin() is 0x%X, want 0x%X", ci.RangeMin(), ci.ChildBeginAtLevel(maxLevel))
-	}
-	if ci.RangeMax().Next() != ci.ChildEndAtLevel(maxLevel) {
-		t.Errorf("ci.RangeMax().Next() is 0x%X, want 0x%X", ci.RangeMax().Next(), ci.ChildEndAtLevel(maxLevel))
-	}
-}
-
-func TestCellIDContainment(t *testing.T) {
-	a := CellID(0x80855c0000000000) // Pittsburg
-	b := CellID(0x80855d0000000000) // child of a
-	c := CellID(0x80855dc000000000) // child of b
-	d := CellID(0x8085630000000000) // part of Pittsburg disjoint from a
-	tests := []struct {
-		x, y                                 CellID
-		xContainsY, yContainsX, xIntersectsY bool
-	}{
-		{a, a, true, true, true},
-		{a, b, true, false, true},
-		{a, c, true, false, true},
-		{a, d, false, false, false},
-		{b, b, true, true, true},
-		{b, c, true, false, true},
-		{b, d, false, false, false},
-		{c, c, true, true, true},
-		{c, d, false, false, false},
-		{d, d, true, true, true},
-	}
-	should := func(b bool) string {
-		if b {
-			return "should"
-		}
-		return "should not"
-	}
-	for _, test := range tests {
-		if test.x.Contains(test.y) != test.xContainsY {
-			t.Errorf("%v %s contain %v", test.x, should(test.xContainsY), test.y)
-		}
-		if test.x.Intersects(test.y) != test.xIntersectsY {
-			t.Errorf("%v %s intersect %v", test.x, should(test.xIntersectsY), test.y)
-		}
-		if test.y.Contains(test.x) != test.yContainsX {
-			t.Errorf("%v %s contain %v", test.y, should(test.yContainsX), test.x)
-		}
-	}
-
-	// TODO(dsymonds): Test Contains, Intersects better, such as with adjacent cells.
-}
-
-func TestCellIDString(t *testing.T) {
-	ci := CellID(0xbb04000000000000)
-	if s, exp := ci.String(), "5/31200"; s != exp {
-		t.Errorf("ci.String() = %q, want %q", s, exp)
-	}
-}
-
-func TestCellIDLatLng(t *testing.T) {
-	// You can generate these with the s2cellid2latlngtestcase C++ program in this directory.
-	tests := []struct {
-		id       CellID
-		lat, lng float64
-	}{
-		{0x47a1cbd595522b39, 49.703498679, 11.770681595},
-		{0x46525318b63be0f9, 55.685376759, 12.588490937},
-		{0x52b30b71698e729d, 45.486546517, -93.449700022},
-		{0x46ed8886cfadda85, 58.299984854, 23.049300056},
-		{0x3663f18a24cbe857, 34.364439040, 108.330699969},
-		{0x10a06c0a948cf5d, -30.694551352, -30.048758753},
-		{0x2b2bfd076787c5df, -25.285264027, 133.823116966},
-		{0xb09dff882a7809e1, -75.000000031, 0.000000133},
-		{0x94daa3d000000001, -24.694439215, -47.537363213},
-		{0x87a1000000000001, 38.899730392, -99.901813021},
-		{0x4fc76d5000000001, 81.647200334, -55.631712940},
-		{0x3b00955555555555, 10.050986518, 78.293170610},
-		{0x1dcc469991555555, -34.055420593, 18.551140038},
-		{0xb112966aaaaaaaab, -69.219262171, 49.670072392},
-	}
-	for _, test := range tests {
-		l1 := LatLngFromDegrees(test.lat, test.lng)
-		l2 := test.id.LatLng()
-		if l1.Distance(l2) > 1e-9*s1.Degree { // ~0.1mm on earth.
-			t.Errorf("LatLng() for CellID %x (%s) : got %s, want %s", uint64(test.id), test.id, l2, l1)
-		}
-		c1 := test.id
-		c2 := CellIDFromLatLng(l1)
-		if c1 != c2 {
-			t.Errorf("CellIDFromLatLng(%s) = %x (%s), want %s", l1, uint64(c2), c2, c1)
-		}
-	}
-}
-
-func TestCellIDEdgeNeighbors(t *testing.T) {
-	// Check the edge neighbors of face 1.
-	faces := []int{5, 3, 2, 0}
-	for i, nbr := range cellIDFromFaceIJ(1, 0, 0).Parent(0).EdgeNeighbors() {
-		if !nbr.isFace() {
-			t.Errorf("CellID(%d) is not a face", nbr)
-		}
-		if got, want := nbr.Face(), faces[i]; got != want {
-			t.Errorf("CellID(%d).Face() = %d, want %d", nbr, got, want)
-		}
-	}
-	// Check the edge neighbors of the corner cells at all levels.  This case is
-	// trickier because it requires projecting onto adjacent faces.
-	const maxIJ = maxSize - 1
-	for level := 1; level <= maxLevel; level++ {
-		id := cellIDFromFaceIJ(1, 0, 0).Parent(level)
-		// These neighbors were determined manually using the face and axis
-		// relationships.
-		levelSizeIJ := sizeIJ(level)
-		want := []CellID{
-			cellIDFromFaceIJ(5, maxIJ, maxIJ).Parent(level),
-			cellIDFromFaceIJ(1, levelSizeIJ, 0).Parent(level),
-			cellIDFromFaceIJ(1, 0, levelSizeIJ).Parent(level),
-			cellIDFromFaceIJ(0, maxIJ, 0).Parent(level),
-		}
-		for i, nbr := range id.EdgeNeighbors() {
-			if nbr != want[i] {
-				t.Errorf("CellID(%d).EdgeNeighbors()[%d] = %v, want %v", id, i, nbr, want[i])
-			}
-		}
-	}
-}
-
-type byCellID []CellID
-
-func (v byCellID) Len() int           { return len(v) }
-func (v byCellID) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
-func (v byCellID) Less(i, j int) bool { return uint64(v[i]) < uint64(v[j]) }
-
-func TestCellIDVertexNeighbors(t *testing.T) {
-	// Check the vertex neighbors of the center of face 2 at level 5.
-	id := cellIDFromPoint(PointFromCoords(0, 0, 1))
-	neighbors := id.VertexNeighbors(5)
-	sort.Sort(byCellID(neighbors))
-
-	for n, nbr := range neighbors {
-		i, j := 1<<29, 1<<29
-		if n < 2 {
-			i--
-		}
-		if n == 0 || n == 3 {
-			j--
-		}
-		want := cellIDFromFaceIJ(2, i, j).Parent(5)
-
-		if nbr != want {
-			t.Errorf("CellID(%s).VertexNeighbors()[%d] = %v, want %v", id, n, nbr, want)
-		}
-	}
-
-	// Check the vertex neighbors of the corner of faces 0, 4, and 5.
-	id = CellIDFromFacePosLevel(0, 0, maxLevel)
-	neighbors = id.VertexNeighbors(0)
-	sort.Sort(byCellID(neighbors))
-	if len(neighbors) != 3 {
-		t.Errorf("len(CellID(%d).VertexNeighbors()) = %d, wanted %d", id, len(neighbors), 3)
-	}
-	if neighbors[0] != CellIDFromFace(0) {
-		t.Errorf("CellID(%d).VertexNeighbors()[0] = %d, wanted %d", id, neighbors[0], CellIDFromFace(0))
-	}
-	if neighbors[1] != CellIDFromFace(4) {
-		t.Errorf("CellID(%d).VertexNeighbors()[1] = %d, wanted %d", id, neighbors[1], CellIDFromFace(4))
-	}
-}
-
-// dedupCellIDs returns the unique slice of CellIDs from the sorted input list.
-func dedupCellIDs(ids []CellID) []CellID {
-	var out []CellID
-	var prev CellID
-	for _, id := range ids {
-		if id != prev {
-			out = append(out, id)
-		}
-		prev = id
-	}
-
-	return out
-}
-
-func TestCellIDAllNeighbors(t *testing.T) {
-	// Check that AllNeighbors produces results that are consistent
-	// with VertexNeighbors for a bunch of random cells.
-	for i := 0; i < 1000; i++ {
-		id := randomCellID()
-		if id.IsLeaf() {
-			id = id.immediateParent()
-		}
-
-		// testAllNeighbors computes approximately 2**(2*(diff+1)) cell ids,
-		// so it's not reasonable to use large values of diff.
-		maxDiff := min(6, maxLevel-id.Level()-1)
-		level := id.Level() + randomUniformInt(maxDiff)
-
-		// We compute AllNeighbors, and then add in all the children of id
-		// at the given level. We then compare this against the result of finding
-		// all the vertex neighbors of all the vertices of children of id at the
-		// given level. These should give the same result.
-		var want []CellID
-		all := id.AllNeighbors(level)
-		end := id.ChildEndAtLevel(level + 1)
-		for c := id.ChildBeginAtLevel(level + 1); c != end; c = c.Next() {
-			all = append(all, c.immediateParent())
-			want = append(want, c.VertexNeighbors(level)...)
-		}
-
-		// Sort the results and eliminate duplicates.
-		sort.Sort(byCellID(all))
-		sort.Sort(byCellID(want))
-		all = dedupCellIDs(all)
-		want = dedupCellIDs(want)
-
-		if !reflect.DeepEqual(all, want) {
-			t.Errorf("%v.AllNeighbors(%d) = %v, want %v", id, level, all, want)
-		}
-	}
-}
-
-func TestCellIDTokensNominal(t *testing.T) {
-	tests := []struct {
-		token string
-		id    CellID
-	}{
-		{"1", 0x1000000000000000},
-		{"3", 0x3000000000000000},
-		{"14", 0x1400000000000000},
-		{"41", 0x4100000000000000},
-		{"094", 0x0940000000000000},
-		{"537", 0x5370000000000000},
-		{"3fec", 0x3fec000000000000},
-		{"72f3", 0x72f3000000000000},
-		{"52b8c", 0x52b8c00000000000},
-		{"990ed", 0x990ed00000000000},
-		{"4476dc", 0x4476dc0000000000},
-		{"2a724f", 0x2a724f0000000000},
-		{"7d4afc4", 0x7d4afc4000000000},
-		{"b675785", 0xb675785000000000},
-		{"40cd6124", 0x40cd612400000000},
-		{"3ba32f81", 0x3ba32f8100000000},
-		{"08f569b5c", 0x08f569b5c0000000},
-		{"385327157", 0x3853271570000000},
-		{"166c4d1954", 0x166c4d1954000000},
-		{"96f48d8c39", 0x96f48d8c39000000},
-		{"0bca3c7f74c", 0x0bca3c7f74c00000},
-		{"1ae3619d12f", 0x1ae3619d12f00000},
-		{"07a77802a3fc", 0x07a77802a3fc0000},
-		{"4e7887ec1801", 0x4e7887ec18010000},
-		{"4adad7ae74124", 0x4adad7ae74124000},
-		{"90aba04afe0c5", 0x90aba04afe0c5000},
-		{"8ffc3f02af305c", 0x8ffc3f02af305c00},
-		{"6fa47550938183", 0x6fa4755093818300},
-		{"aa80a565df5e7fc", 0xaa80a565df5e7fc0},
-		{"01614b5e968e121", 0x01614b5e968e1210},
-		{"aa05238e7bd3ee7c", 0xaa05238e7bd3ee7c},
-		{"48a23db9c2963e5b", 0x48a23db9c2963e5b},
-	}
-	for _, test := range tests {
-		ci := CellIDFromToken(test.token)
-		if ci != test.id {
-			t.Errorf("CellIDFromToken(%q) = %x, want %x", test.token, uint64(ci), uint64(test.id))
-		}
-
-		token := ci.ToToken()
-		if token != test.token {
-			t.Errorf("ci.ToToken = %q, want %q", token, test.token)
-		}
-	}
-}
-
-func TestCellIDFromTokensErrorCases(t *testing.T) {
-	noneToken := CellID(0).ToToken()
-	if noneToken != "X" {
-		t.Errorf("CellID(0).Token() = %q, want X", noneToken)
-	}
-	noneID := CellIDFromToken(noneToken)
-	if noneID != CellID(0) {
-		t.Errorf("CellIDFromToken(%q) = %x, want 0", noneToken, uint64(noneID))
-	}
-	tests := []string{
-		"876b e99",
-		"876bee99\n",
-		"876[ee99",
-		" 876bee99",
-	}
-	for _, test := range tests {
-		ci := CellIDFromToken(test)
-		if uint64(ci) != 0 {
-			t.Errorf("CellIDFromToken(%q) = %x, want 0", test, uint64(ci))
-		}
-	}
-}
-
-func TestIJLevelToBoundUV(t *testing.T) {
-	maxIJ := 1<<maxLevel - 1
-
-	tests := []struct {
-		i     int
-		j     int
-		level int
-		want  r2.Rect
-	}{
-		// The i/j space is [0, 2^30 - 1) which maps to [-1, 1] for the
-		// x/y axes of the face surface. Results are scaled by the size of a cell
-		// at the given level. At level 0, everything is one cell of the full size
-		// of the space.  At maxLevel, the bounding rect is almost floating point
-		// noise.
-
-		// What should be out of bounds values, but passes the C++ code as well.
-		{
-			-1, -1, 0,
-			r2.RectFromPoints(r2.Point{-5, -5}, r2.Point{-1, -1}),
-		},
-		{
-			-1 * maxIJ, -1 * maxIJ, 0,
-			r2.RectFromPoints(r2.Point{-5, -5}, r2.Point{-1, -1}),
-		},
-		{
-			-1, -1, maxLevel,
-			r2.RectFromPoints(r2.Point{-1.0000000024835267, -1.0000000024835267},
-				r2.Point{-1, -1}),
-		},
-		{
-			0, 0, maxLevel + 1,
-			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{-1, -1}),
-		},
-
-		// Minimum i,j at different levels
-		{
-			0, 0, 0,
-			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
-		},
-		{
-			0, 0, maxLevel / 2,
-			r2.RectFromPoints(r2.Point{-1, -1},
-				r2.Point{-0.999918621033430099, -0.999918621033430099}),
-		},
-		{
-			0, 0, maxLevel,
-			r2.RectFromPoints(r2.Point{-1, -1},
-				r2.Point{-0.999999997516473060, -0.999999997516473060}),
-		},
-
-		// Just a hair off the outer bounds at different levels.
-		{
-			1, 1, 0,
-			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
-		},
-		{
-			1, 1, maxLevel / 2,
-			r2.RectFromPoints(r2.Point{-1, -1},
-				r2.Point{-0.999918621033430099, -0.999918621033430099}),
-		},
-		{
-			1, 1, maxLevel,
-			r2.RectFromPoints(r2.Point{-0.9999999975164731, -0.9999999975164731},
-				r2.Point{-0.9999999950329462, -0.9999999950329462}),
-		},
-
-		// Center point of the i,j space at different levels.
-		{
-			maxIJ / 2, maxIJ / 2, 0,
-			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1})},
-		{
-			maxIJ / 2, maxIJ / 2, maxLevel / 2,
-			r2.RectFromPoints(r2.Point{-0.000040691345930099, -0.000040691345930099},
-				r2.Point{0, 0})},
-		{
-			maxIJ / 2, maxIJ / 2, maxLevel,
-			r2.RectFromPoints(r2.Point{-0.000000001241763433, -0.000000001241763433},
-				r2.Point{0, 0})},
-
-		// Maximum i, j at different levels.
-		{
-			maxIJ, maxIJ, 0,
-			r2.RectFromPoints(r2.Point{-1, -1}, r2.Point{1, 1}),
-		},
-		{
-			maxIJ, maxIJ, maxLevel / 2,
-			r2.RectFromPoints(r2.Point{0.999918621033430099, 0.999918621033430099},
-				r2.Point{1, 1}),
-		},
-		{
-			maxIJ, maxIJ, maxLevel,
-			r2.RectFromPoints(r2.Point{0.999999997516473060, 0.999999997516473060},
-				r2.Point{1, 1}),
-		},
-	}
-
-	for _, test := range tests {
-		uv := ijLevelToBoundUV(test.i, test.j, test.level)
-		if !float64Eq(uv.X.Lo, test.want.X.Lo) ||
-			!float64Eq(uv.X.Hi, test.want.X.Hi) ||
-			!float64Eq(uv.Y.Lo, test.want.Y.Lo) ||
-			!float64Eq(uv.Y.Hi, test.want.Y.Hi) {
-			t.Errorf("ijLevelToBoundUV(%d, %d, %d), got %v, want %v",
-				test.i, test.j, test.level, uv, test.want)
-		}
-	}
-}
-
-func TestCellIDCommonAncestorLevel(t *testing.T) {
-	tests := []struct {
-		ci     CellID
-		other  CellID
-		want   int
-		wantOk bool
-	}{
-		// Identical cell IDs.
-		{
-			CellIDFromFace(0),
-			CellIDFromFace(0),
-			0,
-			true,
-		},
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(30),
-			CellIDFromFace(0).ChildBeginAtLevel(30),
-			30,
-			true,
-		},
-		// One cell is a descendant of the other.
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(30),
-			CellIDFromFace(0),
-			0,
-			true,
-		},
-		{
-			CellIDFromFace(5),
-			CellIDFromFace(5).ChildEndAtLevel(30).Prev(),
-			0,
-			true,
-		},
-		// No common ancestors.
-		{
-			CellIDFromFace(0),
-			CellIDFromFace(5),
-			0,
-			false,
-		},
-		{
-			CellIDFromFace(2).ChildBeginAtLevel(30),
-			CellIDFromFace(3).ChildBeginAtLevel(20),
-			0,
-			false,
-		},
-		// Common ancestor distinct from both.
-		{
-			CellIDFromFace(5).ChildBeginAtLevel(9).Next().ChildBeginAtLevel(15),
-			CellIDFromFace(5).ChildBeginAtLevel(9).ChildBeginAtLevel(20),
-			8,
-			true,
-		},
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(2).ChildBeginAtLevel(30),
-			CellIDFromFace(0).ChildBeginAtLevel(2).Next().ChildBeginAtLevel(5),
-			1,
-			true,
-		},
-	}
-	for _, test := range tests {
-		if got, ok := test.ci.CommonAncestorLevel(test.other); ok != test.wantOk || got != test.want {
-			t.Errorf("CellID(%v).CommonAncestorLevel(%v) = %d, %t; want %d, %t", test.ci, test.other, got, ok, test.want, test.wantOk)
-		}
-	}
-}
-
-func TestCellIDDistanceToBegin(t *testing.T) {
-	tests := []struct {
-		id   CellID
-		want int64
-	}{
-		{
-			// at level 0 (i.e. full faces), there are only 6 cells from
-			// the last face to the beginning of the Hilbert curve.
-			id:   CellIDFromFace(5).ChildEndAtLevel(0),
-			want: 6,
-		},
-		{
-			// from the last cell on the last face at the smallest cell size,
-			// there are the maximum number of possible cells.
-			id:   CellIDFromFace(5).ChildEndAtLevel(maxLevel),
-			want: 6 * (1 << uint(2*maxLevel)),
-		},
-		{
-			// from the first cell on the first face.
-			id:   CellIDFromFace(0).ChildBeginAtLevel(0),
-			want: 0,
-		},
-		{
-			// from the first cell at the smallest level on the first face.
-			id:   CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
-			want: 0,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.id.distanceFromBegin(); got != test.want {
-			t.Errorf("%v.distanceToBegin() = %v, want %v", test.id, got, test.want)
-		}
-	}
-
-	// Test that advancing from the beginning by the distance from a cell gets
-	// us back to that cell.
-	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
-	if got := CellIDFromFace(0).ChildBeginAtLevel(id.Level()).Advance(id.distanceFromBegin()); got != id {
-		t.Errorf("advancing from the beginning by the distance of a cell should return us to that cell. got %v, want %v", got, id)
-	}
-}
-
-func TestFindMSBSetNonZero64(t *testing.T) {
-	testOne := uint64(0x8000000000000000)
-	testAll := uint64(0xFFFFFFFFFFFFFFFF)
-	testSome := uint64(0xFEDCBA9876543210)
-	for i := 63; i >= 0; i-- {
-		if got := findMSBSetNonZero64(testOne); got != i {
-			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testOne, got, i)
-		}
-		if got := findMSBSetNonZero64(testAll); got != i {
-			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testAll, got, i)
-		}
-		if got := findMSBSetNonZero64(testSome); got != i {
-			t.Errorf("findMSBSetNonZero64(%x) = %d, want = %d", testSome, got, i)
-		}
-		testOne >>= 1
-		testAll >>= 1
-		testSome >>= 1
-	}
-
-	if got := findMSBSetNonZero64(1); got != 0 {
-		t.Errorf("findMSBSetNonZero64(1) = %v, want 0", got)
-	}
-
-	if got := findMSBSetNonZero64(0); got != 0 {
-		t.Errorf("findMSBSetNonZero64(0) = %v, want 0", got)
-	}
-}
-
-func TestFindLSBSetNonZero64(t *testing.T) {
-	testOne := uint64(0x0000000000000001)
-	testAll := uint64(0xFFFFFFFFFFFFFFFF)
-	testSome := uint64(0x0123456789ABCDEF)
-	for i := 0; i < 64; i++ {
-		if got := findLSBSetNonZero64(testOne); got != i {
-			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testOne, got, i)
-		}
-		if got := findLSBSetNonZero64(testAll); got != i {
-			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testAll, got, i)
-		}
-		if got := findLSBSetNonZero64(testSome); got != i {
-			t.Errorf("findLSBSetNonZero64(%x) = %d, want = %d", testSome, got, i)
-		}
-		testOne <<= 1
-		testAll <<= 1
-		testSome <<= 1
-	}
-
-	if got := findLSBSetNonZero64(0); got != 0 {
-		t.Errorf("findLSBSetNonZero64(0) = %v, want 0", got)
-	}
-}
-
-func TestCellIDWrapping(t *testing.T) {
-	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4)
-
-	tests := []struct {
-		msg  string
-		got  CellID
-		want CellID
-	}{
-		{
-			"test wrap from beginning to end of Hilbert curve",
-			CellIDFromFace(5).ChildEndAtLevel(0).Prev(),
-			CellIDFromFace(0).ChildBeginAtLevel(0).PrevWrap(),
-		},
-		{
-			"smallest end leaf wraps to smallest first leaf using PrevWrap",
-			CellIDFromFacePosLevel(5, ^uint64(0)>>faceBits, maxLevel),
-			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).PrevWrap(),
-		},
-		{
-			"smallest end leaf wraps to smallest first leaf using AdvanceWrap",
-			CellIDFromFacePosLevel(5, ^uint64(0)>>faceBits, maxLevel),
-			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).AdvanceWrap(-1),
-		},
-		{
-			"PrevWrap is the same as AdvanceWrap(-1)",
-			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).AdvanceWrap(-1),
-			CellIDFromFace(0).ChildBeginAtLevel(maxLevel).PrevWrap(),
-		},
-		{
-			"Prev + NextWrap stays the same at given level",
-			CellIDFromFace(0).ChildBeginAtLevel(4),
-			CellIDFromFace(5).ChildEndAtLevel(4).Prev().NextWrap(),
-		},
-		{
-			"AdvanceWrap forward and back stays the same at given level",
-			CellIDFromFace(0).ChildBeginAtLevel(4),
-			CellIDFromFace(5).ChildEndAtLevel(4).Advance(-1).AdvanceWrap(1),
-		},
-		{
-			"Prev().NextWrap() stays same for first cell at level",
-			CellIDFromFacePosLevel(0, 0, maxLevel),
-			CellIDFromFace(5).ChildEndAtLevel(maxLevel).Prev().NextWrap(),
-		},
-		{
-			"AdvanceWrap forward and back stays same for first cell at level",
-			CellIDFromFacePosLevel(0, 0, maxLevel),
-			CellIDFromFace(5).ChildEndAtLevel(maxLevel).Advance(-1).AdvanceWrap(1),
-		},
-		// Check basic properties of AdvanceWrap().
-		{
-			"advancing 7 steps around cube should end up one past start.",
-			CellIDFromFace(1),
-			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(7),
-		},
-		{
-			"twice around should end up where we started",
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(12),
-		},
-		{
-			"backwards once around plus one step should be one before we started",
-			CellIDFromFace(4),
-			CellIDFromFace(5).AdvanceWrap(-7),
-		},
-		{
-			"wrapping even multiple of times around should end where we started",
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-			CellIDFromFace(0).ChildBeginAtLevel(0).AdvanceWrap(-12000000),
-		},
-		{
-			"wrapping combination of even times around should end where it started",
-			CellIDFromFace(0).ChildBeginAtLevel(5).AdvanceWrap(6644),
-			CellIDFromFace(0).ChildBeginAtLevel(5).AdvanceWrap(-11788),
-		},
-		{
-			"moving 256 should advance us one cell at max level",
-			id.Next().ChildBeginAtLevel(maxLevel),
-			id.ChildBeginAtLevel(maxLevel).AdvanceWrap(256),
-		},
-		{
-			"wrapping by 4 times cells per face should advance 4 faces",
-			CellIDFromFacePosLevel(1, 0, maxLevel),
-			CellIDFromFacePosLevel(5, 0, maxLevel).AdvanceWrap(2 << (2 * maxLevel)),
-		},
-	}
-
-	for _, test := range tests {
-		if test.got != test.want {
-			t.Errorf("%s: got %v want %v", test.msg, test.got, test.want)
-		}
-	}
-}
-
-func TestCellIDAdvance(t *testing.T) {
-	tests := []struct {
-		ci    CellID
-		steps int64
-		want  CellID
-	}{
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-			7,
-			CellIDFromFace(5).ChildEndAtLevel(0),
-		},
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-			12,
-			CellIDFromFace(5).ChildEndAtLevel(0),
-		},
-		{
-			CellIDFromFace(5).ChildEndAtLevel(0),
-			-7,
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-		},
-		{
-			CellIDFromFace(5).ChildEndAtLevel(0),
-			-12000000,
-			CellIDFromFace(0).ChildBeginAtLevel(0),
-		},
-		{
-			CellIDFromFace(0).ChildBeginAtLevel(5),
-			500,
-			CellIDFromFace(5).ChildEndAtLevel(5).Advance(500 - (6 << (2 * 5))),
-		},
-		{
-			CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4).ChildBeginAtLevel(maxLevel),
-			256,
-			CellIDFromFacePosLevel(3, 0x12345678, maxLevel-4).Next().ChildBeginAtLevel(maxLevel),
-		},
-		{
-			CellIDFromFacePosLevel(1, 0, maxLevel),
-			4 << (2 * maxLevel),
-			CellIDFromFacePosLevel(5, 0, maxLevel),
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.ci.Advance(test.steps); got != test.want {
-			t.Errorf("CellID(%v).Advance(%d) = %v; want = %v", test.ci, test.steps, got, test.want)
-		}
-	}
-}
-
-func TestCellIDFaceSiTi(t *testing.T) {
-	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel)
-	// Check that the (si, ti) coordinates of the center end in a
-	// 1 followed by (30 - level) 0's.
-	for level := uint64(0); level <= maxLevel; level++ {
-		l := maxLevel - int(level)
-		want := 1 << level
-		mask := 1<<(level+1) - 1
-
-		_, si, ti := id.Parent(l).faceSiTi()
-		if want != si&mask {
-			t.Errorf("CellID.Parent(%d).faceSiTi(), si = %b, want %b", l, si&mask, want)
-		}
-		if want != ti&mask {
-			t.Errorf("CellID.Parent(%d).faceSiTi(), ti = %b, want %b", l, ti&mask, want)
-		}
-	}
-}
-
-func TestCellIDContinuity(t *testing.T) {
-	const maxWalkLevel = 8
-	const cellSize = 1.0 / (1 << maxWalkLevel)
-
-	// Make sure that sequentially increasing cell ids form a continuous
-	// path over the surface of the sphere, i.e. there are no
-	// discontinuous jumps from one region to another.
-
-	maxDist := MaxWidthMetric.Value(maxWalkLevel)
-	end := CellIDFromFace(5).ChildEndAtLevel(maxWalkLevel)
-	id := CellIDFromFace(0).ChildBeginAtLevel(maxWalkLevel)
-
-	for ; id != end; id = id.Next() {
-
-		if got := id.rawPoint().Angle(id.NextWrap().rawPoint()); float64(got) > maxDist {
-			t.Errorf("%v.rawPoint().Angle(%v.NextWrap().rawPoint()) = %v > %v", id, id, got, maxDist)
-		}
-		if id.NextWrap() != id.AdvanceWrap(1) {
-			t.Errorf("%v.NextWrap() != %v.AdvanceWrap(1) %v != %v)", id, id, id.NextWrap(), id.AdvanceWrap(1))
-		}
-		if id != id.NextWrap().AdvanceWrap(-1) {
-			t.Errorf("%v.NextWrap().AdvanceWrap(-1) = %v want %v)", id, id.NextWrap().AdvanceWrap(-1), id)
-		}
-
-		// Check that the rawPoint() returns the center of each cell
-		// in (s,t) coordinates.
-		_, u, v := xyzToFaceUV(id.rawPoint())
-		if !float64Eq(math.Remainder(uvToST(u), 0.5*cellSize), 0.0) {
-			t.Errorf("uvToST(%v) = %v, want %v", u, uvToST(u), 0.5*cellSize)
-		}
-		if !float64Eq(math.Remainder(uvToST(v), 0.5*cellSize), 0.0) {
-			t.Errorf("uvToST(%v) = %v, want %v", v, uvToST(v), 0.5*cellSize)
-		}
-	}
-}
-
-// sampleBoundary returns a random point on the boundary of the given rectangle.
-func sampleBoundary(rect r2.Rect) (u, v float64) {
-	if oneIn(2) {
-		v = randomUniformFloat64(rect.Y.Lo, rect.Y.Hi)
-		if oneIn(2) {
-			u = rect.X.Lo
-		} else {
-			u = rect.X.Hi
-		}
-	} else {
-		u = randomUniformFloat64(rect.X.Lo, rect.X.Hi)
-		if oneIn(2) {
-			v = rect.Y.Lo
-		} else {
-			v = rect.Y.Hi
-		}
-	}
-	return u, v
-}
-
-// projectToBoundary returns the closest point to uv on the boundary of rect.
-func projectToBoundary(u, v float64, rect r2.Rect) r2.Point {
-	du0 := math.Abs(u - rect.X.Lo)
-	du1 := math.Abs(u - rect.X.Hi)
-	dv0 := math.Abs(v - rect.Y.Lo)
-	dv1 := math.Abs(v - rect.Y.Hi)
-
-	dmin := math.Min(math.Min(du0, du1), math.Min(dv0, dv1))
-	if du0 == dmin {
-		return r2.Point{rect.X.Lo, rect.Y.ClampPoint(v)}
-	}
-	if du1 == dmin {
-		return r2.Point{rect.X.Hi, rect.Y.ClampPoint(v)}
-	}
-	if dv0 == dmin {
-		return r2.Point{rect.X.ClampPoint(u), rect.Y.Lo}
-	}
-
-	return r2.Point{rect.X.ClampPoint(u), rect.Y.Hi}
-}
-
-func TestCellIDExpandedByDistanceUV(t *testing.T) {
-	const maxDistDegrees = 10
-	for i := 0; i < 1000; i++ {
-		id := randomCellID()
-		distance := s1.Degree * s1.Angle(randomUniformFloat64(-maxDistDegrees, maxDistDegrees))
-
-		bound := id.boundUV()
-		expanded := expandedByDistanceUV(bound, distance)
-		for iter := 0; iter < 10; iter++ {
-			// Choose a point on the boundary of the rectangle.
-			face := randomUniformInt(6)
-			centerU, centerV := sampleBoundary(bound)
-			center := Point{faceUVToXYZ(face, centerU, centerV).Normalize()}
-
-			// Now sample a point from a disc of radius (2 * distance).
-			p := samplePointFromCap(CapFromCenterHeight(center, 2*math.Abs(float64(distance))))
-
-			// Find the closest point on the boundary to the sampled point.
-			u, v, ok := faceXYZToUV(face, p)
-			if !ok {
-				continue
-			}
-
-			uv := r2.Point{u, v}
-			closestUV := projectToBoundary(u, v, bound)
-			closest := faceUVToXYZ(face, closestUV.X, closestUV.Y).Normalize()
-			actualDist := p.Distance(Point{closest})
-
-			if distance >= 0 {
-				// expanded should contain all points in the original bound,
-				// and also all points within distance of the boundary.
-				if bound.ContainsPoint(uv) || actualDist < distance {
-					if !expanded.ContainsPoint(uv) {
-						t.Errorf("expandedByDistanceUV(%v, %v).ContainsPoint(%v) = false, want true", bound, distance, uv)
-					}
-				}
-			} else {
-				// expanded should not contain any points within distance
-				// of the original boundary.
-				if actualDist < -distance {
-					if expanded.ContainsPoint(uv) {
-						t.Errorf("negatively expandedByDistanceUV(%v, %v).ContainsPoint(%v) = true, want false", bound, distance, uv)
-					}
-				}
-			}
-		}
-	}
-}
-
-func TestCellIDMaxTile(t *testing.T) {
-	// This method is also tested more thoroughly in s2cellunion_test.
-	for iter := 0; iter < 1000; iter++ {
-		id := randomCellIDForLevel(10)
-
-		// Check that limit is returned for tiles at or beyond limit.
-		if got, want := id, id.MaxTile(id); got != want {
-			t.Errorf("%v.MaxTile(%v) = %v, want %v", id, id, got, want)
-		}
-		if got, want := id, id.Children()[0].MaxTile(id); got != want {
-			t.Errorf("%v.Children()[0].MaxTile(%v) = %v, want %v", id, id, got, want)
-		}
-		if got, want := id, id.Children()[1].MaxTile(id); got != want {
-			t.Errorf("%v.Children()[1].MaxTile(%v) = %v, want %v", id, id, got, want)
-		}
-		if got, want := id, id.Next().MaxTile(id); got != want {
-			t.Errorf("%v.Next().MaxTile(%v) = %v, want %v", id, id, got, want)
-		}
-		if got, want := id.Children()[0], id.MaxTile(id.Children()[0]); got != want {
-			t.Errorf("%v.MaxTile(%v.Children()[0] = %v, want %v", id, id, got, want)
-		}
-
-		// Check that the tile size is increased when possible.
-		if got, want := id, id.Children()[0].MaxTile(id.Next()); got != want {
-			t.Errorf("%v.Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.Children()[0].MaxTile(id.Next().Children()[0]); got != want {
-			t.Errorf("%v.Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.Children()[0].MaxTile(id.Next().Children()[1].Children()[0]); got != want {
-			t.Errorf("%v.Children()[0].MaxTile(%v.Next().Children()[1].Children()[0] = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.Children()[0].Children()[0].MaxTile(id.Next()); got != want {
-			t.Errorf("%v.Children()[0].Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.Children()[0].Children()[0].Children()[0].MaxTile(id.Next()); got != want {
-			t.Errorf("%v.Children()[0].Children()[0].Children()[0].MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
-		}
-
-		// Check that the tile size is decreased when necessary.
-		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next()); got != want {
-			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next().Children()[0]); got != want {
-			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next().Children()[0]) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id.Children()[0], id.MaxTile(id.Children()[0].Next().Children()[1]); got != want {
-			t.Errorf("%v.Children()[0], id.MaxTile(%v.Children()[0].Next().Children()[1]) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id.Children()[0].Children()[0], id.MaxTile(id.Children()[0].Children()[0].Next()); got != want {
-			t.Errorf("%v.Children()[0].Children()[0], id.MaxTile(%v.Children()[0].Children()[0].Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id.Children()[0].Children()[0].Children()[0],
-			id.MaxTile(id.Children()[0].Children()[0].Children()[0].Next()); got != want {
-			t.Errorf("%v.MaxTile(%v.Children()[0].Children()[0].Children()[0].Next()) = %v, want %v", id, id, got, want)
-		}
-
-		// Check that the tile size is otherwise unchanged.
-		if got, want := id, id.MaxTile(id.Next()); got != want {
-			t.Errorf("%v.MaxTile(%v.Next()) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.MaxTile(id.Next().Children()[0]); got != want {
-			t.Errorf("%v.MaxTile(%v.Next().Children()[0]) = %v, want %v", id, id, got, want)
-		}
-
-		if got, want := id, id.MaxTile(id.Next().Children()[1].Children()[0]); got != want {
-			t.Errorf("%v.MaxTile(%v.Next().Children()[1].Children()[0]) = %v, want %v", id, id, got, want)
-		}
-	}
-}
-
-func TestCellIDCenterFaceSiTi(t *testing.T) {
-	// Check that the (si, ti) coordinates of the center end in a
-	// 1 followed by (30 - level) 0s.
-
-	id := CellIDFromFacePosLevel(3, 0x12345678, maxLevel)
-
-	tests := []struct {
-		id          CellID
-		levelOffset uint
-	}{
-		// Leaf level, 30.
-		{id, 0},
-		// Level 29.
-		{id.Parent(maxLevel - 1), 1},
-		// Level 28.
-		{id.Parent(maxLevel - 2), 2},
-		// Level 20.
-		{id.Parent(maxLevel - 10), 10},
-		// Level 10.
-		{id.Parent(maxLevel - 20), 20},
-		// Level 0.
-		{id.Parent(0), maxLevel},
-	}
-
-	for _, test := range tests {
-		_, si, ti := test.id.centerFaceSiTi()
-		want := 1 << test.levelOffset
-		mask := (1 << (test.levelOffset + 1)) - 1
-		if want != si&mask {
-			t.Errorf("Level Offset %d. %b != %b", test.levelOffset, want, si&mask)
-		}
-		if want != ti&mask {
-			t.Errorf("Level Offset: %d. %b != %b", test.levelOffset, want, ti&mask)
-		}
-	}
-}
-
-// TODO(roberts): Remaining tests to convert.
-// Coverage
-// TraversalOrder

+ 405 - 51
vendor/github.com/golang/geo/s2/cellunion.go

@@ -1,23 +1,25 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
+	"fmt"
+	"io"
 	"sort"
 	"sort"
+
+	"github.com/golang/geo/s1"
 )
 )
 
 
 // A CellUnion is a collection of CellIDs.
 // A CellUnion is a collection of CellIDs.
@@ -26,6 +28,9 @@ import (
 // Specifically, it may not contain the same CellID twice, nor a CellID that
 // Specifically, it may not contain the same CellID twice, nor a CellID that
 // is contained by another, nor the four sibling CellIDs that are children of
 // is contained by another, nor the four sibling CellIDs that are children of
 // a single higher level CellID.
 // a single higher level CellID.
+//
+// CellUnions are not required to be normalized, but certain operations will
+// return different results if they are not (e.g. Contains).
 type CellUnion []CellID
 type CellUnion []CellID
 
 
 // CellUnionFromRange creates a CellUnion that covers the half-open range
 // CellUnionFromRange creates a CellUnion that covers the half-open range
@@ -38,12 +43,157 @@ func CellUnionFromRange(begin, end CellID) CellUnion {
 	for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
 	for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
 		cu = append(cu, id)
 		cu = append(cu, id)
 	}
 	}
+	// The output is normalized because the cells are added in order by the iteration.
+	return cu
+}
+
+// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions.
+func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion {
+	var cu CellUnion
+	for _, cellUnion := range cellUnions {
+		cu = append(cu, cellUnion...)
+	}
+	cu.Normalize()
+	return cu
+}
+
+// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions.
+func CellUnionFromIntersection(x, y CellUnion) CellUnion {
+	var cu CellUnion
+
+	// This is a fairly efficient calculation that uses binary search to skip
+	// over sections of both input vectors. It takes constant time if all the
+	// cells of x come before or after all the cells of y in CellID order.
+	var i, j int
+	for i < len(x) && j < len(y) {
+		iMin := x[i].RangeMin()
+		jMin := y[j].RangeMin()
+		if iMin > jMin {
+			// Either j.Contains(i) or the two cells are disjoint.
+			if x[i] <= y[j].RangeMax() {
+				cu = append(cu, x[i])
+				i++
+			} else {
+				// Advance j to the first cell possibly contained by x[i].
+				j = y.lowerBound(j+1, len(y), iMin)
+				// The previous cell y[j-1] may now contain x[i].
+				if x[i] <= y[j-1].RangeMax() {
+					j--
+				}
+			}
+		} else if jMin > iMin {
+			// Identical to the code above with i and j reversed.
+			if y[j] <= x[i].RangeMax() {
+				cu = append(cu, y[j])
+				j++
+			} else {
+				i = x.lowerBound(i+1, len(x), jMin)
+				if y[j] <= x[i-1].RangeMax() {
+					i--
+				}
+			}
+		} else {
+			// i and j have the same RangeMin(), so one contains the other.
+			if x[i] < y[j] {
+				cu = append(cu, x[i])
+				i++
+			} else {
+				cu = append(cu, y[j])
+				j++
+			}
+		}
+	}
+
+	// The output is generated in sorted order.
+	cu.Normalize()
+	return cu
+}
+
+// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection
+// of a CellUnion with the given CellID. This can be useful for splitting a
+// CellUnion into chunks.
+func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion {
+	var cu CellUnion
+	if x.ContainsCellID(id) {
+		cu = append(cu, id)
+		cu.Normalize()
+		return cu
+	}
+
+	idmax := id.RangeMax()
+	for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ {
+		cu = append(cu, x[i])
+	}
+
+	cu.Normalize()
 	return cu
 	return cu
 }
 }
 
 
+// CellUnionFromDifference creates a CellUnion from the difference (x - y)
+// of the given CellUnions.
+func CellUnionFromDifference(x, y CellUnion) CellUnion {
+	// TODO(roberts): This is approximately O(N*log(N)), but could probably
+	// use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient.
+
+	var cu CellUnion
+	for _, xid := range x {
+		cu.cellUnionDifferenceInternal(xid, &y)
+	}
+
+	// The output is generated in sorted order, and there should not be any
+	// cells that can be merged (provided that both inputs were normalized).
+	return cu
+}
+
+// The C++ constructor methods FromNormalized and FromVerbatim are not necessary
+// since they don't call Normalize, and just set the CellIDs directly on the object,
+// so straight casting is sufficient in Go to replicate this behavior.
+
+// IsValid reports whether the cell union is valid, meaning that the CellIDs are
+// valid, non-overlapping, and sorted in increasing order.
+func (cu *CellUnion) IsValid() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+	}
+	return true
+}
+
+// IsNormalized reports whether the cell union is normalized, meaning that it is
+// satisfies IsValid and that no four cells have a common parent.
+// Certain operations such as Contains will return a different
+// result if the cell union is not normalized.
+func (cu *CellUnion) IsNormalized() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+		if i < 3 {
+			continue
+		}
+		if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
+			return false
+		}
+	}
+	return true
+}
+
 // Normalize normalizes the CellUnion.
 // Normalize normalizes the CellUnion.
 func (cu *CellUnion) Normalize() {
 func (cu *CellUnion) Normalize() {
-	sort.Sort(byID(*cu))
+	sortCellIDs(*cu)
 
 
 	output := make([]CellID, 0, len(*cu)) // the list of accepted cells
 	output := make([]CellID, 0, len(*cu)) // the list of accepted cells
 	// Loop invariant: output is a sorted list of cells with no redundancy.
 	// Loop invariant: output is a sorted list of cells with no redundancy.
@@ -76,24 +226,8 @@ func (cu *CellUnion) Normalize() {
 		// See if the last three cells plus this one can be collapsed.
 		// See if the last three cells plus this one can be collapsed.
 		// We loop because collapsing three accepted cells and adding a higher level cell
 		// We loop because collapsing three accepted cells and adding a higher level cell
 		// could cascade into previously accepted cells.
 		// could cascade into previously accepted cells.
-		for len(output) >= 3 {
-			fin := output[len(output)-3:]
-
-			// fast XOR test; a necessary but not sufficient condition
-			if fin[0]^fin[1]^fin[2]^ci != 0 {
-				break
-			}
-
-			// more expensive test; exact.
-			// Compute the two bit mask for the encoded child position,
-			// then see if they all agree.
-			mask := CellID(ci.lsb() << 1)
-			mask = ^(mask + mask<<1)
-			should := ci & mask
-			if (fin[0]&mask != should) || (fin[1]&mask != should) || (fin[2]&mask != should) || ci.isFace() {
-				break
-			}
-
+		for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
+			// Replace four children by their parent cell.
 			output = output[:len(output)-3]
 			output = output[:len(output)-3]
 			ci = ci.immediateParent() // checked !ci.isFace above
 			ci = ci.immediateParent() // checked !ci.isFace above
 		}
 		}
@@ -102,9 +236,7 @@ func (cu *CellUnion) Normalize() {
 	*cu = output
 	*cu = output
 }
 }
 
 
-// IntersectsCellID reports whether this cell union intersects the given cell ID.
-//
-// This method assumes that the CellUnion has been normalized.
+// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
 func (cu *CellUnion) IntersectsCellID(id CellID) bool {
 func (cu *CellUnion) IntersectsCellID(id CellID) bool {
 	// Find index of array item that occurs directly after our probe cell:
 	// Find index of array item that occurs directly after our probe cell:
 	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
 	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
@@ -115,10 +247,12 @@ func (cu *CellUnion) IntersectsCellID(id CellID) bool {
 	return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
 	return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
 }
 }
 
 
-// ContainsCellID reports whether the cell union contains the given cell ID.
+// ContainsCellID reports whether the CellUnion contains the given cell ID.
 // Containment is defined with respect to regions, e.g. a cell contains its 4 children.
 // Containment is defined with respect to regions, e.g. a cell contains its 4 children.
 //
 //
-// This method assumes that the CellUnion has been normalized.
+// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
+// of 4 child cells are *not* considered to contain their parent cell. To get
+// this behavior you must call Normalize() explicitly.
 func (cu *CellUnion) ContainsCellID(id CellID) bool {
 func (cu *CellUnion) ContainsCellID(id CellID) bool {
 	// Find index of array item that occurs directly after our probe cell:
 	// Find index of array item that occurs directly after our probe cell:
 	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
 	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
@@ -129,12 +263,6 @@ func (cu *CellUnion) ContainsCellID(id CellID) bool {
 	return i != 0 && (*cu)[i-1].RangeMax() >= id
 	return i != 0 && (*cu)[i-1].RangeMax() >= id
 }
 }
 
 
-type byID []CellID
-
-func (cu byID) Len() int           { return len(cu) }
-func (cu byID) Less(i, j int) bool { return cu[i] < cu[j] }
-func (cu byID) Swap(i, j int)      { cu[i], cu[j] = cu[j], cu[i] }
-
 // Denormalize replaces this CellUnion with an expanded version of the
 // Denormalize replaces this CellUnion with an expanded version of the
 // CellUnion where any cell whose level is less than minLevel or where
 // CellUnion where any cell whose level is less than minLevel or where
 // (level - minLevel) is not a multiple of levelMod is replaced by its
 // (level - minLevel) is not a multiple of levelMod is replaced by its
@@ -218,6 +346,16 @@ func (cu *CellUnion) IntersectsCell(c Cell) bool {
 	return cu.IntersectsCellID(c.id)
 	return cu.IntersectsCellID(c.id)
 }
 }
 
 
// ContainsPoint reports whether this cell union contains the given point.
//
// The point is mapped to a cell via CellFromPoint and containment of that
// cell is tested, so the result follows ContainsCell's semantics.
func (cu *CellUnion) ContainsPoint(p Point) bool {
	return cu.ContainsCell(CellFromPoint(p))
}
+
// CellUnionBound computes a covering of the CellUnion.
//
// The covering is derived from this union's bounding cap
// (see CapBound's CellUnionBound), so it may be coarser than the union itself.
func (cu *CellUnion) CellUnionBound() []CellID {
	return cu.CapBound().CellUnionBound()
}
+
 // LeafCellsCovered reports the number of leaf cells covered by this cell union.
 // LeafCellsCovered reports the number of leaf cells covered by this cell union.
 // This will be no more than 6*2^60 for the whole sphere.
 // This will be no more than 6*2^60 for the whole sphere.
 func (cu *CellUnion) LeafCellsCovered() int64 {
 func (cu *CellUnion) LeafCellsCovered() int64 {
@@ -228,9 +366,225 @@ func (cu *CellUnion) LeafCellsCovered() int64 {
 	return numLeaves
 	return numLeaves
 }
 }
 
 
-// BUG: Differences from C++:
-//  Contains(CellUnion)/Intersects(CellUnion)
-//  Union(CellUnion)/Intersection(CellUnion)/Difference(CellUnion)
-//  Expand
-//  ContainsPoint
-//  AverageArea/ApproxArea/ExactArea
+// Returns true if the given four cells have a common parent.
+// This requires that the four CellIDs are distinct.
+func areSiblings(a, b, c, d CellID) bool {
+	// A necessary (but not sufficient) condition is that the XOR of the
+	// four cell IDs must be zero. This is also very fast to test.
+	if (a ^ b ^ c) != d {
+		return false
+	}
+
+	// Now we do a slightly more expensive but exact test. First, compute a
+	// mask that blocks out the two bits that encode the child position of
+	// "id" with respect to its parent, then check that the other three
+	// children all agree with "mask".
+	mask := uint64(d.lsb() << 1)
+	mask = ^(mask + (mask << 1))
+	idMasked := (uint64(d) & mask)
+	return ((uint64(a)&mask) == idMasked &&
+		(uint64(b)&mask) == idMasked &&
+		(uint64(c)&mask) == idMasked &&
+		!d.isFace())
+}
+
+// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Contains(o CellUnion) bool {
+	// TODO(roberts): Investigate alternatives such as divide-and-conquer
+	// or alternating-skip-search that may be significantly faster in both
+	// the average and worst case. This applies to Intersects as well.
+	for _, id := range o {
+		if !cu.ContainsCellID(id) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Intersects(o CellUnion) bool {
+	for _, c := range *cu {
+		if o.ContainsCellID(c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// lowerBound returns the index in this CellUnion to the first element whose value
+// is not considered to go before the given cell id. (i.e., either it is equivalent
+// or comes after the given id.) If there is no match, then end is returned.
+func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
+	for i := begin; i < end; i++ {
+		if (*cu)[i] >= id {
+			return i
+		}
+	}
+
+	return end
+}
+
+// cellUnionDifferenceInternal adds the difference between the CellID and the union to
+// the result CellUnion. If they intersect but the difference is non-empty, it divides
+// and conquers.
+func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
+	if !other.IntersectsCellID(id) {
+		(*cu) = append((*cu), id)
+		return
+	}
+
+	if !other.ContainsCellID(id) {
+		for _, child := range id.Children() {
+			cu.cellUnionDifferenceInternal(child, other)
+		}
+	}
+}
+
// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
// around the union's boundary.
//
// For each cell c in the union, we add all cells at level
// expandLevel that abut c. There are typically eight of those
// (four edge-abutting and four sharing a vertex). However, if c is
// finer than expandLevel, we add all cells abutting
// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
// as an expandLevel cell rarely abuts a smaller cell.
//
// Note that the size of the output is exponential in
// expandLevel. For example, if expandLevel == 20 and the input
// has a cell at level 10, there will be on the order of 4000
// adjacent cells in the output. For most applications the
// ExpandByRadius method below is easier to use.
func (cu *CellUnion) ExpandAtLevel(level int) {
	var output CellUnion
	levelLsb := lsbForLevel(level)
	// Iterate backwards so the containment-skip below can consume earlier
	// (smaller-ID) cells that the coarser parent already covers.
	for i := len(*cu) - 1; i >= 0; i-- {
		id := (*cu)[i]
		if id.lsb() < levelLsb {
			// id is finer than the expansion level; expand its parent instead.
			id = id.Parent(level)
			// Optimization: skip over any cells contained by this one. This is
			// especially important when very small regions are being expanded.
			for i > 0 && id.Contains((*cu)[i-1]) {
				i--
			}
		}
		// Keep the cell itself plus every neighbor at the given level.
		output = append(output, id)
		output = append(output, id.AllNeighbors(level)...)
	}
	sortCellIDs(output)

	*cu = output
	cu.Normalize()
}
+
+// ExpandByRadius expands this CellUnion such that it contains all points whose
+// distance to the CellUnion is at most minRadius, but do not use cells that
+// are more than maxLevelDiff levels higher than the largest cell in the input.
+// The second parameter controls the tradeoff between accuracy and output size
+// when a large region is being expanded by a small amount (e.g. expanding Canada
+// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
+// by approximately 1/16 the width of its largest cell. Note that in the worst case,
+// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
+// larger than the number of cells in the input.
+func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
+	minLevel := maxLevel
+	for _, cid := range *cu {
+		minLevel = minInt(minLevel, cid.Level())
+	}
+
+	// Find the maximum level such that all cells are at least "minRadius" wide.
+	radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
+	if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
+		// The requested expansion is greater than the width of a face cell.
+		// The easiest way to handle this is to expand twice.
+		cu.ExpandAtLevel(0)
+	}
+	cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
+}
+
+// Equal reports whether the two CellUnions are equal.
+func (cu CellUnion) Equal(o CellUnion) bool {
+	if len(cu) != len(o) {
+		return false
+	}
+	for i := 0; i < len(cu); i++ {
+		if cu[i] != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
// AverageArea returns the average area of this CellUnion.
// This is accurate to within a factor of 1.7.
//
// The estimate is the number of leaf cells covered multiplied by the
// average leaf-cell area (AvgAreaMetric at maxLevel).
func (cu *CellUnion) AverageArea() float64 {
	return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
}
+
+// ApproxArea returns the approximate area of this CellUnion. This method is accurate
+// to within 3% percent for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher within the union.
+func (cu *CellUnion) ApproxArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ApproxArea()
+	}
+	return area
+}
+
+// ExactArea returns the area of this CellUnion as accurately as possible.
+func (cu *CellUnion) ExactArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ExactArea()
+	}
+	return area
+}
+
// Encode encodes the CellUnion.
//
// It writes through the package's encoder; any error encountered while
// writing is returned.
func (cu *CellUnion) Encode(w io.Writer) error {
	e := &encoder{w: w}
	cu.encode(e)
	return e.err
}
+
// encode writes the encoding version, the cell count, and then each CellID
// in order to the given encoder. Any error is accumulated in e.err.
func (cu *CellUnion) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	e.writeInt64(int64(len(*cu)))
	for _, ci := range *cu {
		ci.encode(e)
	}
}
+
// Decode decodes the CellUnion.
//
// It reads through the package's decoder; any error encountered while
// reading is returned.
func (cu *CellUnion) Decode(r io.Reader) error {
	d := &decoder{r: asByteReader(r)}
	cu.decode(d)
	return d.err
}
+
+func (cu *CellUnion) decode(d *decoder) {
+	version := d.readInt8()
+	if d.err != nil {
+		return
+	}
+	if version != encodingVersion {
+		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
+		return
+	}
+	n := d.readInt64()
+	if d.err != nil {
+		return
+	}
+	const maxCells = 1000000
+	if n > maxCells {
+		d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells)
+		return
+	}
+	*cu = make([]CellID, n)
+	for i := range *cu {
+		(*cu)[i].decode(d)
+	}
+}

+ 0 - 723
vendor/github.com/golang/geo/s2/cellunion_test.go

@@ -1,723 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"reflect"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/s1"
-)
-
-func TestCellUnionNormalization(t *testing.T) {
-	cu := CellUnion{
-		0x80855c0000000000, // A: a cell over Pittsburg CA
-		0x80855d0000000000, // B, a child of A
-		0x8085634000000000, // first child of X, disjoint from A
-		0x808563c000000000, // second child of X
-		0x80855dc000000000, // a child of B
-		0x808562c000000000, // third child of X
-		0x8085624000000000, // fourth child of X
-		0x80855d0000000000, // B again
-	}
-	exp := CellUnion{
-		0x80855c0000000000, // A
-		0x8085630000000000, // X
-	}
-	cu.Normalize()
-	if !reflect.DeepEqual(cu, exp) {
-		t.Errorf("got %v, want %v", cu, exp)
-	}
-
-	// add a redundant cell
-	/* TODO(dsymonds)
-	cu.Add(0x808562c000000000)
-	if !reflect.DeepEqual(cu, exp) {
-		t.Errorf("after redundant add, got %v, want %v", cu, exp)
-	}
-	*/
-}
-
-func TestCellUnionBasic(t *testing.T) {
-	empty := CellUnion{}
-	empty.Normalize()
-	if len(empty) != 0 {
-		t.Errorf("empty CellUnion had %d cells, want 0", len(empty))
-	}
-
-	face1ID := CellIDFromFace(1)
-	face1Cell := CellFromCellID(face1ID)
-	face1Union := CellUnion{face1ID}
-	face1Union.Normalize()
-	if len(face1Union) != 1 {
-		t.Errorf("%v had %d cells, want 1", face1Union, len(face1Union))
-	}
-	if face1ID != face1Union[0] {
-		t.Errorf("%v[0] = %v, want %v", face1Union, face1Union[0], face1ID)
-	}
-	if got := face1Union.ContainsCell(face1Cell); !got {
-		t.Errorf("%v.ContainsCell(%v) = %t, want %t", face1Union, face1Cell, got, true)
-	}
-
-	face2ID := CellIDFromFace(2)
-	face2Cell := CellFromCellID(face2ID)
-	face2Union := CellUnion{face2ID}
-	face2Union.Normalize()
-	if len(face2Union) != 1 {
-		t.Errorf("%v had %d cells, want 1", face2Union, len(face2Union))
-	}
-	if face2ID != face2Union[0] {
-		t.Errorf("%v[0] = %v, want %v", face2Union, face2Union[0], face2ID)
-	}
-
-	if got := face1Union.ContainsCell(face2Cell); got {
-		t.Errorf("%v.ContainsCell(%v) = %t, want %t", face1Union, face2Cell, got, false)
-	}
-
-}
-
-func TestCellUnion(t *testing.T) {
-	tests := []struct {
-		cells     []CellID // A test CellUnion.
-		contained []CellID // List of cellIDs contained in the CellUnion.
-		overlaps  []CellID // List of CellIDs that intersects the CellUnion but not contained in it.
-		disjoint  []CellID // List of CellIDs that are disjoint from the CellUnion.
-	}{
-		{
-			// Single cell around NYC, and some simple nearby probes
-			cells: []CellID{0x89c25c0000000000},
-			contained: []CellID{
-				CellID(0x89c25c0000000000).ChildBegin(),
-				CellID(0x89c25c0000000000).ChildBeginAtLevel(28),
-			},
-			overlaps: []CellID{
-				CellID(0x89c25c0000000000).immediateParent(),
-				CellIDFromFace(CellID(0x89c25c0000000000).Face()), // the whole face
-			},
-			disjoint: []CellID{
-				CellID(0x89c25c0000000000).Next(),                       // Cell next to this one at same level
-				CellID(0x89c25c0000000000).Next().ChildBeginAtLevel(28), // Cell next to this one at deep level
-				0x89c2700000000000,                                      // Big(er) neighbor cell
-				0x89e9000000000000,                                      // Very big next door cell.
-				0x89c1000000000000,                                      // Very big cell, smaller value than probe
-			},
-		},
-
-		{
-			// NYC and SFO:
-			cells: []CellID{
-				0x89c25b0000000000, // NYC
-				0x89c2590000000000, // NYC
-				0x89c2f70000000000, // NYC
-				0x89c2f50000000000, // NYC
-				0x8085870000000000, // SFO
-				0x8085810000000000, // SFO
-				0x808f7d0000000000, // SFO
-				0x808f7f0000000000, // SFO
-			},
-			contained: []CellID{
-				0x808f7ef300000000, // SFO
-				0x808f7e5cf0000000, // SFO
-				0x808587f000000000, // SFO
-				0x89c25ac000000000, // NYC
-				0x89c259a400000000, // NYC
-				0x89c258fa10000000, // NYC
-				0x89c258f174007000, // NYC
-			},
-			overlaps: []CellID{
-				0x808c000000000000, // Big SFO
-				0x89c4000000000000, // Big NYC
-			},
-			disjoint: []CellID{
-				0x89c15a4fcb1bb000, // outside NYC
-				0x89c15a4e4aa95000, // outside NYC
-				0x8094000000000000, // outside SFO (big)
-				0x8096f10000000000, // outside SFO (smaller)
-
-				0x87c0000000000000, // Midwest very big
-			},
-		},
-		{
-			// CellUnion with cells at many levels:
-			cells: []CellID{
-				0x8100000000000000, // starting around california
-				0x8740000000000000, // adjacent cells at increasing
-				0x8790000000000000, // levels, moving eastward.
-				0x87f4000000000000,
-				0x87f9000000000000, // going down across the midwest
-				0x87ff400000000000,
-				0x87ff900000000000,
-				0x87fff40000000000,
-				0x87fff90000000000,
-				0x87ffff4000000000,
-				0x87ffff9000000000,
-				0x87fffff400000000,
-				0x87fffff900000000,
-				0x87ffffff40000000,
-				0x87ffffff90000000,
-				0x87fffffff4000000,
-				0x87fffffff9000000,
-				0x87ffffffff400000, // to a very small cell in Wisconsin
-			},
-			contained: []CellID{
-				0x808f400000000000,
-				0x80eb118b00000000,
-				0x8136a7a11d000000,
-				0x8136a7a11dac0000,
-				0x876c7c0000000000,
-				0x87f96d0000000000,
-				0x87ffffffff400000,
-			},
-			overlaps: []CellID{
-				CellID(0x8100000000000000).immediateParent(),
-				CellID(0x8740000000000000).immediateParent(),
-			},
-			disjoint: []CellID{
-				0x52aaaaaaab300000,
-				0x52aaaaaaacd00000,
-				0x87fffffffa100000,
-				0x87ffffffed500000,
-				0x87ffffffa0100000,
-				0x87fffffed5540000,
-				0x87fffffed6240000,
-				0x52aaaacccb340000,
-				0x87a0000400000000,
-				0x87a000001f000000,
-				0x87a0000029d00000,
-				0x9500000000000000,
-			},
-		},
-	}
-	for _, test := range tests {
-		union := CellUnion(test.cells)
-		union.Normalize()
-
-		// Ensure self-containment tests are correct.
-		for _, id := range test.cells {
-			if !union.IntersectsCellID(id) {
-				t.Errorf("CellUnion %v should self-intersect %v but does not", union, id)
-			}
-			if !union.ContainsCellID(id) {
-				t.Errorf("CellUnion %v should self-contain %v but does not", union, id)
-			}
-		}
-		// Test for containment specified in test case.
-		for _, id := range test.contained {
-			if !union.IntersectsCellID(id) {
-				t.Errorf("CellUnion %v should intersect %v but does not", union, id)
-			}
-			if !union.ContainsCellID(id) {
-				t.Errorf("CellUnion %v should contain %v but does not", union, id)
-			}
-		}
-		// Make sure the CellUnion intersect these cells but do not contain.
-		for _, id := range test.overlaps {
-			if !union.IntersectsCellID(id) {
-				t.Errorf("CellUnion %v should intersect %v but does not", union, id)
-			}
-			if union.ContainsCellID(id) {
-				t.Errorf("CellUnion %v should not contain %v but does", union, id)
-			}
-		}
-		// Negative cases make sure the CellUnion neither contain nor intersect these cells
-		for _, id := range test.disjoint {
-			if union.IntersectsCellID(id) {
-				t.Errorf("CellUnion %v should not intersect %v but does", union, id)
-			}
-			if union.ContainsCellID(id) {
-				t.Errorf("CellUnion %v should not contain %v but does", union, id)
-			}
-		}
-	}
-}
-
-func addCells(id CellID, selected bool, input *[]CellID, expected *[]CellID, t *testing.T) {
-	// Decides whether to add "id" and/or some of its descendants to the test case.  If "selected"
-	// is true, then the region covered by "id" *must* be added to the test case (either by adding
-	// "id" itself, or some combination of its descendants, or both).  If cell ids are to the test
-	// case "input", then the corresponding expected result after simplification is added to
-	// "expected".
-
-	if id == 0 {
-		// Initial call: decide whether to add cell(s) from each face.
-		for face := 0; face < 6; face++ {
-			addCells(CellIDFromFace(face), false, input, expected, t)
-		}
-		return
-	}
-
-	if id.IsLeaf() {
-		// The oneIn() call below ensures that the parent of a leaf cell will always be selected (if
-		// we make it that far down the hierarchy).
-		if selected != true {
-			t.Errorf("id IsLeaf() and not selected")
-		}
-		*input = append(*input, id)
-		return
-	}
-
-	// The following code ensures that the probability of selecting a cell at each level is
-	// approximately the same, i.e. we test normalization of cells at all levels.
-	if !selected && oneIn(maxLevel-id.Level()) {
-		//  Once a cell has been selected, the expected output is predetermined.  We then make sure
-		//  that cells are selected that will normalize to the desired output.
-		*expected = append(*expected, id)
-		selected = true
-
-	}
-
-	// With the rnd.OneIn() constants below, this function adds an average
-	// of 5/6 * (kMaxLevel - level) cells to "input" where "level" is the
-	// level at which the cell was first selected (level 15 on average).
-	// Therefore the average number of input cells in a test case is about
-	// (5/6 * 15 * 6) = 75.  The average number of output cells is about 6.
-
-	// If a cell is selected, we add it to "input" with probability 5/6.
-	added := false
-	if selected && !oneIn(6) {
-		*input = append(*input, id)
-		added = true
-	}
-	numChildren := 0
-	for child := id.ChildBegin(); child != id.ChildEnd(); child = child.Next() {
-		// If the cell is selected, on average we recurse on 4/12 = 1/3 child.
-		// This intentionally may result in a cell and some of its children
-		// being included in the test case.
-		//
-		// If the cell is not selected, on average we recurse on one child.
-		// We also make sure that we do not recurse on all 4 children, since
-		// then we might include all 4 children in the input case by accident
-		// (in which case the expected output would not be correct).
-		recurse := false
-		if selected {
-			recurse = oneIn(12)
-		} else {
-			recurse = oneIn(4)
-		}
-		if recurse && numChildren < 3 {
-			addCells(child, selected, input, expected, t)
-			numChildren++
-		}
-		// If this cell was selected but the cell itself was not added, we
-		// must ensure that all 4 children (or some combination of their
-		// descendants) are added.
-
-		if selected && !added {
-			addCells(child, selected, input, expected, t)
-		}
-	}
-}
-
-func TestCellUnionNormalizePseudoRandom(t *testing.T) {
-	// Try a bunch of random test cases, and keep track of average statistics
-	// for normalization (to see if they agree with the analysis above).
-
-	inSum := 0
-	outSum := 0
-	iters := 2000
-
-	for i := 0; i < iters; i++ {
-		input := []CellID{}
-		expected := []CellID{}
-		addCells(CellID(0), false, &input, &expected, t)
-		inSum += len(input)
-		outSum += len(expected)
-		cellunion := CellUnion(input)
-		cellunion.Normalize()
-
-		if len(expected) != len(cellunion) {
-			t.Errorf("Expected size of union to be %d, but got %d.",
-				len(expected), len(cellunion))
-		}
-
-		// Test GetCapBound().
-		cb := cellunion.CapBound()
-		for _, ci := range cellunion {
-			if !cb.ContainsCell(CellFromCellID(ci)) {
-				t.Errorf("CapBound %v of union %v should contain cellID %v", cb, cellunion, ci)
-			}
-		}
-
-		for _, j := range input {
-			if !cellunion.ContainsCellID(j) {
-				t.Errorf("Expected containment of CellID %v", j)
-			}
-
-			if cellunion.IntersectsCellID(j) == false {
-				t.Errorf("Expected intersection with %v.", j)
-			}
-
-			if !j.isFace() {
-				if cellunion.IntersectsCellID(j.immediateParent()) == false {
-					t.Errorf("Expected intersection with parent cell %v.", j.immediateParent())
-					if j.Level() > 1 {
-						if cellunion.IntersectsCellID(j.immediateParent().immediateParent()) == false {
-							t.Errorf("Expected intersection with parent's parent %v.",
-								j.immediateParent().immediateParent())
-						}
-						if cellunion.IntersectsCellID(j.Parent(0)) == false {
-							t.Errorf("Expected intersection with parent %v at level 0.", j.Parent(0))
-						}
-					}
-				}
-			}
-
-			if !j.IsLeaf() {
-				if cellunion.ContainsCellID(j.ChildBegin()) == false {
-					t.Errorf("Expected containment of %v.", j.ChildBegin())
-				}
-				if cellunion.IntersectsCellID(j.ChildBegin()) == false {
-					t.Errorf("Expected intersection with %v.", j.ChildBegin())
-				}
-				if cellunion.ContainsCellID(j.ChildEnd().Prev()) == false {
-					t.Errorf("Expected containment of %v.", j.ChildEnd().Prev())
-				}
-				if cellunion.IntersectsCellID(j.ChildEnd().Prev()) == false {
-					t.Errorf("Expected intersection with %v.", j.ChildEnd().Prev())
-				}
-				if cellunion.ContainsCellID(j.ChildBeginAtLevel(maxLevel)) == false {
-					t.Errorf("Expected containment of %v.", j.ChildBeginAtLevel(maxLevel))
-				}
-				if cellunion.IntersectsCellID(j.ChildBeginAtLevel(maxLevel)) == false {
-					t.Errorf("Expected intersection with %v.", j.ChildBeginAtLevel(maxLevel))
-				}
-			}
-		}
-
-		for _, exp := range expected {
-			if !exp.isFace() {
-				if cellunion.ContainsCellID(exp.Parent(exp.Level() - 1)) {
-					t.Errorf("cellunion should not contain its parent %v", exp.Parent(exp.Level()-1))
-				}
-				if cellunion.ContainsCellID(exp.Parent(0)) {
-					t.Errorf("cellunion should not contain the top level parent %v", exp.Parent(0))
-				}
-			}
-		}
-
-		var test []CellID
-		var dummy []CellID
-		addCells(CellID(0), false, &test, &dummy, t)
-		for _, j := range test {
-			intersects := false
-			contains := false
-			for _, k := range expected {
-				if k.Contains(j) {
-					contains = true
-				}
-				if k.Intersects(j) {
-					intersects = true
-				}
-			}
-			if cellunion.ContainsCellID(j) != contains {
-				t.Errorf("Expected contains with %v.", (uint64)(j))
-			}
-			if cellunion.IntersectsCellID(j) != intersects {
-				t.Errorf("Expected intersection with %v.", (uint64)(j))
-			}
-		}
-	}
-	t.Logf("avg in %.2f, avg out %.2f\n", (float64)(inSum)/(float64)(iters), (float64)(outSum)/(float64)(iters))
-}
-
-func TestCellUnionDenormalize(t *testing.T) {
-	tests := []struct {
-		name string
-		minL int
-		lMod int
-		cu   *CellUnion
-		exp  *CellUnion
-	}{
-		{
-			"not expanded, level mod == 1",
-			10,
-			1,
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(11),
-				CellIDFromFace(2).ChildBeginAtLevel(11),
-				CellIDFromFace(3).ChildBeginAtLevel(14),
-				CellIDFromFace(0).ChildBeginAtLevel(10),
-			},
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(11),
-				CellIDFromFace(2).ChildBeginAtLevel(11),
-				CellIDFromFace(3).ChildBeginAtLevel(14),
-				CellIDFromFace(0).ChildBeginAtLevel(10),
-			},
-		},
-		{
-			"not expanded, level mod > 1",
-			10,
-			2,
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(12),
-				CellIDFromFace(2).ChildBeginAtLevel(12),
-				CellIDFromFace(3).ChildBeginAtLevel(14),
-				CellIDFromFace(0).ChildBeginAtLevel(10),
-			},
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(12),
-				CellIDFromFace(2).ChildBeginAtLevel(12),
-				CellIDFromFace(3).ChildBeginAtLevel(14),
-				CellIDFromFace(0).ChildBeginAtLevel(10),
-			},
-		},
-		{
-			"expended, (level - min_level) is not multiple of level mod",
-			10,
-			3,
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(12),
-				CellIDFromFace(5).ChildBeginAtLevel(11),
-			},
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[0],
-				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[1],
-				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[2],
-				CellIDFromFace(2).ChildBeginAtLevel(12).Children()[3],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[0],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[1],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[2],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[0].Children()[3],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[0],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[1],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[2],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[1].Children()[3],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[0],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[1],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[2],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[2].Children()[3],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[0],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[1],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[2],
-				CellIDFromFace(5).ChildBeginAtLevel(11).Children()[3].Children()[3],
-			},
-		},
-		{
-			"expended, level < min_level",
-			10,
-			3,
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(9),
-			},
-			&CellUnion{
-				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[0],
-				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[1],
-				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[2],
-				CellIDFromFace(2).ChildBeginAtLevel(9).Children()[3],
-			},
-		},
-	}
-	for _, test := range tests {
-		if test.cu.Denormalize(test.minL, test.lMod); !reflect.DeepEqual(test.cu, test.exp) {
-			t.Errorf("test: %s; got %v, want %v", test.name, test.cu, test.exp)
-		}
-	}
-}
-
-func TestCellUnionRectBound(t *testing.T) {
-	tests := []struct {
-		cu   *CellUnion
-		want Rect
-	}{
-		{&CellUnion{}, EmptyRect()},
-		{
-			&CellUnion{CellIDFromFace(1)},
-			Rect{
-				r1.Interval{-math.Pi / 4, math.Pi / 4},
-				s1.Interval{math.Pi / 4, 3 * math.Pi / 4},
-			},
-		},
-		{
-			&CellUnion{
-				0x808c000000000000, // Big SFO
-			},
-			Rect{
-				r1.Interval{
-					float64(s1.Degree * 34.644220547108482),
-					float64(s1.Degree * 38.011928357226651),
-				},
-				s1.Interval{
-					float64(s1.Degree * -124.508522987668428),
-					float64(s1.Degree * -121.628309835221216),
-				},
-			},
-		},
-		{
-			&CellUnion{
-				0x89c4000000000000, // Big NYC
-			},
-			Rect{
-				r1.Interval{
-					float64(s1.Degree * 38.794595155857657),
-					float64(s1.Degree * 41.747046884651063),
-				},
-				s1.Interval{
-					float64(s1.Degree * -76.456308667788633),
-					float64(s1.Degree * -73.465162142654819),
-				},
-			},
-		},
-		{
-			&CellUnion{
-				0x89c4000000000000, // Big NYC
-				0x808c000000000000, // Big SFO
-			},
-			Rect{
-				r1.Interval{
-					float64(s1.Degree * 34.644220547108482),
-					float64(s1.Degree * 41.747046884651063),
-				},
-				s1.Interval{
-					float64(s1.Degree * -124.508522987668428),
-					float64(s1.Degree * -73.465162142654819),
-				},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.cu.RectBound(); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
-			t.Errorf("%v.RectBound() = %v, want %v", test.cu, got, test.want)
-		}
-	}
-}
-
-func TestCellUnionLeafCellsCovered(t *testing.T) {
-	tests := []struct {
-		have []CellID
-		want int64
-	}{
-		{},
-		{
-			have: []CellID{},
-			want: 0,
-		},
-		{
-			// One leaf cell on face 0.
-			have: []CellID{
-				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
-			},
-			want: 1,
-		},
-		{
-			// Face 0 itself (which includes the previous leaf cell).
-			have: []CellID{
-				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
-				CellIDFromFace(0),
-			},
-			want: 1 << 60,
-		},
-		/*
-			TODO(roberts): Once Expand is implemented, add the two tests for these
-			// Five faces.
-			cell_union.Expand(0),
-			want: 5 << 60,
-			// Whole world.
-			cell_union.Expand(0),
-			want: 6 << 60,
-		*/
-		{
-			// Add some disjoint cells.
-			have: []CellID{
-				CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
-				CellIDFromFace(0),
-				CellIDFromFace(1).ChildBeginAtLevel(1),
-				CellIDFromFace(2).ChildBeginAtLevel(2),
-				CellIDFromFace(2).ChildEndAtLevel(2).Prev(),
-				CellIDFromFace(3).ChildBeginAtLevel(14),
-				CellIDFromFace(4).ChildBeginAtLevel(27),
-				CellIDFromFace(4).ChildEndAtLevel(15).Prev(),
-				CellIDFromFace(5).ChildBeginAtLevel(30),
-			},
-			want: 1 + (1 << 6) + (1 << 30) + (1 << 32) +
-				(2 << 56) + (1 << 58) + (1 << 60),
-		},
-	}
-
-	for _, test := range tests {
-		cu := CellUnion(test.have)
-		cu.Normalize()
-		if got := cu.LeafCellsCovered(); got != test.want {
-			t.Errorf("CellUnion(%v).LeafCellsCovered() = %v, want %v", cu, got, test.want)
-		}
-	}
-}
-
-func TestCellUnionFromRange(t *testing.T) {
-	for iter := 0; iter < 100; iter++ {
-		min := randomCellIDForLevel(maxLevel)
-		max := randomCellIDForLevel(maxLevel)
-		if min > max {
-			min, max = max, min
-		}
-
-		cu := CellUnionFromRange(min, max.Next())
-		if len(cu) <= 0 {
-			t.Errorf("len(CellUnionFromRange(%v, %v)) = %d, want > 0", min, max.Next(), len(cu))
-		}
-		if min != cu[0].RangeMin() {
-			t.Errorf("%v.RangeMin of CellUnion should not be below the minimum value it was created from %v", cu[0], min)
-		}
-		if max != cu[len(cu)-1].RangeMax() {
-			t.Errorf("%v.RangeMax of CellUnion should not be above the maximum value it was created from %v", cu[len(cu)-1], max)
-		}
-		for i := 1; i < len(cu); i++ {
-			if got, want := cu[i].RangeMin(), cu[i-1].RangeMax().Next(); got != want {
-				t.Errorf("%v.RangeMin() = %v, want %v", cu[i], got, want)
-			}
-		}
-	}
-
-	// Focus on test cases that generate an empty or full range.
-
-	// Test an empty range before the minimum CellID.
-	idBegin := CellIDFromFace(0).ChildBeginAtLevel(maxLevel)
-	cu := CellUnionFromRange(idBegin, idBegin)
-	if len(cu) != 0 {
-		t.Errorf("CellUnionFromRange with begin and end as the first CellID should be empty, got %d", len(cu))
-	}
-
-	// Test an empty range after the maximum CellID.
-	idEnd := CellIDFromFace(5).ChildEndAtLevel(maxLevel)
-	cu = CellUnionFromRange(idEnd, idEnd)
-	if len(cu) != 0 {
-		t.Errorf("CellUnionFromRange with begin and end as the last CellID should be empty, got %d", len(cu))
-	}
-
-	// Test the full sphere.
-	cu = CellUnionFromRange(idBegin, idEnd)
-	if len(cu) != 6 {
-		t.Errorf("CellUnionFromRange from first CellID to last CellID should have 6 cells, got %d", len(cu))
-	}
-
-	for i := 0; i < len(cu); i++ {
-		if !cu[i].isFace() {
-			t.Errorf("CellUnionFromRange for full sphere cu[%d].isFace() = %t, want %t", i, cu[i].isFace(), true)
-		}
-	}
-}
-
-func BenchmarkCellUnionFromRange(b *testing.B) {
-	x := CellIDFromFace(0).ChildBeginAtLevel(maxLevel)
-	y := CellIDFromFace(5).ChildEndAtLevel(maxLevel)
-	for i := 0; i < b.N; i++ {
-		CellUnionFromRange(x, y)
-	}
-}

+ 63 - 0
vendor/github.com/golang/geo/s2/contains_vertex_query.go

@@ -0,0 +1,63 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// ContainsVertexQuery is used to track the edges entering and leaving the
+// given vertex of a Polygon in order to be able to determine if the point is
+// contained by the Polygon.
+//
+// Point containment is defined according to the semi-open boundary model
+// which means that if several polygons tile the region around a vertex,
+// then exactly one of those polygons contains that vertex.
+type ContainsVertexQuery struct {
+	target  Point
+	edgeMap map[Point]int
+}
+
+// NewContainsVertexQuery returns a new query for the given vertex whose
+// containment will be determined.
+func NewContainsVertexQuery(target Point) *ContainsVertexQuery {
+	return &ContainsVertexQuery{
+		target:  target,
+		edgeMap: make(map[Point]int),
+	}
+}
+
+// AddEdge adds the edge between target and v with the given direction.
+// (+1 = outgoing, -1 = incoming, 0 = degenerate).
+func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
+	q.edgeMap[v] += direction
+}
+
+// ContainsVertex reports a +1 if the target vertex is contained, -1 if it is
+// not contained, and 0 if the incident edges consisted of matched sibling pairs.
+func (q *ContainsVertexQuery) ContainsVertex() int {
+	// Find the unmatched edge that is immediately clockwise from Ortho(P).
+	referenceDir := Point{q.target.Ortho()}
+
+	bestPoint := referenceDir
+	bestDir := 0
+
+	for k, v := range q.edgeMap {
+		if v == 0 {
+			continue // This is a "matched" edge.
+		}
+		if OrderedCCW(referenceDir, bestPoint, k, q.target) {
+			bestPoint = k
+			bestDir = v
+		}
+	}
+	return bestDir
+}

+ 410 - 0
vendor/github.com/golang/geo/s2/crossing_edge_query.go

@@ -0,0 +1,410 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/r2"
+)
+
+// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
+// a given edge(s).
+//
+// Note that if you need to query many edges, it is more efficient to declare
+// a single CrossingEdgeQuery instance and reuse it.
+//
+// If you want to find *all* the pairs of crossing edges, it is more efficient to
+// use the not yet implemented VisitCrossings in shapeutil.
+type CrossingEdgeQuery struct {
+	index *ShapeIndex
+
+	// temporary values used while processing a query.
+	a, b r2.Point
+	iter *ShapeIndexIterator
+
+	// candidate cells generated when finding crossings.
+	cells []*ShapeIndexCell
+}
+
+// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
+func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
+	c := &CrossingEdgeQuery{
+		index: index,
+		iter:  index.Iterator(),
+	}
+	return c
+}
+
+// Crossings returns the set of edges of the shape S that intersect the given edge AB.
+// If the CrossingType is Interior, then only intersections at a point interior to both
+// edges are reported, while if it is CrossingTypeAll then edges that share a vertex
+// are also reported.
+func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
+	edges := c.candidates(a, b, shape)
+	if len(edges) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	out := 0
+	n := len(edges)
+
+	for in := 0; in < n; in++ {
+		b := shape.Edge(edges[in])
+		sign := crosser.CrossingSign(b.V0, b.V1)
+		if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
+			edges[out] = edges[in]
+			out++
+		}
+	}
+
+	if out < n {
+		edges = edges[0:out]
+	}
+	return edges
+}
+
+// EdgeMap stores a sorted set of edge ids for each shape.
+type EdgeMap map[Shape][]int
+
+// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
+// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
+// point interior to both edges are reported, while if it is CrossingTypeAll
+// then edges that share a vertex are also reported.
+//
+// The edges are returned as a mapping from shape to the edges of that shape
+// that intersect AB. Every returned shape has at least one crossing edge.
+func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
+	edgeMap := c.candidatesEdgeMap(a, b)
+	if len(edgeMap) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	for shape, edges := range edgeMap {
+		out := 0
+		n := len(edges)
+		for in := 0; in < n; in++ {
+			edge := shape.Edge(edges[in])
+			sign := crosser.CrossingSign(edge.V0, edge.V1)
+			if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
+				edgeMap[shape][out] = edges[in]
+				out++
+			}
+		}
+
+		if out == 0 {
+			delete(edgeMap, shape)
+		} else {
+			if out < n {
+				edgeMap[shape] = edgeMap[shape][0:out]
+			}
+		}
+	}
+	return edgeMap
+}
+
+// candidates returns a superset of the edges of the given shape that intersect
+// the edge AB.
+func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
+	var edges []int
+
+	// For small loops it is faster to use brute force. The threshold below was
+	// determined using benchmarks.
+	const maxBruteForceEdges = 27
+	maxEdges := shape.NumEdges()
+	if maxEdges <= maxBruteForceEdges {
+		edges = make([]int, maxEdges)
+		for i := 0; i < maxEdges; i++ {
+			edges[i] = i
+		}
+		return edges
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return nil
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
+	// TODO(roberts): Shapes don't track their ID, so we need to range over
+	// the index to find the ID manually.
+	var shapeID int32
+	for k, v := range c.index.shapes {
+		if v == shape {
+			shapeID = k
+		}
+	}
+
+	for _, cell := range c.cells {
+		// Skip missing cells. (The nil check previously had an empty body,
+		// making it a no-op; "continue" restores the evident intent.)
+		if cell == nil {
+			continue
+		}
+		clipped := cell.findByShapeID(shapeID)
+		if clipped == nil {
+			continue
+		}
+		edges = append(edges, clipped.edges...)
+	}
+
+	if len(c.cells) > 1 {
+		edges = uniqueInts(edges)
+	}
+
+	return edges
+}
+
+// uniqueInts returns the sorted uniqued values from the given input.
+func uniqueInts(in []int) []int {
+	var edges []int
+	m := make(map[int]bool)
+	for _, i := range in {
+		if m[i] {
+			continue
+		}
+		m[i] = true
+		edges = append(edges, i)
+	}
+	sort.Ints(edges)
+	return edges
+}
+
+// candidatesEdgeMap returns a map from shapes to the superset of edges for that
+// shape that intersect the edge AB.
+//
+// CAVEAT: This method may return shapes that have an empty set of candidate edges.
+// However the return value is non-empty only if at least one shape has a candidate edge.
+func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
+	edgeMap := make(EdgeMap, 0)
+
+	// If there are only a few edges then it's faster to use brute force. We
+	// only bother with this optimization when there is a single shape.
+	if len(c.index.shapes) == 1 {
+		// Typically this method is called many times, so it is worth checking
+		// whether the edge map is empty or already consists of a single entry for
+		// this shape, and skip clearing edge map in that case.
+		shape := c.index.Shape(0)
+
+		// Note that we leave the edge map non-empty even if there are no candidates
+		// (i.e., there is a single entry with an empty set of edges).
+		edgeMap[shape] = c.candidates(a, b, shape)
+		return edgeMap
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return edgeMap
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
+	for _, cell := range c.cells {
+		for _, clipped := range cell.shapes {
+			s := c.index.Shape(clipped.shapeID)
+			for j := 0; j < clipped.numEdges(); j++ {
+				edgeMap[s] = append(edgeMap[s], clipped.edges[j])
+			}
+		}
+	}
+
+	if len(c.cells) > 1 {
+		for s, edges := range edgeMap {
+			edgeMap[s] = uniqueInts(edges)
+		}
+	}
+
+	return edgeMap
+}
+
+// getCells returns the set of ShapeIndexCells that might contain edges intersecting
+// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
+func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
+	aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
+	if ok {
+		c.a = aUV
+		c.b = bUV
+		edgeBound := r2.RectFromPoints(c.a, c.b)
+		if root.Bound().Intersects(edgeBound) {
+			c.computeCellsIntersected(root, edgeBound)
+		}
+	}
+
+	if len(c.cells) == 0 {
+		return nil
+	}
+
+	return c.cells
+}
+
+// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB.
+func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
+	c.cells = nil
+
+	segments := FaceSegments(a, b)
+	for _, segment := range segments {
+		c.a = segment.a
+		c.b = segment.b
+
+		// Optimization: rather than always starting the recursive subdivision at
+		// the top level face cell, instead we start at the smallest S2CellId that
+		// contains the edge (the edge root cell). This typically lets us skip
+		// quite a few levels of recursion since most edges are short.
+		edgeBound := r2.RectFromPoints(c.a, c.b)
+		pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0)
+		edgeRoot := pcell.ShrinkToFit(edgeBound)
+
+		// Now we need to determine how the edge root cell is related to the cells
+		// in the spatial index (cellMap). There are three cases:
+		//
+		//  1. edgeRoot is an index cell or is contained within an index cell.
+		//     In this case we only need to look at the contents of that cell.
+		//  2. edgeRoot is subdivided into one or more index cells. In this case
+		//     we recursively subdivide to find the cells intersected by AB.
+		//  3. edgeRoot does not intersect any index cells. In this case there
+		//     is nothing to do.
+		relation := c.iter.LocateCellID(edgeRoot)
+		if relation == Indexed {
+			// edgeRoot is an index cell or is contained by an index cell (case 1).
+			c.cells = append(c.cells, c.iter.IndexCell())
+		} else if relation == Subdivided {
+			// edgeRoot is subdivided into one or more index cells (case 2). We
+			// find the cells intersected by AB using recursive subdivision.
+			if !edgeRoot.isFace() {
+				pcell = PaddedCellFromCellID(edgeRoot, 0)
+			}
+			c.computeCellsIntersected(pcell, edgeBound)
+		}
+	}
+}
+
+// computeCellsIntersected computes the index cells intersected by the current
+// edge that are descendants of pcell and adds them to this queries set of cells.
+func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {
+
+	c.iter.seek(pcell.id.RangeMin())
+	if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {
+		// The index does not contain pcell or any of its descendants.
+		return
+	}
+	if c.iter.CellID() == pcell.id {
+		// The index contains this cell exactly.
+		c.cells = append(c.cells, c.iter.IndexCell())
+		return
+	}
+
+	// Otherwise, split the edge among the four children of pcell.
+	center := pcell.Middle().Lo()
+
+	if edgeBound.X.Hi < center.X {
+		// Edge is entirely contained in the two left children.
+		c.clipVAxis(edgeBound, center.Y, 0, pcell)
+		return
+	} else if edgeBound.X.Lo >= center.X {
+		// Edge is entirely contained in the two right children.
+		c.clipVAxis(edgeBound, center.Y, 1, pcell)
+		return
+	}
+
+	childBounds := c.splitUBound(edgeBound, center.X)
+	if edgeBound.Y.Hi < center.Y {
+		// Edge is entirely contained in the two lower children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
+	} else if edgeBound.Y.Lo >= center.Y {
+		// Edge is entirely contained in the two upper children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
+	} else {
+		// The edge bound spans all four children. The edge itself intersects
+		// at most three children (since no padding is being used).
+		c.clipVAxis(childBounds[0], center.Y, 0, pcell)
+		c.clipVAxis(childBounds[1], center.Y, 1, pcell)
+	}
+}
+
+// clipVAxis computes the intersected cells recursively for a given padded cell.
+// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
+// determine whether the current edge intersects the lower child, upper child,
+// or both children, and call c.computeCellsIntersected recursively on those children.
+// The center is the v-coordinate at the center of pcell.
+func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
+	if edgeBound.Y.Hi < center {
+		// Edge is entirely contained in the lower child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
+	} else if edgeBound.Y.Lo >= center {
+		// Edge is entirely contained in the upper child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
+	} else {
+		// The edge intersects both children.
+		childBounds := c.splitVBound(edgeBound, center)
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
+	}
+}
+
+// splitUBound returns the bound for two children as a result of splitting the
+// current edge at the given value U.
+func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
+	v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
+	// diag indicates which diagonal of the bounding box is spanned by AB:
+	// it is 0 if AB has positive slope, and 1 if AB has negative slope.
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, 0, diag, u, v)
+}
+
+// splitVBound returns the bound for two children as a result of splitting the
+// current edge into two child edges at the given value V.
+func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
+	u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, diag, 0, u, v)
+}
+
+// splitBound returns the bounds for the two children as a result of splitting
+// the current edge into two child edges at the given point (u,v). uEnd and vEnd
+// indicate which bound endpoints of the first child will be updated.
+func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {
+	var childBounds = [2]r2.Rect{
+		edgeBound,
+		edgeBound,
+	}
+
+	if uEnd == 1 {
+		childBounds[0].X.Lo = u
+		childBounds[1].X.Hi = u
+	} else {
+		childBounds[0].X.Hi = u
+		childBounds[1].X.Lo = u
+	}
+
+	if vEnd == 1 {
+		childBounds[0].Y.Lo = v
+		childBounds[1].Y.Hi = v
+	} else {
+		childBounds[0].Y.Hi = v
+		childBounds[1].Y.Lo = v
+	}
+
+	return childBounds
+}

+ 13 - 15
vendor/github.com/golang/geo/s2/doc.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 /*
 /*
 Package s2 implements types and functions for working with geometry in S² (spherical geometry).
 Package s2 implements types and functions for working with geometry in S² (spherical geometry).

+ 672 - 0
vendor/github.com/golang/geo/s2/edge_clipping.go

@@ -0,0 +1,672 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file contains a collection of methods for:
+//
+//   (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube
+//       (see s2stuv), and
+//
+//   (2) Robustly clipping 2D edges against 2D rectangles.
+//
+// These functions can be used to efficiently find the set of CellIDs that
+// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery).
+
+import (
+	"math"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+)
+
+const (
+	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
+	// compared to the exact result, assuming that the points A and B are in
+	// the rectangle [-1,1]x[1,1] or slightly outside it (by 1e-10 or less).
+	edgeClipErrorUVCoord = 2.25 * dblEpsilon
+
+	// edgeClipErrorUVDist is the maximum distance from a clipped point to
+	// the corresponding exact result. It is equal to the error in a single
+	// coordinate because at most one coordinate is subject to error.
+	edgeClipErrorUVDist = 2.25 * dblEpsilon
+
+	// faceClipErrorRadians is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB. It is equal to the
+	// maximum directional error in PointCross, plus the error when
+	// projecting points onto a cube face.
+	faceClipErrorRadians = 3 * dblEpsilon
+
+	// faceClipErrorUVDist is the same angle expressed as a maximum distance
+	// in (u,v)-space. In other words, a returned vertex is at most this far
+	// from the exact edge AB projected into (u,v)-space.
+	faceClipErrorUVDist = 9 * dblEpsilon
+
+	// faceClipErrorUVCoord is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB expressed as the maximum error
+	// in an individual u- or v-coordinate. In other words, for each
+	// returned vertex there is a point on the exact edge AB whose u- and
+	// v-coordinates differ from the vertex by at most this amount.
+	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
+
+	// intersectsRectErrorUVDist is the maximum error when computing if a point
+	// intersects with a given Rect. If some point of AB is inside the
+	// rectangle by at least this distance, the result is guaranteed to be true;
+	// if all points of AB are outside the rectangle by at least this distance,
+	// the result is guaranteed to be false. This bound assumes that rect is
+	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
+	// (e.g., by 1e-10 or less).
+	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
+)
+
+// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, or false if the edge AB does not intersect.
+// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
+// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
+// the results may differ from those produced by FaceSegments.
+func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
+	return ClipToPaddedFace(a, b, face, 0.0)
+}
+
+// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
+// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
+// Padding must be non-negative.
+func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
+	// Fast path: both endpoints are on the given face.
+	if face(a.Vector) == f && face(b.Vector) == f {
+		au, av := validFaceXYZToUV(f, a.Vector)
+		bu, bv := validFaceXYZToUV(f, b.Vector)
+		return r2.Point{au, av}, r2.Point{bu, bv}, true
+	}
+
+	// Convert everything into the (u,v,w) coordinates of the given face. Note
+	// that the cross product *must* be computed in the original (x,y,z)
+	// coordinate system because PointCross (unlike the mathematical cross
+	// product) can produce different results in different coordinate systems
+	// when one argument is a linear multiple of the other, due to the use of
+	// symbolic perturbations.
+	normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
+	aUVW := pointUVW(faceXYZtoUVW(f, a))
+	bUVW := pointUVW(faceXYZtoUVW(f, b))
+
+	// Padding is handled by scaling the u- and v-components of the normal.
+	// Letting R=1+padding, this means that when we compute the dot product of
+	// the normal with a cube face vertex (such as (-1,-1,1)), we will actually
+	// compute the dot product with the scaled vertex (-R,-R,1). This allows
+	// methods such as intersectsFace, exitAxis, etc, to handle padding
+	// with no further modifications.
+	scaleUV := 1 + padding
+	scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
+	if !scaledN.intersectsFace() {
+		return aUV, bUV, false
+	}
+
+	// TODO(roberts): This is a workaround for extremely small vectors where some
+	// loss of precision can occur in Normalize causing underflow. When PointCross
+	// is updated to work around this, this can be removed.
+	if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
+		normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
+	}
+
+	normUVW = pointUVW{normUVW.Normalize()}
+
+	aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
+	bTan := pointUVW{bUVW.Cross(normUVW.Vector)}
+
+	// As described in clipDestination, if the sum of the scores from clipping the two
+	// endpoints is 3 or more, then the segment does not intersect this face.
+	aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
+	bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)
+
+	return aUV, bUV, aScore+bScore < 3
+}
+
+// ClipEdge returns the portion of the edge defined by AB that is contained by the
+// given rectangle. If there is no intersection, false is returned and aClip and bClip
+// are undefined.
+func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
+	// Compute the bounding rectangle of AB, clip it, and then extract the new
+	// endpoints from the clipped bound.
+	bound := r2.RectFromPoints(a, b)
+	if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
+		return aClip, bClip, false
+	}
+	ai := 0
+	if a.X > b.X {
+		ai = 1
+	}
+	aj := 0
+	if a.Y > b.Y {
+		aj = 1
+	}
+
+	return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
+}
+
+// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges)
+// all compare a sum (u + v) to a third value w. They are implemented in such a
+// way that they produce an exact result even though all calculations are done
+// with ordinary floating-point operations. Here are the principles on which these
+// functions are based:
+//
+// A. If u + v < w in floating-point, then u + v < w in exact arithmetic.
+//
+// B. If u + v < w in exact arithmetic, then at least one of the following
+//    expressions is true in floating-point:
+//       u + v < w
+//       u < w - v
+//       v < w - u
+//
+// Proof: By rearranging terms and substituting ">" for "<", we can assume
+// that all values are non-negative.  Now clearly "w" is not the smallest
+// value, so assume WLOG that "u" is the smallest.  We want to show that
+// u < w - v in floating-point.  If v >= w/2, the calculation of w - v is
+// exact since the result is smaller in magnitude than either input value,
+// so the result holds.  Otherwise we have u <= v < w/2 and w - v >= w/2
+// (even in floating point), so the result also holds.
+
+// sumEqual reports whether u + v == w exactly.
+func sumEqual(u, v, w float64) bool {
+	return (u+v == w) && (u == w-v) && (v == w-u)
+}
+
+// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
+type pointUVW Point
+
+// intersectsFace reports whether a given directed line L intersects the cube face F.
+// The line L is defined by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsFace() bool {
+	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
+	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
+	// and (-1,1,1) do not all have the same sign. This is true exactly when
+	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// We only need to consider the cases where u or v is the smallest value,
+	// since if w is the smallest then both expressions below will have a
+	// positive LHS and a negative RHS.
+	return (v >= w-u) && (u >= w-v)
+}
+
+// intersectsOppositeEdges reports whether a directed line L intersects two
+// opposite edges of a cube face F. This includes the case where L passes
+// exactly through a corner vertex of F. The directed line L is defined
+// by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsOppositeEdges() bool {
+	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
+	// and only exactly two of the corner vertices lie on each side of L. This
+	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
+	// expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// If w is the smallest, the following line returns an exact result.
+	if math.Abs(u-v) != w {
+		return math.Abs(u-v) >= w
+	}
+
+	// Otherwise u - v = w exactly, or w is not the smallest value. In either
+	// case the following returns the correct result.
+	if u >= v {
+		return u-w >= v
+	}
+	return v-w >= u
+}
+
+// axis represents the possible results of exitAxis.
+type axis int
+
+const (
+	axisU axis = iota
+	axisV
+)
+
+// exitAxis reports which axis the directed line L exits the cube face F on.
+// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
+// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
+// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
+// through a corner vertex of the cube face.
+func (p pointUVW) exitAxis() axis {
+	if p.intersectsOppositeEdges() {
+		// The line passes through opposite edges of the face.
+		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
+		// larger absolute magnitude than the v-component.
+		if math.Abs(p.X) >= math.Abs(p.Y) {
+			return axisV
+		}
+		return axisU
+	}
+
+	// The line passes through two adjacent edges of the face.
+	// It exits the v=+1 or v=-1 edge if an even number of the components of N
+	// are negative. We test this using signbit() rather than multiplication
+	// to avoid the possibility of underflow.
+	var x, y, z int
+	if math.Signbit(p.X) {
+		x = 1
+	}
+	if math.Signbit(p.Y) {
+		y = 1
+	}
+	if math.Signbit(p.Z) {
+		z = 1
+	}
+
+	if x^y^z == 0 {
+		return axisV
+	}
+	return axisU
+}
+
+// exitPoint returns the UV coordinates of the point where a directed line L (represented
+// by the CCW normal of this point), exits the cube face this point is derived from along
+// the given axis.
+func (p pointUVW) exitPoint(a axis) r2.Point {
+	if a == axisU {
+		u := -1.0
+		if p.Y > 0 {
+			u = 1.0
+		}
+		return r2.Point{u, (-u*p.X - p.Z) / p.Y}
+	}
+
+	v := -1.0
+	if p.X < 0 {
+		v = 1.0
+	}
+	return r2.Point{(-v*p.Y - p.Z) / p.X, v}
+}
+
+// clipDestination returns a score which is used to indicate if the clipped edge AB
+// on the given face intersects the face at all. This function returns the score for
+// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
+// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
+//
+// First, it clips the line segment AB to find the clipped destination B' on a given
+// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
+// coordinates of that face.) Second, it partially computes whether the segment AB
+// intersects this face at all. The actual condition is fairly complicated, but it
+// turns out that it can be expressed as a "score" that can be computed independently
+// when clipping the two endpoints A and B.
+func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
+	var uv r2.Point
+
+	// Optimization: if B is within the safe region of the face, use it.
+	maxSafeUVCoord := 1 - faceClipErrorUVCoord
+	if b.Z > 0 {
+		uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+		if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
+			return uv, 0
+		}
+	}
+
+	// Otherwise find the point B' where the line AB exits the face.
+	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
+
+	// Lift the exit point B' = (u,v) back to a direction vector on this face (w = 1).
+	p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
+
+	// Determine if the exit point B' is contained within the segment. We do this
+	// by computing the dot products with two inward-facing tangent vectors at A
+	// and B. If either dot product is negative, we say that B' is on the "wrong
+	// side" of that point. As the point B' moves around the great circle AB past
+	// the segment endpoint B, it is initially on the wrong side of B only; as it
+	// moves further it is on the wrong side of both endpoints; and then it is on
+	// the wrong side of A only. If the exit point B' is on the wrong side of
+	// either endpoint, we can't use it; instead the segment is clipped at the
+	// original endpoint B.
+	//
+	// We reject the segment if the sum of the scores of the two endpoints is 3
+	// or more. Here is what that rule encodes:
+	//  - If B' is on the wrong side of A, then the other clipped endpoint A'
+	//    must be in the interior of AB (otherwise AB' would go the wrong way
+	//    around the circle). There is a similar rule for A'.
+	//  - If B' is on the wrong side of either endpoint (and therefore we must
+	//    use the original endpoint B instead), then it must be possible to
+	//    project B onto this face (i.e., its w-coordinate must be positive).
+	//    This rule is only necessary to handle certain zero-length edges (A=B).
+	score := 0
+	if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
+		score = 2 // B' is on wrong side of A.
+	} else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
+		score = 1 // B' is on wrong side of B.
+	}
+
+	if score > 0 { // B' is not in the interior of AB.
+		if b.Z <= 0 {
+			score = 3 // B cannot be projected onto this face.
+		} else {
+			uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+		}
+	}
+
+	return uv, score
+}
+
+// updateEndpoint returns the interval with the specified endpoint updated to
+// the given value. If the value lies beyond the opposite endpoint, nothing is
+// changed and false is returned.
+func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
+	if !highEndpoint {
+		// Raising the low endpoint: fail if value passes the high endpoint.
+		if bound.Hi < value {
+			return bound, false
+		}
+		if bound.Lo < value {
+			bound.Lo = value
+		}
+		return bound, true
+	}
+
+	// Lowering the high endpoint: fail if value passes the low endpoint.
+	if bound.Lo > value {
+		return bound, false
+	}
+	if bound.Hi > value {
+		bound.Hi = value
+	}
+	return bound, true
+}
+
+// clipBoundAxis returns the clipped versions of the bounding intervals for the given
+// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
+// given clip interval. negSlope is a precomputed helper variable that indicates which
+// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
+// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
+// false is returned.
+func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
+	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
+
+	if bound0.Lo < clip.Lo {
+		// If the interval lies entirely below the clip's lower bound, they are disjoint.
+		if bound0.Hi < clip.Lo {
+			return bound0, bound1, false
+		}
+		// Narrow the interval's lower bound to the clip bound.
+		bound0.Lo = clip.Lo
+		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+
+	if bound0.Hi > clip.Hi {
+		// If the interval lies entirely above the clip's upper bound, they are disjoint.
+		if bound0.Lo > clip.Hi {
+			return bound0, bound1, false
+		}
+		// Narrow the interval's upper bound to the clip bound.
+		bound0.Hi = clip.Hi
+		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+	return bound0, bound1, true
+}
+
+// edgeIntersectsRect reports whether the edge defined by AB intersects the
+// given closed rectangle to within the error bound.
+func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
+	// First check whether the bounds of a Rect around AB intersects the given rect.
+	if !r.Intersects(r2.RectFromPoints(a, b)) {
+		return false
+	}
+
+	// Otherwise AB intersects the rect if and only if all four vertices of rect
+	// do not lie on the same side of the extended line AB. We test this by finding
+	// the two vertices of rect with minimum and maximum projections onto the normal
+	// of AB, and computing their dot products with the edge normal.
+	n := b.Sub(a).Ortho()
+
+	// (i, j) selects the rect vertex with the maximum projection onto n;
+	// (1-i, 1-j) is then the vertex with the minimum projection.
+	i := 0
+	if n.X >= 0 {
+		i = 1
+	}
+	j := 0
+	if n.Y >= 0 {
+		j = 1
+	}
+
+	max := n.Dot(r.VertexIJ(i, j).Sub(a))
+	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
+
+	return (max >= 0) && (min <= 0)
+}
+
+// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
+// by AB intersected by clip. The resulting bound may be empty. This is a convenience
+// function built on top of clipEdgeBound.
+func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
+	bound := r2.RectFromPoints(a, b)
+	if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
+		return b1
+	}
+	// The edge does not intersect clip at all.
+	return r2.EmptyRect()
+}
+
+// clipEdgeBound clips an edge AB to a sequence of rectangles efficiently.
+// It represents the clipped edges by their bounding boxes rather than as a pair of
+// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
+// a tight bound of A'B'. This function returns the bound that is a tight bound
+// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
+// it returns false (the returned bound may then be only partially clipped).
+func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
+	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
+	// is false if AB has positive slope, and true if AB has negative slope. This is
+	// used to determine which interval endpoints need to be updated each time
+	// the edge is clipped.
+	negSlope := (a.X > b.X) != (a.Y > b.Y)
+
+	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
+	if !up1 {
+		return bound, false
+	}
+	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
+	if !up2 {
+		return r2.Rect{b0x, b0y}, false
+	}
+	return r2.Rect{X: b1x, Y: b1y}, true
+}
+
+// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
+// given value x is of a and b. This function makes the following guarantees:
+//  - If x == a, then x1 = a1 (exactly).
+//  - If x == b, then x1 = b1 (exactly).
+//  - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
+// This requires a != b.
+func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
+	// To get results that are accurate near both A and B, we interpolate
+	// starting from the closer of the two points.
+	if math.Abs(a-x) <= math.Abs(b-x) {
+		return a1 + (b1-a1)*(x-a)/(b-a)
+	}
+	// x is closer to b, so anchor the interpolation at b instead.
+	return b1 + (a1-b1)*(x-b)/(a-b)
+}
+
+// FaceSegment represents an edge AB clipped to an S2 cube face. It is
+// represented by a face index and a pair of (u,v) coordinates.
+type FaceSegment struct {
+	face int      // index of the cube face this segment lies on
+	a, b r2.Point // segment endpoints in the (u,v) coordinates of face
+}
+
+// FaceSegments subdivides the given edge AB at every point where it crosses the
+// boundary between two S2 cube faces and returns the corresponding FaceSegments.
+// The segments are returned in order from A toward B. The input points must be
+// unit length.
+//
+// This function guarantees that the returned segments form a continuous path
+// from A to B, and that all vertices are within faceClipErrorUVDist of the
+// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
+// The results are consistent with Sign, i.e. the edge is well-defined even if
+// its endpoints are antipodal.
+// TODO(roberts): Extend the implementation of PointCross so that this is true.
+func FaceSegments(a, b Point) []FaceSegment {
+	var segment FaceSegment
+
+	// Fast path: both endpoints are on the same face.
+	var aFace, bFace int
+	aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
+	bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
+	if aFace == bFace {
+		segment.face = aFace
+		return []FaceSegment{segment}
+	}
+
+	// Starting at A, we follow AB from face to face until we reach the face
+	// containing B. The following code is designed to ensure that we always
+	// reach B, even in the presence of numerical errors.
+	//
+	// First we compute the normal to the plane containing A and B. This normal
+	// becomes the ultimate definition of the line AB; it is used to resolve all
+	// questions regarding where exactly the line goes. Unfortunately due to
+	// numerical errors, the line may not quite intersect the faces containing
+	// the original endpoints. We handle this by moving A and/or B slightly if
+	// necessary so that they are on faces intersected by the line AB.
+	ab := a.PointCross(b)
+
+	aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
+	bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)
+
+	// Now we simply follow AB from face to face until we reach B.
+	var segments []FaceSegment
+	segment.face = aFace
+	bSaved := segment.b
+
+	for face := aFace; face != bFace; {
+		// Complete the current segment by finding the point where AB
+		// exits the current face.
+		z := faceXYZtoUVW(face, ab)
+		n := pointUVW{z.Vector}
+
+		exitAxis := n.exitAxis()
+		segment.b = n.exitPoint(exitAxis)
+		segments = append(segments, segment)
+
+		// Compute the next face intersected by AB, and translate the exit
+		// point of the current segment into the (u,v) coordinates of the
+		// next face. This becomes the first point of the next segment.
+		exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
+		face = nextFace(face, segment.b, exitAxis, n, bFace)
+		exitUvw := faceXYZtoUVW(face, Point{exitXyz})
+		segment.face = face
+		segment.a = r2.Point{exitUvw.X, exitUvw.Y}
+	}
+	// Finish the last segment.
+	segment.b = bSaved
+	return append(segments, segment)
+}
+
+// moveOriginToValidFace updates the origin point to a valid face if necessary.
+// Given a line segment AB whose origin A has been projected onto a given cube
+// face, determine whether it is necessary to project A onto a different face
+// instead. This can happen because the normal of the line AB is not computed
+// exactly, so that the line AB (defined as the set of points perpendicular to
+// the normal) may not intersect the cube face containing A. Even if it does
+// intersect the face, the exit point of the line from that face may be on
+// the wrong side of A (i.e., in the direction away from B). If this happens,
+// we reproject A onto the adjacent face where the line AB approaches A most
+// closely. This moves the origin by a small amount, but never more than the
+// error tolerances.
+func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) {
+	// Fast path: if the origin is sufficiently far inside the face, it is
+	// always safe to use it.
+	const maxSafeUVCoord = 1 - faceClipErrorUVCoord
+	if math.Max(math.Abs((aUV).X), math.Abs((aUV).Y)) <= maxSafeUVCoord {
+		return face, aUV
+	}
+
+	// Otherwise check whether the normal AB even intersects this face.
+	z := faceXYZtoUVW(face, ab)
+	n := pointUVW{z.Vector}
+	if n.intersectsFace() {
+		// Check whether the point where the line AB exits this face is on the
+		// wrong side of A (by more than the acceptable error tolerance).
+		uv := n.exitPoint(n.exitAxis())
+		exit := faceUVToXYZ(face, uv.X, uv.Y)
+		aTangent := ab.Normalize().Cross(a.Vector)
+
+		// We can use the given face.
+		if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians {
+			return face, aUV
+		}
+	}
+
+	// Otherwise we reproject A to the nearest adjacent face. (If line AB does
+	// not pass through a given face, it must pass through all adjacent faces.)
+	var dir int // 0 = negative direction, 1 = positive direction along the chosen axis
+	if math.Abs((aUV).X) >= math.Abs((aUV).Y) {
+		// U-axis
+		if aUV.X > 0 {
+			dir = 1
+		}
+		face = uvwFace(face, 0, dir)
+	} else {
+		// V-axis
+		if aUV.Y > 0 {
+			dir = 1
+		}
+		face = uvwFace(face, 1, dir)
+	}
+
+	aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector)
+	aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X))
+	aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y))
+
+	return face, aUV
+}
+
+// nextFace returns the next face that should be visited by FaceSegments, given that
+// we have just visited face and we are following the line AB (represented
+// by its normal N in the (u,v,w) coordinates of that face). The other
+// arguments include the point where AB exits face, the corresponding
+// exit axis, and the target face containing the destination point B.
+func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int {
+	// This works around the C++ implementation's implicit bool-to-int conversions.
+	exitA := exit.X
+	exit1MinusA := exit.Y
+
+	if axis == axisV {
+		exitA = exit.Y
+		exit1MinusA = exit.X
+	}
+	exitAPos := 0
+	if exitA > 0 {
+		exitAPos = 1
+	}
+	exit1MinusAPos := 0
+	if exit1MinusA > 0 {
+		exit1MinusAPos = 1
+	}
+
+	// We return the face that is adjacent to the exit point along the given
+	// axis. If line AB exits *exactly* through a corner of the face, there are
+	// two possible next faces. If one is the target face containing B, then
+	// we guarantee that we advance to that face directly.
+	//
+	// The three conditions below check that (1) AB exits approximately through
+	// a corner, (2) the adjacent face along the non-exit axis is the target
+	// face, and (3) AB exits *exactly* through the corner. (The sumEqual
+	// code checks whether the dot product of (u,v,1) and n is exactly zero.)
+	if math.Abs(exit1MinusA) == 1 &&
+		uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace &&
+		sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) {
+		return targetFace
+	}
+
+	// Otherwise return the face that is adjacent to the exit point in the
+	// direction of the exit axis.
+	return uvwFace(face, int(axis), exitAPos)
+}

+ 227 - 0
vendor/github.com/golang/geo/s2/edge_crosser.go

@@ -0,0 +1,227 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+)
+
+// EdgeCrosser allows edges to be efficiently tested for intersection with a
+// given fixed edge AB. It is especially efficient when testing for
+// intersection with an edge chain connecting vertices v0, v1, v2, ...
+//
+// Example usage:
+//
+//	func CountIntersections(a, b Point, edges []Edge) int {
+//		count := 0
+//		crosser := NewEdgeCrosser(a, b)
+//		for _, edge := range edges {
+//			if crosser.CrossingSign(edge.First, edge.Second) != DoNotCross {
+//				count++
+//			}
+//		}
+//		return count
+//	}
+//
+type EdgeCrosser struct {
+	a   Point
+	b   Point
+	aXb Point
+
+	// To reduce the number of calls to expensiveSign, we compute an
+	// outward-facing tangent at A and B if necessary. If the plane
+	// perpendicular to one of these tangents separates AB from CD (i.e., one
+	// edge on each side) then there is no intersection.
+	aTangent Point // Outward-facing tangent at A.
+	bTangent Point // Outward-facing tangent at B.
+
+	// The fields below are updated for each vertex in the chain.
+	c   Point     // Previous vertex in the vertex chain.
+	acb Direction // The orientation of triangle ACB.
+}
+
+// NewEdgeCrosser returns an EdgeCrosser for the fixed edge AB, precomputing the edge normal and the outward-facing tangents at A and B.
+func NewEdgeCrosser(a, b Point) *EdgeCrosser {
+	norm := a.PointCross(b)
+	return &EdgeCrosser{
+		a:        a,
+		b:        b,
+		aXb:      Point{a.Cross(b.Vector)},
+		aTangent: Point{a.Cross(norm.Vector)},
+		bTangent: Point{norm.Cross(b.Vector)},
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD. If any two
+// vertices from different edges are the same, returns MaybeCross. If either edge
+// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
+//
+// Properties of CrossingSign:
+//
+//  (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+//  (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+//  (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// Note that if you want to check an edge against a chain of other edges,
+// it is slightly more efficient to use the single-argument version
+// ChainCrossingSign below.
+func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.ChainCrossingSign(d)
+}
+
+// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or AB and
+// CD share a vertex and VertexCrossing(a, b, c, d) is true.
+//
+// This method extends the concept of a "crossing" to the case where AB
+// and CD have a vertex in common. The two edges may or may not cross,
+// according to the rules defined in VertexCrossing above. The rules
+// are designed so that point containment tests can be implemented simply
+// by counting edge crossings. Similarly, determining whether one edge
+// chain crosses another edge chain can be implemented by counting.
+func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.EdgeOrVertexChainCrossing(d)
+}
+
+// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
+// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
+//
+// You don't need to use this or any of the chain functions unless you're trying
+// to squeeze out every last drop of performance. Essentially all you are saving
+// is a test of whether the first vertex of the current edge is the same as the
+// second vertex of the previous edge.
+func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
+	e := NewEdgeCrosser(a, b)
+	e.RestartAt(c)
+	return e
+}
+
+// RestartAt sets the current point of the edge crosser to be c.
+// Call this method when your chain 'jumps' to a new place.
+// The point is copied, so the caller need not keep it alive afterwards.
+func (e *EdgeCrosser) RestartAt(c Point) {
+	e.c = c
+	e.acb = -triageSign(e.a, e.b, e.c)
+}
+
+// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
+// the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
+	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
+	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
+	// as part of our state. When each new point D arrives, we compute the
+	// orientation of BDA and check whether it matches ACB. This checks whether
+	// the points C and D are on opposite sides of the great circle through AB.
+
+	// Recall that triageSign is invariant with respect to rotating its
+	// arguments, i.e. ABD has the same orientation as BDA.
+	bda := triageSign(e.a, e.b, d)
+	if e.acb == -bda && bda != Indeterminate {
+		// The most common case -- triangles have opposite orientations. Save the
+		// current vertex D as the next vertex C, and also save the orientation of
+		// the new triangle ACB (which is opposite to the current triangle BDA).
+		e.c = d
+		e.acb = -bda
+		return DoNotCross
+	}
+	return e.crossingSign(d, bda) // Fall back to the slow path.
+}
+
+// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
+// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
+	// We need to copy e.c since it is overwritten by ChainCrossingSign.
+	c := e.c
+	switch e.ChainCrossingSign(d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	}
+	return VertexCrossing(e.a, e.b, c, d)
+}
+
+// crossingSign handles the slow path of CrossingSign.
+func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
+	// Compute the actual result, and then save the current vertex D as the next
+	// vertex C, and save the orientation of the next triangle ACB (which is
+	// opposite to the current triangle BDA).
+	defer func() {
+		e.c = d
+		e.acb = -bda
+	}()
+
+	// At this point, a very common situation is that A,B,C,D are four points on
+	// a line such that AB does not overlap CD. (For example, this happens when
+	// a line or curve is sampled finely, or when geometry is constructed by
+	// computing the union of S2CellIds.) Most of the time, we can determine
+	// that AB and CD do not intersect using the two outward-facing
+	// tangents at A and B (parallel to AB) and testing whether AB and CD are on
+	// opposite sides of the plane perpendicular to one of these tangents. This
+	// is moderately expensive but still much cheaper than expensiveSign.
+
+	// The error in RobustCrossProd is insignificant. The maximum error in
+	// the call to CrossProd (i.e., the maximum norm of the error vector) is
+	// (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
+	// DotProd below is dblEpsilon. (There is also a small relative error
+	// term that is insignificant because we are comparing the result against a
+	// constant that is very close to zero.)
+	maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
+	if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
+		return DoNotCross
+	}
+
+	// Otherwise, eliminate the cases where two vertices from different edges are
+	// equal. (These cases could be handled in the code below, but we would rather
+	// avoid calling expensiveSign if possible.)
+	if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
+		return MaybeCross
+	}
+
+	// Eliminate the cases where an input edge is degenerate. (Note that in
+	// most cases, if CD is degenerate then this method is not even called
+	// because acb and bda have different signs.)
+	if e.a == e.b || e.c == d {
+		return DoNotCross
+	}
+
+	// Otherwise it's time to break out the big guns.
+	if e.acb == Indeterminate {
+		e.acb = -expensiveSign(e.a, e.b, e.c)
+	}
+	if bda == Indeterminate {
+		bda = expensiveSign(e.a, e.b, d)
+	}
+
+	if bda != e.acb {
+		return DoNotCross
+	}
+
+	cbd := -RobustSign(e.c, d, e.b)
+	if cbd != e.acb {
+		return DoNotCross
+	}
+	dac := RobustSign(e.c, d, e.a)
+	if dac != e.acb {
+		return DoNotCross
+	}
+	return Cross
+}

+ 394 - 0
vendor/github.com/golang/geo/s2/edge_crossings.go

@@ -0,0 +1,394 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+const (
+	// intersectionError bounds the error of computed intersection points. It can
+	// be set somewhat arbitrarily, because the algorithm uses more precision as
+	// needed in order to achieve the specified error. The only strict requirement
+	// is that intersectionError >= dblEpsilon radians. However, using a larger
+	// error tolerance makes the algorithm more efficient because it reduces the
+	// number of cases where exact arithmetic is needed.
+	intersectionError = s1.Angle(8 * dblEpsilon)
+
+	// intersectionMergeRadius is used to ensure that intersection points that
+	// are supposed to be coincident are merged back together into a single
+	// vertex. This is required in order for various polygon operations (union,
+	// intersection, etc) to work correctly. It is twice the intersection error
+	// because two coincident intersection points may have errors in
+	// opposite directions.
+	intersectionMergeRadius = 2 * intersectionError
+)
+
+// A Crossing indicates how two edges relate: crossing, possibly sharing a vertex, or disjoint.
+type Crossing int
+
+const (
+	// Cross means the edges cross.
+	Cross Crossing = iota
+	// MaybeCross means two vertices from different edges are the same.
+	MaybeCross
+	// DoNotCross means the edges do not cross.
+	DoNotCross
+)
+
+// String returns the name of the Crossing value.
+func (c Crossing) String() string {
+	switch c {
+	case Cross:
+		return "Cross"
+	case MaybeCross:
+		return "MaybeCross"
+	case DoNotCross:
+		return "DoNotCross"
+	default:
+		return fmt.Sprintf("(BAD CROSSING %d)", c)
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD.
+// If AB crosses CD at a point that is interior to both edges, Cross is returned.
+// If any two vertices from different edges are the same it returns MaybeCross.
+// Otherwise it returns DoNotCross.
+// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
+// if two vertices from different edges are the same and DoNotCross otherwise.
+//
+// Properties of CrossingSign:
+//
+//  (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+//  (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+//  (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// This method implements an exact, consistent perturbation model such
+// that no three points are ever considered to be collinear. This means
+// that even if you have 4 points A, B, C, D that lie exactly in a line
+// (say, around the equator), C and D will be treated as being slightly to
+// one side or the other of AB. This is done in a way such that the
+// results are always consistent (see RobustSign).
+func CrossingSign(a, b, c, d Point) Crossing {
+	crosser := NewChainEdgeCrosser(a, b, c)
+	return crosser.ChainCrossingSign(d)
+}
+
+// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
+// containment tests can be implemented by counting the number of edge crossings.
+//
+// Given two edges AB and CD where at least two vertices are identical
+// (i.e. CrossingSign(a,b,c,d) == MaybeCross), the basic rule is that a "crossing"
+// occurs if AB is encountered after CD during a CCW sweep around the shared
+// vertex starting from a fixed reference point.
+//
+// Note that according to this rule, if AB crosses CD then in general CD
+// does not cross AB. However, this leads to the correct result when
+// counting polygon edge crossings. For example, suppose that A,B,C are
+// three consecutive vertices of a CCW polygon. If we now consider the edge
+// crossings of a segment BP as P sweeps around B, the crossing number
+// changes parity exactly when BP crosses BA or BC.
+//
+// Useful properties of VertexCrossing (VC):
+//
+//  (1) VC(a,a,c,d) == VC(a,b,c,c) == false
+//  (2) VC(a,b,a,b) == VC(a,b,b,a) == true
+//  (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
+//  (4) If exactly one of a,b equals one of c,d, then exactly one of
+//      VC(a,b,c,d) and VC(c,d,a,b) is true
+//
+// It is an error to call this method with 4 distinct vertices.
+func VertexCrossing(a, b, c, d Point) bool {
+	// If A == B or C == D there is no intersection. We need to check this
+	// case first in case 3 or more input points are identical.
+	if a == b || c == d {
+		return false
+	}
+
+	// If any other pair of vertices is equal, there is a crossing if and only
+	// if OrderedCCW indicates that the edge AB is further CCW around the
+	// shared vertex O (either A or B) than the edge CD, starting from an
+	// arbitrary fixed reference point.
+	switch {
+	case a == d:
+		return OrderedCCW(Point{a.Ortho()}, c, b, a)
+	case b == c:
+		return OrderedCCW(Point{b.Ortho()}, d, a, b)
+	case a == c:
+		return OrderedCCW(Point{a.Ortho()}, d, b, a)
+	case b == d:
+		return OrderedCCW(Point{b.Ortho()}, c, a, b)
+	}
+
+	return false
+}
+
+// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
+// handle cases where all four vertices are distinct, and VertexCrossing to
+// handle cases where two or more vertices are the same. This defines a crossing
+// function such that point-in-polygon containment tests can be implemented
+// by counting the parity of edge crossings.
+func EdgeOrVertexCrossing(a, b, c, d Point) bool {
+	switch CrossingSign(a, b, c, d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	default:
+		return VertexCrossing(a, b, c, d)
+	}
+}
+
+// Intersection returns the intersection point of two edges AB and CD that cross
+// (CrossingSign(a,b,c,d) == Cross).
+//
+// Useful properties of Intersection:
+//
+//  (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
+//  (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
+//
+// The returned intersection point X is guaranteed to be very close to the
+// true intersection point of AB and CD, even if the edges intersect at a
+// very small angle.
+func Intersection(a0, a1, b0, b1 Point) Point {
+	// It is difficult to compute the intersection point of two edges accurately
+	// when the angle between the edges is very small. Previously we handled
+	// this by only guaranteeing that the returned intersection point is within
+	// intersectionError of each edge. However, this means that when the edges
+	// cross at a very small angle, the computed result may be very far from the
+	// true intersection point.
+	//
+	// Instead this function now guarantees that the result is always within
+	// intersectionError of the true intersection. This requires using more
+	// sophisticated techniques and in some cases extended precision.
+	//
+	//  - intersectionStable computes the intersection point using
+	//    projection and interpolation, taking care to minimize cancellation
+	//    error.
+	//
+	//  - intersectionExact computes the intersection point using precision
+	//    arithmetic and converts the final result back to a Point.
+	pt, ok := intersectionStable(a0, a1, b0, b1)
+	if !ok {
+		pt = intersectionExact(a0, a1, b0, b1)
+	}
+
+	// Make sure the intersection point is on the correct side of the sphere.
+	// Since all vertices are unit length, and edges are less than 180 degrees,
+	// (a0 + a1) and (b0 + b1) both have positive dot product with the
+	// intersection point.  We use the sum of all vertices to make sure that the
+	// result is unchanged when the edges are swapped or reversed.
+	if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 {
+		pt = Point{pt.Mul(-1)}
+	}
+
+	return pt
+}
+
+// robustNormalWithLength computes the cross product of two vectors, normalized
+// to be unit length. It also returns the length of the cross product before
+// normalization, which is useful for estimating the amount of error in the
+// result. For numerical stability, the vectors should both be approximately
+// unit length.
+func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
+	var pt r3.Vector
+	// This computes 2 * (x.Cross(y)), but has much better numerical
+	// stability when x and y are unit length.
+	tmp := x.Sub(y).Cross(x.Add(y))
+	length := tmp.Norm()
+	if length != 0 {
+		pt = tmp.Mul(1 / length)
+	}
+	return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
+}
+
+/*
+// intersectionSimple is not used by the C++ so it is skipped here.
+*/
+
+// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
+// on the error in the result. aNorm is not necessarily unit length.
+//
+// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
+// a0 and a1) allow this dot product to be computed more accurately and efficiently.
+func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
+	// The error in the dot product is proportional to the lengths of the input
+	// vectors, so rather than using x itself (a unit-length vector), we use
+	// the vectors from x to the closer of the two edge endpoints. This
+	// typically reduces the error by a huge factor.
+	x0 := x.Sub(a0.Vector)
+	x1 := x.Sub(a1.Vector)
+	x0Dist2 := x0.Norm2()
+	x1Dist2 := x1.Norm2()
+
+	// If both distances are the same, we need to be careful to choose one
+	// endpoint deterministically so that the result does not change if the
+	// order of the endpoints is reversed.
+	var dist float64
+	if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) { // deterministic tie-break on exact equality
+		dist = math.Sqrt(x0Dist2)
+		proj = x0.Dot(aNorm)
+	} else {
+		dist = math.Sqrt(x1Dist2)
+		proj = x1.Dot(aNorm)
+	}
+
+	// This calculation bounds the error from all sources: the computation of
+	// the normal, the subtraction of one endpoint, and the dot product itself.
+	// dblEpsilon appears because the input points are assumed to be
+	// normalized in double precision.
+	//
+	// For reference, the bounds that went into this calculation are:
+	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblEpsilon) * epsilon
+	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
+	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
+	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblEpsilon)*dist + 1.5*math.Abs(proj)) * epsilon
+	return proj, bound
+}
+
+// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
+// ordering on edges that is invariant under edge reversals.
+func compareEdges(a0, a1, b0, b1 Point) bool {
+	// Put each edge's endpoints into a canonical (lexicographically
+	// increasing) order so the result does not depend on edge direction.
+	if a0.Cmp(a1.Vector) != -1 {
+		a0, a1 = a1, a0
+	}
+	if b0.Cmp(b1.Vector) != -1 {
+		b0, b1 = b1, b0
+	}
+	// Compare the smaller endpoints first, breaking ties with the larger
+	// endpoints. (Comparing b0 with b1 here, as the original code did, is
+	// vacuous after the swap above and does not yield a total ordering.)
+	return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && a1.Cmp(b1.Vector) == -1)
+}
+
+// intersectionStable returns the intersection point of the edges (a0,a1) and
+// (b0,b1) if it can be computed to within an error of at most intersectionError
+// by this function. The returned bool reports whether that accuracy was
+// achieved.
+//
+// The intersection point is not guaranteed to have the correct sign because we
+// choose to use the longest of the two edges first. The sign is corrected by
+// Intersection.
+func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
+	// Sort the two edges so that (a0,a1) is longer, breaking ties in a
+	// deterministic way that does not depend on the ordering of the endpoints.
+	// This is desirable for two reasons:
+	//  - So that the result doesn't change when edges are swapped or reversed.
+	//  - It reduces error, since the first edge is used to compute the edge
+	//    normal (where a longer edge means less error), and the second edge
+	//    is used for interpolation (where a shorter edge means less error).
+	aLen2 := a1.Sub(a0.Vector).Norm2()
+	bLen2 := b1.Sub(b0.Vector).Norm2()
+	if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) {
+		return intersectionStableSorted(b0, b1, a0, a1)
+	}
+	return intersectionStableSorted(a0, a1, b0, b1)
+}
+
+// intersectionStableSorted is a helper function for intersectionStable.
+// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
+// the first edge passed in is longer. It returns false if the intersection
+// could not be computed within the required error bound.
+func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
+	var pt Point
+
+	// Compute the normal of the plane through (a0, a1) in a stable way.
+	aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
+	aNormLen := aNorm.Norm()
+	bLen := b1.Sub(b0.Vector).Norm()
+
+	// Compute the projection (i.e., signed distance) of b0 and b1 onto the
+	// plane through (a0, a1).  Distances are scaled by the length of aNorm.
+	b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
+	b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)
+
+	// The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
+	// |b0Dist - b1Dist|.  Note that b0Dist and b1Dist generally have
+	// opposite signs because b0 and b1 are on opposite sides of (a0, a1).  The
+	// code below finds the intersection point by interpolating along the edge
+	// (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
+	//
+	// It can be shown that the maximum error in the interpolation fraction is
+	//
+	//   (b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
+	//
+	// We save ourselves some work by scaling the result and the error bound by
+	// "distSum", since the result is normalized to be unit length anyway.
+	distSum := math.Abs(b0Dist - b1Dist)
+	errorSum := b0Error + b1Error
+	if distSum <= errorSum {
+		return pt, false // Error is unbounded in this case.
+	}
+
+	x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
+	err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
+		(distSum-errorSum) + 2*distSum*epsilon
+
+	// Finally we normalize the result, compute the corresponding error, and
+	// check whether the total error is acceptable.
+	xLen := x.Norm()
+	maxError := intersectionError
+	if err > (float64(maxError)-epsilon)*xLen {
+		return pt, false
+	}
+
+	return Point{x.Mul(1 / xLen)}, true
+}
+
+// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
+// using precise arithmetic. Note that the result is not exact because it is
+// rounded down to double precision at the end. Also, the intersection point
+// is not guaranteed to have the correct sign (i.e., the return value may need
+// to be negated).
+func intersectionExact(a0, a1, b0, b1 Point) Point {
+	// Since we are using precise arithmetic, we don't need to worry about
+	// numerical stability.
+	a0P := r3.PreciseVectorFromVector(a0.Vector)
+	a1P := r3.PreciseVectorFromVector(a1.Vector)
+	b0P := r3.PreciseVectorFromVector(b0.Vector)
+	b1P := r3.PreciseVectorFromVector(b1.Vector)
+	aNormP := a0P.Cross(a1P)
+	bNormP := b0P.Cross(b1P)
+	xP := aNormP.Cross(bNormP)
+
+	// The final Normalize() call is done in double precision, which creates a
+	// directional error of up to 2*dblEpsilon. (Precise conversion and Normalize()
+	// each contribute up to dblEpsilon of directional error.)
+	x := xP.Vector()
+
+	if x == (r3.Vector{}) {
+		// The two edges are exactly collinear, but we still consider them to be
+		// "crossing" because of simulation of simplicity. Out of the four
+		// endpoints, exactly two lie in the interior of the other edge. Of
+		// those two we return the one that is lexicographically smallest.
+		x = r3.Vector{10, 10, 10} // Greater than any valid S2Point
+
+		aNorm := Point{aNormP.Vector()}
+		bNorm := Point{bNormP.Vector()}
+		if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 {
+			return a0
+		}
+		if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 {
+			return a1
+		}
+		if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 {
+			return b0
+		}
+		if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 {
+			return b1
+		}
+	}
+
+	return Point{x}
+}

+ 318 - 0
vendor/github.com/golang/geo/s2/edge_distances.go

@@ -0,0 +1,318 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file defines a collection of methods for computing the distance to an edge,
+// interpolating along an edge, projecting points onto edges, etc.
+
+import (
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// DistanceFromSegment returns the distance of point X from line segment AB.
+// The points are expected to be normalized. The result is very accurate for small
+// distances but may have some numerical error if the distance is large
+// (approximately pi/2 or greater). The case A == B is handled correctly.
+func DistanceFromSegment(x, a, b Point) s1.Angle {
+	var minDist s1.ChordAngle
+	// alwaysUpdate is true, so the distance is computed unconditionally
+	// (the zero initial value of minDist is never used as a cutoff).
+	minDist, _ = updateMinDistance(x, a, b, minDist, true)
+	return minDist.Angle()
+}
+
+// IsDistanceLess reports whether the distance from X to the edge AB is less
+// than limit. This method is faster than DistanceFromSegment(). If you want to
+// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
+// once and save the value, since this conversion is relatively expensive.
+func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+	// Only the "was it updated" result is needed; the updated value is discarded.
+	_, less := UpdateMinDistance(x, a, b, limit)
+	return less
+}
+
+// UpdateMinDistance checks if the distance from X to the edge AB is less
+// than minDist, and if so, returns the updated value and true.
+// The case A == B is handled correctly.
+//
+// Use this method when you want to compute many distances and keep track of
+// the minimum. It is significantly faster than using DistanceFromSegment
+// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it
+// can save a lot of work by not actually computing the distance when it is
+// obviously larger than the current minimum.
+func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	return updateMinDistance(x, a, b, minDist, false)
+}
+
+// IsInteriorDistanceLess reports whether the minimum distance from X to the
+// edge AB is attained at an interior point of AB (i.e., not an endpoint), and
+// that distance is less than limit.
+func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+	// Only the "was it updated" result is needed; the value is discarded.
+	_, less := UpdateMinInteriorDistance(x, a, b, limit)
+	return less
+}
+
+// UpdateMinInteriorDistance reports whether the minimum distance from X to AB
+// is attained at an interior point of AB (i.e., not an endpoint), and that distance
+// is less than minDist. If so, the value of minDist is updated and true is returned.
+// Otherwise it is unchanged and returns false.
+func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	// alwaysUpdate is false, so minDist acts as a strict cutoff.
+	return interiorDist(x, a, b, minDist, false)
+}
+
+// Project returns the point along the edge AB that is closest to the point X.
+// The fractional distance of this point along the edge AB can be obtained
+// using DistanceFraction.
+//
+// This requires that all points are unit length.
+func Project(x, a, b Point) Point {
+	// aXb is (a robust version of) the normal of the great circle through A and B.
+	aXb := a.PointCross(b)
+	// Find the closest point to X along the great circle through AB.
+	p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
+
+	// If this point is on the edge AB, then it's the closest point.
+	if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
+		return Point{p.Normalize()}
+	}
+
+	// Otherwise, the closest point is either A or B.
+	if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
+		return a
+	}
+	return b
+}
+
+// DistanceFraction returns the distance ratio of the point X along an edge AB.
+// If X is on the line segment AB, this is the fraction T such
+// that X == Interpolate(T, A, B).
+//
+// This requires that A and B are distinct.
+func DistanceFraction(x, a, b Point) float64 {
+	// Angular distances from X to each endpoint.
+	d0 := x.Angle(a.Vector)
+	d1 := x.Angle(b.Vector)
+	return float64(d0 / (d0 + d1))
+}
+
+// Interpolate returns the point X along the line segment AB whose distance from A
+// is the given fraction "t" of the distance AB. Does NOT require that "t" be
+// between 0 and 1. Note that all distances are measured on the surface of
+// the sphere, so this is more complicated than just computing (1-t)*a + t*b
+// and normalizing the result.
+func Interpolate(t float64, a, b Point) Point {
+	// Return the endpoints exactly for t == 0 and t == 1 so that no rounding
+	// error is introduced in those common cases.
+	if t == 0 {
+		return a
+	}
+	if t == 1 {
+		return b
+	}
+	ab := a.Angle(b.Vector)
+	return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
+}
+
+// InterpolateAtDistance returns the point X along the line segment AB whose
+// distance from A is the angle ax.
+func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
+	aRad := ax.Radians()
+
+	// Use PointCross to compute the tangent vector at A towards B. The
+	// result is always perpendicular to A, even if A=B or A=-B, but it is not
+	// necessarily unit length. (We effectively normalize it below.)
+	normal := a.PointCross(b)
+	// tangent is perpendicular to both A and the normal, so it lies in the
+	// plane of the great circle through A and B.
+	tangent := normal.Vector.Cross(a.Vector)
+
+	// Now compute the appropriate linear combination of A and "tangent". With
+	// infinite precision the result would always be unit length, but we
+	// normalize it anyway to ensure that the error is within acceptable bounds.
+	// (Otherwise errors can build up when the result of one interpolation is
+	// fed into another interpolation.)
+	return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
+}
+
+// minUpdateDistanceMaxError returns the maximum error in the result of
+// UpdateMinDistance (and the associated functions such as
+// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
+// input points are normalized to within the bounds guaranteed by r3.Vector's
+// Normalize. The error can be added or subtracted from an s1.ChordAngle
+// using its Expanded method.
+func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
+	// There are two cases for the maximum error in UpdateMinDistance(),
+	// depending on whether the closest point is interior to the edge.
+	// The endpoint (vertex) case is bounded by the chord angle's MaxPointError.
+	return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError())
+}
+
+// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
+// UpdateMinInteriorDistance, assuming that all input points are normalized
+// to within the bounds guaranteed by Point's Normalize. The error can be added
+// or subtracted from an s1.ChordAngle using its Expanded method.
+func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
+	// This bound includes all sources of error, assuming that the input points
+	// are normalized. a and b are components of chord length that are
+	// perpendicular and parallel to a plane containing the edge respectively.
+	b := 0.5 * float64(dist) * float64(dist)
+	a := float64(dist) * math.Sqrt(1-0.5*b)
+	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
+		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
+		(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
+}
+
+// updateMinDistance computes the distance from a point X to a line segment AB,
+// and if either the distance was less than the given minDist, or alwaysUpdate is
+// true, the value and whether it was updated are returned.
+func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
+		// Minimum distance is attained along the edge interior.
+		return d, true
+	}
+
+	// Otherwise the minimum distance is to one of the endpoints.
+	// (An s1.ChordAngle is a squared chord length, so the squared norms can
+	// be used directly.)
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+	dist := s1.ChordAngle(math.Min(xa2, xb2))
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+	return dist, true
+}
+
+// interiorDist returns the shortest distance from point x to edge ab, assuming
+// that the closest point to X is interior to AB. If the closest point is not
+// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is
+// false, the distance is only updated when it is less than the given minDist.
+func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	// Chord distance of x to both end points a and b.
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+
+	// The closest point on AB could either be one of the two vertices (the
+	// vertex case) or in the interior (the interior case). Let C = A x B.
+	// If X is in the spherical wedge extending from A to B around the axis
+	// through C, then we are in the interior case. Otherwise we are in the
+	// vertex case.
+	//
+	// Check whether we might be in the interior case. For this to be true, XAB
+	// and XBA must both be acute angles. Checking this condition exactly is
+	// expensive, so instead we consider the planar triangle ABX (which passes
+	// through the sphere's interior). The planar angles XAB and XBA are always
+	// less than the corresponding spherical angles, so if we are in the
+	// interior case then both of these angles must be acute.
+	//
+	// We check this by computing the squared edge lengths of the planar
+	// triangle ABX, and testing acuteness using the law of cosines:
+	//
+	//   max(XA^2, XB^2) < min(XA^2, XB^2) + AB^2
+	if math.Max(xa2, xb2) >= math.Min(xa2, xb2)+(a.Sub(b.Vector)).Norm2() {
+		return minDist, false
+	}
+
+	// The minimum distance might be to a point on the edge interior. Let R
+	// be closest point to X that lies on the great circle through AB. Rather
+	// than computing the geodesic distance along the surface of the sphere,
+	// instead we compute the "chord length" through the sphere's interior.
+	//
+	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
+	// is the point X projected onto the plane through the great circle AB.
+	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
+	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
+	// is faster and the corresponding distance on the Earth's surface is
+	// accurate to within 1% for distances up to about 1800km.
+	c := a.PointCross(b)
+	c2 := c.Norm2()
+	xDotC := x.Dot(c.Vector)
+	xDotC2 := xDotC * xDotC
+	if !alwaysUpdate && xDotC2 >= c2*float64(minDist) {
+		// The closest point on the great circle AB is too far away.
+		return minDist, false
+	}
+
+	// Otherwise we do the exact, more expensive test for the interior case.
+	// This test is very likely to succeed because of the conservative planar
+	// test we did initially.
+	cx := c.Cross(x.Vector)
+	if a.Dot(cx) >= 0 || b.Dot(cx) <= 0 {
+		return minDist, false
+	}
+
+	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
+	// This calculation has good accuracy for all chord lengths since it
+	// is based on both the dot product and cross product (rather than
+	// deriving one from the other). However, note that the chord length
+	// representation itself loses accuracy as the angle approaches π.
+	qr := 1 - math.Sqrt(cx.Norm2()/c2)
+	dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr))
+
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+
+	return dist, true
+}
+
+// updateEdgePairMinDistance computes the minimum distance between edge (a0,a1)
+// and edge (b0,b1). If the distance is less than minDist, the updated value
+// and true are returned; otherwise the original minDist and false are returned.
+func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	if minDist == 0 {
+		// A minimum of zero cannot be improved upon.
+		return 0, false
+	}
+	if CrossingSign(a0, a1, b0, b1) == Cross {
+		// The edges cross, so the distance between them is zero.
+		minDist = 0
+		return 0, true
+	}
+
+	// Otherwise, the minimum distance is achieved at an endpoint of at least
+	// one of the two edges. We ensure that all four possibilities are always checked.
+	//
+	// The calculation below computes each of the six vertex-vertex distances
+	// twice (this could be optimized).
+	var ok1, ok2, ok3, ok4 bool
+	minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
+	minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
+	minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
+	minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
+	return minDist, ok1 || ok2 || ok3 || ok4
+}
+
+// EdgePairClosestPoints returns the pair of points (a, b) that achieves the
+// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and
+// b is a point on b0b1. If the two edges intersect, a and b are both equal to
+// the intersection point. Handles a0 == a1 and b0 == b1 correctly.
+func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
+	if CrossingSign(a0, a1, b0, b1) == Cross {
+		x := Intersection(a0, a1, b0, b1)
+		return x, x
+	}
+	// We save some work by first determining which vertex/edge pair achieves
+	// the minimum distance, and then computing the closest point on that edge.
+	var minDist s1.ChordAngle
+	var ok bool
+
+	// Seed minDist with the distance for vertex a0 (alwaysUpdate is true, so
+	// this call always sets a value); the later calls only update minDist and
+	// closestVertex when they find something strictly closer.
+	minDist, ok = updateMinDistance(a0, b0, b1, minDist, true)
+	closestVertex := 0
+	if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok {
+		closestVertex = 1
+	}
+	if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok {
+		closestVertex = 2
+	}
+	if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
+		closestVertex = 3
+	}
+	switch closestVertex {
+	case 0:
+		return a0, Project(a0, b0, b1)
+	case 1:
+		return a1, Project(a1, b0, b1)
+	case 2:
+		return Project(b0, a0, a1), b0
+	case 3:
+		return Project(b1, a0, a1), b1
+	default:
+		// closestVertex is always set to 0..3 above, so this is unreachable.
+		panic("illegal case reached")
+	}
+}

+ 0 - 1293
vendor/github.com/golang/geo/s2/edgeutil.go

@@ -1,1293 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
-	// compared to the exact result, assuming that the points A and B are in
-	// the rectangle [-1,1]x[1,1] or slightly outside it (by 1e-10 or less).
-	edgeClipErrorUVCoord = 2.25 * dblEpsilon
-
-	// edgeClipErrorUVDist is the maximum distance from a clipped point to
-	// the corresponding exact result. It is equal to the error in a single
-	// coordinate because at most one coordinate is subject to error.
-	edgeClipErrorUVDist = 2.25 * dblEpsilon
-
-	// faceClipErrorRadians is the maximum angle between a returned vertex
-	// and the nearest point on the exact edge AB. It is equal to the
-	// maximum directional error in PointCross, plus the error when
-	// projecting points onto a cube face.
-	faceClipErrorRadians = 3 * dblEpsilon
-
-	// faceClipErrorDist is the same angle expressed as a maximum distance
-	// in (u,v)-space. In other words, a returned vertex is at most this far
-	// from the exact edge AB projected into (u,v)-space.
-	faceClipErrorUVDist = 9 * dblEpsilon
-
-	// faceClipErrorUVCoord is the maximum angle between a returned vertex
-	// and the nearest point on the exact edge AB expressed as the maximum error
-	// in an individual u- or v-coordinate. In other words, for each
-	// returned vertex there is a point on the exact edge AB whose u- and
-	// v-coordinates differ from the vertex by at most this amount.
-	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
-
-	// intersectsRectErrorUVDist is the maximum error when computing if a point
-	// intersects with a given Rect. If some point of AB is inside the
-	// rectangle by at least this distance, the result is guaranteed to be true;
-	// if all points of AB are outside the rectangle by at least this distance,
-	// the result is guaranteed to be false. This bound assumes that rect is
-	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
-	// (e.g., by 1e-10 or less).
-	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
-
-	// intersectionError can be set somewhat arbitrarily, because the algorithm
-	// uses more precision if necessary in order to achieve the specified error.
-	// The only strict requirement is that intersectionError >= dblEpsilon
-	// radians. However, using a larger error tolerance makes the algorithm more
-	// efficient because it reduces the number of cases where exact arithmetic is
-	// needed.
-	intersectionError = s1.Angle(4 * dblEpsilon)
-
-	// intersectionMergeRadius is used to ensure that intersection points that
-	// are supposed to be coincident are merged back together into a single
-	// vertex. This is required in order for various polygon operations (union,
-	// intersection, etc) to work correctly. It is twice the intersection error
-	// because two coincident intersection points might have errors in
-	// opposite directions.
-	intersectionMergeRadius = 2 * intersectionError
-)
-
-// SimpleCrossing reports whether edge AB crosses CD at a point that is interior
-// to both edges. Properties:
-//
-//  (1) SimpleCrossing(b,a,c,d) == SimpleCrossing(a,b,c,d)
-//  (2) SimpleCrossing(c,d,a,b) == SimpleCrossing(a,b,c,d)
-func SimpleCrossing(a, b, c, d Point) bool {
-	// We compute the equivalent of Sign for triangles ACB, CBD, BDA,
-	// and DAC. All of these triangles need to have the same orientation
-	// (CW or CCW) for an intersection to exist.
-
-	ab := a.Vector.Cross(b.Vector)
-	acb := -(ab.Dot(c.Vector))
-	bda := ab.Dot(d.Vector)
-	if acb*bda <= 0 {
-		return false
-	}
-
-	cd := c.Vector.Cross(d.Vector)
-	cbd := -(cd.Dot(b.Vector))
-	dac := cd.Dot(a.Vector)
-	return (acb*cbd > 0) && (acb*dac > 0)
-}
-
-// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
-// containment tests can be implemented by counting the number of edge crossings.
-//
-// Given two edges AB and CD where at least two vertices are identical
-// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing"
-// occurs if AB is encountered after CD during a CCW sweep around the shared
-// vertex starting from a fixed reference point.
-//
-// Note that according to this rule, if AB crosses CD then in general CD
-// does not cross AB.  However, this leads to the correct result when
-// counting polygon edge crossings.  For example, suppose that A,B,C are
-// three consecutive vertices of a CCW polygon.  If we now consider the edge
-// crossings of a segment BP as P sweeps around B, the crossing number
-// changes parity exactly when BP crosses BA or BC.
-//
-// Useful properties of VertexCrossing (VC):
-//
-//  (1) VC(a,a,c,d) == VC(a,b,c,c) == false
-//  (2) VC(a,b,a,b) == VC(a,b,b,a) == true
-//  (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
-//  (3) If exactly one of a,b equals one of c,d, then exactly one of
-//      VC(a,b,c,d) and VC(c,d,a,b) is true
-//
-// It is an error to call this method with 4 distinct vertices.
-func VertexCrossing(a, b, c, d Point) bool {
-	// If A == B or C == D there is no intersection. We need to check this
-	// case first in case 3 or more input points are identical.
-	if a.ApproxEqual(b) || c.ApproxEqual(d) {
-		return false
-	}
-
-	// If any other pair of vertices is equal, there is a crossing if and only
-	// if OrderedCCW indicates that the edge AB is further CCW around the
-	// shared vertex O (either A or B) than the edge CD, starting from an
-	// arbitrary fixed reference point.
-	switch {
-	case a.ApproxEqual(d):
-		return OrderedCCW(Point{a.Ortho()}, c, b, a)
-	case b.ApproxEqual(c):
-		return OrderedCCW(Point{b.Ortho()}, d, a, b)
-	case a.ApproxEqual(c):
-		return OrderedCCW(Point{a.Ortho()}, d, b, a)
-	case b.ApproxEqual(d):
-		return OrderedCCW(Point{b.Ortho()}, c, a, b)
-	}
-
-	return false
-}
-
-// DistanceFraction returns the distance ratio of the point X along an edge AB.
-// If X is on the line segment AB, this is the fraction T such
-// that X == Interpolate(T, A, B).
-//
-// This requires that A and B are distinct.
-func DistanceFraction(x, a, b Point) float64 {
-	d0 := x.Angle(a.Vector)
-	d1 := x.Angle(b.Vector)
-	return float64(d0 / (d0 + d1))
-}
-
-// Interpolate returns the point X along the line segment AB whose distance from A
-// is the given fraction "t" of the distance AB. Does NOT require that "t" be
-// between 0 and 1. Note that all distances are measured on the surface of
-// the sphere, so this is more complicated than just computing (1-t)*a + t*b
-// and normalizing the result.
-func Interpolate(t float64, a, b Point) Point {
-	if t == 0 {
-		return a
-	}
-	if t == 1 {
-		return b
-	}
-	ab := a.Angle(b.Vector)
-	return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
-}
-
-// InterpolateAtDistance returns the point X along the line segment AB whose
-// distance from A is the angle ax.
-func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
-	aRad := ax.Radians()
-
-	// Use PointCross to compute the tangent vector at A towards B. The
-	// result is always perpendicular to A, even if A=B or A=-B, but it is not
-	// necessarily unit length. (We effectively normalize it below.)
-	normal := a.PointCross(b)
-	tangent := normal.Vector.Cross(a.Vector)
-
-	// Now compute the appropriate linear combination of A and "tangent". With
-	// infinite precision the result would always be unit length, but we
-	// normalize it anyway to ensure that the error is within acceptable bounds.
-	// (Otherwise errors can build up when the result of one interpolation is
-	// fed into another interpolation.)
-	return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
-}
-
-// RectBounder is used to compute a bounding rectangle that contains all edges
-// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
-// Note that the bounding rectangle of an edge can be larger than the bounding
-// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
-//
-// The bounds are calculated conservatively to account for numerical errors
-// when points are converted to LatLngs. More precisely, this function
-// guarantees the following:
-// Let L be a closed edge chain (Loop) such that the interior of the loop does
-// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
-// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
-type RectBounder struct {
-	// The previous vertex in the chain.
-	a Point
-	// The previous vertex latitude longitude.
-	aLL   LatLng
-	bound Rect
-}
-
-// NewRectBounder returns a new instance of a RectBounder.
-func NewRectBounder() *RectBounder {
-	return &RectBounder{
-		bound: EmptyRect(),
-	}
-}
-
-// AddPoint adds the given point to the chain. The Point must be unit length.
-func (r *RectBounder) AddPoint(b Point) {
-	bLL := LatLngFromPoint(b)
-
-	if r.bound.IsEmpty() {
-		r.a = b
-		r.aLL = bLL
-		r.bound = r.bound.AddPoint(bLL)
-		return
-	}
-
-	// First compute the cross product N = A x B robustly. This is the normal
-	// to the great circle through A and B. We don't use RobustSign
-	// since that method returns an arbitrary vector orthogonal to A if the two
-	// vectors are proportional, and we want the zero vector in that case.
-	n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)
-
-	// The relative error in N gets large as its norm gets very small (i.e.,
-	// when the two points are nearly identical or antipodal). We handle this
-	// by choosing a maximum allowable error, and if the error is greater than
-	// this we fall back to a different technique. Since it turns out that
-	// the other sources of error in converting the normal to a maximum
-	// latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
-	// have the total error be a multiple of dblEpsilon, we have chosen to
-	// limit the maximum error in the normal to be 3.84 * dblEpsilon.
-	// It is possible to show that the error is less than this when
-	//
-	// n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
-	//          = 1.91346e-15 (about 8.618 * dblEpsilon)
-	nNorm := n.Norm()
-	if nNorm < 1.91346e-15 {
-		// A and B are either nearly identical or nearly antipodal (to within
-		// 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
-		if r.a.Dot(b.Vector) < 0 {
-			// The two points are nearly antipodal. The easiest solution is to
-			// assume that the edge between A and B could go in any direction
-			// around the sphere.
-			r.bound = FullRect()
-		} else {
-			// The two points are nearly identical (to within 4.309 * dblEpsilon).
-			// In this case we can just use the bounding rectangle of the points,
-			// since after the expansion done by GetBound this Rect is
-			// guaranteed to include the (lat,lng) values of all points along AB.
-			r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
-		}
-		r.a = b
-		r.aLL = bLL
-		return
-	}
-
-	// Compute the longitude range spanned by AB.
-	lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
-	if lngAB.Length() >= math.Pi-2*dblEpsilon {
-		// The points lie on nearly opposite lines of longitude to within the
-		// maximum error of the calculation. The easiest solution is to assume
-		// that AB could go on either side of the pole.
-		lngAB = s1.FullInterval()
-	}
-
-	// Next we compute the latitude range spanned by the edge AB. We start
-	// with the range spanning the two endpoints of the edge:
-	latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())
-
-	// This is the desired range unless the edge AB crosses the plane
-	// through N and the Z-axis (which is where the great circle through A
-	// and B attains its minimum and maximum latitudes). To test whether AB
-	// crosses this plane, we compute a vector M perpendicular to this
-	// plane and then project A and B onto it.
-	m := n.Cross(r3.Vector{0, 0, 1})
-	mA := m.Dot(r.a.Vector)
-	mB := m.Dot(b.Vector)
-
-	// We want to test the signs of "mA" and "mB", so we need to bound
-	// the error in these calculations. It is possible to show that the
-	// total error is bounded by
-	//
-	// (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
-	//   = 6.06638e-16 * nNorm + 6.83174e-31
-
-	mError := 6.06638e-16*nNorm + 6.83174e-31
-	if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
-		// Minimum/maximum latitude *may* occur in the edge interior.
-		//
-		// The maximum latitude is 90 degrees minus the latitude of N. We
-		// compute this directly using atan2 in order to get maximum accuracy
-		// near the poles.
-		//
-		// Our goal is compute a bound that contains the computed latitudes of
-		// all S2Points P that pass the point-in-polygon containment test.
-		// There are three sources of error we need to consider:
-		// - the directional error in N (at most 3.84 * dblEpsilon)
-		// - converting N to a maximum latitude
-		// - computing the latitude of the test point P
-		// The latter two sources of error are at most 0.955 * dblEpsilon
-		// individually, but it is possible to show by a more complex analysis
-		// that together they can add up to at most 1.16 * dblEpsilon, for a
-		// total error of 5 * dblEpsilon.
-		//
-		// We add 3 * dblEpsilon to the bound here, and GetBound() will pad
-		// the bound by another 2 * dblEpsilon.
-		maxLat := math.Min(
-			math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
-			math.Pi/2)
-
-		// In order to get tight bounds when the two points are close together,
-		// we also bound the min/max latitude relative to the latitudes of the
-		// endpoints A and B. First we compute the distance between A and B,
-		// and then we compute the maximum change in latitude between any two
-		// points along the great circle that are separated by this distance.
-		// This gives us a latitude change "budget". Some of this budget must
-		// be spent getting from A to B; the remainder bounds the round-trip
-		// distance (in latitude) from A or B to the min or max latitude
-		// attained along the edge AB.
-		latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
-		maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon
-
-		// Test whether AB passes through the point of maximum latitude or
-		// minimum latitude. If the dot product(s) are small enough then the
-		// result may be ambiguous.
-		if mA <= mError && mB >= -mError {
-			latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
-		}
-		if mB <= mError && mA >= -mError {
-			latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
-		}
-	}
-	r.a = b
-	r.aLL = bLL
-	r.bound = r.bound.Union(Rect{latAB, lngAB})
-}
-
-// RectBound returns the bounding rectangle of the edge chain that connects the
-// vertices defined so far. This bound satisfies the guarantee made
-// above, i.e. if the edge chain defines a Loop, then the bound contains
-// the LatLng coordinates of all Points contained by the loop.
-func (r *RectBounder) RectBound() Rect {
-	return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
-}
-
// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
// contain the bounds of any subregion whose bounds are computed using
// ComputeRectBound. For example, consider a loop L that defines a square.
// GetBound ensures that if a point P is contained by this square, then
// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
// shaped loop S contained by L. It is possible that GetBound returns a
// *larger* bound for S than it does for L, due to rounding errors. This
// method expands the bound for L so that it is guaranteed to contain the
// bounds of any subregion S.
//
// More precisely, if L is a loop that does not contain either pole, and S
// is a loop such that L.Contains(S), then
//
//   ExpandForSubregions(L.RectBound).Contains(S.RectBound).
//
func ExpandForSubregions(bound Rect) Rect {
	// Empty bounds don't need expansion.
	if bound.IsEmpty() {
		return bound
	}

	// First we need to check whether the bound B contains any nearly-antipodal
	// points (to within 4.309 * dblEpsilon). If so then we need to return
	// FullRect, since the subregion might have an edge between two
	// such points, and AddPoint returns Full for such edges. Note that
	// this can happen even if B is not Full; for example, consider a loop
	// that defines a 10km strip straddling the equator extending from
	// longitudes -100 to +100 degrees.
	//
	// It is easy to check whether B contains any antipodal points, but checking
	// for nearly-antipodal points is trickier. Essentially we consider the
	// original bound B and its reflection through the origin B', and then test
	// whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.

	// lngGap is a lower bound on the longitudinal distance between B and its
	// reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
	// endpoint longitude calculations and the Length call.)
	lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)

	// minAbsLat is the minimum distance from B to the equator (if zero or
	// negative, then B straddles the equator).
	minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)

	// latGapSouth and latGapNorth measure the minimum distance from B to the
	// south and north poles respectively.
	latGapSouth := math.Pi/2 + bound.Lat.Lo
	latGapNorth := math.Pi/2 - bound.Lat.Hi

	if minAbsLat >= 0 {
		// The bound B does not straddle the equator. In this case the minimum
		// distance is between one endpoint of the latitude edge in B closest to
		// the equator and the other endpoint of that edge in B'. The latitude
		// distance between these two points is 2*minAbsLat, and the longitude
		// distance is lngGap. We could compute the distance exactly using the
		// Haversine formula, but then we would need to bound the errors in that
		// calculation. Since we only need accuracy when the distance is very
		// small (close to 4.309 * dblEpsilon), we substitute the Euclidean
		// distance instead. This gives us a right triangle XYZ with two edges of
		// length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
		// length of the third edge z, and we have
		//
		//         z  ~=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
		//
		// Therefore the region may contain nearly antipodal points only if
		//
		//  2*minAbsLat + lngGap  <  sqrt(2) * 4.309 * dblEpsilon
		//                        ~= 1.354e-15
		//
		// Note that because the given bound B is conservative, minAbsLat and
		// lngGap are both lower bounds on their true values so we do not need
		// to make any adjustments for their errors.
		if 2*minAbsLat+lngGap < 1.354e-15 {
			return FullRect()
		}
	} else if lngGap >= math.Pi/2 {
		// B spans at most Pi/2 in longitude. The minimum distance is always
		// between one corner of B and the diagonally opposite corner of B'. We
		// use the same distance approximation that we used above; in this case
		// we have an obtuse triangle XYZ with two edges of length x = latGapSouth
		// and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
		//
		//         z  >=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
		//
		// Unlike the case above, latGapSouth and latGapNorth are not lower bounds
		// (because of the extra addition operation, and because math.Pi/2 is not
		// exactly equal to Pi/2); they can exceed their true values by up to
		// 0.75 * dblEpsilon. Putting this all together, the region may contain
		// nearly antipodal points only if
		//
		//   latGapSouth + latGapNorth  <  (sqrt(2) * 4.309 + 1.5) * dblEpsilon
		//                              ~= 1.687e-15
		if latGapSouth+latGapNorth < 1.687e-15 {
			return FullRect()
		}
	} else {
		// Otherwise we know that (1) the bound straddles the equator and (2) its
		// width in longitude is at least Pi/2. In this case the minimum
		// distance can occur either between a corner of B and the diagonally
		// opposite corner of B' (as in the case above), or between a corner of B
		// and the opposite longitudinal edge reflected in B'. It is sufficient
		// to only consider the corner-edge case, since this distance is also a
		// lower bound on the corner-corner distance when that case applies.

		// Consider the spherical triangle XYZ where X is a corner of B with
		// minimum absolute latitude, Y is the closest pole to X, and Z is the
		// point closest to X on the opposite longitudinal edge of B'. This is a
		// right triangle (Z = Pi/2), and from the spherical law of sines we have
		//
		//     sin(z) / sin(Z)  =  sin(y) / sin(Y)
		//     sin(maxLatGap) / 1  =  sin(dMin) / sin(lngGap)
		//     sin(dMin)  =  sin(maxLatGap) * sin(lngGap)
		//
		// where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
		// desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
		// for 0 <= t <= Pi/2, that we only need an accurate approximation when
		// at least one of "maxLatGap" or lngGap is extremely small (in which
		// case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
		// to 0.75 * dblEpsilon, we want to test whether
		//
		//   maxLatGap * lngGap  <  (4.309 + 0.75) * (Pi/2) * dblEpsilon
		//                       ~= 1.765e-15
		if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
			return FullRect()
		}
	}
	// Next we need to check whether the subregion might contain any edges that
	// span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
	// sets the longitude bound to Full in that case. This corresponds to
	// testing whether (lngGap <= 0) in lngExpansion below.

	// Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
	// In the worst case, the errors when computing the latitude bound for a
	// subregion could go in the opposite direction as the errors when computing
	// the bound for the original region, so we need to double this value.
	// (More analysis shows that it's okay to round down to a multiple of
	// dblEpsilon.)
	//
	// For longitude, we rely on the fact that atan2 is correctly rounded and
	// therefore no additional bounds expansion is necessary.

	latExpansion := 9 * dblEpsilon
	lngExpansion := 0.0
	if lngGap <= 0 {
		lngExpansion = math.Pi
	}
	return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
}
-
// EdgeCrosser allows edges to be efficiently tested for intersection with a
// given fixed edge AB. It is especially efficient when testing for
// intersection with an edge chain connecting vertices v0, v1, v2, ...
type EdgeCrosser struct {
	a   Point // First vertex of the fixed edge AB.
	b   Point // Second vertex of the fixed edge AB.
	aXb Point // Cross product of A and B, precomputed by NewEdgeCrosser.

	// To reduce the number of calls to expensiveSign, we compute an
	// outward-facing tangent at A and B if necessary. If the plane
	// perpendicular to one of these tangents separates AB from CD (i.e., one
	// edge on each side) then there is no intersection.
	aTangent Point // Outward-facing tangent at A.
	bTangent Point // Outward-facing tangent at B.

	// The fields below are updated for each vertex in the chain.
	c   Point     // Previous vertex in the vertex chain.
	acb Direction // The orientation of triangle ACB.
}
-
-// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
-func NewEdgeCrosser(a, b Point) *EdgeCrosser {
-	norm := a.PointCross(b)
-	return &EdgeCrosser{
-		a:        a,
-		b:        b,
-		aXb:      Point{a.Cross(b.Vector)},
-		aTangent: Point{a.Cross(norm.Vector)},
-		bTangent: Point{norm.Cross(b.Vector)},
-	}
-}
-
// A Crossing indicates how edges cross.
type Crossing int

const (
	// Cross means the edges cross. Note that Cross is the zero value of
	// Crossing.
	Cross Crossing = iota
	// MaybeCross means two vertices from different edges are the same.
	MaybeCross
	// DoNotCross means the edges do not cross.
	DoNotCross
)
-
// CrossingSign reports whether the edge AB intersects the edge CD.
// If any two vertices from different edges are the same, returns MaybeCross.
// If either edge is degenerate (A == B or C == D), returns DoNotCross or MaybeCross.
//
// Properties of CrossingSign:
//
//  (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
//  (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
//  (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
//
// Note that if you want to check an edge against a chain of other edges,
// it is slightly more efficient to use the single-argument version
// ChainCrossingSign below.
func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
	// Restart the chain unless C is the last vertex seen; ChainCrossingSign
	// then reuses the cached orientation of triangle ACB.
	if c != e.c {
		e.RestartAt(c)
	}
	return e.ChainCrossingSign(d)
}
-
// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or AB and
// CD share a vertex and VertexCrossing(a, b, c, d) is true.
//
// This method extends the concept of a "crossing" to the case where AB
// and CD have a vertex in common. The two edges may or may not cross,
// according to the rules defined in VertexCrossing above. The rules
// are designed so that point containment tests can be implemented simply
// by counting edge crossings. Similarly, determining whether one edge
// chain crosses another edge chain can be implemented by counting.
func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
	// Restart the chain unless C is the last vertex seen.
	if c != e.c {
		e.RestartAt(c)
	}
	return e.EdgeOrVertexChainCrossing(d)
}
-
-// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
-// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
-//
-// You don't need to use this or any of the chain functions unless you're trying to
-// squeeze out every last drop of performance. Essentially all you are saving is a test
-// whether the first vertex of the current edge is the same as the second vertex of the
-// previous edge.
-func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
-	e := NewEdgeCrosser(a, b)
-	e.RestartAt(c)
-	return e
-}
-
// RestartAt sets the current point of the edge crosser to be c.
// Call this method when your chain 'jumps' to a new place.
// The point c is copied into the crosser's internal state.
func (e *EdgeCrosser) RestartAt(c Point) {
	e.c = c
	// Cache the orientation of triangle ACB; the negation matches the
	// convention checked by ChainCrossingSign (e.acb == -bda).
	e.acb = -triageSign(e.a, e.b, e.c)
}
-
// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
// the crossing methods (or RestartAt) as the first vertex of the current edge.
// It returns Cross, MaybeCross, or DoNotCross, as CrossingSign does.
func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
	// as part of our state. When each new point D arrives, we compute the
	// orientation of BDA and check whether it matches ACB. This checks whether
	// the points C and D are on opposite sides of the great circle through AB.

	// Recall that triageSign is invariant with respect to rotating its
	// arguments, i.e. ABD has the same orientation as BDA.
	bda := triageSign(e.a, e.b, d)
	if e.acb == -bda && bda != Indeterminate {
		// The most common case -- triangles have opposite orientations. Save the
		// current vertex D as the next vertex C, and also save the orientation of
		// the new triangle ACB (which is opposite to the current triangle BDA).
		e.c = d
		e.acb = -bda
		return DoNotCross
	}
	// Fall back to the slower path, which also advances the chain state.
	return e.crossingSign(d, bda)
}
-
-// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
-// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
-func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
-	// We need to copy e.c since it is clobbered by ChainCrossingSign.
-	c := e.c
-	switch e.ChainCrossingSign(d) {
-	case DoNotCross:
-		return false
-	case Cross:
-		return true
-	}
-	return VertexCrossing(e.a, e.b, c, d)
-}
-
// crossingSign handles the slow path of CrossingSign.
func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
	// Compute the actual result, and then save the current vertex D as the next
	// vertex C, and save the orientation of the next triangle ACB (which is
	// opposite to the current triangle BDA).
	defer func() {
		e.c = d
		e.acb = -bda
	}()

	// RobustSign is very expensive, so we avoid calling it if at all possible.
	// First eliminate the cases where two vertices are equal.
	if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
		return MaybeCross
	}

	// At this point, a very common situation is that A,B,C,D are four points on
	// a line such that AB does not overlap CD. (For example, this happens when
	// a line or curve is sampled finely, or when geometry is constructed by
	// computing the union of S2CellIds.) Most of the time, we can determine
	// that AB and CD do not intersect using the two outward-facing
	// tangents at A and B (parallel to AB) and testing whether AB and CD are on
	// opposite sides of the plane perpendicular to one of these tangents. This
	// is moderately expensive but still much cheaper than expensiveSign.

	// The error in RobustCrossProd is insignificant. The maximum error in
	// the call to CrossProd (i.e., the maximum norm of the error vector) is
	// (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
	// DotProd below is dblEpsilon. (There is also a small relative error
	// term that is insignificant because we are comparing the result against a
	// constant that is very close to zero.)
	maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
	if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
		return DoNotCross
	}

	// Otherwise it's time to break out the big guns.
	if e.acb == Indeterminate {
		e.acb = -expensiveSign(e.a, e.b, e.c)
	}
	if bda == Indeterminate {
		bda = expensiveSign(e.a, e.b, d)
	}

	if bda != e.acb {
		return DoNotCross
	}

	// Determine the orientations of the remaining triangles CBD and DAC; all
	// four triangles must agree with ACB for the edges to cross.
	cbd := -RobustSign(e.c, d, e.b)
	if cbd != e.acb {
		return DoNotCross
	}
	dac := RobustSign(e.c, d, e.a)
	if dac == e.acb {
		return Cross
	}
	return DoNotCross
}
-
// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
// It is used below both for points and for edge normals expressed in the
// local frame of a face.
type pointUVW Point
-
-// intersectsFace reports whether a given directed line L intersects the cube face F.
-// The line L is defined by its normal N in the (u,v,w) coordinates of F.
-func (p pointUVW) intersectsFace() bool {
-	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
-	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
-	// and (-1,1,1) do not all have the same sign. This is true exactly when
-	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
-	u := math.Abs(p.X)
-	v := math.Abs(p.Y)
-	w := math.Abs(p.Z)
-
-	// We only need to consider the cases where u or v is the smallest value,
-	// since if w is the smallest then both expressions below will have a
-	// positive LHS and a negative RHS.
-	return (v >= w-u) && (u >= w-v)
-}
-
// intersectsOppositeEdges reports whether a directed line L intersects two
// opposite edges of a cube face F. This includes the case where L passes
// exactly through a corner vertex of F. The directed line L is defined
// by its normal N in the (u,v,w) coordinates of F.
func (p pointUVW) intersectsOppositeEdges() bool {
	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
	// and only if exactly two of the corner vertices lie on each side of L. This
	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
	// expression exactly.
	u := math.Abs(p.X)
	v := math.Abs(p.Y)
	w := math.Abs(p.Z)

	// If w is the smallest, the following line returns an exact result.
	if math.Abs(u-v) != w {
		return math.Abs(u-v) >= w
	}

	// Otherwise u - v = w exactly, or w is not the smallest value. In either
	// case the following returns the correct result.
	if u >= v {
		return u-w >= v
	}
	return v-w >= u
}
-
// axis represents the possible results of exitAxis.
type axis int

const (
	// axisU means the line exits through the u=-1 or u=+1 edge of the face.
	axisU axis = iota
	// axisV means the line exits through the v=-1 or v=+1 edge of the face.
	axisV
)
-
-// exitAxis reports which axis the directed line L exits the cube face F on.
-// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
-// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
-// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
-// through a corner vertex of the cube face.
-func (p pointUVW) exitAxis() axis {
-	if p.intersectsOppositeEdges() {
-		// The line passes through through opposite edges of the face.
-		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
-		// larger absolute magnitude than the v-component.
-		if math.Abs(p.X) >= math.Abs(p.Y) {
-			return axisV
-		}
-		return axisU
-	}
-
-	// The line passes through through two adjacent edges of the face.
-	// It exits the v=+1 or v=-1 edge if an even number of the components of N
-	// are negative. We test this using signbit() rather than multiplication
-	// to avoid the possibility of underflow.
-	var x, y, z int
-	if math.Signbit(p.X) {
-		x = 1
-	}
-	if math.Signbit(p.Y) {
-		y = 1
-	}
-	if math.Signbit(p.Z) {
-		z = 1
-	}
-
-	if x^y^z == 0 {
-		return axisV
-	}
-	return axisU
-}
-
-// exitPoint returns the UV coordinates of the point where a directed line L (represented
-// by the CCW normal of this point), exits the cube face this point is derived from along
-// the given axis.
-func (p pointUVW) exitPoint(a axis) r2.Point {
-	if a == axisU {
-		u := -1.0
-		if p.Y > 0 {
-			u = 1.0
-		}
-		return r2.Point{u, (-u*p.X - p.Z) / p.Y}
-	}
-
-	v := -1.0
-	if p.X < 0 {
-		v = 1.0
-	}
-	return r2.Point{(-v*p.Y - p.Z) / p.X, v}
-}
-
// clipDestination returns a score which is used to indicate if the clipped edge AB
// on the given face intersects the face at all. This function returns the score for
// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
//
// First, it clips the line segment AB to find the clipped destination B' on a given
// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
// coordinates of that face.) Second, it partially computes whether the segment AB
// intersects this face at all. The actual condition is fairly complicated, but it
// turns out that it can be expressed as a "score" that can be computed independently
// when clipping the two endpoints A and B.
//
// scaledN is the (padding-scaled) normal of AB, and aTan/bTan are the tangents at
// the two endpoints, all in the (u,v,w) frame of the face.
func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
	var uv r2.Point

	// Optimization: if B is within the safe region of the face, use it.
	maxSafeUVCoord := 1 - faceClipErrorUVCoord
	if b.Z > 0 {
		uv = r2.Point{b.X / b.Z, b.Y / b.Z}
		if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
			return uv, 0
		}
	}

	// Otherwise find the point B' where the line AB exits the face.
	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)

	p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})

	// Determine if the exit point B' is contained within the segment. We do this
	// by computing the dot products with two inward-facing tangent vectors at A
	// and B. If either dot product is negative, we say that B' is on the "wrong
	// side" of that point. As the point B' moves around the great circle AB past
	// the segment endpoint B, it is initially on the wrong side of B only; as it
	// moves further it is on the wrong side of both endpoints; and then it is on
	// the wrong side of A only. If the exit point B' is on the wrong side of
	// either endpoint, we can't use it; instead the segment is clipped at the
	// original endpoint B.
	//
	// We reject the segment if the sum of the scores of the two endpoints is 3
	// or more. Here is what that rule encodes:
	//  - If B' is on the wrong side of A, then the other clipped endpoint A'
	//    must be in the interior of AB (otherwise AB' would go the wrong way
	//    around the circle). There is a similar rule for A'.
	//  - If B' is on the wrong side of either endpoint (and therefore we must
	//    use the original endpoint B instead), then it must be possible to
	//    project B onto this face (i.e., its w-coordinate must be positive).
	//    This rule is only necessary to handle certain zero-length edges (A=B).
	score := 0
	if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
		score = 2 // B' is on wrong side of A.
	} else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
		score = 1 // B' is on wrong side of B.
	}

	if score > 0 { // B' is not in the interior of AB.
		if b.Z <= 0 {
			score = 3 // B cannot be projected onto this face.
		} else {
			uv = r2.Point{b.X / b.Z, b.Y / b.Z}
		}
	}

	return uv, score
}
-
// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
// intersects the given face, or false if the edge AB does not intersect.
// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
// the results may differ from those produced by faceSegments.
//
// It is equivalent to ClipToPaddedFace with zero padding.
func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
	return ClipToPaddedFace(a, b, face, 0.0)
}
-
// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
// Padding must be non-negative.
//
// When intersects is false, the returned aUV and bUV are the zero value.
func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
	// Fast path: both endpoints are on the given face.
	if face(a.Vector) == f && face(b.Vector) == f {
		au, av := validFaceXYZToUV(f, a.Vector)
		bu, bv := validFaceXYZToUV(f, b.Vector)
		return r2.Point{au, av}, r2.Point{bu, bv}, true
	}

	// Convert everything into the (u,v,w) coordinates of the given face. Note
	// that the cross product *must* be computed in the original (x,y,z)
	// coordinate system because PointCross (unlike the mathematical cross
	// product) can produce different results in different coordinate systems
	// when one argument is a linear multiple of the other, due to the use of
	// symbolic perturbations.
	normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
	aUVW := pointUVW(faceXYZtoUVW(f, a))
	bUVW := pointUVW(faceXYZtoUVW(f, b))

	// Padding is handled by scaling the u- and v-components of the normal.
	// Letting R=1+padding, this means that when we compute the dot product of
	// the normal with a cube face vertex (such as (-1,-1,1)), we will actually
	// compute the dot product with the scaled vertex (-R,-R,1). This allows
	// methods such as intersectsFace, exitAxis, etc, to handle padding
	// with no further modifications.
	scaleUV := 1 + padding
	scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
	if !scaledN.intersectsFace() {
		return aUV, bUV, false
	}

	// TODO(roberts): This is a workaround for extremely small vectors where some
	// loss of precision can occur in Normalize causing underflow. When PointCross
	// is updated to work around this, this can be removed.
	if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
		normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
	}

	normUVW = pointUVW{normUVW.Normalize()}

	aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
	bTan := pointUVW{bUVW.Cross(normUVW.Vector)}

	// As described in clipDestination, if the sum of the scores from clipping the two
	// endpoints is 3 or more, then the segment does not intersect this face.
	aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
	bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)

	return aUV, bUV, aScore+bScore < 3
}
-
// interpolateDouble returns a value with the same combination of a1 and b1 as the
// given value x is of a and b. This function makes the following guarantees:
//  - If x == a, then x1 = a1 (exactly).
//  - If x == b, then x1 = b1 (exactly).
//  - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
// This requires a != b.
func interpolateDouble(x, a, b, a1, b1 float64) float64 {
	// Interpolate from whichever endpoint is closer to x so the result is
	// exact when x coincides with that endpoint.
	if math.Abs(b-x) < math.Abs(a-x) {
		return b1 + (a1-b1)*(x-b)/(a-b)
	}
	return a1 + (b1-a1)*(x-a)/(b-a)
}
-
-// updateEndpoint returns the interval with the specified endpoint updated to
-// the given value. If the value lies beyond the opposite endpoint, nothing is
-// changed and false is returned.
-func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
-	if !highEndpoint {
-		if bound.Hi < value {
-			return bound, false
-		}
-		if bound.Lo < value {
-			bound.Lo = value
-		}
-		return bound, true
-	}
-
-	if bound.Lo > value {
-		return bound, false
-	}
-	if bound.Hi > value {
-		bound.Hi = value
-	}
-	return bound, true
-}
-
// clipBoundAxis returns the clipped versions of the bounding intervals for the given
// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
// given clip interval. negSlope is a precomputed helper variable that indicates which
// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
// false is returned.
func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {

	if bound0.Lo < clip.Lo {
		// If the upper bound is below the clip's lower bound, there is nothing to do.
		if bound0.Hi < clip.Lo {
			return bound0, bound1, false
		}
		// Narrow the interval's lower bound to the clip bound, and update the
		// other axis by interpolating along AB to the clip boundary.
		bound0.Lo = clip.Lo
		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateDouble(clip.Lo, a0, b0, a1, b1)); !updated {
			return bound0, bound1, false
		}
	}

	if bound0.Hi > clip.Hi {
		// If the lower bound is above the clip's upper bound, there is nothing to do.
		if bound0.Lo > clip.Hi {
			return bound0, bound1, false
		}
		// Narrow the interval's upper bound to the clip bound, and update the
		// other axis by interpolating along AB to the clip boundary.
		bound0.Hi = clip.Hi
		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateDouble(clip.Hi, a0, b0, a1, b1)); !updated {
			return bound0, bound1, false
		}
	}
	return bound0, bound1, true
}
-
-// edgeIntersectsRect reports whether the edge defined by AB intersects the
-// given closed rectangle to within the error bound.
-func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
-	// First check whether the bounds of a Rect around AB intersects the given rect.
-	if !r.Intersects(r2.RectFromPoints(a, b)) {
-		return false
-	}
-
-	// Otherwise AB intersects the rect if and only if all four vertices of rect
-	// do not lie on the same side of the extended line AB. We test this by finding
-	// the two vertices of rect with minimum and maximum projections onto the normal
-	// of AB, and computing their dot products with the edge normal.
-	n := b.Sub(a).Ortho()
-
-	i := 0
-	if n.X >= 0 {
-		i = 1
-	}
-	j := 0
-	if n.Y >= 0 {
-		j = 1
-	}
-
-	max := n.Dot(r.VertexIJ(i, j).Sub(a))
-	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
-
-	return (max >= 0) && (min <= 0)
-}
-
-// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
-// by AB intersected by clip. The resulting bound may be empty. This is a convenience
-// function built on top of clipEdgeBound.
-func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
-	bound := r2.RectFromPoints(a, b)
-	if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
-		return b1
-	}
-	return r2.EmptyRect()
-}
-
-// clipEdgeBound clips an edge AB to sequence of rectangles efficiently.
-// It represents the clipped edges by their bounding boxes rather than as a pair of
-// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
-// a tight bound of A'B'. This function returns the bound that is a tight bound
-// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
-// it returns false and the original bound.
-func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
-	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
-	// is false if AB has positive slope, and true if AB has negative slope. This is
-	// used to determine which interval endpoints need to be updated each time
-	// the edge is clipped.
-	negSlope := (a.X > b.X) != (a.Y > b.Y)
-
-	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
-	if !up1 {
-		return bound, false
-	}
-	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
-	if !up2 {
-		return r2.Rect{b0x, b0y}, false
-	}
-	return r2.Rect{X: b1x, Y: b1y}, true
-}
-
-// ClipEdge returns the portion of the edge defined by AB that is contained by the
-// given rectangle. If there is no intersection, false is returned and aClip and bClip
-// are undefined.
-func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
-	// Compute the bounding rectangle of AB, clip it, and then extract the new
-	// endpoints from the clipped bound.
-	bound := r2.RectFromPoints(a, b)
-	if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
-		return aClip, bClip, false
-	}
-	ai := 0
-	if a.X > b.X {
-		ai = 1
-	}
-	aj := 0
-	if a.Y > b.Y {
-		aj = 1
-	}
-
-	return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
-}
-
-// ClosestPoint returns the point along the edge AB that is closest to the point X.
-// The fractional distance of this point along the edge AB can be obtained
-// using DistanceFraction.
-//
-// This requires that all points are unit length.
-func ClosestPoint(x, a, b Point) Point {
-	aXb := a.PointCross(b)
-	// Find the closest point to X along the great circle through AB.
-	p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
-
-	// If this point is on the edge AB, then it's the closest point.
-	if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
-		return Point{p.Normalize()}
-	}
-
-	// Otherwise, the closest point is either A or B.
-	if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
-		return a
-	}
-	return b
-}
-
-// DistanceFromSegment returns the distance of point x from line segment ab.
-// The points are expected to be normalized.
-func DistanceFromSegment(x, a, b Point) s1.Angle {
-	if d, ok := interiorDist(x, a, b); ok {
-		return d.Angle()
-	}
-	// Chord distance of x to both end points a and b.
-	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
-	return s1.ChordAngle(math.Min(xa2, xb2)).Angle()
-}
-
-// interiorDist returns the shortest distance from point x to edge ab,
-// assuming that the closest point to x is interior to ab.
-// If the closest point is not interior to ab, interiorDist returns (0, false).
-func interiorDist(x, a, b Point) (s1.ChordAngle, bool) {
-	// Chord distance of x to both end points a and b.
-	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
-
-	// The closest point on AB could either be one of the two vertices (the
-	// vertex case) or in the interior (the interior case).  Let C = A x B.
-	// If X is in the spherical wedge extending from A to B around the axis
-	// through C, then we are in the interior case.  Otherwise we are in the
-	// vertex case.
-	//
-	// Check whether we might be in the interior case.  For this to be true, XAB
-	// and XBA must both be acute angles.  Checking this condition exactly is
-	// expensive, so instead we consider the planar triangle ABX (which passes
-	// through the sphere's interior).  The planar angles XAB and XBA are always
-	// less than the corresponding spherical angles, so if we are in the
-	// interior case then both of these angles must be acute.
-	//
-	// We check this by computing the squared edge lengths of the planar
-	// triangle ABX, and testing acuteness using the law of cosines:
-	//
-	//             max(XA^2, XB^2) < min(XA^2, XB^2) + AB^2
-	if math.Max(xa2, xb2) >= math.Min(xa2, xb2)+(a.Sub(b.Vector)).Norm2() {
-		return 0, false
-	}
-
-	// The minimum distance might be to a point on the edge interior.  Let R
-	// be closest point to X that lies on the great circle through AB.  Rather
-	// than computing the geodesic distance along the surface of the sphere,
-	// instead we compute the "chord length" through the sphere's interior.
-	//
-	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
-	// is the point X projected onto the plane through the great circle AB.
-	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
-	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
-	// is faster and the corresponding distance on the Earth's surface is
-	// accurate to within 1% for distances up to about 1800km.
-
-	// Test for the interior case. This test is very likely to succeed because
-	// of the conservative planar test we did initially.
-	c := a.PointCross(b)
-	c2 := c.Norm2()
-	cx := c.Cross(x.Vector)
-	if a.Dot(cx) >= 0 || b.Dot(cx) <= 0 {
-		return 0, false
-	}
-
-	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
-	// This calculation has good accuracy for all chord lengths since it
-	// is based on both the dot product and cross product (rather than
-	// deriving one from the other).  However, note that the chord length
-	// representation itself loses accuracy as the angle approaches π.
-	xDotC := x.Dot(c.Vector)
-	xDotC2 := xDotC * xDotC
-	qr := 1 - math.Sqrt(cx.Norm2()/c2)
-	return s1.ChordAngle((xDotC2 / c2) + (qr * qr)), true
-}
-
-// WedgeRel enumerates the possible relation between two wedges A and B.
-type WedgeRel int
-
-// Define the different possible relationships between two wedges.
-const (
-	WedgeEquals              WedgeRel = iota // A and B are equal.
-	WedgeProperlyContains                    // A is a strict superset of B.
-	WedgeIsProperlyContained                 // A is a strict subset of B.
-	WedgeProperlyOverlaps                    // A-B, B-A, and A intersect B are non-empty.
-	WedgeIsDisjoint                          // A and B are disjoint.
-)
-
-// WedgeRelation reports the relation between two non-empty wedges
-// A=(a0, ab1, a2) and B=(b0, ab1, b2).
-func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel {
-	// There are 6 possible edge orderings at a shared vertex (all
-	// of these orderings are circular, i.e. abcd == bcda):
-	//
-	//  (1) a2 b2 b0 a0: A contains B
-	//  (2) a2 a0 b0 b2: B contains A
-	//  (3) a2 a0 b2 b0: A and B are disjoint
-	//  (4) a2 b0 a0 b2: A and B intersect in one wedge
-	//  (5) a2 b2 a0 b0: A and B intersect in one wedge
-	//  (6) a2 b0 b2 a0: A and B intersect in two wedges
-	//
-	// We do not distinguish between 4, 5, and 6.
-	// We pay extra attention when some of the edges overlap.  When edges
-	// overlap, several of these orderings can be satisfied, and we take
-	// the most specific.
-	if a0 == b0 && a2 == b2 {
-		return WedgeEquals
-	}
-
-	// Cases 1, 2, 5, and 6
-	if OrderedCCW(a0, a2, b2, ab1) {
-		// The cases with this vertex ordering are 1, 5, and 6,
-		if OrderedCCW(b2, b0, a0, ab1) {
-			return WedgeProperlyContains
-		}
-
-		// We are in case 5 or 6, or case 2 if a2 == b2.
-		if a2 == b2 {
-			return WedgeIsProperlyContained
-		}
-		return WedgeProperlyOverlaps
-
-	}
-	// We are in case 2, 3, or 4.
-	if OrderedCCW(a0, b0, b2, ab1) {
-		return WedgeIsProperlyContained
-	}
-
-	if OrderedCCW(a0, b0, a2, ab1) {
-		return WedgeIsDisjoint
-	}
-	return WedgeProperlyOverlaps
-}
-
-// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2).
-// Equivalent to WedgeRelation == WedgeProperlyContains || WedgeEquals.
-func WedgeContains(a0, ab1, a2, b0, b2 Point) bool {
-	// For A to contain B (where each loop interior is defined to be its left
-	// side), the CCW edge order around ab1 must be a2 b2 b0 a0.  We split
-	// this test into two parts that test three vertices each.
-	return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1)
-}
-
-// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2).
-// Equivalent to WedgeRelation == WedgeIsDisjoint
-func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool {
-	// For A not to intersect B (where each loop interior is defined to be
-	// its left side), the CCW edge order around ab1 must be a0 b2 b0 a2.
-	// Note that it's important to write these conditions as negatives
-	// (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct
-	// results when two vertices are the same.
-	return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1))
-}
-
-// TODO(roberts): Differences from C++
-//  LongitudePruner
-//  updateMinDistanceMaxError
-//  IsDistanceLess
-//  UpdateMinDistance
-//  IsInteriorDistanceLess
-//  UpdateMinInteriorDistance
-//  UpdateEdgePairMinDistance
-//  EdgePairClosestPoints
-//  IsEdgeBNearEdgeA
-//  FaceSegments
-//  PointFromExact
-//  IntersectionExact
-//  intersectionExactError

+ 0 - 1201
vendor/github.com/golang/geo/s2/edgeutil_test.go

@@ -1,1201 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"fmt"
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-func TestEdgeutilCrossings(t *testing.T) {
-	na1 := math.Nextafter(1, 0)
-	na2 := math.Nextafter(1, 2)
-
-	tests := []struct {
-		msg          string
-		a, b, c, d   Point
-		simpleTest   bool
-		robust       Crossing
-		vertex       bool
-		edgeOrVertex bool
-	}{
-		{
-			"two regular edges that cross",
-			Point{r3.Vector{1, 2, 1}},
-			Point{r3.Vector{1, -3, 0.5}},
-			Point{r3.Vector{1, -0.5, -3}},
-			Point{r3.Vector{0.1, 0.5, 3}},
-			true,
-			Cross,
-			true,
-			true,
-		},
-		{
-			"two regular edges that cross antipodal points",
-			Point{r3.Vector{1, 2, 1}},
-			Point{r3.Vector{1, -3, 0.5}},
-			Point{r3.Vector{-1, 0.5, 3}},
-			Point{r3.Vector{-0.1, -0.5, -3}},
-			true,
-			DoNotCross,
-			true,
-			false,
-		},
-		{
-			"two edges on the same great circle",
-			Point{r3.Vector{0, 0, -1}},
-			Point{r3.Vector{0, 1, 0}},
-			Point{r3.Vector{0, 1, 1}},
-			Point{r3.Vector{0, 0, 1}},
-			true,
-			DoNotCross,
-			false,
-			false,
-		},
-		{
-			"two edges that cross where one vertex is the OriginPoint",
-			Point{r3.Vector{1, 0, 0}},
-			OriginPoint(),
-			Point{r3.Vector{1, -0.1, 1}},
-			Point{r3.Vector{1, 1, -0.1}},
-			true,
-			Cross,
-			true,
-			true,
-		},
-		{
-			"two edges that cross antipodal points",
-			Point{r3.Vector{1, 0, 0}},
-			Point{r3.Vector{0, 1, 0}},
-			Point{r3.Vector{0, 0, -1}},
-			Point{r3.Vector{-1, -1, 1}},
-			true,
-			DoNotCross,
-			true,
-			false,
-		},
-		{
-			"two edges that share an endpoint",
-			// The Ortho() direction is (-4,0,2) and edge CD
-			// is further CCW around (2,3,4) than AB.
-			Point{r3.Vector{2, 3, 4}},
-			Point{r3.Vector{-1, 2, 5}},
-			Point{r3.Vector{7, -2, 3}},
-			Point{r3.Vector{2, 3, 4}},
-			true,
-			MaybeCross,
-			true,
-			false,
-		},
-		{
-			"two edges that barely cross near the middle of one edge",
-			// The edge AB is approximately in the x=y plane, while CD is approximately
-			// perpendicular to it and ends exactly at the x=y plane.
-			Point{r3.Vector{1, 1, 1}},
-			Point{r3.Vector{1, na1, -1}},
-			Point{r3.Vector{11, -12, -1}},
-			Point{r3.Vector{10, 10, 1}},
-			false,
-			DoNotCross, // TODO(sbeckman): Should be 1, fix once exactSign is implemented.
-			true,
-			false, // TODO(sbeckman): Should be true, fix once exactSign is implemented.
-		},
-		{
-			"two edges that barely cross near the middle separated by a distance of about 1e-15",
-			Point{r3.Vector{1, 1, 1}},
-			Point{r3.Vector{1, na2, -1}},
-			Point{r3.Vector{1, -1, 0}},
-			Point{r3.Vector{1, 1, 0}},
-			false,
-			DoNotCross,
-			false,
-			false,
-		},
-		{
-			"two edges that barely cross each other near the end of both edges",
-			// This example cannot be handled using regular double-precision
-			// arithmetic due to floating-point underflow.
-			Point{r3.Vector{0, 0, 1}},
-			Point{r3.Vector{2, -1e-323, 1}},
-			Point{r3.Vector{1, -1, 1}},
-			Point{r3.Vector{1e-323, 0, 1}},
-			false,
-			DoNotCross, // TODO(sbeckman): Should be 1, fix once exactSign is implemented.
-			false,
-			false, // TODO(sbeckman): Should be true, fix once exactSign is implemented.
-		},
-		{
-			"two edges that barely cross each other near the end separated by a distance of about 1e-640",
-			Point{r3.Vector{0, 0, 1}},
-			Point{r3.Vector{2, 1e-323, 1}},
-			Point{r3.Vector{1, -1, 1}},
-			Point{r3.Vector{1e-323, 0, 1}},
-			false,
-			DoNotCross,
-			false,
-			false,
-		},
-		{
-			"two edges that barely cross each other near the middle of one edge",
-			// Computing the exact determinant of some of the triangles in this test
-			// requires more than 2000 bits of precision.
-			Point{r3.Vector{1, -1e-323, -1e-323}},
-			Point{r3.Vector{1e-323, 1, 1e-323}},
-			Point{r3.Vector{1, -1, 1e-323}},
-			Point{r3.Vector{1, 1, 0}},
-			false,
-			Cross,
-			true,
-			true,
-		},
-		{
-			"two edges that barely cross each other near the middle separated by a distance of about 1e-640",
-			Point{r3.Vector{1, 1e-323, -1e-323}},
-			Point{r3.Vector{-1e-323, 1, 1e-323}},
-			Point{r3.Vector{1, -1, 1e-323}},
-			Point{r3.Vector{1, 1, 0}},
-			false,
-			Cross, // TODO(sbeckman): Should be -1, fix once exactSign is implemented.
-			true,
-			true, // TODO(sbeckman): Should be false, fix once exactSign is implemented.
-		},
-	}
-
-	for _, test := range tests {
-		if err := testCrossing(test.a, test.b, test.c, test.d, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-		if err := testCrossing(test.b, test.a, test.c, test.d, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-		if err := testCrossing(test.a, test.b, test.d, test.c, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-		if err := testCrossing(test.b, test.a, test.c, test.d, test.robust, test.vertex, test.edgeOrVertex, test.simpleTest); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-		if err := testCrossing(test.a, test.b, test.a, test.b, MaybeCross, true, true, false); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-		if err := testCrossing(test.c, test.d, test.a, test.b, test.robust, test.vertex, test.edgeOrVertex != (test.robust == MaybeCross), test.simpleTest); err != nil {
-			t.Errorf("%s: %v", test.msg, err)
-		}
-
-		if got := VertexCrossing(test.a, test.b, test.c, test.b); got != test.vertex {
-			t.Errorf("%s: VertexCrossing(%v,%v,%v,%v) = %t, want %t", test.msg, test.a, test.b, test.c, test.d, got, test.vertex)
-		}
-	}
-}
-
-func testCrossing(a, b, c, d Point, robust Crossing, vertex, edgeOrVertex, simple bool) error {
-	input := fmt.Sprintf("a: %v, b: %v, c: %v, d: %v", a, b, c, d)
-	if got, want := SimpleCrossing(a, b, c, d), robust == Cross; simple && got != want {
-		return fmt.Errorf("%v, SimpleCrossing(a, b, c, d) = %t, want %t", input, got, want)
-	}
-
-	crosser := NewChainEdgeCrosser(a, b, c)
-	if got, want := crosser.ChainCrossingSign(d), robust; got != want {
-		return fmt.Errorf("%v, ChainCrossingSign(d) = %d, want %d", input, got, want)
-	}
-	if got, want := crosser.ChainCrossingSign(c), robust; got != want {
-		return fmt.Errorf("%v, ChainCrossingSign(c) = %d, want %d", input, got, want)
-	}
-	if got, want := crosser.CrossingSign(d, c), robust; got != want {
-		return fmt.Errorf("%v, CrossingSign(d, c) = %d, want %d", input, got, want)
-	}
-	if got, want := crosser.CrossingSign(c, d), robust; got != want {
-		return fmt.Errorf("%v, CrossingSign(c, d) = %d, want %d", input, got, want)
-	}
-
-	crosser.RestartAt(c)
-	if got, want := crosser.EdgeOrVertexChainCrossing(d), edgeOrVertex; got != want {
-		return fmt.Errorf("%v, EdgeOrVertexChainCrossing(d) = %t, want %t", input, got, want)
-	}
-	if got, want := crosser.EdgeOrVertexChainCrossing(c), edgeOrVertex; got != want {
-		return fmt.Errorf("%v, EdgeOrVertexChainCrossing(c) = %t, want %t", input, got, want)
-	}
-	if got, want := crosser.EdgeOrVertexCrossing(d, c), edgeOrVertex; got != want {
-		return fmt.Errorf("%v, EdgeOrVertexCrossing(d, c) = %t, want %t", input, got, want)
-	}
-	if got, want := crosser.EdgeOrVertexCrossing(c, d), edgeOrVertex; got != want {
-		return fmt.Errorf("%v, EdgeOrVertexCrossing(c, d) = %t, want %t", input, got, want)
-	}
-	return nil
-}
-
-func TestEdgeutilInterpolate(t *testing.T) {
-	// Choose test points designed to expose floating-point errors.
-	p1 := PointFromCoords(0.1, 1e-30, 0.3)
-	p2 := PointFromCoords(-0.7, -0.55, -1e30)
-
-	tests := []struct {
-		a, b Point
-		dist float64
-		want Point
-	}{
-		// A zero-length edge.
-		{p1, p1, 0, p1},
-		{p1, p1, 1, p1},
-		// Start, end, and middle of a medium-length edge.
-		{p1, p2, 0, p1},
-		{p1, p2, 1, p2},
-		{p1, p2, 0.5, Point{(p1.Add(p2.Vector)).Mul(0.5)}},
-
-		// Test that interpolation is done using distances on the sphere
-		// rather than linear distances.
-		{
-			Point{r3.Vector{1, 0, 0}},
-			Point{r3.Vector{0, 1, 0}},
-			1.0 / 3.0,
-			Point{r3.Vector{math.Sqrt(3), 1, 0}},
-		},
-		{
-			Point{r3.Vector{1, 0, 0}},
-			Point{r3.Vector{0, 1, 0}},
-			2.0 / 3.0,
-			Point{r3.Vector{1, math.Sqrt(3), 0}},
-		},
-	}
-
-	for _, test := range tests {
-		// We allow a bit more than the usual 1e-15 error tolerance because
-		// Interpolate() uses trig functions.
-		if got := Interpolate(test.dist, test.a, test.b); !pointsApproxEquals(got, test.want, 3e-15) {
-			t.Errorf("Interpolate(%v, %v, %v) = %v, want %v", test.dist, test.a, test.b, got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilInterpolateOverLongEdge(t *testing.T) {
-	lng := math.Pi - 1e-2
-	a := Point{PointFromLatLng(LatLng{0, 0}).Normalize()}
-	b := Point{PointFromLatLng(LatLng{0, s1.Angle(lng)}).Normalize()}
-
-	for f := 0.4; f > 1e-15; f *= 0.1 {
-		// Test that interpolation is accurate on a long edge (but not so long that
-		// the definition of the edge itself becomes too unstable).
-		want := Point{PointFromLatLng(LatLng{0, s1.Angle(f * lng)}).Normalize()}
-		if got := Interpolate(f, a, b); !pointsApproxEquals(got, want, 3e-15) {
-			t.Errorf("long edge Interpolate(%v, %v, %v) = %v, want %v", f, a, b, got, want)
-		}
-
-		// Test the remainder of the dist also matches.
-		wantRem := Point{PointFromLatLng(LatLng{0, s1.Angle((1 - f) * lng)}).Normalize()}
-		if got := Interpolate(1-f, a, b); !pointsApproxEquals(got, wantRem, 3e-15) {
-			t.Errorf("long edge Interpolate(%v, %v, %v) = %v, want %v", 1-f, a, b, got, wantRem)
-		}
-	}
-}
-
-func TestEdgeutilInterpolateAntipodal(t *testing.T) {
-	p1 := PointFromCoords(0.1, 1e-30, 0.3)
-
-	// Test that interpolation on a 180 degree edge (antipodal endpoints) yields
-	// a result with the correct distance from each endpoint.
-	for dist := 0.0; dist <= 1.0; dist += 0.125 {
-		actual := Interpolate(dist, p1, Point{p1.Mul(-1)})
-		if !float64Near(actual.Distance(p1).Radians(), dist*math.Pi, 3e-15) {
-			t.Errorf("antipodal points Interpolate(%v, %v, %v) = %v, want %v", dist, p1, Point{p1.Mul(-1)}, actual, dist*math.Pi)
-		}
-	}
-}
-
-func TestEdgeutilRepeatedInterpolation(t *testing.T) {
-	// Check that points do not drift away from unit length when repeated
-	// interpolations are done.
-	for i := 0; i < 100; i++ {
-		a := randomPoint()
-		b := randomPoint()
-		for j := 0; j < 1000; j++ {
-			a = Interpolate(0.01, a, b)
-		}
-		if !a.Vector.IsUnit() {
-			t.Errorf("repeated Interpolate(%v, %v, %v) calls did not stay unit length for", 0.01, a, b)
-		}
-	}
-}
-
-func rectBoundForPoints(a, b Point) Rect {
-	bounder := NewRectBounder()
-	bounder.AddPoint(a)
-	bounder.AddPoint(b)
-	return bounder.RectBound()
-}
-
-func TestEdgeutilRectBounderMaxLatitudeSimple(t *testing.T) {
-	cubeLat := math.Asin(1 / math.Sqrt(3)) // 35.26 degrees
-	cubeLatRect := Rect{r1.IntervalFromPoint(-cubeLat).AddPoint(cubeLat),
-		s1.IntervalFromEndpoints(-math.Pi/4, math.Pi/4)}
-
-	tests := []struct {
-		a, b Point
-		want Rect
-	}{
-		// Check cases where the min/max latitude is attained at a vertex.
-		{
-			a:    Point{r3.Vector{1, 1, 1}},
-			b:    Point{r3.Vector{1, -1, -1}},
-			want: cubeLatRect,
-		},
-		{
-			a:    Point{r3.Vector{1, -1, 1}},
-			b:    Point{r3.Vector{1, 1, -1}},
-			want: cubeLatRect,
-		},
-	}
-
-	for _, test := range tests {
-		if got := rectBoundForPoints(test.a, test.b); !rectsApproxEqual(got, test.want, rectErrorLat, rectErrorLng) {
-			t.Errorf("RectBounder for points (%v, %v) near max lat failed: got %v, want %v", test.a, test.b, got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilRectBounderMaxLatitudeEdgeInterior(t *testing.T) {
-	// Check cases where the min/max latitude occurs in the edge interior.
-	// These tests expect the result to be pretty close to the middle of the
-	// allowable error range (i.e., by adding 0.5 * kRectError).
-
-	tests := []struct {
-		got  float64
-		want float64
-	}{
-		// Max latitude, CW edge
-		{
-			math.Pi/4 + 0.5*rectErrorLat,
-			rectBoundForPoints(Point{r3.Vector{1, 1, 1}}, Point{r3.Vector{1, -1, 1}}).Lat.Hi,
-		},
-		// Min latitude, CW edge
-		{
-			-math.Pi/4 - 0.5*rectErrorLat,
-			rectBoundForPoints(Point{r3.Vector{1, -1, -1}}, Point{r3.Vector{-1, -1, -1}}).Lat.Lo,
-		},
-		// Max latitude, CCW edge
-		{
-			math.Pi/4 + 0.5*rectErrorLat,
-			rectBoundForPoints(Point{r3.Vector{1, -1, 1}}, Point{r3.Vector{1, 1, 1}}).Lat.Hi,
-		},
-		// Min latitude, CCW edge
-		{
-			-math.Pi/4 - 0.5*rectErrorLat,
-			rectBoundForPoints(Point{r3.Vector{-1, 1, -1}}, Point{r3.Vector{-1, -1, -1}}).Lat.Lo,
-		},
-
-		// Check cases where the edge passes through one of the poles.
-		{
-			math.Pi / 2,
-			rectBoundForPoints(Point{r3.Vector{.3, .4, 1}}, Point{r3.Vector{-.3, -.4, 1}}).Lat.Hi,
-		},
-		{
-			-math.Pi / 2,
-			rectBoundForPoints(Point{r3.Vector{.3, .4, -1}}, Point{r3.Vector{-.3, -.4, -1}}).Lat.Lo,
-		},
-	}
-
-	for _, test := range tests {
-		if !float64Eq(test.got, test.want) {
-			t.Errorf("RectBound for max lat on interior of edge failed; got %v want %v", test.got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilRectBounderMaxLatitudeRandom(t *testing.T) {
-	// Check that the maximum latitude of edges is computed accurately to within
-	// 3 * dblEpsilon (the expected maximum error). We concentrate on maximum
-	// latitudes near the equator and north pole since these are the extremes.
-
-	for i := 0; i < 100; i++ {
-		// Construct a right-handed coordinate frame (U,V,W) such that U points
-		// slightly above the equator, V points at the equator, and W is slightly
-		// offset from the north pole.
-		u := randomPoint()
-		u.Z = dblEpsilon * 1e-6 * math.Pow(1e12, randomFloat64())
-
-		u = Point{u.Normalize()}
-		v := Point{PointFromCoords(0, 0, 1).PointCross(u).Normalize()}
-		w := Point{u.PointCross(v).Normalize()}
-
-		// Construct a line segment AB that passes through U, and check that the
-		// maximum latitude of this segment matches the latitude of U.
-		a := Point{u.Sub(v.Mul(randomFloat64())).Normalize()}
-		b := Point{u.Add(v.Mul(randomFloat64())).Normalize()}
-		abBound := rectBoundForPoints(a, b)
-		if !float64Near(latitude(u).Radians(), abBound.Lat.Hi, rectErrorLat) {
-			t.Errorf("bound for line AB not near enough to the latitude of point %v. got %v, want %v",
-				u, latitude(u).Radians(), abBound.Lat.Hi)
-		}
-
-		// Construct a line segment CD that passes through W, and check that the
-		// maximum latitude of this segment matches the latitude of W.
-		c := Point{w.Sub(v.Mul(randomFloat64())).Normalize()}
-		d := Point{w.Add(v.Mul(randomFloat64())).Normalize()}
-		cdBound := rectBoundForPoints(c, d)
-		if !float64Near(latitude(w).Radians(), cdBound.Lat.Hi, rectErrorLat) {
-			t.Errorf("bound for line CD not near enough to the lat of point %v. got %v, want %v",
-				v, latitude(w).Radians(), cdBound.Lat.Hi)
-		}
-	}
-}
-
-func TestEdgeutilExpandForSubregions(t *testing.T) {
-	// Test the full and empty bounds.
-	if !ExpandForSubregions(FullRect()).IsFull() {
-		t.Errorf("Subregion Bound of full rect should be full")
-	}
-	if !ExpandForSubregions(EmptyRect()).IsEmpty() {
-		t.Errorf("Subregion Bound of empty rect should be empty")
-	}
-
-	tests := []struct {
-		xLat, xLng, yLat, yLng float64
-		wantFull               bool
-	}{
-		// Cases where the bound does not straddle the equator (but almost does),
-		// and spans nearly 180 degrees in longitude.
-		{3e-16, 0, 1e-14, math.Pi, true},
-		{9e-16, 0, 1e-14, math.Pi, false},
-		{1e-16, 7e-16, 1e-14, math.Pi, true},
-		{3e-16, 14e-16, 1e-14, math.Pi, false},
-		{1e-100, 14e-16, 1e-14, math.Pi, true},
-		{1e-100, 22e-16, 1e-14, math.Pi, false},
-		// Cases where the bound spans at most 90 degrees in longitude, and almost
-		// 180 degrees in latitude.  Note that DBL_EPSILON is about 2.22e-16, which
-		// implies that the double-precision value just below Pi/2 can be written as
-		// (math.Pi/2 - 2e-16).
-		{-math.Pi / 2, -1e-15, math.Pi/2 - 7e-16, 0, true},
-		{-math.Pi / 2, -1e-15, math.Pi/2 - 30e-16, 0, false},
-		{-math.Pi/2 + 4e-16, 0, math.Pi/2 - 2e-16, 1e-7, true},
-		{-math.Pi/2 + 30e-16, 0, math.Pi / 2, 1e-7, false},
-		{-math.Pi/2 + 4e-16, 0, math.Pi/2 - 4e-16, math.Pi / 2, true},
-		{-math.Pi / 2, 0, math.Pi/2 - 30e-16, math.Pi / 2, false},
-		// Cases where the bound straddles the equator and spans more than 90
-		// degrees in longitude.  These are the cases where the critical distance is
-		// between a corner of the bound and the opposite longitudinal edge.  Unlike
-		// the cases above, here the bound may contain nearly-antipodal points (to
-		// within 3.055 * DBL_EPSILON) even though the latitude and longitude ranges
-		// are both significantly less than (math.Pi - 3.055 * DBL_EPSILON).
-		{-math.Pi / 2, 0, math.Pi/2 - 1e-8, math.Pi - 1e-7, true},
-		{-math.Pi / 2, 0, math.Pi/2 - 1e-7, math.Pi - 1e-7, false},
-		{-math.Pi/2 + 1e-12, -math.Pi + 1e-4, math.Pi / 2, 0, true},
-		{-math.Pi/2 + 1e-11, -math.Pi + 1e-4, math.Pi / 2, 0, true},
-	}
-
-	for _, tc := range tests {
-		in := RectFromLatLng(LatLng{s1.Angle(tc.xLat), s1.Angle(tc.xLng)})
-		in = in.AddPoint(LatLng{s1.Angle(tc.yLat), s1.Angle(tc.yLng)})
-		got := ExpandForSubregions(in)
-
-		// Test that the bound is actually expanded.
-		if !got.Contains(in) {
-			t.Errorf("Subregion bound of (%f, %f, %f, %f) should contain original rect", tc.xLat, tc.xLng, tc.yLat, tc.yLng)
-		}
-		if in.Lat == validRectLatRange && in.Lat.ContainsInterval(got.Lat) {
-			t.Errorf("Subregion bound of (%f, %f, %f, %f) shouldn't be contained by original rect", tc.xLat, tc.xLng, tc.yLat, tc.yLng)
-		}
-
-		// We check the various situations where the bound contains nearly-antipodal points. The tests are organized into pairs
-		// where the two bounds are similar except that the first bound meets the nearly-antipodal criteria while the second does not.
-		if got.IsFull() != tc.wantFull {
-			t.Errorf("Subregion Bound of (%f, %f, %f, %f).IsFull should be %t", tc.xLat, tc.xLng, tc.yLat, tc.yLng, tc.wantFull)
-		}
-	}
-
-	rectTests := []struct {
-		xLat, xLng, yLat, yLng float64
-		wantRect               Rect
-	}{
-		{1.5, -math.Pi / 2, 1.5, math.Pi/2 - 2e-16, Rect{r1.Interval{1.5, 1.5}, s1.FullInterval()}},
-		{1.5, -math.Pi / 2, 1.5, math.Pi/2 - 7e-16, Rect{r1.Interval{1.5, 1.5}, s1.Interval{-math.Pi / 2, math.Pi/2 - 7e-16}}},
-		// Check for cases where the bound is expanded to include one of the poles
-		{-math.Pi/2 + 1e-15, 0, -math.Pi/2 + 1e-15, 0, Rect{r1.Interval{-math.Pi / 2, -math.Pi/2 + 1e-15}, s1.FullInterval()}},
-		{math.Pi/2 - 1e-15, 0, math.Pi/2 - 1e-15, 0, Rect{r1.Interval{math.Pi/2 - 1e-15, math.Pi / 2}, s1.FullInterval()}},
-	}
-
-	for _, tc := range rectTests {
-		// Now we test cases where the bound does not contain nearly-antipodal
-		// points, but it does contain points that are approximately 180 degrees
-		// apart in latitude.
-		in := RectFromLatLng(LatLng{s1.Angle(tc.xLat), s1.Angle(tc.xLng)})
-		in = in.AddPoint(LatLng{s1.Angle(tc.yLat), s1.Angle(tc.yLng)})
-		got := ExpandForSubregions(in)
-		if !rectsApproxEqual(got, tc.wantRect, rectErrorLat, rectErrorLng) {
-			t.Errorf("Subregion Bound of (%f, %f, %f, %f) = (%v) should be %v", tc.xLat, tc.xLng, tc.yLat, tc.yLng, got, tc.wantRect)
-		}
-	}
-}
-
-func TestEdgeutilIntersectsFace(t *testing.T) {
-	tests := []struct {
-		a    pointUVW
-		want bool
-	}{
-		{pointUVW{r3.Vector{2.05335e-06, 3.91604e-22, 2.90553e-06}}, false},
-		{pointUVW{r3.Vector{-3.91604e-22, -2.05335e-06, -2.90553e-06}}, false},
-		{pointUVW{r3.Vector{0.169258, -0.169258, 0.664013}}, false},
-		{pointUVW{r3.Vector{0.169258, -0.169258, -0.664013}}, false},
-		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 3.88578e-16}}, true},
-		{pointUVW{r3.Vector{-3.88578e-16, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, true},
-	}
-
-	for _, test := range tests {
-		if got := test.a.intersectsFace(); got != test.want {
-			t.Errorf("%v.intersectsFace() = %v, want %v", test.a, got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilIntersectsOppositeEdges(t *testing.T) {
-	tests := []struct {
-		a    pointUVW
-		want bool
-	}{
-		{pointUVW{r3.Vector{0.169258, -0.169258, 0.664013}}, false},
-		{pointUVW{r3.Vector{0.169258, -0.169258, -0.664013}}, false},
-
-		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), 0, -math.Sqrt(4.0 / 3.0)}}, true},
-		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), 0, math.Sqrt(4.0 / 3.0)}}, true},
-
-		{pointUVW{r3.Vector{-math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 1.66533453694e-16}}, false},
-		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0), -1.66533453694e-16}}, false},
-	}
-	for _, test := range tests {
-		if got := test.a.intersectsOppositeEdges(); got != test.want {
-			t.Errorf("%v.intersectsOppositeEdges() = %v, want %v", test.a, got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilExitAxis(t *testing.T) {
-	tests := []struct {
-		a    pointUVW
-		want axis
-	}{
-		{pointUVW{r3.Vector{0, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, axisU},
-		{pointUVW{r3.Vector{0, math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0)}}, axisU},
-		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV},
-		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), math.Sqrt(4.0 / 3.0), 0}}, axisV},
-		{pointUVW{r3.Vector{math.Sqrt(2.0 / 3.0), -math.Sqrt(2.0 / 3.0), 0}}, axisV},
-		{pointUVW{r3.Vector{1.67968702783622, 0, 0.870988820096491}}, axisV},
-		{pointUVW{r3.Vector{0, math.Sqrt2, math.Sqrt2}}, axisU},
-	}
-
-	for _, test := range tests {
-		if got := test.a.exitAxis(); got != test.want {
-			t.Errorf("%v.exitAxis() = %v, want %v", test.a, got, test.want)
-		}
-	}
-}
-
-func TestEdgeutilExitPoint(t *testing.T) {
-	tests := []struct {
-		a        pointUVW
-		exitAxis axis
-		want     r2.Point
-	}{
-		{pointUVW{r3.Vector{-3.88578058618805e-16, -math.Sqrt(2.0 / 3.0), math.Sqrt(2.0 / 3.0)}}, axisU, r2.Point{-1, 1}},
-		{pointUVW{r3.Vector{math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV, r2.Point{-1, -1}},
-		{pointUVW{r3.Vector{-math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0), 0}}, axisV, r2.Point{-1, 1}},
-		{pointUVW{r3.Vector{-6.66134e-16, math.Sqrt(4.0 / 3.0), -math.Sqrt(4.0 / 3.0)}}, axisU, r2.Point{1, 1}},
-	}
-
-	for _, test := range tests {
-		if got := test.a.exitPoint(test.exitAxis); !r2PointsApproxEquals(got, test.want, epsilon) {
-			t.Errorf("%v.exitPoint() = %v, want %v", test.a, got, test.want)
-		}
-	}
-}
-
-// testClipToPaddedFace performs a comprehensive set of tests across all faces and
-// with random padding for the given points.
-//
-// We do this by defining an (x,y) coordinate system for the plane containing AB,
-// and converting points along the great circle AB to angles in the range
-// [-Pi, Pi]. We then accumulate the angle intervals spanned by each
-// clipped edge; the union over all 6 faces should approximately equal the
-// interval covered by the original edge.
-func testClipToPaddedFace(t *testing.T, a, b Point) {
-	a = Point{a.Normalize()}
-	b = Point{b.Normalize()}
-	if a.Vector == b.Mul(-1) {
-		return
-	}
-
-	norm := Point{a.PointCross(b).Normalize()}
-	aTan := Point{norm.Cross(a.Vector)}
-
-	padding := 0.0
-	if !oneIn(10) {
-		padding = 1e-10 * math.Pow(1e-5, randomFloat64())
-	}
-
-	xAxis := a
-	yAxis := aTan
-
-	// Given the points A and B, we expect all angles generated from the clipping
-	// to fall within this range.
-	expectedAngles := s1.Interval{0, float64(a.Angle(b.Vector))}
-	if expectedAngles.IsInverted() {
-		expectedAngles = s1.Interval{expectedAngles.Hi, expectedAngles.Lo}
-	}
-	maxAngles := expectedAngles.Expanded(faceClipErrorRadians)
-	var actualAngles s1.Interval
-
-	for face := 0; face < 6; face++ {
-		aUV, bUV, intersects := ClipToPaddedFace(a, b, face, padding)
-		if !intersects {
-			continue
-		}
-
-		aClip := Point{faceUVToXYZ(face, aUV.X, aUV.Y).Normalize()}
-		bClip := Point{faceUVToXYZ(face, bUV.X, bUV.Y).Normalize()}
-
-		desc := fmt.Sprintf("on face %d, a=%v, b=%v, aClip=%v, bClip=%v,", face, a, b, aClip, bClip)
-
-		if got := math.Abs(aClip.Dot(norm.Vector)); got > faceClipErrorRadians {
-			t.Errorf("%s abs(%v.Dot(%v)) = %v, want <= %v", desc, aClip, norm, got, faceClipErrorRadians)
-		}
-		if got := math.Abs(bClip.Dot(norm.Vector)); got > faceClipErrorRadians {
-			t.Errorf("%s abs(%v.Dot(%v)) = %v, want <= %v", desc, bClip, norm, got, faceClipErrorRadians)
-		}
-
-		if float64(aClip.Angle(a.Vector)) > faceClipErrorRadians {
-			if got := math.Max(math.Abs(aUV.X), math.Abs(aUV.Y)); !float64Eq(got, 1+padding) {
-				t.Errorf("%s the largest component of %v = %v, want %v", desc, aUV, got, 1+padding)
-			}
-		}
-		if float64(bClip.Angle(b.Vector)) > faceClipErrorRadians {
-			if got := math.Max(math.Abs(bUV.X), math.Abs(bUV.Y)); !float64Eq(got, 1+padding) {
-				t.Errorf("%s the largest component of %v = %v, want %v", desc, bUV, got, 1+padding)
-			}
-		}
-
-		aAngle := math.Atan2(aClip.Dot(yAxis.Vector), aClip.Dot(xAxis.Vector))
-		bAngle := math.Atan2(bClip.Dot(yAxis.Vector), bClip.Dot(xAxis.Vector))
-
-		// Rounding errors may cause bAngle to be slightly less than aAngle.
-		// We handle this by constructing the interval with FromPointPair,
-		// which is okay since the interval length is much less than math.Pi.
-		faceAngles := s1.IntervalFromEndpoints(aAngle, bAngle)
-		if faceAngles.IsInverted() {
-			faceAngles = s1.Interval{faceAngles.Hi, faceAngles.Lo}
-		}
-		if !maxAngles.ContainsInterval(faceAngles) {
-			t.Errorf("%s %v.ContainsInterval(%v) = false, but should have contained this interval", desc, maxAngles, faceAngles)
-		}
-		actualAngles = actualAngles.Union(faceAngles)
-	}
-	if !actualAngles.Expanded(faceClipErrorRadians).ContainsInterval(expectedAngles) {
-		t.Errorf("the union of all angle segments should be larger than the expected angle")
-	}
-}
-
-func TestEdgeutilFaceClipping(t *testing.T) {
-	// Start with a few simple cases.
-	// An edge that is entirely contained within one cube face:
-	testClipToPaddedFace(t, Point{r3.Vector{1, -0.5, -0.5}}, Point{r3.Vector{1, 0.5, 0.5}})
-	testClipToPaddedFace(t, Point{r3.Vector{1, 0.5, 0.5}}, Point{r3.Vector{1, -0.5, -0.5}})
-	// An edge that crosses one cube edge:
-	testClipToPaddedFace(t, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 1, 0}})
-	testClipToPaddedFace(t, Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}})
-	// An edge that crosses two opposite edges of face 0:
-	testClipToPaddedFace(t, Point{r3.Vector{0.75, 0, -1}}, Point{r3.Vector{0.75, 0, 1}})
-	testClipToPaddedFace(t, Point{r3.Vector{0.75, 0, 1}}, Point{r3.Vector{0.75, 0, -1}})
-	// An edge that crosses two adjacent edges of face 2:
-	testClipToPaddedFace(t, Point{r3.Vector{1, 0, 0.75}}, Point{r3.Vector{0, 1, 0.75}})
-	testClipToPaddedFace(t, Point{r3.Vector{0, 1, 0.75}}, Point{r3.Vector{1, 0, 0.75}})
-	// An edges that crosses three cube edges (four faces):
-	testClipToPaddedFace(t, Point{r3.Vector{1, 0.9, 0.95}}, Point{r3.Vector{-1, 0.95, 0.9}})
-	testClipToPaddedFace(t, Point{r3.Vector{-1, 0.95, 0.9}}, Point{r3.Vector{1, 0.9, 0.95}})
-
-	// Comprehensively test edges that are difficult to handle, especially those
-	// that nearly follow one of the 12 cube edges.
-	biunit := r2.Rect{r1.Interval{-1, 1}, r1.Interval{-1, 1}}
-
-	for i := 0; i < 1000; i++ {
-		// Choose two adjacent cube corners P and Q.
-		face := randomUniformInt(6)
-		i := randomUniformInt(4)
-		j := (i + 1) & 3
-		p := Point{faceUVToXYZ(face, biunit.Vertices()[i].X, biunit.Vertices()[i].Y)}
-		q := Point{faceUVToXYZ(face, biunit.Vertices()[j].X, biunit.Vertices()[j].Y)}
-
-		// Now choose two points that are nearly in the plane of PQ, preferring
-		// points that are near cube corners, face midpoints, or edge midpoints.
-		a := perturbedCornerOrMidpoint(p, q)
-		b := perturbedCornerOrMidpoint(p, q)
-		testClipToPaddedFace(t, a, b)
-	}
-}
-
-// getFraction returns the fraction t of the given point X on the line AB such that
-// x = (1-t)*a + t*b. Returns 0 if A = B.
-func getFraction(t *testing.T, x, a, b r2.Point) float64 {
-	// A bound for the error in edge clipping plus the error in the calculation
-	// (which is similar to EdgeIntersectsRect).
-	errorDist := (edgeClipErrorUVDist + intersectsRectErrorUVDist)
-	if a == b {
-		return 0.0
-	}
-	dir := b.Sub(a).Normalize()
-	if got := math.Abs(x.Sub(a).Dot(dir.Ortho())); got > errorDist {
-		t.Errorf("getFraction(%v, %v, %v) = %v, which exceeds errorDist %v", x, a, b, got, errorDist)
-	}
-	return x.Sub(a).Dot(dir)
-}
-
-// randomPointFromInterval returns a randomly selected point from the given interval
-// with one of three possible choices. All cases have reasonable probability for any
-// interval. The choices are: randomly choose a value inside the interval, choose a
-// value outside the interval, or select one of the two endpoints.
-func randomPointFromInterval(clip r1.Interval) float64 {
-	if oneIn(5) {
-		if oneIn(2) {
-			return clip.Lo
-		}
-		return clip.Hi
-	}
-
-	switch randomUniformInt(3) {
-	case 0:
-		return clip.Lo - randomFloat64()
-	case 1:
-		return clip.Hi + randomFloat64()
-	default:
-		return clip.Lo + randomFloat64()*clip.Length()
-	}
-}
-
-// Given a rectangle "clip", choose a point that may lie in the rectangle interior, along an extended edge, exactly at a vertex, or in one of the eight regions exterior to "clip" that are separated by its extended edges.  Also sometimes return points that are exactly on one of the extended diagonals of "clip". All cases are reasonably likely to occur for any given rectangle "clip".
-func chooseRectEndpoint(clip r2.Rect) r2.Point {
-	if oneIn(10) {
-		// Return a point on one of the two extended diagonals.
-		diag := randomUniformInt(2)
-		t := randomUniformFloat64(-1, 2)
-		return clip.Vertices()[diag].Mul(1 - t).Add(clip.Vertices()[diag+2].Mul(t))
-	}
-	return r2.Point{randomPointFromInterval(clip.X), randomPointFromInterval(clip.Y)}
-}
-
-// Choose a random point in the rectangle defined by points A and B, sometimes
-// returning a point on the edge AB or the points A and B themselves.
-func choosePointInRect(a, b r2.Point) r2.Point {
-	if oneIn(5) {
-		if oneIn(2) {
-			return a
-		}
-		return b
-	}
-
-	if oneIn(3) {
-		return a.Add(b.Sub(a).Mul(randomFloat64()))
-	}
-	return r2.Point{randomUniformFloat64(a.X, b.X), randomUniformFloat64(a.Y, b.Y)}
-}
-
-// Given a point P representing a possibly clipped endpoint A of an edge AB,
-// verify that clip contains P, and that if clipping occurred (i.e., P != A)
-// then P is on the boundary of clip.
-func checkPointOnBoundary(t *testing.T, p, a r2.Point, clip r2.Rect) {
-	if got := clip.ContainsPoint(p); !got {
-		t.Errorf("%v.ContainsPoint(%v) = %v, want true", clip, p, got)
-	}
-	if p != a {
-		p1 := r2.Point{math.Nextafter(p.X, a.X), math.Nextafter(p.Y, a.Y)}
-		if got := clip.ContainsPoint(p1); got {
-			t.Errorf("%v.ContainsPoint(%v) = %v, want false", clip, p1, got)
-		}
-	}
-}
-
-func TestEdgeutilEdgeClipping(t *testing.T) {
-	// A bound for the error in edge clipping plus the error in the
-	// EdgeIntersectsRect calculation below.
-	errorDist := (edgeClipErrorUVDist + intersectsRectErrorUVDist)
-	testRects := []r2.Rect{
-		// Test clipping against random rectangles.
-		r2.RectFromPoints(
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
-		r2.RectFromPoints(
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
-		r2.RectFromPoints(
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
-		r2.RectFromPoints(
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
-		r2.RectFromPoints(
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)},
-			r2.Point{randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1)}),
-
-		// Also clip against one-dimensional, singleton, and empty rectangles.
-		r2.Rect{r1.Interval{-0.7, -0.7}, r1.Interval{0.3, 0.35}},
-		r2.Rect{r1.Interval{0.2, 0.5}, r1.Interval{0.3, 0.3}},
-		r2.Rect{r1.Interval{-0.7, 0.3}, r1.Interval{0, 0}},
-		r2.RectFromPoints(r2.Point{0.3, 0.8}),
-		r2.EmptyRect(),
-	}
-
-	for _, r := range testRects {
-		for i := 0; i < 1000; i++ {
-			a := chooseRectEndpoint(r)
-			b := chooseRectEndpoint(r)
-
-			aClip, bClip, intersects := ClipEdge(a, b, r)
-			if !intersects {
-				if edgeIntersectsRect(a, b, r.ExpandedByMargin(-errorDist)) {
-					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = true, want false", a, b, r, -errorDist)
-				}
-			} else {
-				if !edgeIntersectsRect(a, b, r.ExpandedByMargin(errorDist)) {
-					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = false, want true", a, b, r, errorDist)
-				}
-
-				// Check that the clipped points lie on the edge AB, and
-				// that the points have the expected order along the segment AB.
-				if gotA, gotB := getFraction(t, aClip, a, b), getFraction(t, bClip, a, b); gotA > gotB {
-					t.Errorf("getFraction(%v,%v,%v) = %v, getFraction(%v, %v, %v) = %v; %v < %v = false, want true", aClip, a, b, gotA, bClip, a, b, gotB, gotA, gotB)
-				}
-
-				// Check that the clipped portion of AB is as large as possible.
-				checkPointOnBoundary(t, aClip, a, r)
-				checkPointOnBoundary(t, bClip, b, r)
-			}
-
-			// Choose an random initial bound to pass to clipEdgeBound.
-			initialClip := r2.RectFromPoints(choosePointInRect(a, b), choosePointInRect(a, b))
-			bound := clippedEdgeBound(a, b, initialClip)
-			if bound.IsEmpty() {
-				// Precondition of clipEdgeBound not met
-				continue
-			}
-			maxBound := bound.Intersection(r)
-			if bound, intersects := clipEdgeBound(a, b, r, bound); !intersects {
-				if edgeIntersectsRect(a, b, maxBound.ExpandedByMargin(-errorDist)) {
-					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = true, want false", a, b, maxBound.ExpandedByMargin(-errorDist), -errorDist)
-				}
-			} else {
-				if !edgeIntersectsRect(a, b, maxBound.ExpandedByMargin(errorDist)) {
-					t.Errorf("edgeIntersectsRect(%v, %v, %v.ExpandedByMargin(%v) = false, want true", a, b, maxBound.ExpandedByMargin(errorDist), errorDist)
-				}
-				// check that the bound is as large as possible.
-				ai := 0
-				if a.X > b.X {
-					ai = 1
-				}
-				aj := 0
-				if a.Y > b.Y {
-					aj = 1
-				}
-				checkPointOnBoundary(t, bound.VertexIJ(ai, aj), a, maxBound)
-				checkPointOnBoundary(t, bound.VertexIJ(1-ai, 1-aj), b, maxBound)
-			}
-		}
-	}
-}
-
-func TestCheckDistance(t *testing.T) {
-	// Uncomment once Distance / UpdateMinDistance are implemented.
-	//var zeroChordAngle s1.ChordAngle
-	tests := []struct {
-		x, a, b r3.Vector
-		distRad float64
-		want    r3.Vector
-	}{
-		{
-			x:       r3.Vector{1, 0, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: 0,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{0, 1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: 0,
-			want:    r3.Vector{0, 1, 0},
-		},
-		{
-			x:       r3.Vector{1, 3, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: 0,
-			want:    r3.Vector{1, 3, 0},
-		},
-		{
-			x:       r3.Vector{0, 0, 1},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{0, 0, -1},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{-1, -1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: 0.75 * math.Pi,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{0, 1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{1, 1, 0},
-			distRad: math.Pi / 4,
-			want:    r3.Vector{1, 1, 0},
-		},
-		{
-			x:       r3.Vector{0, -1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{1, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{0, -1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{-1, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{1, 0, 0},
-		},
-		{
-			x:       r3.Vector{-1, -1, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{-1, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{-1, 1, 0},
-		},
-		{
-			x:       r3.Vector{1, 1, 1},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: math.Asin(math.Sqrt(1.0 / 3.0)),
-			want:    r3.Vector{1, 1, 0},
-		},
-		{
-			x:       r3.Vector{1, 1, -1},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{0, 1, 0},
-			distRad: math.Asin(math.Sqrt(1.0 / 3.0)),
-			want:    r3.Vector{1, 1, 0}},
-		{
-			x:       r3.Vector{-1, 0, 0},
-			a:       r3.Vector{1, 1, 0},
-			b:       r3.Vector{1, 1, 0},
-			distRad: 0.75 * math.Pi,
-			want:    r3.Vector{1, 1, 0},
-		},
-		{
-			x:       r3.Vector{0, 0, -1},
-			a:       r3.Vector{1, 1, 0},
-			b:       r3.Vector{1, 1, 0},
-			distRad: math.Pi / 2,
-			want:    r3.Vector{1, 1, 0},
-		},
-		{
-			x:       r3.Vector{-1, 0, 0},
-			a:       r3.Vector{1, 0, 0},
-			b:       r3.Vector{1, 0, 0},
-			distRad: math.Pi,
-			want:    r3.Vector{1, 0, 0},
-		},
-	}
-
-	for _, test := range tests {
-		x := Point{test.x.Normalize()}
-		a := Point{test.a.Normalize()}
-		b := Point{test.b.Normalize()}
-		want := Point{test.want.Normalize()}
-
-		if d := DistanceFromSegment(x, a, b).Radians(); !float64Near(d, test.distRad, 1e-15) {
-			t.Errorf("DistanceFromSegment(%v, %v, %v) = %v, want %v", x, a, b, d, test.distRad)
-		}
-
-		closest := ClosestPoint(x, a, b)
-		if !closest.ApproxEqual(want) {
-			t.Errorf("ClosestPoint(%v, %v, %v) = %v, want %v", x, a, b, closest, want)
-		}
-
-		// Uncomment these once Distance / UpdateMinDistance are implemented.
-		//minDistance := zeroChordAngle
-		//if minDistance, ok := UpdateMinDistance(x, a, b, minDistance); ok {
-		//	t.Errorf("UpdateMinDistance(%x, %v, %v, %v) = %v, want %v", x, a, b, zeroChordAngle, minDistance, zeroChordAngle)
-		//}
-		//
-		//minDistance = s1.InfChordAngle()
-		//if minDistance, ok := UpdateMinDistance(x, a, b, minDistance); !ok {
-		//	t.Errorf("UpdateMinDistance(%x, %v, %v, %v) = %v, want %v", x, a, b, s1.InfChordAngle(), minDistance, s1.InfChordAngle())
-		//}
-		//
-		//if !float64Near(test.distRad, minDistance.Angle().Radians(), 1e-15) {
-		//	t.Errorf("%v != %v", minDistance.Angle().Radians(), test.distRad)
-		//}
-	}
-}
-
-func TestEdgeUtilWedges(t *testing.T) {
-	// For simplicity, all of these tests use an origin of (0, 0, 1).
-	// This shouldn't matter as long as the lower-level primitives are
-	// implemented correctly.
-	ab1 := Point{r3.Vector{0, 0, 1}}
-
-	tests := []struct {
-		desc           string
-		a0, a1, b0, b1 Point
-		contains       bool
-		intersects     bool
-		relation       WedgeRel
-	}{
-		{
-			desc:       "Intersection in one wedge",
-			a0:         Point{r3.Vector{-1, 0, 10}},
-			a1:         Point{r3.Vector{1, 2, 10}},
-			b0:         Point{r3.Vector{0, 1, 10}},
-			b1:         Point{r3.Vector{1, -2, 10}},
-			contains:   false,
-			intersects: true,
-			relation:   WedgeProperlyOverlaps,
-		},
-		{
-			desc:       "Intersection in two wedges",
-			a0:         Point{r3.Vector{-1, -1, 10}},
-			a1:         Point{r3.Vector{1, -1, 10}},
-			b0:         Point{r3.Vector{1, 0, 10}},
-			b1:         Point{r3.Vector{-1, 1, 10}},
-			contains:   false,
-			intersects: true,
-			relation:   WedgeProperlyOverlaps,
-		},
-		{
-			desc:       "Normal containment",
-			a0:         Point{r3.Vector{-1, -1, 10}},
-			a1:         Point{r3.Vector{1, -1, 10}},
-			b0:         Point{r3.Vector{-1, 0, 10}},
-			b1:         Point{r3.Vector{1, 0, 10}},
-			contains:   true,
-			intersects: true,
-			relation:   WedgeProperlyContains,
-		},
-		{
-			desc:       "Containment with equality on one side",
-			a0:         Point{r3.Vector{2, 1, 10}},
-			a1:         Point{r3.Vector{-1, -1, 10}},
-			b0:         Point{r3.Vector{2, 1, 10}},
-			b1:         Point{r3.Vector{1, -5, 10}},
-			contains:   true,
-			intersects: true,
-			relation:   WedgeProperlyContains,
-		},
-		{
-			desc:       "Containment with equality on the other side",
-			a0:         Point{r3.Vector{2, 1, 10}},
-			a1:         Point{r3.Vector{-1, -1, 10}},
-			b0:         Point{r3.Vector{1, -2, 10}},
-			b1:         Point{r3.Vector{-1, -1, 10}},
-			contains:   true,
-			intersects: true,
-			relation:   WedgeProperlyContains,
-		},
-		{
-			desc:       "Containment with equality on both sides",
-			a0:         Point{r3.Vector{-2, 3, 10}},
-			a1:         Point{r3.Vector{4, -5, 10}},
-			b0:         Point{r3.Vector{-2, 3, 10}},
-			b1:         Point{r3.Vector{4, -5, 10}},
-			contains:   true,
-			intersects: true,
-			relation:   WedgeEquals,
-		},
-		{
-			desc:       "Disjoint with equality on one side",
-			a0:         Point{r3.Vector{-2, 3, 10}},
-			a1:         Point{r3.Vector{4, -5, 10}},
-			b0:         Point{r3.Vector{4, -5, 10}},
-			b1:         Point{r3.Vector{-2, -3, 10}},
-			contains:   false,
-			intersects: false,
-			relation:   WedgeIsDisjoint,
-		},
-		{
-			desc:       "Disjoint with equality on the other side",
-			a0:         Point{r3.Vector{-2, 3, 10}},
-			a1:         Point{r3.Vector{0, 5, 10}},
-			b0:         Point{r3.Vector{4, -5, 10}},
-			b1:         Point{r3.Vector{-2, 3, 10}},
-			contains:   false,
-			intersects: false,
-			relation:   WedgeIsDisjoint,
-		},
-		{
-			desc:       "Disjoint with equality on both sides",
-			a0:         Point{r3.Vector{-2, 3, 10}},
-			a1:         Point{r3.Vector{4, -5, 10}},
-			b0:         Point{r3.Vector{4, -5, 10}},
-			b1:         Point{r3.Vector{-2, 3, 10}},
-			contains:   false,
-			intersects: false,
-			relation:   WedgeIsDisjoint,
-		},
-		{
-			desc:       "B contains A with equality on one side",
-			a0:         Point{r3.Vector{2, 1, 10}},
-			a1:         Point{r3.Vector{1, -5, 10}},
-			b0:         Point{r3.Vector{2, 1, 10}},
-			b1:         Point{r3.Vector{-1, -1, 10}},
-			contains:   false,
-			intersects: true,
-			relation:   WedgeIsProperlyContained,
-		},
-
-		{
-			desc:       "B contains A with equality on the other side",
-			a0:         Point{r3.Vector{2, 1, 10}},
-			a1:         Point{r3.Vector{1, -5, 10}},
-			b0:         Point{r3.Vector{-2, 1, 10}},
-			b1:         Point{r3.Vector{1, -5, 10}},
-			contains:   false,
-			intersects: true,
-			relation:   WedgeIsProperlyContained,
-		},
-	}
-
-	for _, test := range tests {
-		if got := WedgeContains(test.a0, ab1, test.a1, test.b0, test.b1); got != test.contains {
-			t.Errorf("%s: WedgeContains(%v, %v, %v, %v, %v) = %t, want %t", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.contains)
-		}
-		if got := WedgeIntersects(test.a0, ab1, test.a1, test.b0, test.b1); got != test.intersects {
-			t.Errorf("%s: WedgeIntersects(%v, %v, %v, %v, %v) = %t, want %t", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.intersects)
-		}
-		if got := WedgeRelation(test.a0, ab1, test.a1, test.b0, test.b1); got != test.relation {
-			t.Errorf("%s: WedgeRelation(%v, %v, %v, %v, %v) = %v, want %v", test.desc, test.a0, ab1, test.a1, test.b0, test.b1, got, test.relation)
-		}
-	}
-}

+ 237 - 0
vendor/github.com/golang/geo/s2/encode.go

@@ -0,0 +1,237 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+const (
+	// encodingVersion is the current version of the encoding
+	// format that is compatible with C++ and other S2 libraries.
+	encodingVersion = int8(1)
+
+	// encodingCompressedVersion is the current version of the
+	// compressed format.
+	encodingCompressedVersion = int8(4)
+)
+
+// encoder handles the specifics of encoding for S2 types.
+type encoder struct {
+	w   io.Writer // the real writer passed to Encode
+	err error
+}
+
+func (e *encoder) writeUvarint(x uint64) {
+	if e.err != nil {
+		return
+	}
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutUvarint(buf[:], x)
+	_, e.err = e.w.Write(buf[:n])
+}
+
+func (e *encoder) writeBool(x bool) {
+	if e.err != nil {
+		return
+	}
+	var val int8
+	if x {
+		val = 1
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, val)
+}
+
+func (e *encoder) writeInt8(x int8) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt16(x int16) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt32(x int32) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt64(x int64) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeUint8(x uint8) {
+	if e.err != nil {
+		return
+	}
+	_, e.err = e.w.Write([]byte{x})
+}
+
+func (e *encoder) writeUint32(x uint32) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeUint64(x uint64) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeFloat32(x float32) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeFloat64(x float64) {
+	if e.err != nil {
+		return
+	}
+	e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+type byteReader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// byteReaderAdapter embellishes an io.Reader with a ReadByte method,
+// so that it implements the io.ByteReader interface.
+type byteReaderAdapter struct {
+	io.Reader
+}
+
+func (b byteReaderAdapter) ReadByte() (byte, error) {
+	buf := []byte{0}
+	_, err := io.ReadFull(b, buf)
+	return buf[0], err
+}
+
+func asByteReader(r io.Reader) byteReader {
+	if br, ok := r.(byteReader); ok {
+		return br
+	}
+	return byteReaderAdapter{r}
+}
+
+type decoder struct {
+	r   byteReader // the real reader passed to Decode
+	err error
+}
+
+func (d *decoder) readBool() (x bool) {
+	if d.err != nil {
+		return
+	}
+	var val int8
+	d.err = binary.Read(d.r, binary.LittleEndian, &val)
+	return val == 1
+}
+
+func (d *decoder) readInt8() (x int8) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readInt16() (x int16) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readInt32() (x int32) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readInt64() (x int64) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readUint8() (x uint8) {
+	if d.err != nil {
+		return
+	}
+	x, d.err = d.r.ReadByte()
+	return
+}
+
+func (d *decoder) readUint32() (x uint32) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readUint64() (x uint64) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readFloat32() (x float32) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readFloat64() (x float64) {
+	if d.err != nil {
+		return
+	}
+	d.err = binary.Read(d.r, binary.LittleEndian, &x)
+	return
+}
+
+func (d *decoder) readUvarint() (x uint64) {
+	if d.err != nil {
+		return
+	}
+	x, d.err = binary.ReadUvarint(d.r)
+	return
+}

+ 143 - 0
vendor/github.com/golang/geo/s2/interleave.go

@@ -0,0 +1,143 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+/*
+The lookup table below can convert a sequence of interleaved 8 bits into
+non-interleaved 4 bits. The table can convert both odd and even bits at the
+same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6),
+while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7).
+
+The lookup table below was generated using the following python code:
+
+	def deinterleave(bits):
+	  if bits == 0: return 0
+	  if bits < 4: return 1
+	  return deinterleave(bits / 4) * 2 + deinterleave(bits & 3)
+
+	for i in range(256): print "0x%x," % deinterleave(i),
+*/
+var deinterleaveLookup = [256]uint32{
+	0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3,
+	0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3,
+	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+
+	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+
+	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+
+	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+}
+
+// deinterleaveUint32 decodes the interleaved values.
+func deinterleaveUint32(code uint64) (uint32, uint32) {
+	x := (deinterleaveLookup[code&0x55]) |
+		(deinterleaveLookup[(code>>8)&0x55] << 4) |
+		(deinterleaveLookup[(code>>16)&0x55] << 8) |
+		(deinterleaveLookup[(code>>24)&0x55] << 12) |
+		(deinterleaveLookup[(code>>32)&0x55] << 16) |
+		(deinterleaveLookup[(code>>40)&0x55] << 20) |
+		(deinterleaveLookup[(code>>48)&0x55] << 24) |
+		(deinterleaveLookup[(code>>56)&0x55] << 28)
+	y := (deinterleaveLookup[code&0xaa]) |
+		(deinterleaveLookup[(code>>8)&0xaa] << 4) |
+		(deinterleaveLookup[(code>>16)&0xaa] << 8) |
+		(deinterleaveLookup[(code>>24)&0xaa] << 12) |
+		(deinterleaveLookup[(code>>32)&0xaa] << 16) |
+		(deinterleaveLookup[(code>>40)&0xaa] << 20) |
+		(deinterleaveLookup[(code>>48)&0xaa] << 24) |
+		(deinterleaveLookup[(code>>56)&0xaa] << 28)
+	return x, y
+}
+
+var interleaveLookup = [256]uint64{
+	0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
+	0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
+	0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
+	0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
+	0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
+	0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
+	0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
+	0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
+
+	0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
+	0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
+	0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
+	0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
+	0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
+	0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
+	0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
+	0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
+
+	0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
+	0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
+	0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
+	0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
+	0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
+	0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
+	0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
+	0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
+
+	0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
+	0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
+	0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
+	0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
+	0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
+	0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
+	0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
+	0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555,
+}
+
+// interleaveUint32 interleaves the given arguments into the return value.
+//
+// The 0-bit in val0 will be the 0-bit in the return value.
+// The 0-bit in val1 will be the 1-bit in the return value.
+// The 1-bit of val0 will be the 2-bit in the return value, and so on.
+func interleaveUint32(x, y uint32) uint64 {
+	return (interleaveLookup[x&0xff]) |
+		(interleaveLookup[(x>>8)&0xff] << 16) |
+		(interleaveLookup[(x>>16)&0xff] << 32) |
+		(interleaveLookup[x>>24] << 48) |
+		(interleaveLookup[y&0xff] << 1) |
+		(interleaveLookup[(y>>8)&0xff] << 17) |
+		(interleaveLookup[(y>>16)&0xff] << 33) |
+		(interleaveLookup[y>>24] << 49)
+}

+ 13 - 15
vendor/github.com/golang/geo/s2/latlng.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 

+ 0 - 155
vendor/github.com/golang/geo/s2/latlng_test.go

@@ -1,155 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/s1"
-)
-
-func TestLatLngNormalized(t *testing.T) {
-	tests := []struct {
-		desc string
-		pos  LatLng
-		want LatLng
-	}{
-		{
-			desc: "Valid lat/lng",
-			pos:  LatLngFromDegrees(21.8275043, 151.1979675),
-			want: LatLngFromDegrees(21.8275043, 151.1979675),
-		},
-		{
-			desc: "Valid lat/lng in the West",
-			pos:  LatLngFromDegrees(21.8275043, -151.1979675),
-			want: LatLngFromDegrees(21.8275043, -151.1979675),
-		},
-		{
-			desc: "Beyond the North pole",
-			pos:  LatLngFromDegrees(95, 151.1979675),
-			want: LatLngFromDegrees(90, 151.1979675),
-		},
-		{
-			desc: "Beyond the South pole",
-			pos:  LatLngFromDegrees(-95, 151.1979675),
-			want: LatLngFromDegrees(-90, 151.1979675),
-		},
-		{
-			desc: "At the date line (from East)",
-			pos:  LatLngFromDegrees(21.8275043, 180),
-			want: LatLngFromDegrees(21.8275043, 180),
-		},
-		{
-			desc: "At the date line (from West)",
-			pos:  LatLngFromDegrees(21.8275043, -180),
-			want: LatLngFromDegrees(21.8275043, -180),
-		},
-		{
-			desc: "Across the date line going East",
-			pos:  LatLngFromDegrees(21.8275043, 181.0012),
-			want: LatLngFromDegrees(21.8275043, -178.9988),
-		},
-		{
-			desc: "Across the date line going West",
-			pos:  LatLngFromDegrees(21.8275043, -181.0012),
-			want: LatLngFromDegrees(21.8275043, 178.9988),
-		},
-		{
-			desc: "All wrong",
-			pos:  LatLngFromDegrees(256, 256),
-			want: LatLngFromDegrees(90, -104),
-		},
-	}
-
-	for _, test := range tests {
-		got := test.pos.Normalized()
-		if !got.IsValid() {
-			t.Errorf("%s: A LatLng should be valid after normalization but isn't: %v", test.desc, got)
-		} else if got.Distance(test.want) > 1e-13*s1.Degree {
-			t.Errorf("%s: %v.Normalized() = %v, want %v", test.desc, test.pos, got, test.want)
-		}
-	}
-}
-
-func TestLatLngString(t *testing.T) {
-	const expected string = "[1.4142136, -2.2360680]"
-	s := LatLngFromDegrees(math.Sqrt2, -math.Sqrt(5)).String()
-	if s != expected {
-		t.Errorf("LatLng{√2, -√5}.String() = %q, want %q", s, expected)
-	}
-}
-
-func TestLatLngPointConversion(t *testing.T) {
-	// All test cases here have been verified against the C++ S2 implementation.
-	tests := []struct {
-		lat, lng float64 // degrees
-		x, y, z  float64
-	}{
-		{0, 0, 1, 0, 0},
-		{90, 0, 6.12323e-17, 0, 1},
-		{-90, 0, 6.12323e-17, 0, -1},
-		{0, 180, -1, 1.22465e-16, 0},
-		{0, -180, -1, -1.22465e-16, 0},
-		{90, 180, -6.12323e-17, 7.4988e-33, 1},
-		{90, -180, -6.12323e-17, -7.4988e-33, 1},
-		{-90, 180, -6.12323e-17, 7.4988e-33, -1},
-		{-90, -180, -6.12323e-17, -7.4988e-33, -1},
-		{-81.82750430354997, 151.19796752929685,
-			-0.12456788151479525, 0.0684875268284729, -0.989844584550441},
-	}
-	for _, test := range tests {
-		ll := LatLngFromDegrees(test.lat, test.lng)
-		p := PointFromLatLng(ll)
-		// TODO(mikeperrow): Port Point.ApproxEquals, then use here.
-		if !float64Eq(p.X, test.x) || !float64Eq(p.Y, test.y) || !float64Eq(p.Z, test.z) {
-			t.Errorf("PointFromLatLng({%v°, %v°}) = %v, want %v, %v, %v",
-				test.lat, test.lng, p, test.x, test.y, test.z)
-		}
-		ll = LatLngFromPoint(p)
-		// We need to be careful here, since if the latitude is +/- 90, any longitude
-		// is now a valid conversion.
-		isPolar := (test.lat == 90 || test.lat == -90)
-		if !float64Eq(ll.Lat.Degrees(), test.lat) ||
-			(!isPolar && (!float64Eq(ll.Lng.Degrees(), test.lng))) {
-			t.Errorf("Converting ll %v,%v to point (%v) and back gave %v.",
-				test.lat, test.lng, p, ll)
-		}
-	}
-}
-
-func TestLatLngDistance(t *testing.T) {
-	// Based on C++ S2LatLng::TestDistance.
-	tests := []struct {
-		lat1, lng1, lat2, lng2 float64
-		want, tolerance        float64
-	}{
-		{90, 0, 90, 0, 0, 0},
-		{-37, 25, -66, -155, 77, 1e-13},
-		{0, 165, 0, -80, 115, 1e-13},
-		{47, -127, -47, 53, 180, 2e-6},
-	}
-	for _, test := range tests {
-		ll1 := LatLngFromDegrees(test.lat1, test.lng1)
-		ll2 := LatLngFromDegrees(test.lat2, test.lng2)
-		d := ll1.Distance(ll2).Degrees()
-		if math.Abs(d-test.want) > test.tolerance {
-			t.Errorf("LatLng{%v, %v}.Distance(LatLng{%v, %v}).Degrees() = %v, want %v",
-				test.lat1, test.lng1, test.lat2, test.lng2, d, test.want)
-		}
-	}
-}

+ 1556 - 49
vendor/github.com/golang/geo/s2/loop.go

@@ -1,22 +1,22 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
+	"fmt"
+	"io"
 	"math"
 	"math"
 
 
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r1"
@@ -32,9 +32,10 @@ import (
 // very large area.
 // very large area.
 //
 //
 // Loops are not allowed to have any duplicate vertices (whether adjacent or
 // Loops are not allowed to have any duplicate vertices (whether adjacent or
-// not), and non-adjacent edges are not allowed to intersect. Loops must have
-// at least 3 vertices (except for the "empty" and "full" loops discussed
-// below).
+// not).  Non-adjacent edges are not allowed to intersect, and furthermore edges
+// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be
+// antipodal). Loops must have at least 3 vertices (except for the "empty" and
+// "full" loops discussed below).
 //
 //
 // There are two special loops: the "empty" loop contains no points and the
 // There are two special loops: the "empty" loop contains no points and the
 // "full" loop contains all points. These loops do not have any edges, but to
 // "full" loop contains all points. These loops do not have any edges, but to
@@ -48,15 +49,23 @@ type Loop struct {
 	// versus computing from the set of vertices every time.
 	// versus computing from the set of vertices every time.
 	originInside bool
 	originInside bool
 
 
+	// depth is the nesting depth of this Loop if it is contained by a Polygon
+	// or other shape and is used to determine if this loop represents a hole
+	// or a filled in portion.
+	depth int
+
 	// bound is a conservative bound on all points contained by this loop.
 	// bound is a conservative bound on all points contained by this loop.
 	// If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
 	// If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
 	bound Rect
 	bound Rect
 
 
-	// Since "bound" is not exact, it is possible that a loop A contains
+	// Since bound is not exact, it is possible that a loop A contains
 	// another loop B whose bounds are slightly larger. subregionBound
 	// another loop B whose bounds are slightly larger. subregionBound
 	// has been expanded sufficiently to account for this error, i.e.
 	// has been expanded sufficiently to account for this error, i.e.
 	// if A.Contains(B), then A.subregionBound.Contains(B.bound).
 	// if A.Contains(B), then A.subregionBound.Contains(B.bound).
 	subregionBound Rect
 	subregionBound Rect
+
+	// index is the spatial index for this Loop.
+	index *ShapeIndex
 }
 }
 
 
 // LoopFromPoints constructs a loop from the given points.
 // LoopFromPoints constructs a loop from the given points.
@@ -93,14 +102,20 @@ func LoopFromCell(c Cell) *Loop {
 	return l
 	return l
 }
 }
 
 
+// These two points are used for the special Empty and Full loops.
+var (
+	emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}}
+	fullLoopPoint  = Point{r3.Vector{X: 0, Y: 0, Z: -1}}
+)
+
 // EmptyLoop returns a special "empty" loop.
 // EmptyLoop returns a special "empty" loop.
 func EmptyLoop() *Loop {
 func EmptyLoop() *Loop {
-	return LoopFromPoints([]Point{{r3.Vector{X: 0, Y: 0, Z: 1}}})
+	return LoopFromPoints([]Point{emptyLoopPoint})
 }
 }
 
 
 // FullLoop returns a special "full" loop.
 // FullLoop returns a special "full" loop.
 func FullLoop() *Loop {
 func FullLoop() *Loop {
-	return LoopFromPoints([]Point{{r3.Vector{X: 0, Y: 0, Z: -1}}})
+	return LoopFromPoints([]Point{fullLoopPoint})
 }
 }
 
 
 // initOriginAndBound sets the origin containment for the given point and then calls
 // initOriginAndBound sets the origin containment for the given point and then calls
@@ -138,14 +153,14 @@ func (l *Loop) initOriginAndBound() {
 		}
 		}
 	}
 	}
 
 
-	// We *must* call initBound before initIndex, because initBound calls
-	// ContainsPoint(s2.Point), and ContainsPoint(s2.Point) does a bounds check whenever the
-	// index is not fresh (i.e., the loop has been added to the index but the
-	// index has not been updated yet).
+	// We *must* call initBound before initializing the index, because
+	// initBound calls ContainsPoint which does a bounds check before using
+	// the index.
 	l.initBound()
 	l.initBound()
 
 
-	// TODO(roberts): Depends on s2shapeindex being implemented.
-	// l.initIndex()
+	// Create a new index and add us to it.
+	l.index = NewShapeIndex()
+	l.index.Add(l)
 }
 }
 
 
 // initBound sets up the approximate bounding Rects for this loop.
 // initBound sets up the approximate bounding Rects for this loop.
@@ -188,11 +203,259 @@ func (l *Loop) initBound() {
 	l.subregionBound = ExpandForSubregions(l.bound)
 	l.subregionBound = ExpandForSubregions(l.bound)
 }
 }
 
 
+// IsValid reports whether this is a valid loop or not.
+func (l *Loop) IsValid() bool {
+	return l.findValidationError() == nil
+}
+
+// findValidationError reports whether this is not a valid loop and if so
+// returns an error describing why. This function requires the Loops ShapeIndex
+// to have been intialized.
+func (l *Loop) findValidationError() error {
+	if err := l.findValidationErrorNoIndex(); err != nil {
+		return err
+	}
+	// Check for intersections between non-adjacent edges (including at vertices)
+	// TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this.
+	// return findAnyCrossing(l.index)
+	return nil
+}
+
+// findValidationErrorNoIndex reports whether this is not a valid loop, but
+// skips checks that would require a ShapeIndex to be built for the loop. This
+// is primarily used by Polygon to do validation so it doesn't trigger the
+// creation of unneeded ShapeIndices.
+func (l *Loop) findValidationErrorNoIndex() error {
+	// All vertices must be unit length.
+	for i, v := range l.vertices {
+		if !v.IsUnit() {
+			return fmt.Errorf("vertex %d is not unit length", i)
+		}
+	}
+
+	// Loops must have at least 3 vertices (except for empty and full).
+	if len(l.vertices) < 3 {
+		if l.isEmptyOrFull() {
+			return nil // Skip remaining tests.
+		}
+		return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices")
+	}
+
+	// Loops are not allowed to have any duplicate vertices or edge crossings.
+	// We split this check into two parts. First we check that no edge is
+	// degenerate (identical endpoints). Then we check that there are no
+	// intersections between non-adjacent edges (including at vertices). The
+	// second check needs the ShapeIndex, so it does not fall within the scope
+	// of this method.
+	for i, v := range l.vertices {
+		if v == l.Vertex(i+1) {
+			return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i)
+		}
+
+		// Antipodal vertices are not allowed.
+		if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other {
+			return fmt.Errorf("vertices %d and %d are antipodal", i,
+				(i+1)%len(l.vertices))
+		}
+	}
+
+	return nil
+}
+
+// Contains reports whether the region contained by this loop is a superset of the
+// region contained by the given other loop.
+func (l *Loop) Contains(o *Loop) bool {
+	// For a loop A to contain the loop B, all of the following must
+	// be true:
+	//
+	//  (1) There are no edge crossings between A and B except at vertices.
+	//
+	//  (2) At every vertex that is shared between A and B, the local edge
+	//      ordering implies that A contains B.
+	//
+	//  (3) If there are no shared vertices, then A must contain a vertex of B
+	//      and B must not contain a vertex of A. (An arbitrary vertex may be
+	//      chosen in each case.)
+	//
+	// The second part of (3) is necessary to detect the case of two loops whose
+	// union is the entire sphere, i.e. two loops that contains each other's
+	// boundaries but not each other's interiors.
+	if !l.subregionBound.Contains(o.bound) {
+		return false
+	}
+
+	// Special cases to handle either loop being empty or full.
+	if l.isEmptyOrFull() || o.isEmptyOrFull() {
+		return l.IsFull() || o.IsEmpty()
+	}
+
+	// Check whether there are any edge crossings, and also check the loop
+	// relationship at any shared vertices.
+	relation := &containsRelation{}
+	if hasCrossingRelation(l, o, relation) {
+		return false
+	}
+
+	// There are no crossings, and if there are any shared vertices then A
+	// contains B locally at each shared vertex.
+	if relation.foundSharedVertex {
+		return true
+	}
+
+	// Since there are no edge intersections or shared vertices, we just need to
+	// test condition (3) above. We can skip this test if we discovered that A
+	// contains at least one point of B while checking for edge crossings.
+	if !l.ContainsPoint(o.Vertex(0)) {
+		return false
+	}
+
+	// We still need to check whether (A union B) is the entire sphere.
+	// Normally this check is very cheap due to the bounding box precondition.
+	if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) &&
+		o.ContainsPoint(l.Vertex(0)) {
+		return false
+	}
+	return true
+}
+
+// Intersects reports whether the region contained by this loop intersects the region
+// contained by the other loop.
+func (l *Loop) Intersects(o *Loop) bool {
+	// Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B).
+	//
+	// This code is similar to Contains, but is optimized for the case
+	// where both loops enclose less than half of the sphere.
+	if !l.bound.Intersects(o.bound) {
+		return false
+	}
+
+	// Check whether there are any edge crossings, and also check the loop
+	// relationship at any shared vertices.
+	relation := &intersectsRelation{}
+	if hasCrossingRelation(l, o, relation) {
+		return true
+	}
+	if relation.foundSharedVertex {
+		return false
+	}
+
+	// Since there are no edge intersections or shared vertices, the loops
+	// intersect only if A contains B, B contains A, or the two loops contain
+	// each other's boundaries.  These checks are usually cheap because of the
+	// bounding box preconditions.  Note that neither loop is empty (because of
+	// the bounding box check above), so it is safe to access vertex(0).
+
+	// Check whether A contains B, or A and B contain each other's boundaries.
+	// (Note that A contains all the vertices of B in either case.)
+	if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() {
+		if l.ContainsPoint(o.Vertex(0)) {
+			return true
+		}
+	}
+	// Check whether B contains A.
+	if o.subregionBound.Contains(l.bound) {
+		if o.ContainsPoint(l.Vertex(0)) {
+			return true
+		}
+	}
+	return false
+}
+
+// BoundaryEqual reports whether the two loops have the same boundary. This is
+// true if and only if the loops have the same vertices in the same cyclic order
+// (i.e., the vertices may be cyclically rotated). The empty and full loops are
+// considered to have different boundaries.
+func (l *Loop) BoundaryEqual(o *Loop) bool {
+	if len(l.vertices) != len(o.vertices) {
+		return false
+	}
+
+	// Special case to handle empty or full loops.  Since they have the same
+	// number of vertices, if one loop is empty/full then so is the other.
+	if l.isEmptyOrFull() {
+		return l.IsEmpty() == o.IsEmpty()
+	}
+
+	// Loop through the vertices to find the first of ours that matches the
+	// starting vertex of the other loop. Use that offset to then 'align' the
+	// vertices for comparison.
+	for offset, vertex := range l.vertices {
+		if vertex == o.Vertex(0) {
+			// There is at most one starting offset since loop vertices are unique.
+			for i := 0; i < len(l.vertices); i++ {
+				if l.Vertex(i+offset) != o.Vertex(i) {
+					return false
+				}
+			}
+			return true
+		}
+	}
+	return false
+}
+
+// compareBoundary returns +1 if this loop contains the boundary of the other loop,
+// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two
+// loops cross. Shared edges are handled as follows:
+//
+//   If XY is a shared edge, define Reversed(XY) to be true if XY
+//     appears in opposite directions in both loops.
+//   Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole.
+//   (Intuitively, this checks whether this loop contains a vanishingly small region
+//   extending from the boundary of the other toward the interior of the polygon to
+//   which the other belongs.)
+//
+// This function is used for testing containment and intersection of
+// multi-loop polygons. Note that this method is not symmetric, since the
+// result depends on the direction of this loop but not on the direction of
+// the other loop (in the absence of shared edges).
+//
+// This requires that neither loop is empty, and if other loop IsFull, then it must not
+// be a hole.
+func (l *Loop) compareBoundary(o *Loop) int {
+	// The bounds must intersect for containment or crossing.
+	if !l.bound.Intersects(o.bound) {
+		return -1
+	}
+
+	// Full loops are handled as though the loop surrounded the entire sphere.
+	if l.IsFull() {
+		return 1
+	}
+	if o.IsFull() {
+		return -1
+	}
+
+	// Check whether there are any edge crossings, and also check the loop
+	// relationship at any shared vertices.
+	relation := newCompareBoundaryRelation(o.IsHole())
+	if hasCrossingRelation(l, o, relation) {
+		return 0
+	}
+	if relation.foundSharedVertex {
+		if relation.containsEdge {
+			return 1
+		}
+		return -1
+	}
+
+	// There are no edge intersections or shared vertices, so we can check
+	// whether A contains an arbitrary vertex of B.
+	if l.ContainsPoint(o.Vertex(0)) {
+		return 1
+	}
+	return -1
+}
+
 // ContainsOrigin reports true if this loop contains s2.OriginPoint().
 // ContainsOrigin reports true if this loop contains s2.OriginPoint().
 func (l *Loop) ContainsOrigin() bool {
 func (l *Loop) ContainsOrigin() bool {
 	return l.originInside
 	return l.originInside
 }
 }
 
 
+// ReferencePoint returns the reference point for this loop.
+func (l *Loop) ReferencePoint() ReferencePoint {
+	return OriginReferencePoint(l.originInside)
+}
+
 // HasInterior returns true because all loops have an interior.
 // HasInterior returns true because all loops have an interior.
 func (l *Loop) HasInterior() bool {
 func (l *Loop) HasInterior() bool {
 	return true
 	return true
@@ -207,30 +470,37 @@ func (l *Loop) NumEdges() int {
 }
 }
 
 
 // Edge returns the endpoints for the given edge index.
 // Edge returns the endpoints for the given edge index.
-func (l *Loop) Edge(i int) (a, b Point) {
-	return l.Vertex(i), l.Vertex(i + 1)
+func (l *Loop) Edge(i int) Edge {
+	return Edge{l.Vertex(i), l.Vertex(i + 1)}
 }
 }
 
 
-// dimension returns the dimension of the geometry represented by this Loop.
-func (l *Loop) dimension() dimension { return polygonGeometry }
-
-// numChains reports the number of contiguous edge chains in the Loop.
-func (l *Loop) numChains() int {
+// NumChains reports the number of contiguous edge chains in the Loop.
+func (l *Loop) NumChains() int {
 	if l.isEmptyOrFull() {
 	if l.isEmptyOrFull() {
 		return 0
 		return 0
 	}
 	}
 	return 1
 	return 1
 }
 }
 
 
-// chainStart returns the id of the first edge in the i-th edge chain in this Loop.
-func (l *Loop) chainStart(i int) int {
-	if i == 0 {
-		return 0
-	}
+// Chain returns the i-th edge chain in the Shape.
+func (l *Loop) Chain(chainID int) Chain {
+	return Chain{0, l.NumEdges()}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge chain.
+func (l *Loop) ChainEdge(chainID, offset int) Edge {
+	return Edge{l.Vertex(offset), l.Vertex(offset + 1)}
+}
 
 
-	return l.NumEdges()
+// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the
+// j-th edge of the Loop.
+func (l *Loop) ChainPosition(edgeID int) ChainPosition {
+	return ChainPosition{0, edgeID}
 }
 }
 
 
+// dimension returns the dimension of the geometry represented by this Loop.
+func (l *Loop) dimension() dimension { return polygonGeometry }
+
 // IsEmpty reports true if this is the special "empty" loop that contains no points.
 // IsEmpty reports true if this is the special "empty" loop that contains no points.
 func (l *Loop) IsEmpty() bool {
 func (l *Loop) IsEmpty() bool {
 	return l.isEmptyOrFull() && !l.ContainsOrigin()
 	return l.isEmptyOrFull() && !l.ContainsOrigin()
@@ -246,6 +516,11 @@ func (l *Loop) isEmptyOrFull() bool {
 	return len(l.vertices) == 1
 	return len(l.vertices) == 1
 }
 }
 
 
+// Vertices returns the vertices in the loop.
+func (l *Loop) Vertices() []Point {
+	return l.vertices
+}
+
 // RectBound returns a tight bounding rectangle. If the loop contains the point,
 // RectBound returns a tight bounding rectangle. If the loop contains the point,
 // the bound also contains it.
 // the bound also contains it.
 func (l *Loop) RectBound() Rect {
 func (l *Loop) RectBound() Rect {
@@ -266,25 +541,169 @@ func (l *Loop) Vertex(i int) Point {
 	return l.vertices[i%len(l.vertices)]
 	return l.vertices[i%len(l.vertices)]
 }
 }
 
 
-// Vertices returns the vertices in the loop.
-func (l *Loop) Vertices() []Point {
-	return l.vertices
+// OrientedVertex returns the vertex in reverse order if the loop represents a polygon
+// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where
+// n == len(vertices). This ensures that the interior of the polygon is always to
+// the left of the vertex chain.
+//
+// This requires: 0 <= i < 2 * len(vertices)
+func (l *Loop) OrientedVertex(i int) Point {
+	j := i - len(l.vertices)
+	if j < 0 {
+		j = i
+	}
+	if l.IsHole() {
+		j = len(l.vertices) - 1 - j
+	}
+	return l.Vertex(i)
+}
+
+// NumVertices returns the number of vertices in this loop.
+func (l *Loop) NumVertices() int {
+	return len(l.vertices)
+}
+
+// bruteForceContainsPoint reports if the given point is contained by this loop.
+// This method does not use the ShapeIndex, so it is only preferable below a certain
+// size of loop.
+func (l *Loop) bruteForceContainsPoint(p Point) bool {
+	origin := OriginPoint()
+	inside := l.originInside
+	crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0))
+	for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice
+		inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i))
+	}
+	return inside
 }
 }
 
 
 // ContainsPoint returns true if the loop contains the point.
 // ContainsPoint returns true if the loop contains the point.
 func (l *Loop) ContainsPoint(p Point) bool {
 func (l *Loop) ContainsPoint(p Point) bool {
-	// TODO(sbeckman): Move to bruteForceContains and update with ShapeIndex when available.
 	// Empty and full loops don't need a special case, but invalid loops with
 	// Empty and full loops don't need a special case, but invalid loops with
 	// zero vertices do, so we might as well handle them all at once.
 	// zero vertices do, so we might as well handle them all at once.
 	if len(l.vertices) < 3 {
 	if len(l.vertices) < 3 {
 		return l.originInside
 		return l.originInside
 	}
 	}
 
 
-	origin := OriginPoint()
-	inside := l.originInside
-	crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0))
-	for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice
-		inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i))
+	// For small loops, and during initial construction, it is faster to just
+	// check all the crossing.
+	const maxBruteForceVertices = 32
+	if len(l.vertices) < maxBruteForceVertices || l.index == nil {
+		return l.bruteForceContainsPoint(p)
+	}
+
+	// Otherwise, look up the point in the index.
+	it := l.index.Iterator()
+	if !it.LocatePoint(p) {
+		return false
+	}
+	return l.iteratorContainsPoint(it, p)
+}
+
+// ContainsCell reports whether the given Cell is contained by this Loop.
+func (l *Loop) ContainsCell(target Cell) bool {
+	it := l.index.Iterator()
+	relation := it.LocateCellID(target.ID())
+
+	// If "target" is disjoint from all index cells, it is not contained.
+	// Similarly, if "target" is subdivided into one or more index cells then it
+	// is not contained, since index cells are subdivided only if they (nearly)
+	// intersect a sufficient number of edges.  (But note that if "target" itself
+	// is an index cell then it may be contained, since it could be a cell with
+	// no edges in the loop interior.)
+	if relation != Indexed {
+		return false
+	}
+
+	// Otherwise check if any edges intersect "target".
+	if l.boundaryApproxIntersects(it, target) {
+		return false
+	}
+
+	// Otherwise check if the loop contains the center of "target".
+	return l.iteratorContainsPoint(it, target.Center())
+}
+
+// IntersectsCell reports whether this Loop intersects the given cell.
+func (l *Loop) IntersectsCell(target Cell) bool {
+	it := l.index.Iterator()
+	relation := it.LocateCellID(target.ID())
+
+	// If target does not overlap any index cell, there is no intersection.
+	if relation == Disjoint {
+		return false
+	}
+	// If target is subdivided into one or more index cells, there is an
+	// intersection to within the ShapeIndex error bound (see Contains).
+	if relation == Subdivided {
+		return true
+	}
+	// If target is an index cell, there is an intersection because index cells
+	// are created only if they have at least one edge or they are entirely
+	// contained by the loop.
+	if it.CellID() == target.id {
+		return true
+	}
+	// Otherwise check if any edges intersect target.
+	if l.boundaryApproxIntersects(it, target) {
+		return true
+	}
+	// Otherwise check if the loop contains the center of target.
+	return l.iteratorContainsPoint(it, target.Center())
+}
+
+// CellUnionBound computes a covering of the Loop.
+func (l *Loop) CellUnionBound() []CellID {
+	return l.CapBound().CellUnionBound()
+}
+
+// boundaryApproxIntersects reports if the loop's boundary intersects target.
+// It may also return true when the loop boundary does not intersect target but
+// some edge comes within the worst-case error tolerance.
+//
+// This requires that it.Locate(target) returned Indexed.
+func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool {
+	aClipped := it.IndexCell().findByShapeID(0)
+
+	// If there are no edges, there is no intersection.
+	if len(aClipped.edges) == 0 {
+		return false
+	}
+
+	// We can save some work if target is the index cell itself.
+	if it.CellID() == target.ID() {
+		return true
+	}
+
+	// Otherwise check whether any of the edges intersect target.
+	maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist)
+	bound := target.BoundUV().ExpandedByMargin(maxError)
+	for _, ai := range aClipped.edges {
+		v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError)
+		if ok && edgeIntersectsRect(v0, v1, bound) {
+			return true
+		}
+	}
+	return false
+}
+
+// iteratorContainsPoint reports if the iterator that is positioned at the ShapeIndexCell
+// that may contain p, contains the point p.
+func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool {
+	// Test containment by drawing a line segment from the cell center to the
+	// given point and counting edge crossings.
+	aClipped := it.IndexCell().findByShapeID(0)
+	inside := aClipped.containsCenter
+	if len(aClipped.edges) > 0 {
+		center := it.Center()
+		crosser := NewEdgeCrosser(center, p)
+		aiPrev := -2
+		for _, ai := range aClipped.edges {
+			if ai != aiPrev+1 {
+				crosser.RestartAt(l.Vertex(ai))
+			}
+			aiPrev = ai
+			inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1))
+		}
 	}
 	}
 	return inside
 	return inside
 }
 }
@@ -300,3 +719,1091 @@ func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop {
 func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop {
 func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop {
 	return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices))
 	return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices))
 }
 }
+
+// CanonicalFirstVertex returns a first index and a direction (either +1 or -1)
+// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does
+// not change when the loop vertex order is rotated or inverted. This allows the
+// loop vertices to be traversed in a canonical order. The return values are
+// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as
+// expected by the Vertex method.
+func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) {
+	firstIdx = 0
+	n := len(l.vertices)
+	for i := 1; i < n; i++ {
+		if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 {
+			firstIdx = i
+		}
+	}
+
+	// 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1.
+	if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 {
+		return firstIdx, 1
+	}
+
+	// n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0.
+	firstIdx += n
+	return firstIdx, -1
+}
+
// TurningAngle returns the sum of the turning angles at each vertex. The return
// value is positive if the loop is counter-clockwise, negative if the loop is
// clockwise, and zero if the loop is a great circle. Degenerate and
// nearly-degenerate loops are handled consistently with Sign. So for example,
// if a loop has zero area (i.e., it is a very small CCW loop) then the turning
// angle will always be negative.
//
// This quantity is also called the "geodesic curvature" of the loop.
func (l *Loop) TurningAngle() float64 {
	// For empty and full loops, we return the limit value as the loop area
	// approaches 0 or 4*Pi respectively.
	if l.isEmptyOrFull() {
		if l.ContainsOrigin() {
			return -2 * math.Pi
		}
		return 2 * math.Pi
	}

	// Don't crash even if the loop is not well-defined.
	if len(l.vertices) < 3 {
		return 0
	}

	// To ensure that we get the same result when the vertex order is rotated,
	// and that the result is negated when the vertex order is reversed, we need
	// to add up the individual turn angles in a consistent order. (In general,
	// adding up a set of numbers in a different order can change the sum due to
	// rounding errors.)
	//
	// Furthermore, if we just accumulate an ordinary sum then the worst-case
	// error is quadratic in the number of vertices. (This can happen with
	// spiral shapes, where the partial sum of the turning angles can be linear
	// in the number of vertices.) To avoid this we use the Kahan summation
	// algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm).
	n := len(l.vertices)
	i, dir := l.CanonicalFirstVertex()
	sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n))

	// compensation is the Kahan "c" term: the low-order bits lost from sum
	// on each addition, folded back into the next term.
	compensation := s1.Angle(0)
	for n-1 > 0 {
		i += dir
		angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir))
		oldSum := sum
		angle += compensation
		sum += angle
		// Algebraically zero; in floating point it recovers the rounding
		// error of the addition above.
		compensation = (oldSum - sum) + angle
		n--
	}
	return float64(dir) * float64(sum+compensation)
}
+
+// turningAngleMaxError return the maximum error in TurningAngle. The value is not
+// constant; it depends on the loop.
+func (l *Loop) turningAngleMaxError() float64 {
+	// The maximum error can be bounded as follows:
+	//   2.24 * dblEpsilon    for RobustCrossProd(b, a)
+	//   2.24 * dblEpsilon    for RobustCrossProd(c, b)
+	//   3.25 * dblEpsilon    for Angle()
+	//   2.00 * dblEpsilon    for each addition in the Kahan summation
+	//   ------------------
+	//   9.73 * dblEpsilon
+	maxErrorPerVertex := 9.73 * dblEpsilon
+	return maxErrorPerVertex * float64(len(l.vertices))
+}
+
+// IsHole reports whether this loop represents a hole in its containing polygon.
+func (l *Loop) IsHole() bool { return l.depth&1 != 0 }
+
+// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise.
+func (l *Loop) Sign() int {
+	if l.IsHole() {
+		return -1
+	}
+	return 1
+}
+
+// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are
+// handled consistently with Sign, i.e., if a loop can be
+// expressed as the union of degenerate or nearly-degenerate CCW triangles,
+// then it will always be considered normalized.
+func (l *Loop) IsNormalized() bool {
+	// Optimization: if the longitude span is less than 180 degrees, then the
+	// loop covers less than half the sphere and is therefore normalized.
+	if l.bound.Lng.Length() < math.Pi {
+		return true
+	}
+
+	// We allow some error so that hemispheres are always considered normalized.
+	// TODO(roberts): This is no longer required by the Polygon implementation,
+	// so alternatively we could create the invariant that a loop is normalized
+	// if and only if its complement is not normalized.
+	return l.TurningAngle() >= -l.turningAngleMaxError()
+}
+
+// Normalize inverts the loop if necessary so that the area enclosed by the loop
+// is at most 2*pi.
+func (l *Loop) Normalize() {
+	if !l.IsNormalized() {
+		l.Invert()
+	}
+}
+
+// Invert reverses the order of the loop vertices, effectively complementing the
+// region represented by the loop. For example, the loop ABCD (with edges
+// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD).
+// Notice that the last edge is the same in both cases except that its
+// direction has been reversed.
+func (l *Loop) Invert() {
+	l.index.Reset()
+	if l.isEmptyOrFull() {
+		if l.IsFull() {
+			l.vertices[0] = emptyLoopPoint
+		} else {
+			l.vertices[0] = fullLoopPoint
+		}
+	} else {
+		// For non-special loops, reverse the slice of vertices.
+		for i := len(l.vertices)/2 - 1; i >= 0; i-- {
+			opp := len(l.vertices) - 1 - i
+			l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i]
+		}
+	}
+
+	// originInside must be set correctly before building the ShapeIndex.
+	l.originInside = l.originInside != true
+	if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 {
+		// The complement of this loop contains both poles.
+		l.bound = FullRect()
+		l.subregionBound = l.bound
+	} else {
+		l.initBound()
+	}
+	l.index.Add(l)
+}
+
+// findVertex returns the index of the vertex at the given Point in the range
+// 1..numVertices, and a boolean indicating if a vertex was found.
+func (l *Loop) findVertex(p Point) (index int, ok bool) {
+	const notFound = 0
+	if len(l.vertices) < 10 {
+		// Exhaustive search for loops below a small threshold.
+		for i := 1; i <= len(l.vertices); i++ {
+			if l.Vertex(i) == p {
+				return i, true
+			}
+		}
+		return notFound, false
+	}
+
+	it := l.index.Iterator()
+	if !it.LocatePoint(p) {
+		return notFound, false
+	}
+
+	aClipped := it.IndexCell().findByShapeID(0)
+	for i := aClipped.numEdges() - 1; i >= 0; i-- {
+		ai := aClipped.edges[i]
+		if l.Vertex(ai) == p {
+			if ai == 0 {
+				return len(l.vertices), true
+			}
+			return ai, true
+		}
+
+		if l.Vertex(ai+1) == p {
+			return ai + 1, true
+		}
+	}
+	return notFound, false
+}
+
+// ContainsNested reports whether the given loops is contained within this loop.
+// This function does not test for edge intersections. The two loops must meet
+// all of the Polygon requirements; for example this implies that their
+// boundaries may not cross or have any shared edges (although they may have
+// shared vertices).
+func (l *Loop) ContainsNested(other *Loop) bool {
+	if !l.subregionBound.Contains(other.bound) {
+		return false
+	}
+
+	// Special cases to handle either loop being empty or full.  Also bail out
+	// when B has no vertices to avoid heap overflow on the vertex(1) call
+	// below.  (This method is called during polygon initialization before the
+	// client has an opportunity to call IsValid().)
+	if l.isEmptyOrFull() || other.NumVertices() < 2 {
+		return l.IsFull() || other.IsEmpty()
+	}
+
+	// We are given that A and B do not share any edges, and that either one
+	// loop contains the other or they do not intersect.
+	m, ok := l.findVertex(other.Vertex(1))
+	if !ok {
+		// Since other.vertex(1) is not shared, we can check whether A contains it.
+		return l.ContainsPoint(other.Vertex(1))
+	}
+
+	// Check whether the edge order around other.Vertex(1) is compatible with
+	// A containing B.
+	return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2))
+}
+
// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x)
// over the loop interior, given a function f(A,B,C) that returns the
// corresponding integral over the spherical triangle ABC. Here "oriented
// surface integral" means:
//
// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise,
//     and the integral of -f if ABC is clockwise.
//
// (2) The result of this function is *either* the integral of f over the
//     loop interior, or the integral of (-f) over the loop exterior.
//
// Note that there are at least two common situations where it easy to work
// around property (2) above:
//
//  - If the integral of f over the entire sphere is zero, then it doesn't
//    matter which case is returned because they are always equal.
//
//  - If f is non-negative, then it is easy to detect when the integral over
//    the loop exterior has been returned, and the integral over the loop
//    interior can be obtained by adding the integral of f over the entire
//    unit sphere (a constant) to the result.
//
// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well.
func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 {
	// We sum f over a collection T of oriented triangles, possibly
	// overlapping. Let the sign of a triangle be +1 if it is CCW and -1
	// otherwise, and let the sign of a point x be the sum of the signs of the
	// triangles containing x. Then the collection of triangles T is chosen
	// such that either:
	//
	//  (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or
	//  (2) Each point in the loop exterior has sign -1, and sign 0 otherwise.
	//
	// The triangles basically consist of a fan from vertex 0 to every loop
	// edge that does not include vertex 0. These triangles will always satisfy
	// either (1) or (2). However, what makes this a bit tricky is that
	// spherical edges become numerically unstable as their length approaches
	// 180 degrees. Of course there is not much we can do if the loop itself
	// contains such edges, but we would like to make sure that all the triangle
	// edges under our control (i.e., the non-loop edges) are stable. For
	// example, consider a loop around the equator consisting of four equally
	// spaced points. This is a well-defined loop, but we cannot just split it
	// into two triangles by connecting vertex 0 to vertex 2.
	//
	// We handle this type of situation by moving the origin of the triangle fan
	// whenever we are about to create an unstable edge. We choose a new
	// location for the origin such that all relevant edges are stable. We also
	// create extra triangles with the appropriate orientation so that the sum
	// of the triangle signs is still correct at every point.

	// The maximum length of an edge for it to be considered numerically stable.
	// The exact value is fairly arbitrary since it depends on the stability of
	// the function f. The value below is quite conservative but could be
	// reduced further if desired.
	const maxLength = math.Pi - 1e-5

	// sum accumulates the oriented integral; origin is the current apex of
	// the triangle fan.
	var sum float64
	origin := l.Vertex(0)
	for i := 1; i+1 < len(l.vertices); i++ {
		// Let V_i be vertex(i), let O be the current origin, and let length(A,B)
		// be the length of edge (A,B). At the start of each loop iteration, the
		// "leading edge" of the triangle fan is (O,V_i), and we want to extend
		// the triangle fan so that the leading edge is (O,V_i+1).
		//
		// Invariants:
		//  1. length(O,V_i) < maxLength for all (i > 1).
		//  2. Either O == V_0, or O is approximately perpendicular to V_0.
		//  3. "sum" is the oriented integral of f over the area defined by
		//     (O, V_0, V_1, ..., V_i).
		if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
			// We are about to create an unstable edge, so choose a new origin O'
			// for the triangle fan.
			oldOrigin := origin
			if origin == l.Vertex(0) {
				// The following point is well-separated from V_i and V_0 (and
				// therefore V_i+1 as well).
				origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
			} else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
				// All edges of the triangle (O, V_0, V_i) are stable, so we can
				// revert to using V_0 as the origin.
				origin = l.Vertex(0)
			} else {
				// (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are
				// perpendicular. Therefore V_0.CrossProd(O) is approximately
				// perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose
				// this point O' as the new origin.
				origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}

				// Advance the edge (V_0,O) to (V_0,O').
				sum += f(l.Vertex(0), oldOrigin, origin)
			}
			// Advance the edge (O,V_i) to (O',V_i).
			sum += f(oldOrigin, l.Vertex(i), origin)
		}
		// Advance the edge (O,V_i) to (O,V_i+1).
		sum += f(origin, l.Vertex(i), l.Vertex(i+1))
	}
	// If the origin is not V_0, we need to sum one more triangle.
	if origin != l.Vertex(0) {
		// Advance the edge (O,V_n-1) to (O,V_0).
		sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0))
	}
	return sum
}
+
// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points;
// see that method for commentary. The C++ version uses a templated method.
// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well.
func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point {
	// Same stability threshold as surfaceIntegralFloat64.
	const maxLength = math.Pi - 1e-5
	var sum r3.Vector

	// origin is the current apex of the triangle fan; see the float64
	// variant for the invariants maintained here.
	origin := l.Vertex(0)
	for i := 1; i+1 < len(l.vertices); i++ {
		if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
			// About to create an unstable edge: move the fan origin.
			oldOrigin := origin
			if origin == l.Vertex(0) {
				origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
			} else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
				origin = l.Vertex(0)
			} else {
				origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}
				sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector)
			}
			sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector)
		}
		sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector)
	}
	// If the origin moved off V_0, close the fan with one more triangle.
	if origin != l.Vertex(0) {
		sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector)
	}
	return Point{sum}
}
+
// Area returns the area of the loop interior, i.e. the region on the left side of
// the loop. The return value is between 0 and 4*pi. (Note that the return
// value is not affected by whether this loop is a "hole" or a "shell".)
func (l *Loop) Area() float64 {
	// It is surprisingly difficult to compute the area of a loop robustly. The
	// main issues are (1) whether degenerate loops are considered to be CCW or
	// not (i.e., whether their area is close to 0 or 4*pi), and (2) computing
	// the areas of small loops with good relative accuracy.
	//
	// With respect to degeneracies, we would like Area to be consistent
	// with ContainsPoint in that loops that contain many points
	// should have large areas, and loops that contain few points should have
	// small areas. For example, if a degenerate triangle is considered CCW
	// according to s2predicates Sign, then it will contain very few points and
	// its area should be approximately zero. On the other hand if it is
	// considered clockwise, then it will contain virtually all points and so
	// its area should be approximately 4*pi.
	//
	// More precisely, let U be the set of Points for which IsUnitLength
	// is true, let P(U) be the projection of those points onto the mathematical
	// unit sphere, and let V(P(U)) be the Voronoi diagram of the projected
	// points. Then for every loop x, we would like Area to approximately
	// equal the sum of the areas of the Voronoi regions of the points p for
	// which x.ContainsPoint(p) is true.
	//
	// The second issue is that we want to compute the area of small loops
	// accurately. This requires having good relative precision rather than
	// good absolute precision. For example, if the area of a loop is 1e-12 and
	// the error is 1e-15, then the area only has 3 digits of accuracy. (For
	// reference, 1e-12 is about 40 square meters on the surface of the earth.)
	// We would like to have good relative accuracy even for small loops.
	//
	// To achieve these goals, we combine two different methods of computing the
	// area. This first method is based on the Gauss-Bonnet theorem, which says
	// that the area enclosed by the loop equals 2*pi minus the total geodesic
	// curvature of the loop (i.e., the sum of the "turning angles" at all the
	// loop vertices). The big advantage of this method is that as long as we
	// use Sign to compute the turning angle at each vertex, then
	// degeneracies are always handled correctly. In other words, if a
	// degenerate loop is CCW according to the symbolic perturbations used by
	// Sign, then its turning angle will be approximately 2*pi.
	//
	// The disadvantage of the Gauss-Bonnet method is that its absolute error is
	// about 2e-15 times the number of vertices (see turningAngleMaxError).
	// So, it cannot compute the area of small loops accurately.
	//
	// The second method is based on splitting the loop into triangles and
	// summing the area of each triangle. To avoid the difficulty and expense
	// of decomposing the loop into a union of non-overlapping triangles,
	// instead we compute a signed sum over triangles that may overlap (see the
	// comments for surfaceIntegral). The advantage of this method
	// is that the area of each triangle can be computed with much better
	// relative accuracy (using l'Huilier's theorem). The disadvantage is that
	// the result is a signed area: CCW loops may yield a small positive value,
	// while CW loops may yield a small negative value (which is converted to a
	// positive area by adding 4*pi). This means that small errors in computing
	// the signed area may translate into a very large error in the result (if
	// the sign of the sum is incorrect).
	//
	// So, our strategy is to combine these two methods as follows. First we
	// compute the area using the "signed sum over triangles" approach (since it
	// is generally more accurate). We also estimate the maximum error in this
	// result. If the signed area is too close to zero (i.e., zero is within
	// the error bounds), then we double-check the sign of the result using the
	// Gauss-Bonnet method. (In fact we just call IsNormalized, which is
	// based on this method.) If the two methods disagree, we return either 0
	// or 4*pi based on the result of IsNormalized. Otherwise we return the
	// area that we computed originally.
	if l.isEmptyOrFull() {
		if l.ContainsOrigin() {
			return 4 * math.Pi
		}
		return 0
	}
	area := l.surfaceIntegralFloat64(SignedArea)

	// TODO(roberts): This error estimate is very approximate. There are two
	// issues: (1) SignedArea needs some improvements to ensure that its error
	// is actually never higher than GirardArea, and (2) although the number of
	// triangles in the sum is typically N-2, in theory it could be as high as
	// 2*N for pathological inputs. But in other respects this error bound is
	// very conservative since it assumes that the maximum error is achieved on
	// every triangle.
	maxError := l.turningAngleMaxError()

	// The signed area should be between approximately -4*pi and 4*pi.
	if area < 0 {
		// We have computed the negative of the area of the loop exterior.
		area += 4 * math.Pi
	}

	// Clamp the result to the valid range [0, 4*pi].
	if area > 4*math.Pi {
		area = 4 * math.Pi
	}
	if area < 0 {
		area = 0
	}

	// If the area is close enough to zero or 4*pi so that the loop orientation
	// is ambiguous, then we compute the loop orientation explicitly.
	if area < maxError && !l.IsNormalized() {
		return 4 * math.Pi
	} else if area > (4*math.Pi-maxError) && l.IsNormalized() {
		return 0
	}

	return area
}
+
// Centroid returns the true centroid of the loop multiplied by the area of the
// loop. The result is not unit length, so you may want to normalize it. Also
// note that in general, the centroid may not be contained by the loop.
//
// We prescale by the loop area for two reasons: (1) it is cheaper to
// compute this way, and (2) it makes it easier to compute the centroid of
// more complicated shapes (by splitting them into disjoint regions and
// adding their centroids).
//
// Note that the return value is not affected by whether this loop is a
// "hole" or a "shell".
func (l *Loop) Centroid() Point {
	// surfaceIntegralPoint() returns either the integral of position over loop
	// interior, or the negative of the integral of position over the loop
	// exterior. But these two values are the same (!), because the integral of
	// position over the entire sphere is (0, 0, 0).
	// See surfaceIntegralPoint for the fan-integration details.
	return l.surfaceIntegralPoint(TrueCentroid)
}
+
+// Encode encodes the Loop.
+func (l Loop) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	l.encode(e)
+	return e.err
+}
+
// encode writes the loop to e in the versioned binary format: version byte,
// vertex count, raw X/Y/Z float64s per vertex, originInside flag, depth,
// then the bound. Errors accumulate on e; keep the write order in sync
// with decode.
func (l Loop) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	e.writeUint32(uint32(len(l.vertices)))
	for _, v := range l.vertices {
		e.writeFloat64(v.X)
		e.writeFloat64(v.Y)
		e.writeFloat64(v.Z)
	}

	e.writeBool(l.originInside)
	e.writeInt32(int32(l.depth))

	// Encode the bound.
	l.bound.encode(e)
}
+
+// Decode decodes a loop.
+func (l *Loop) Decode(r io.Reader) error {
+	*l = Loop{}
+	d := &decoder{r: asByteReader(r)}
+	l.decode(d)
+	return d.err
+}
+
+func (l *Loop) decode(d *decoder) {
+	version := int8(d.readUint8())
+	if d.err != nil {
+		return
+	}
+	if version != encodingVersion {
+		d.err = fmt.Errorf("cannot decode version %d", version)
+		return
+	}
+
+	// Empty loops are explicitly allowed here: a newly created loop has zero vertices
+	// and such loops encode and decode properly.
+	nvertices := d.readUint32()
+	if nvertices > maxEncodedVertices {
+		if d.err == nil {
+			d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
+
+		}
+		return
+	}
+	l.vertices = make([]Point, nvertices)
+	for i := range l.vertices {
+		l.vertices[i].X = d.readFloat64()
+		l.vertices[i].Y = d.readFloat64()
+		l.vertices[i].Z = d.readFloat64()
+	}
+	l.originInside = d.readBool()
+	l.depth = int(d.readUint32())
+	l.bound.decode(d)
+	l.subregionBound = ExpandForSubregions(l.bound)
+
+	l.index = NewShapeIndex()
+	l.index.Add(l)
+}
+
// Bitmasks to read from properties.
const (
	originInside = 1 << iota // bit set when the loop contains the reference origin
	boundEncoded             // bit set when an encoded bound follows the properties
)
+
+func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi {
+	ret := make([]xyzFaceSiTi, len(l.vertices))
+	for i, v := range l.vertices {
+		ret[i].xyz = v
+		ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v)
+	}
+	return ret
+}
+
// encodeCompressed writes the loop to e in compressed form at the given snap
// level. vertices is the precomputed face/si/ti representation of l.vertices
// and must correspond to it one-to-one. Wire order: vertex count, compressed
// points, properties, depth, then (optionally) the bound.
func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
	if len(l.vertices) != len(vertices) {
		panic("encodeCompressed: vertices must be the same length as l.vertices")
	}
	if len(vertices) > maxEncodedVertices {
		if e.err == nil {
			e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices)
		}
		return
	}
	e.writeUvarint(uint64(len(vertices)))
	encodePointsCompressed(e, vertices, snapLevel)

	// The boundEncoded property bit tells the decoder whether a bound follows.
	props := l.compressedEncodingProperties()
	e.writeUvarint(props)
	e.writeUvarint(uint64(l.depth))
	if props&boundEncoded != 0 {
		l.bound.encode(e)
	}
}
+
+func (l *Loop) compressedEncodingProperties() uint64 {
+	var properties uint64
+	if l.originInside {
+		properties |= originInside
+	}
+
+	// Write whether there is a bound so we can change the threshold later.
+	// Recomputing the bound multiplies the decode time taken per vertex
+	// by a factor of about 3.5.  Without recomputing the bound, decode
+	// takes approximately 125 ns / vertex.  A loop with 63 vertices
+	// encoded without the bound will take ~30us to decode, which is
+	// acceptable.  At ~3.5 bytes / vertex without the bound, adding
+	// the bound will increase the size by <15%, which is also acceptable.
+	const minVerticesForBound = 64
+	if len(l.vertices) >= minVerticesForBound {
+		properties |= boundEncoded
+	}
+
+	return properties
+}
+
// decodeCompressed reads a loop written by encodeCompressed at the given snap
// level, accumulating any error on d. The read order must stay in sync with
// encodeCompressed.
func (l *Loop) decodeCompressed(d *decoder, snapLevel int) {
	nvertices := d.readUvarint()
	if d.err != nil {
		return
	}
	if nvertices > maxEncodedVertices {
		d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
		return
	}
	l.vertices = make([]Point, nvertices)
	decodePointsCompressed(d, snapLevel, l.vertices)
	properties := d.readUvarint()

	// Make sure values are valid before using.
	if d.err != nil {
		return
	}

	l.originInside = (properties & originInside) != 0

	l.depth = int(d.readUvarint())

	// The bound is only present when the boundEncoded bit was written;
	// otherwise it must be recomputed from the vertices.
	if (properties & boundEncoded) != 0 {
		l.bound.decode(d)
		if d.err != nil {
			return
		}
		l.subregionBound = ExpandForSubregions(l.bound)
	} else {
		l.initBound()
	}

	// Rebuild the spatial index over the decoded vertices.
	l.index = NewShapeIndex()
	l.index.Add(l)
}
+
// crossingTarget is an enum representing the possible crossing target cases for relations.
// See the loopRelation interface for how targets drive the early-exit test.
type crossingTarget int

const (
	// crossingTargetDontCare means the relation has no early-exit test for this loop.
	crossingTargetDontCare crossingTarget = iota
	// crossingTargetDontCross matches points the loop does not contain.
	crossingTargetDontCross
	// crossingTargetCross matches points the loop contains.
	crossingTargetCross
)
+
// loopRelation defines the interface for checking a type of relationship between two loops.
// Some examples of relations are Contains, Intersects, or CompareBoundary.
type loopRelation interface {
	// Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit
	// condition for the loop relation. If any point P is found such that
	//
	//   A.ContainsPoint(P) == aCrossingTarget() &&
	//   B.ContainsPoint(P) == bCrossingTarget()
	//
	// then the loop relation is assumed to be the same as if a pair of crossing
	// edges were found. For example, the ContainsPoint relation has
	//
	//   aCrossingTarget() == crossingTargetDontCross
	//   bCrossingTarget() == crossingTargetCross
	//
	// because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true
	// for any point P, then it is equivalent to finding an edge crossing (i.e.,
	// since Contains returns false in both cases).
	//
	// Loop relations that do not have an early-exit condition of this form
	// should return crossingTargetDontCare for both crossing targets.

	// aCrossingTarget reports whether loop A crosses the target point with
	// the given relation type.
	aCrossingTarget() crossingTarget
	// bCrossingTarget reports whether loop B crosses the target point with
	// the given relation type.
	bCrossingTarget() crossingTarget

	// wedgesCross reports if a shared vertex ab1 and the two associated wedges
	// (a0, ab1, a2) and (b0, ab1, b2) are equivalent to an edge crossing.
	// The loop relation is also allowed to maintain its own internal state, and
	// can return true if it observes any sequence of wedges that are equivalent
	// to an edge crossing.
	wedgesCross(a0, ab1, a2, b0, b2 Point) bool
}
+
// loopCrosser is a helper type for determining whether two loops cross.
// It is instantiated twice for each pair of loops to be tested, once for the
// pair (A,B) and once for the pair (B,A), in order to be able to process
// edges in either loop nesting order.
type loopCrosser struct {
	a, b            *Loop        // the two loops being compared
	relation        loopRelation // the relation being evaluated
	swapped         bool         // whether a and b are swapped relative to the caller's order
	aCrossingTarget crossingTarget
	bCrossingTarget crossingTarget

	// state maintained by startEdge and edgeCrossesCell.
	crosser    *EdgeCrosser
	aj, bjPrev int

	// temporary data declared here to avoid repeated memory allocations.
	bQuery *CrossingEdgeQuery
	bCells []*ShapeIndexCell
}
+
+// newLoopCrosser creates a loopCrosser from the given values. If swapped is true,
+// the loops A and B have been swapped. This affects how arguments are passed to
+// the given loop relation, since for example A.Contains(B) is not the same as
+// B.Contains(A).
+func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser {
+	l := &loopCrosser{
+		a:               a,
+		b:               b,
+		relation:        relation,
+		swapped:         swapped,
+		aCrossingTarget: relation.aCrossingTarget(),
+		bCrossingTarget: relation.bCrossingTarget(),
+		bQuery:          NewCrossingEdgeQuery(b.index),
+	}
+	if swapped {
+		l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget
+	}
+
+	return l
+}
+
+// startEdge sets the crossers state for checking the given edge of loop A.
+func (l *loopCrosser) startEdge(aj int) {
+	l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1))
+	l.aj = aj
+	l.bjPrev = -2
+}
+
// edgeCrossesCell reports whether the current edge of loop A has any crossings with
// edges of the index cell of loop B.
func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool {
	// Test the current edge of A against all edges of bClipped
	bNumEdges := bClipped.numEdges()
	for j := 0; j < bNumEdges; j++ {
		bj := bClipped.edges[j]
		if bj != l.bjPrev+1 {
			// Non-contiguous B edge: restart the crossing chain at its start.
			l.crosser.RestartAt(l.b.Vertex(bj))
		}
		l.bjPrev = bj
		if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross {
			continue
		} else if crossing == Cross {
			return true
		}

		// We only need to check each shared vertex once, so we only
		// consider the case where l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1).
		if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) {
			// For a shared vertex, ask the relation whether the two wedges
			// are equivalent to an edge crossing; swap argument order when
			// the loops were swapped.
			if l.swapped {
				if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) {
					return true
				}
			} else {
				if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) {
					return true
				}
			}
		}
	}

	return false
}
+
+// cellCrossesCell reports whether there are any edge crossings or wedge crossings
+// within the two given cells.
+func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool {
+	// Test all edges of aClipped against all edges of bClipped.
+	for _, edge := range aClipped.edges {
+		l.startEdge(edge)
+		if l.edgeCrossesCell(bClipped) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// cellCrossesAnySubcell reports whether given an index cell of A, if there are any
+// edge or wedge crossings with any index cell of B contained within bID.
+func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool {
+	// Test all edges of aClipped against all edges of B. The relevant B
+	// edges are guaranteed to be children of bID, which lets us find the
+	// correct index cells more efficiently.
+	bRoot := PaddedCellFromCellID(bID, 0)
+	for _, aj := range aClipped.edges {
+		// Use an CrossingEdgeQuery starting at bRoot to find the index cells
+		// of B that might contain crossing edges.
+		l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot)
+		if len(l.bCells) == 0 {
+			continue
+		}
+		l.startEdge(aj)
+		for c := 0; c < len(l.bCells); c++ {
+			if l.edgeCrossesCell(l.bCells[c].shapes[0]) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
// hasCrossing reports whether given two iterators positioned such that
// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing
// anywhere within ai.cellID(). This function advances bi only past ai.cellID().
func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
	// If ai.CellID() intersects many edges of B, then it is faster to use
	// CrossingEdgeQuery to narrow down the candidates. But if it intersects
	// only a few edges, it is faster to check all the crossings directly.
	// We handle this by advancing bi and keeping track of how many edges we
	// would need to test.
	const edgeQueryMinEdges = 20 // Tuned from benchmarks.
	var totalEdges int
	l.bCells = nil

	for {
		if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 {
			totalEdges += n
			if totalEdges >= edgeQueryMinEdges {
				// There are too many edges to test them directly, so use CrossingEdgeQuery.
				if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) {
					return true
				}
				// Skip the rest of B's cells under ai before returning.
				bi.seekBeyond(ai)
				return false
			}
			// Few edges so far: remember this cell for the direct test below.
			l.bCells = append(l.bCells, bi.indexCell())
		}
		bi.next()
		if bi.cellID() > ai.rangeMax {
			break
		}
	}

	// Test all the edge crossings directly.
	for _, c := range l.bCells {
		if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) {
			return true
		}
	}

	return false
}
+
+// containsCenterMatches reports if the clippedShapes containsCenter boolean corresponds
+// to the crossing target type given. (This is to work around C++ allowing false == 0,
+// true == 1 type implicit conversions and comparisons)
+func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
+	return (!a.containsCenter && target == crossingTargetDontCross) ||
+		(a.containsCenter && target == crossingTargetCross)
+}
+
+// hasCrossingRelation reports whether given two iterators positioned such that
+// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship
+// anywhere within ai.cellID(). Specifically, this method returns true if there
+// is an edge crossing, a wedge crossing, or a point P that matches both relations
+// crossing targets. This function advances both iterators past ai.cellID.
+func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
+	aClipped := ai.it.IndexCell().shapes[0]
+	if aClipped.numEdges() != 0 {
+		// The current cell of A has at least one edge, so check for crossings.
+		if l.hasCrossing(ai, bi) {
+			return true
+		}
+		ai.next()
+		return false
+	}
+
+	if containsCenterMatches(aClipped, l.aCrossingTarget) {
+		// The crossing target for A is not satisfied, so we skip over these cells of B.
+		bi.seekBeyond(ai)
+		ai.next()
+		return false
+	}
+
+	// All points within ai.cellID() satisfy the crossing target for A, so it's
+	// worth iterating through the cells of B to see whether any cell
+	// centers also satisfy the crossing target for B.
+	for bi.cellID() <= ai.rangeMax {
+		bClipped := bi.it.IndexCell().shapes[0]
+		if containsCenterMatches(bClipped, l.bCrossingTarget) {
+			return true
+		}
+		bi.next()
+	}
+	ai.next()
+	return false
+}
+
+// hasCrossingRelation checks all edges of loop A for intersection against all edges
+// of loop B and reports if there are any that satisfy the given relation. If there
+// is any shared vertex, the wedges centered at this vertex are sent to the given
+// relation to be tested.
+//
+// If the two loop boundaries cross, this method is guaranteed to return
+// true. It also returns true in certain cases if the loop relationship is
+// equivalent to crossing. For example, if the relation is Contains and a
+// point P is found such that B contains P but A does not contain P, this
+// method will return true to indicate that the result is the same as though
+// a pair of crossing edges were found (since Contains returns false in
+// both cases).
+//
+// See Contains, Intersects and CompareBoundary for the three uses of this function.
+func hasCrossingRelation(a, b *Loop, relation loopRelation) bool {
+	// We look for CellID ranges where the indexes of A and B overlap, and
+	// then test those edges for crossings.
+	ai := newRangeIterator(a.index)
+	bi := newRangeIterator(b.index)
+
+	ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B
+	ba := newLoopCrosser(b, a, relation, true)  // Tests edges of B against A
+
+	for !ai.done() || !bi.done() {
+		if ai.rangeMax < bi.rangeMin {
+			// The A and B cells don't overlap, and A precedes B.
+			ai.seekTo(bi)
+		} else if bi.rangeMax < ai.rangeMin {
+			// The A and B cells don't overlap, and B precedes A.
+			bi.seekTo(ai)
+		} else {
+			// One cell contains the other. Determine which cell is larger.
+			abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb())
+			if abRelation > 0 {
+				// A's index cell is larger.
+				if ab.hasCrossingRelation(ai, bi) {
+					return true
+				}
+			} else if abRelation < 0 {
+				// B's index cell is larger.
+				if ba.hasCrossingRelation(bi, ai) {
+					return true
+				}
+			} else {
+				// The A and B cells are the same. Since the two cells
+				// have the same center point P, check whether P satisfies
+				// the crossing targets.
+				aClipped := ai.it.IndexCell().shapes[0]
+				bClipped := bi.it.IndexCell().shapes[0]
+				if containsCenterMatches(aClipped, ab.aCrossingTarget) &&
+					containsCenterMatches(bClipped, ab.bCrossingTarget) {
+					return true
+				}
+				// Otherwise test all the edge crossings directly.
+				if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) {
+					return true
+				}
+				ai.next()
+				bi.next()
+			}
+		}
+	}
+	return false
+}
+
+// containsRelation implements loopRelation for a contains operation. If
+// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent
+// to having an edge crossing (i.e., Contains returns false).
+type containsRelation struct {
+	foundSharedVertex bool
+}
+
+func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross }
+func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
+func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+	c.foundSharedVertex = true
+	return !WedgeContains(a0, ab1, a2, b0, b2)
+}
+
+// intersectsRelation implements loopRelation for an intersects operation. Given
+// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true,
+// it is equivalent to having an edge crossing (i.e., Intersects returns true).
+type intersectsRelation struct {
+	foundSharedVertex bool
+}
+
+func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross }
+func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
+func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+	i.foundSharedVertex = true
+	return WedgeIntersects(a0, ab1, a2, b0, b2)
+}
+
+// compareBoundaryRelation implements loopRelation for comparing boundaries.
+//
+// The compare boundary relation does not have a useful early-exit condition,
+// so we return crossingTargetDontCare for both crossing targets.
+//
+// Aside: A possible early exit condition could be based on the following.
+//   If A contains a point of both B and ~B, then A intersects Boundary(B).
+//   If ~A contains a point of both B and ~B, then ~A intersects Boundary(B).
+//   So if the intersections of {A, ~A} with {B, ~B} are all non-empty,
+//   the return value is 0, i.e., Boundary(A) intersects Boundary(B).
+// Unfortunately it isn't worth detecting this situation because by the
+// time we have seen a point in all four intersection regions, we are also
+// guaranteed to have seen at least one pair of crossing edges.
+type compareBoundaryRelation struct {
+	reverse           bool // True if the other loop should be reversed.
+	foundSharedVertex bool // True if any wedge was processed.
+	containsEdge      bool // True if any edge of the other loop is contained by this loop.
+	excludesEdge      bool // True if any edge of the other loop is excluded by this loop.
+}
+
+func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation {
+	return &compareBoundaryRelation{reverse: reverse}
+}
+
+func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+	// Because we don't care about the interior of the other, only its boundary,
+	// it is sufficient to check whether this one contains the semiwedge (ab1, b2).
+	c.foundSharedVertex = true
+	if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) {
+		c.containsEdge = true
+	} else {
+		c.excludesEdge = true
+	}
+	return c.containsEdge && c.excludesEdge
+}
+
+// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the
+// "semiwedge" defined as any non-empty open set of rays immediately CCW from
+// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW;
+// this simulates what would happen if the direction of the other loop was reversed.
+func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool {
+	if b2 == a0 || b2 == a2 {
+		// We have a shared or reversed edge.
+		return (b2 == a0) == reverse
+	}
+	return OrderedCCW(a0, a2, b2, ab1)
+}
+
+// containsNonCrossingBoundary reports whether given two loops whose boundaries
+// do not cross (see compareBoundary), if this loop contains the boundary of the
+// other loop. If reverse is true, the boundary of the other loop is reversed
+// first (which only affects the result when there are shared edges). This method
+// is cheaper than compareBoundary because it does not test for edge intersections.
+//
+// This function requires that neither loop is empty, and that if the other is full,
+// then reverse == false.
+func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool {
+	// The bounds must intersect for containment.
+	if !l.bound.Intersects(other.bound) {
+		return false
+	}
+
+	// Full loops are handled as though the loop surrounded the entire sphere.
+	if l.IsFull() {
+		return true
+	}
+	if other.IsFull() {
+		return false
+	}
+
+	m, ok := l.findVertex(other.Vertex(0))
+	if !ok {
+		// Since the other loops vertex 0 is not shared, we can check if this contains it.
+		return l.ContainsPoint(other.Vertex(0))
+	}
+	// Otherwise check whether the edge (b0, b1) is contained by this loop.
+	return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1),
+		other.Vertex(1), reverseOther)
+}
+
+// TODO(roberts): Differences from the C++ version:
+// DistanceToPoint
+// DistanceToBoundary
+// Project
+// ProjectToBoundary
+// Equal
+// BoundaryEqual
+// BoundaryApproxEqual
+// BoundaryNear

+ 0 - 533
vendor/github.com/golang/geo/s2/loop_test.go

@@ -1,533 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-var (
-	// The northern hemisphere, defined using two pairs of antipodal points.
-	northHemi = LoopFromPoints(parsePoints("0:-180, 0:-90, 0:0, 0:90"))
-
-	// The northern hemisphere, defined using three points 120 degrees apart.
-	northHemi3 = LoopFromPoints(parsePoints("0:-180, 0:-60, 0:60"))
-
-	// The southern hemisphere, defined using two pairs of antipodal points.
-	southHemi = LoopFromPoints(parsePoints("0:90, 0:0, 0:-90, 0:-180"))
-
-	// The western hemisphere, defined using two pairs of antipodal points.
-	westHemi = LoopFromPoints(parsePoints("0:-180, -90:0, 0:0, 90:0"))
-
-	// The eastern hemisphere, defined using two pairs of antipodal points.
-	eastHemi = LoopFromPoints(parsePoints("90:0, 0:0, -90:0, 0:-180"))
-
-	// The "near" hemisphere, defined using two pairs of antipodal points.
-	nearHemi = LoopFromPoints(parsePoints("0:-90, -90:0, 0:90, 90:0"))
-
-	// The "far" hemisphere, defined using two pairs of antipodal points.
-	farHemi = LoopFromPoints(parsePoints("90:0, 0:90, -90:0, 0:-90"))
-
-	// A spiral stripe that slightly over-wraps the equator.
-	candyCane = LoopFromPoints(parsePoints("-20:150, -20:-70, 0:70, 10:-150, 10:70, -10:-70"))
-
-	// A small clockwise loop in the northern & eastern hemisperes.
-	smallNECW = LoopFromPoints(parsePoints("35:20, 45:20, 40:25"))
-
-	// Loop around the north pole at 80 degrees.
-	arctic80 = LoopFromPoints(parsePoints("80:-150, 80:-30, 80:90"))
-
-	// Loop around the south pole at 80 degrees.
-	antarctic80 = LoopFromPoints(parsePoints("-80:120, -80:0, -80:-120"))
-
-	// A completely degenerate triangle along the equator that RobustCCW()
-	// considers to be CCW.
-	lineTriangle = LoopFromPoints(parsePoints("0:1, 0:2, 0:3"))
-
-	// A nearly-degenerate CCW chevron near the equator with very long sides
-	// (about 80 degrees).  Its area is less than 1e-640, which is too small
-	// to represent in double precision.
-	skinnyChevron = LoopFromPoints(parsePoints("0:0, -1e-320:80, 0:1e-320, 1e-320:80"))
-
-	// A diamond-shaped loop around the point 0:180.
-	loopA = LoopFromPoints(parsePoints("0:178, -1:180, 0:-179, 1:-180"))
-
-	// Like loopA, but the vertices are at leaf cell centers.
-	snappedLoopA = LoopFromPoints([]Point{
-		CellIDFromLatLng(parseLatLngs("0:178")[0]).Point(),
-		CellIDFromLatLng(parseLatLngs("-1:180")[0]).Point(),
-		CellIDFromLatLng(parseLatLngs("0:-179")[0]).Point(),
-		CellIDFromLatLng(parseLatLngs("1:-180")[0]).Point(),
-	})
-
-	// A different diamond-shaped loop around the point 0:180.
-	loopB = LoopFromPoints(parsePoints("0:179, -1:180, 0:-178, 1:-180"))
-
-	// The intersection of A and B.
-	aIntersectB = LoopFromPoints(parsePoints("0:179, -1:180, 0:-179, 1:-180"))
-
-	// The union of A and B.
-	aUnionB = LoopFromPoints(parsePoints("0:178, -1:180, 0:-178, 1:-180"))
-
-	// A minus B (concave).
-	aMinusB = LoopFromPoints(parsePoints("0:178, -1:180, 0:179, 1:-180"))
-
-	// B minus A (concave).
-	bMinusA = LoopFromPoints(parsePoints("0:-179, -1:180, 0:-178, 1:-180"))
-
-	// A shape gotten from A by adding a triangle to one edge, and
-	// subtracting a triangle from the opposite edge.
-	loopC = LoopFromPoints(parsePoints("0:178, 0:180, -1:180, 0:-179, 1:-179, 1:-180"))
-
-	// A shape gotten from A by adding a triangle to one edge, and
-	// adding another triangle to the opposite edge.
-	loopD = LoopFromPoints(parsePoints("0:178, -1:178, -1:180, 0:-179, 1:-179, 1:-180"))
-
-	//   3------------2
-	//   |            |               ^
-	//   |  7-8  b-c  |               |
-	//   |  | |  | |  |      Latitude |
-	//   0--6-9--a-d--1               |
-	//   |  | |       |               |
-	//   |  f-e       |               +----------->
-	//   |            |                 Longitude
-	//   4------------5
-	//
-	// Important: It is not okay to skip over collinear vertices when
-	// defining these loops (e.g. to define loop E as "0,1,2,3") because S2
-	// uses symbolic perturbations to ensure that no three vertices are
-	// *ever* considered collinear (e.g., vertices 0, 6, 9 are not
-	// collinear).  In other words, it is unpredictable (modulo knowing the
-	// details of the symbolic perturbations) whether 0123 contains 06123
-	// for example.
-
-	// Loop E:  0,6,9,a,d,1,2,3
-	// Loop F:  0,4,5,1,d,a,9,6
-	// Loop G:  0,6,7,8,9,a,b,c,d,1,2,3
-	// Loop H:  0,6,f,e,9,a,b,c,d,1,2,3
-	// Loop I:  7,6,f,e,9,8
-	loopE = LoopFromPoints(parsePoints("0:30, 0:34, 0:36, 0:39, 0:41, 0:44, 30:44, 30:30"))
-	loopF = LoopFromPoints(parsePoints("0:30, -30:30, -30:44, 0:44, 0:41, 0:39, 0:36, 0:34"))
-	loopG = LoopFromPoints(parsePoints("0:30, 0:34, 10:34, 10:36, 0:36, 0:39, 10:39, 10:41, 0:41, 0:44, 30:44, 30:30"))
-	loopH = LoopFromPoints(parsePoints("0:30, 0:34, -10:34, -10:36, 0:36, 0:39, 10:39, 10:41, 0:41, 0:44, 30:44, 30:30"))
-
-	loopI = LoopFromPoints(parsePoints("10:34, 0:34, -10:34, -10:36, 0:36, 10:36"))
-)
-
-func TestLoopEmptyAndFull(t *testing.T) {
-	emptyLoop := EmptyLoop()
-
-	if !emptyLoop.IsEmpty() {
-		t.Errorf("empty loop should be empty")
-	}
-	if emptyLoop.IsFull() {
-		t.Errorf("empty loop should not be full")
-	}
-	if !emptyLoop.isEmptyOrFull() {
-		t.Errorf("empty loop should pass IsEmptyOrFull")
-	}
-
-	fullLoop := FullLoop()
-
-	if fullLoop.IsEmpty() {
-		t.Errorf("full loop should not be empty")
-	}
-	if !fullLoop.IsFull() {
-		t.Errorf("full loop should be full")
-	}
-	if !fullLoop.isEmptyOrFull() {
-		t.Errorf("full loop should pass IsEmptyOrFull")
-	}
-	if emptyLoop.NumEdges() != 0 {
-		t.Errorf("empty loops should have no edges")
-	}
-	if emptyLoop.numChains() != 0 {
-		t.Errorf("empty loops should have no edge chains")
-	}
-	if fullLoop.NumEdges() != 0 {
-		t.Errorf("full loops should have no edges")
-	}
-	if fullLoop.numChains() != 0 {
-		t.Errorf("full loops should have no edge chains")
-	}
-}
-
-func TestLoopBasic(t *testing.T) {
-	shape := Shape(makeLoop("0:0, 0:1, 1:0"))
-
-	if got := shape.NumEdges(); got != 3 {
-		t.Errorf("shape.NumEdges = %d, want 3", got)
-	}
-	if got := shape.numChains(); got != 1 {
-		t.Errorf("shape.numChains = %d, want 1", got)
-	}
-	if got := shape.chainStart(0); got != 0 {
-		t.Errorf("shape.chainStart(0) = %d, want 3", got)
-	}
-	if got := shape.chainStart(1); got != 3 {
-		t.Errorf("shape.chainStart(1) = %d, want 3", got)
-	}
-
-	v2, v3 := shape.Edge(2)
-	if want := PointFromLatLng(LatLngFromDegrees(1, 0)); !v2.ApproxEqual(want) {
-		t.Errorf("shape.Edge(2) end A = %v, want %v", v2, want)
-	}
-	if want := PointFromLatLng(LatLngFromDegrees(0, 0)); !v3.ApproxEqual(want) {
-
-		t.Errorf("shape.Edge(2) end B = %v, want %v", v3, want)
-	}
-
-	if got := shape.dimension(); got != polygonGeometry {
-		t.Errorf("shape.dimension() = %d, want %v", got, polygonGeometry)
-	}
-	if !shape.HasInterior() {
-		t.Errorf("shape.HasInterior() = false, want true")
-	}
-	if shape.ContainsOrigin() {
-		t.Errorf("shape.ContainsOrigin() = true, want false")
-	}
-}
-
-func TestLoopRectBound(t *testing.T) {
-	if !EmptyLoop().RectBound().IsEmpty() {
-		t.Errorf("empty loop's RectBound should be empty")
-	}
-	if !FullLoop().RectBound().IsFull() {
-		t.Errorf("full loop's RectBound should be full")
-	}
-	if !candyCane.RectBound().Lng.IsFull() {
-		t.Errorf("candy cane loop's RectBound should have a full longitude range")
-	}
-	if got := candyCane.RectBound().Lat.Lo; got >= -0.349066 {
-		t.Errorf("candy cane loop's RectBound should have a lower latitude (%v) under -0.349066 radians", got)
-	}
-	if got := candyCane.RectBound().Lat.Hi; got <= 0.174533 {
-		t.Errorf("candy cane loop's RectBound should have an upper latitude (%v) over 0.174533 radians", got)
-	}
-	if !smallNECW.RectBound().IsFull() {
-		t.Errorf("small northeast clockwise loop's RectBound should be full")
-	}
-	if got, want := arctic80.RectBound(), rectFromDegrees(80, -180, 90, 180); !rectsApproxEqual(got, want, rectErrorLat, rectErrorLng) {
-		t.Errorf("arctic 80 loop's RectBound (%v) should be %v", got, want)
-	}
-	if got, want := antarctic80.RectBound(), rectFromDegrees(-90, -180, -80, 180); !rectsApproxEqual(got, want, rectErrorLat, rectErrorLng) {
-		t.Errorf("antarctic 80 loop's RectBound (%v) should be %v", got, want)
-	}
-	if !southHemi.RectBound().Lng.IsFull() {
-		t.Errorf("south hemi loop's RectBound should have a full longitude range")
-	}
-	got, want := southHemi.RectBound().Lat, r1.Interval{-math.Pi / 2, 0}
-	if !got.ApproxEqual(want) {
-		t.Errorf("south hemi loop's RectBound latitude interval (%v) should be %v", got, want)
-	}
-
-	// Create a loop that contains the complement of the arctic80 loop.
-	arctic80Inv := invert(arctic80)
-	// The highest latitude of each edge is attained at its midpoint.
-	mid := Point{arctic80Inv.vertices[0].Vector.Add(arctic80Inv.vertices[1].Vector).Mul(.5)}
-	if got, want := arctic80Inv.RectBound().Lat.Hi, float64(LatLngFromPoint(mid).Lat); math.Abs(got-want) > 10*dblEpsilon {
-		t.Errorf("arctic 80 inverse loop's RectBound should have a latutude hi of %v, got %v", got, want)
-	}
-}
-
-func TestLoopCapBound(t *testing.T) {
-	if !EmptyLoop().CapBound().IsEmpty() {
-		t.Errorf("empty loop's CapBound should be empty")
-	}
-	if !FullLoop().CapBound().IsFull() {
-		t.Errorf("full loop's CapBound should be full")
-	}
-	if !smallNECW.CapBound().IsFull() {
-		t.Errorf("small northeast clockwise loop's CapBound should be full")
-	}
-	if got, want := arctic80.CapBound(), rectFromDegrees(80, -180, 90, 180).CapBound(); !got.ApproxEqual(want) {
-		t.Errorf("arctic 80 loop's CapBound (%v) should be %v", got, want)
-	}
-	if got, want := antarctic80.CapBound(), rectFromDegrees(-90, -180, -80, 180).CapBound(); !got.ApproxEqual(want) {
-		t.Errorf("antarctic 80 loop's CapBound (%v) should be %v", got, want)
-	}
-}
-
-func invert(l *Loop) *Loop {
-	vertices := make([]Point, 0, len(l.vertices))
-	for i := len(l.vertices) - 1; i >= 0; i-- {
-		vertices = append(vertices, l.vertices[i])
-	}
-	return LoopFromPoints(vertices)
-}
-
-func TestLoopOriginInside(t *testing.T) {
-	if !northHemi.originInside {
-		t.Errorf("north hemisphere polygon should include origin")
-	}
-	if !northHemi3.originInside {
-		t.Errorf("north hemisphere 3 polygon should include origin")
-	}
-	if southHemi.originInside {
-		t.Errorf("south hemisphere polygon should not include origin")
-	}
-	if westHemi.originInside {
-		t.Errorf("west hemisphere polygon should not include origin")
-	}
-	if !eastHemi.originInside {
-		t.Errorf("east hemisphere polygon should include origin")
-	}
-	if nearHemi.originInside {
-		t.Errorf("near hemisphere polygon should not include origin")
-	}
-	if !farHemi.originInside {
-		t.Errorf("far hemisphere polygon should include origin")
-	}
-	if candyCane.originInside {
-		t.Errorf("candy cane polygon should not include origin")
-	}
-	if !smallNECW.originInside {
-		t.Errorf("smallNECW polygon should include origin")
-	}
-	if !arctic80.originInside {
-		t.Errorf("arctic 80 polygon should include origin")
-	}
-	if antarctic80.originInside {
-		t.Errorf("antarctic 80 polygon should not include origin")
-	}
-	if loopA.originInside {
-		t.Errorf("loop A polygon should not include origin")
-	}
-}
-
-func TestLoopContainsPoint(t *testing.T) {
-	north := Point{r3.Vector{0, 0, 1}}
-	south := Point{r3.Vector{0, 0, -1}}
-
-	if EmptyLoop().ContainsPoint(north) {
-		t.Errorf("empty loop should not not have any points")
-	}
-	if !FullLoop().ContainsPoint(south) {
-		t.Errorf("full loop should have full point vertex")
-	}
-
-	for _, tc := range []struct {
-		name string
-		l    *Loop
-		in   Point
-		out  Point
-	}{
-		{
-			"north hemisphere",
-			northHemi,
-			Point{r3.Vector{0, 0, 1}},
-			Point{r3.Vector{0, 0, -1}},
-		},
-		{
-			"south hemisphere",
-			southHemi,
-			Point{r3.Vector{0, 0, -1}},
-			Point{r3.Vector{0, 0, 1}},
-		},
-		{
-			"west hemisphere",
-			westHemi,
-			Point{r3.Vector{0, -1, 0}},
-			Point{r3.Vector{0, 1, 0}},
-		},
-		{
-			"east hemisphere",
-			eastHemi,
-			Point{r3.Vector{0, 1, 0}},
-			Point{r3.Vector{0, -1, 0}},
-		},
-		{
-			"candy cane",
-			candyCane,
-			PointFromLatLng(LatLngFromDegrees(5, 71)),
-			PointFromLatLng(LatLngFromDegrees(-8, 71)),
-		},
-	} {
-		l := tc.l
-		for i := 0; i < 4; i++ {
-			if !l.ContainsPoint(tc.in) {
-				t.Errorf("%s loop should contain %v at rotation %d", tc.name, tc.in, i)
-			}
-			if l.ContainsPoint(tc.out) {
-				t.Errorf("%s loop shouldn't contain %v at rotation %d", tc.name, tc.out, i)
-			}
-			l = rotate(l)
-		}
-	}
-}
-
-func TestLoopVertex(t *testing.T) {
-	tests := []struct {
-		loop   *Loop
-		vertex int
-		want   Point
-	}{
-		{EmptyLoop(), 0, Point{r3.Vector{0, 0, 1}}},
-		{EmptyLoop(), 1, Point{r3.Vector{0, 0, 1}}},
-		{FullLoop(), 0, Point{r3.Vector{0, 0, -1}}},
-		{FullLoop(), 1, Point{r3.Vector{0, 0, -1}}},
-		{arctic80, 0, parsePoint("80:-150")},
-		{arctic80, 1, parsePoint("80:-30")},
-		{arctic80, 2, parsePoint("80:90")},
-		{arctic80, 3, parsePoint("80:-150")},
-	}
-
-	for _, test := range tests {
-		if got := test.loop.Vertex(test.vertex); !pointsApproxEquals(got, test.want, epsilon) {
-			t.Errorf("%v.Vertex(%d) = %v, want %v", test.loop, test.vertex, got, test.want)
-		}
-	}
-
-	// Check that wrapping is correct.
-	if !pointsApproxEquals(arctic80.Vertex(2), arctic80.Vertex(5), epsilon) {
-		t.Errorf("Vertex should wrap values. %v.Vertex(2) = %v != %v.Vertex(5) = %v",
-			arctic80, arctic80.Vertex(2), arctic80, arctic80.Vertex(5))
-	}
-
-	loopAroundThrice := 2 + 3*len(arctic80.vertices)
-	if !pointsApproxEquals(arctic80.Vertex(2), arctic80.Vertex(loopAroundThrice), epsilon) {
-		t.Errorf("Vertex should wrap values. %v.Vertex(2) = %v != %v.Vertex(%d) = %v",
-			arctic80, arctic80.Vertex(2), arctic80, loopAroundThrice, arctic80.Vertex(loopAroundThrice))
-	}
-}
-
-func TestLoopNumEdges(t *testing.T) {
-	tests := []struct {
-		loop *Loop
-		want int
-	}{
-		{EmptyLoop(), 0},
-		{FullLoop(), 0},
-		{farHemi, 4},
-		{candyCane, 6},
-		{smallNECW, 3},
-		{arctic80, 3},
-		{antarctic80, 3},
-		{lineTriangle, 3},
-		{skinnyChevron, 4},
-	}
-
-	for _, test := range tests {
-		if got := test.loop.NumEdges(); got != test.want {
-			t.Errorf("%v.NumEdges() = %v, want %v", test.loop, got, test.want)
-		}
-	}
-}
-
-func TestLoopEdge(t *testing.T) {
-	tests := []struct {
-		loop  *Loop
-		edge  int
-		wantA Point
-		wantB Point
-	}{
-		{
-			loop:  farHemi,
-			edge:  2,
-			wantA: Point{r3.Vector{0, 0, -1}},
-			wantB: Point{r3.Vector{0, -1, 0}},
-		},
-		{
-			loop: candyCane,
-			edge: 0,
-
-			wantA: parsePoint("-20:150"),
-			wantB: parsePoint("-20:-70"),
-		},
-		{
-			loop:  candyCane,
-			edge:  1,
-			wantA: parsePoint("-20:-70"),
-			wantB: parsePoint("0:70"),
-		},
-		{
-			loop:  candyCane,
-			edge:  2,
-			wantA: parsePoint("0:70"),
-			wantB: parsePoint("10:-150"),
-		},
-		{
-			loop:  candyCane,
-			edge:  3,
-			wantA: parsePoint("10:-150"),
-			wantB: parsePoint("10:70"),
-		},
-		{
-			loop:  candyCane,
-			edge:  4,
-			wantA: parsePoint("10:70"),
-			wantB: parsePoint("-10:-70"),
-		},
-		{
-			loop:  candyCane,
-			edge:  5,
-			wantA: parsePoint("-10:-70"),
-			wantB: parsePoint("-20:150"),
-		},
-		{
-			loop:  skinnyChevron,
-			edge:  2,
-			wantA: parsePoint("0:1e-320"),
-			wantB: parsePoint("1e-320:80"),
-		},
-		{
-			loop:  skinnyChevron,
-			edge:  3,
-			wantA: parsePoint("1e-320:80"),
-			wantB: parsePoint("0:0"),
-		},
-	}
-
-	for _, test := range tests {
-		if a, b := test.loop.Edge(test.edge); !(pointsApproxEquals(a, test.wantA, epsilon) && pointsApproxEquals(b, test.wantB, epsilon)) {
-			t.Errorf("%v.Edge(%d) = (%v, %v), want (%v, %v)", test.loop, test.edge, a, b, test.wantA, test.wantB)
-		}
-	}
-}
-
-func rotate(l *Loop) *Loop {
-	vertices := make([]Point, 0, len(l.vertices))
-	for i := 1; i < len(l.vertices); i++ {
-		vertices = append(vertices, l.vertices[i])
-	}
-	vertices = append(vertices, l.vertices[0])
-	return LoopFromPoints(vertices)
-}
-
-func TestLoopFromCell(t *testing.T) {
-	cell := CellFromCellID(CellIDFromLatLng(LatLng{40.565459 * s1.Degree, -74.645276 * s1.Degree}))
-	loopFromCell := LoopFromCell(cell)
-
-	// Demonstrates the reason for this test; the cell bounds are more
-	// conservative than the resulting loop bounds.
-	if loopFromCell.RectBound().Contains(cell.RectBound()) {
-		t.Errorf("loopFromCell's RectBound countains the original cells RectBound, but should not")
-	}
-}
-
-func TestLoopRegularLoop(t *testing.T) {
-	loop := RegularLoop(PointFromLatLng(LatLngFromDegrees(80, 135)), 20*s1.Degree, 4)
-	if len(loop.vertices) != 4 {
-		t.Errorf("RegularLoop with 4 vertices should have 4 vertices, got %d", len(loop.vertices))
-	}
-	// The actual Points values are already tested in the s2point_test method TestRegularPoints.
-}

+ 13 - 15
vendor/github.com/golang/geo/s2/matrix3x3.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 

+ 0 - 494
vendor/github.com/golang/geo/s2/matrix3x3_test.go

@@ -1,494 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-)
-
-func TestCol(t *testing.T) {
-	tests := []struct {
-		have *matrix3x3
-		col  int
-		want Point
-	}{
-		{&matrix3x3{}, 0, OriginPoint()},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			0,
-			Point{r3.Vector{1, 4, 7}},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			2,
-			Point{r3.Vector{3, 6, 9}},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.col(test.col); !got.ApproxEqual(test.want) {
-			t.Errorf("%v.col(%d) = %v, want %v", test.have, test.col, got, test.want)
-		}
-	}
-}
-
-func TestRow(t *testing.T) {
-	tests := []struct {
-		have *matrix3x3
-		row  int
-		want Point
-	}{
-		{&matrix3x3{}, 0, OriginPoint()},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			0,
-			Point{r3.Vector{1, 2, 3}},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			2,
-			Point{r3.Vector{7, 8, 9}},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.row(test.row); !got.ApproxEqual(test.want) {
-			t.Errorf("%v.row(%d) = %v, want %v", test.have, test.row, got, test.want)
-		}
-	}
-}
-
-func TestSetCol(t *testing.T) {
-	tests := []struct {
-		have  *matrix3x3
-		col   int
-		point Point
-		want  *matrix3x3
-	}{
-		{
-			&matrix3x3{},
-			0,
-			Point{r3.Vector{1, 1, 0}},
-			&matrix3x3{
-				{1, 0, 0},
-				{1, 0, 0},
-				{0, 0, 0},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			2,
-			Point{r3.Vector{1, 1, 0}},
-			&matrix3x3{
-				{1, 2, 1},
-				{4, 5, 1},
-				{7, 8, 0},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.setCol(test.col, test.point); !matricesApproxEqual(got, test.want) {
-			t.Errorf("%v.setCol(%d, %v) = %v, want %v", test.have, test.col, test.point, got, test.want)
-		}
-	}
-}
-
-func TestSetRow(t *testing.T) {
-	tests := []struct {
-		have  *matrix3x3
-		row   int
-		point Point
-		want  *matrix3x3
-	}{
-		{
-			&matrix3x3{},
-			0,
-			Point{r3.Vector{1, 1, 0}},
-			&matrix3x3{
-				{1, 1, 0},
-				{0, 0, 0},
-				{0, 0, 0},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			2,
-			Point{r3.Vector{1, 1, 0}},
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{1, 1, 0},
-			},
-		},
-	}
-	for _, test := range tests {
-		if got := test.have.setRow(test.row, test.point); !matricesApproxEqual(got, test.want) {
-			t.Errorf("%v.setRow(%d, %v) = %v, want %v", test.have, test.row, test.point, got, test.want)
-		}
-	}
-}
-
-func TestScale(t *testing.T) {
-	tests := []struct {
-		have  *matrix3x3
-		scale float64
-		want  *matrix3x3
-	}{
-		{
-			&matrix3x3{},
-			0,
-			&matrix3x3{},
-		},
-		{
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-			0,
-			&matrix3x3{},
-		},
-		{
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-			1,
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-			5,
-			&matrix3x3{
-				{5, 5, 5},
-				{5, 5, 5},
-				{5, 5, 5},
-			},
-		},
-		{
-			&matrix3x3{
-				{-2, 2, -3},
-				{-1, 1, 3},
-				{2, 0, -1},
-			},
-			2.75,
-			&matrix3x3{
-				{-5.5, 5.5, -8.25},
-				{-2.75, 2.75, 8.25},
-				{5.5, 0, -2.75},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.scale(test.scale); !matricesApproxEqual(got, test.want) {
-			t.Errorf("%v.scale(%f) = %v, want %v", test.have, test.scale, got, test.want)
-		}
-	}
-}
-
-func TestMul(t *testing.T) {
-	tests := []struct {
-		have  *matrix3x3
-		point Point
-		want  Point
-	}{
-		{&matrix3x3{}, Point{}, Point{}},
-		{
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-			Point{},
-			Point{},
-		},
-		{
-			// Identity times something gives back the something
-			&matrix3x3{
-				{1, 0, 0},
-				{0, 1, 0},
-				{0, 0, 1},
-			},
-			Point{},
-			Point{},
-		},
-		{
-			// Identity times something gives back the something
-			&matrix3x3{
-				{1, 0, 0},
-				{0, 1, 0},
-				{0, 0, 1},
-			},
-			Point{r3.Vector{1, 2, 3}},
-			Point{r3.Vector{1, 2, 3}},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			Point{r3.Vector{1, 1, 1}},
-			Point{r3.Vector{6, 15, 24}},
-		},
-	}
-	for _, test := range tests {
-		if got := test.have.mul(test.point); !got.ApproxEqual(test.want) {
-			t.Errorf("%v.mul(%v) = %v, want %v", test.have, test.point, got, test.want)
-		}
-	}
-}
-
-func TestDet(t *testing.T) {
-	tests := []struct {
-		have *matrix3x3
-		want float64
-	}{
-		{
-			&matrix3x3{},
-			0,
-		},
-		{
-			// Matrix of all the same values has det of 0.
-			&matrix3x3{
-				{1, 1, 1},
-				{1, 1, 1},
-				{1, 1, 1},
-			},
-			0,
-		},
-		{
-			// Identity matrix has det of 1.
-			&matrix3x3{
-				{1, 0, 0},
-				{0, 1, 0},
-				{0, 0, 1},
-			},
-			1,
-		},
-		{
-			&matrix3x3{
-				{-2, 2, -3},
-				{-1, 1, 3},
-				{2, 0, -1},
-			},
-			18,
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			0,
-		},
-		{
-			&matrix3x3{
-				{9, 8, 7},
-				{6, 5, 4},
-				{3, 2, 1},
-			},
-			0,
-		},
-		{
-			&matrix3x3{
-				{1.74, math.E, 42},
-				{math.Pi, math.Sqrt2, math.Ln10},
-				{3, math.SqrtPhi, 9.8976},
-			},
-			-56.838525224123096,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.det(); !float64Eq(got, test.want) {
-			t.Errorf("%v.det() = %v, want %v", test.have, got, test.want)
-		}
-	}
-}
-
-func TestTranspose(t *testing.T) {
-	tests := []struct {
-		have *matrix3x3
-		want *matrix3x3
-	}{
-		{&matrix3x3{}, &matrix3x3{}},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			&matrix3x3{
-				{1, 4, 7},
-				{2, 5, 8},
-				{3, 6, 9},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 0, 0},
-				{0, 2, 0},
-				{0, 0, 3},
-			},
-			&matrix3x3{
-				{1, 0, 0},
-				{0, 2, 0},
-				{0, 0, 3},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{0, 4, 5},
-				{0, 0, 6},
-			},
-			&matrix3x3{
-				{1, 0, 0},
-				{2, 4, 0},
-				{3, 5, 6},
-			},
-		},
-		{
-			&matrix3x3{
-				{1, 1, 1},
-				{0, 0, 0},
-				{0, 0, 0},
-			},
-			&matrix3x3{
-				{1, 0, 0},
-				{1, 0, 0},
-				{1, 0, 0},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.transpose().transpose(); !matricesApproxEqual(got, test.have) {
-			t.Errorf("%v.transpose().transpose() = %v, want %v", test.have, got, test.have)
-		}
-
-		if got := test.have.transpose(); !matricesApproxEqual(got, test.want) {
-			t.Errorf("%v.transpose() = %v, want %v", test.have, got, test.want)
-		}
-
-	}
-}
-
-func TestString(t *testing.T) {
-	tests := []struct {
-		have *matrix3x3
-		want string
-	}{
-		{
-			&matrix3x3{
-				{1, 2, 3},
-				{4, 5, 6},
-				{7, 8, 9},
-			},
-			`[ 1.0000 2.0000 3.0000 ] [ 4.0000 5.0000 6.0000 ] [ 7.0000 8.0000 9.0000 ]`,
-		},
-		{
-			&matrix3x3{
-				{1, 4, 7},
-				{2, 5, 8},
-				{3, 6, 9},
-			},
-			`[ 1.0000 4.0000 7.0000 ] [ 2.0000 5.0000 8.0000 ] [ 3.0000 6.0000 9.0000 ]`,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.have.String(); got != test.want {
-			t.Errorf("%v.String() = %v, want %v", test.have, got, test.want)
-		}
-	}
-}
-
-func TestFrames(t *testing.T) {
-	z := PointFromCoords(0.2, 0.5, -3.3)
-	m := getFrame(z)
-
-	if !m.col(0).IsUnit() {
-		t.Errorf("col(0) of frame not unit length")
-	}
-	if !m.col(1).IsUnit() {
-		t.Errorf("col(1) of frame not unit length")
-	}
-	if !float64Eq(m.det(), 1) {
-		t.Errorf("determinant of frame = %v, want %v", m.det(), 1)
-	}
-
-	tests := []struct {
-		a Point
-		b Point
-	}{
-		{m.col(2), z},
-
-		{toFrame(m, m.col(0)), Point{r3.Vector{1, 0, 0}}},
-		{toFrame(m, m.col(1)), Point{r3.Vector{0, 1, 0}}},
-		{toFrame(m, m.col(2)), Point{r3.Vector{0, 0, 1}}},
-
-		{fromFrame(m, Point{r3.Vector{1, 0, 0}}), m.col(0)},
-		{fromFrame(m, Point{r3.Vector{0, 1, 0}}), m.col(1)},
-		{fromFrame(m, Point{r3.Vector{0, 0, 1}}), m.col(2)},
-	}
-
-	for _, test := range tests {
-		if !pointsApproxEquals(test.a, test.b, epsilon) {
-			t.Errorf("%v != %v", test.a, test.b)
-		}
-	}
-}

+ 13 - 15
vendor/github.com/golang/geo/s2/metric.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 

+ 0 - 109
vendor/github.com/golang/geo/s2/metric_test.go

@@ -1,109 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-)
-
-func TestMetric(t *testing.T) {
-	if got := MinWidthMetric.MaxLevel(0.001256); got != 9 {
-		t.Errorf("MinWidthMetric.MaxLevel(0.001256) = %d, want 9", got)
-	}
-
-	// Check that the maximum aspect ratio of an individual cell is consistent
-	// with the global minimums and maximums.
-	if MaxEdgeAspect < 1 {
-		t.Errorf("MaxEdgeAspect = %v, want >= 1", MaxEdgeAspect)
-	}
-	if got := MaxEdgeMetric.Deriv / MinEdgeMetric.Deriv; MaxEdgeAspect > got {
-		t.Errorf("Edge Aspect: %v/%v = %v, want <= %v", MaxEdgeMetric.Deriv, MinEdgeMetric.Deriv, got, MaxDiagAspect)
-	}
-	if MaxDiagAspect < 1 {
-		t.Errorf("MaxDiagAspect = %v, want >= 1", MaxDiagAspect)
-	}
-	if got := MaxDiagMetric.Deriv / MinDiagMetric.Deriv; MaxDiagAspect > got {
-		t.Errorf("Diag Aspect: %v/%v = %v, want <= %v", MaxDiagMetric.Deriv, MinDiagMetric.Deriv, got, MaxDiagAspect)
-	}
-
-	// Check that area is consistent with edge and width.
-	if got := MinWidthMetric.Deriv*MinEdgeMetric.Deriv - 1e-15; MinAreaMetric.Deriv < got {
-		t.Errorf("Min Area: %v*%v = %v, want >= %v", MinWidthMetric.Deriv, MinEdgeMetric.Deriv, got, MinAreaMetric.Deriv)
-	}
-	if got := MaxWidthMetric.Deriv*MaxEdgeMetric.Deriv + 1e-15; MaxAreaMetric.Deriv > got {
-		t.Errorf("Max Area: %v*%v = %v, want <= %v", MaxWidthMetric.Deriv, MaxEdgeMetric.Deriv, got, MaxAreaMetric.Deriv)
-	}
-
-	for level := -2; level <= maxLevel+3; level++ {
-		width := MinWidthMetric.Deriv * math.Pow(2, float64(-level))
-		if level >= maxLevel+3 {
-			width = 0
-		}
-
-		// Check boundary cases (exactly equal to a threshold value).
-		expected := int(math.Max(0, math.Min(maxLevel, float64(level))))
-
-		if MinWidthMetric.MinLevel(width) != expected {
-			t.Errorf("MinWidthMetric.MinLevel(%v) = %v, want %v", width, MinWidthMetric.MinLevel(width), expected)
-		}
-		if MinWidthMetric.MaxLevel(width) != expected {
-			t.Errorf("MinWidthMetric.MaxLevel(%v) = %v, want %v", width, MinWidthMetric.MaxLevel(width), expected)
-		}
-		if MinWidthMetric.ClosestLevel(width) != expected {
-			t.Errorf("MinWidthMetric.ClosestLevel(%v) = %v, want %v", width, MinWidthMetric.ClosestLevel(width), expected)
-		}
-
-		// Also check non-boundary cases.
-		if got := MinWidthMetric.MinLevel(1.2 * width); got != expected {
-			t.Errorf("non-boundary MinWidthMetric.MinLevel(%v) = %v, want %v", 1.2*width, got, expected)
-		}
-		if got := MinWidthMetric.MaxLevel(0.8 * width); got != expected {
-			t.Errorf("non-boundary MinWidthMetric.MaxLevel(%v) = %v, want %v", 0.8*width, got, expected)
-		}
-		if got := MinWidthMetric.ClosestLevel(1.2 * width); got != expected {
-			t.Errorf("non-boundary larger MinWidthMetric.ClosestLevel(%v) = %v, want %v", 1.2*width, got, expected)
-		}
-		if got := MinWidthMetric.ClosestLevel(0.8 * width); got != expected {
-			t.Errorf("non-boundary smaller MinWidthMetric.ClosestLevel(%v) = %v, want %v", 0.8*width, got, expected)
-		}
-	}
-}
-
-func TestMetricSizeRelations(t *testing.T) {
-	// check that min <= avg <= max for each metric.
-	tests := []struct {
-		min Metric
-		avg Metric
-		max Metric
-	}{
-		{MinAngleSpanMetric, AvgAngleSpanMetric, MaxAngleSpanMetric},
-		{MinWidthMetric, AvgWidthMetric, MaxWidthMetric},
-		{MinEdgeMetric, AvgEdgeMetric, MaxEdgeMetric},
-		{MinDiagMetric, AvgDiagMetric, MaxDiagMetric},
-		{MinAreaMetric, AvgAreaMetric, MaxAreaMetric},
-	}
-
-	for _, test := range tests {
-		if test.min.Deriv > test.avg.Deriv {
-			t.Errorf("Min %v > Avg %v", test.min.Deriv, test.avg.Deriv)
-		}
-		if test.avg.Deriv > test.max.Deriv {
-			t.Errorf("Avg %v > Max %v", test.avg.Deriv, test.max.Deriv)
-		}
-	}
-}

+ 88 - 0
vendor/github.com/golang/geo/s2/nthderivative.go

@@ -0,0 +1,88 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// nthDerivativeCoder provides Nth Derivative Coding.
+//   (In signal processing disciplines, this is known as N-th Delta Coding.)
+//
+// Good for varint coding integer sequences with polynomial trends.
+//
+// Instead of coding a sequence of values directly, code its nth-order discrete
+// derivative.  Overflow in integer addition and subtraction makes this a
+// lossless transform.
+//
+//                                       constant     linear      quadratic
+//                                        trend       trend         trend
+//                                      /        \  /        \  /           \_
+// input                               |0  0  0  0  1  2  3  4  9  16  25  36
+// 0th derivative(identity)            |0  0  0  0  1  2  3  4  9  16  25  36
+// 1st derivative(delta coding)        |   0  0  0  1  1  1  1  5   7   9  11
+// 2nd derivative(linear prediction)   |      0  0  1  0  0  0  4   2   2   2
+//                                      -------------------------------------
+//                                      0  1  2  3  4  5  6  7  8   9  10  11
+//                                                  n in sequence
+//
+// Higher-order codings can break even or be detrimental on other sequences.
+//
+//                                           random            oscillating
+//                                      /               \  /                  \_
+// input                               |5  9  6  1   8  8  2 -2   4  -4   6  -6
+// 0th derivative(identity)            |5  9  6  1   8  8  2 -2   4  -4   6  -6
+// 1st derivative(delta coding)        |   4 -3 -5   7  0 -6 -4   6  -8  10 -12
+// 2nd derivative(linear prediction)   |     -7 -2  12 -7 -6  2  10 -14  18 -22
+//                                      ---------------------------------------
+//                                      0  1  2  3  4   5  6  7   8   9  10  11
+//                                                  n in sequence
+//
+// Note that the nth derivative isn't available until sequence item n.  Earlier
+// values are coded at lower order.  For the above table, read 5 4 -7 -2 12 ...
+type nthDerivativeCoder struct {
+	n, m   int
+	memory [10]int32
+}
+
+// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative).
+// n must be within [0,10].
+func newNthDerivativeCoder(n int) *nthDerivativeCoder {
+	c := &nthDerivativeCoder{n: n}
+	if n < 0 || n > len(c.memory) {
+		panic("unsupported n. Must be within [0,10].")
+	}
+	return c
+}
+
+func (c *nthDerivativeCoder) encode(k int32) int32 {
+	for i := 0; i < c.m; i++ {
+		delta := k - c.memory[i]
+		c.memory[i] = k
+		k = delta
+	}
+	if c.m < c.n {
+		c.memory[c.m] = k
+		c.m++
+	}
+	return k
+}
+
+func (c *nthDerivativeCoder) decode(k int32) int32 {
+	if c.m < c.n {
+		c.m++
+	}
+	for i := c.m - 1; i >= 0; i-- {
+		c.memory[i] += k
+		k = c.memory[i]
+	}
+	return k
+}

+ 21 - 23
vendor/github.com/golang/geo/s2/paddedcell.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
@@ -118,8 +116,8 @@ func (p PaddedCell) Level() int {
 // Center returns the center of this cell.
 // Center returns the center of this cell.
 func (p PaddedCell) Center() Point {
 func (p PaddedCell) Center() Point {
 	ijSize := sizeIJ(p.level)
 	ijSize := sizeIJ(p.level)
-	si := uint64(2*p.iLo + ijSize)
-	ti := uint64(2*p.jLo + ijSize)
+	si := uint32(2*p.iLo + ijSize)
+	ti := uint32(2*p.jLo + ijSize)
 	return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
 	return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
 }
 }
 
 
@@ -130,8 +128,8 @@ func (p *PaddedCell) Middle() r2.Rect {
 	// time (i.e., for cells where the recursion terminates).
 	// time (i.e., for cells where the recursion terminates).
 	if p.middle.IsEmpty() {
 	if p.middle.IsEmpty() {
 		ijSize := sizeIJ(p.level)
 		ijSize := sizeIJ(p.level)
-		u := stToUV(siTiToST(uint64(2*p.iLo + ijSize)))
-		v := stToUV(siTiToST(uint64(2*p.jLo + ijSize)))
+		u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
+		v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
 		p.middle = r2.Rect{
 		p.middle = r2.Rect{
 			r1.Interval{u - p.padding, u + p.padding},
 			r1.Interval{u - p.padding, u + p.padding},
 			r1.Interval{v - p.padding, v + p.padding},
 			r1.Interval{v - p.padding, v + p.padding},
@@ -164,7 +162,7 @@ func (p PaddedCell) EntryVertex() Point {
 		i += ijSize
 		i += ijSize
 		j += ijSize
 		j += ijSize
 	}
 	}
-	return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
+	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
 }
 }
 
 
 // ExitVertex returns the vertex where the space-filling curve exits this cell.
 // ExitVertex returns the vertex where the space-filling curve exits this cell.
@@ -179,7 +177,7 @@ func (p PaddedCell) ExitVertex() Point {
 	} else {
 	} else {
 		j += ijSize
 		j += ijSize
 	}
 	}
-	return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
+	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
 }
 }
 
 
 // ShrinkToFit returns the smallest CellID that contains all descendants of this
 // ShrinkToFit returns the smallest CellID that contains all descendants of this
@@ -205,8 +203,8 @@ func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
 	}
 	}
 
 
 	ijSize := sizeIJ(p.level)
 	ijSize := sizeIJ(p.level)
-	if rect.X.Contains(stToUV(siTiToST(uint64(2*p.iLo+ijSize)))) ||
-		rect.Y.Contains(stToUV(siTiToST(uint64(2*p.jLo+ijSize)))) {
+	if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) ||
+		rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) {
 		return p.id
 		return p.id
 	}
 	}
 
 

+ 0 - 197
vendor/github.com/golang/geo/s2/paddedcell_test.go

@@ -1,197 +0,0 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-)
-
-func TestPaddedCellMethods(t *testing.T) {
-	// Test the PaddedCell methods that have approximate Cell equivalents.
-	for i := 0; i < 1000; i++ {
-		cid := randomCellID()
-		padding := math.Pow(1e-15, randomFloat64())
-		cell := CellFromCellID(cid)
-		pCell := PaddedCellFromCellID(cid, padding)
-
-		if cell.id != pCell.id {
-			t.Errorf("%v.id = %v, want %v", pCell, pCell.id, cell.id)
-		}
-		if cell.id.Level() != pCell.Level() {
-			t.Errorf("%v.Level() = %v, want %v", pCell, pCell.Level(), cell.id.Level())
-		}
-
-		if padding != pCell.Padding() {
-			t.Errorf("%v.Padding() = %v, want %v", pCell, pCell.Padding(), padding)
-		}
-
-		if got, want := pCell.Bound(), cell.BoundUV().ExpandedByMargin(padding); got != want {
-			t.Errorf("%v.BoundUV() = %v, want %v", pCell, got, want)
-		}
-
-		r := r2.RectFromPoints(cell.id.centerUV()).ExpandedByMargin(padding)
-		if r != pCell.Middle() {
-			t.Errorf("%v.Middle() = %v, want %v", pCell, pCell.Middle(), r)
-		}
-
-		if cell.id.Point() != pCell.Center() {
-			t.Errorf("%v.Center() = %v, want %v", pCell, pCell.Center(), cell.id.Point())
-		}
-		if cid.IsLeaf() {
-			continue
-		}
-
-		children, ok := cell.Children()
-		if !ok {
-			t.Errorf("%v.Children() failed but should not have", cell)
-			continue
-		}
-		for pos := 0; pos < 4; pos++ {
-			i, j := pCell.ChildIJ(pos)
-
-			cellChild := children[pos]
-			pCellChild := PaddedCellFromParentIJ(pCell, i, j)
-			if cellChild.id != pCellChild.id {
-				t.Errorf("%v.id = %v, want %v", pCellChild, pCellChild.id, cellChild.id)
-			}
-			if cellChild.id.Level() != pCellChild.Level() {
-				t.Errorf("%v.Level() = %v, want %v", pCellChild, pCellChild.Level(), cellChild.id.Level())
-			}
-
-			if padding != pCellChild.Padding() {
-				t.Errorf("%v.Padding() = %v, want %v", pCellChild, pCellChild.Padding(), padding)
-			}
-
-			if got, want := pCellChild.Bound(), cellChild.BoundUV().ExpandedByMargin(padding); got != want {
-				t.Errorf("%v.BoundUV() = %v, want %v", pCellChild, got, want)
-			}
-
-			r := r2.RectFromPoints(cellChild.id.centerUV()).ExpandedByMargin(padding)
-			if got := pCellChild.Middle(); !r.ApproxEquals(got) {
-				t.Errorf("%v.Middle() = %v, want %v", pCellChild, got, r)
-			}
-
-			if cellChild.id.Point() != pCellChild.Center() {
-				t.Errorf("%v.Center() = %v, want %v", pCellChild, pCellChild.Center(), cellChild.id.Point())
-			}
-
-		}
-	}
-}
-
-func TestPaddedCellEntryExitVertices(t *testing.T) {
-	for i := 0; i < 1000; i++ {
-		id := randomCellID()
-		unpadded := PaddedCellFromCellID(id, 0)
-		padded := PaddedCellFromCellID(id, 0.5)
-
-		// Check that entry/exit vertices do not depend on padding.
-		if unpadded.EntryVertex() != padded.EntryVertex() {
-			t.Errorf("entry vertex should not depend on padding; %v != %v", unpadded.EntryVertex(), padded.EntryVertex())
-		}
-
-		if unpadded.ExitVertex() != padded.ExitVertex() {
-			t.Errorf("exit vertex should not depend on padding; %v != %v", unpadded.ExitVertex(), padded.ExitVertex())
-		}
-
-		// Check that the exit vertex of one cell is the same as the entry vertex
-		// of the immediately following cell. This also tests wrapping from the
-		// end to the start of the CellID curve with high probability.
-		if got := PaddedCellFromCellID(id.NextWrap(), 0).EntryVertex(); unpadded.ExitVertex() != got {
-			t.Errorf("PaddedCellFromCellID(%v.NextWrap(), 0).EntryVertex() = %v, want %v", id, got, unpadded.ExitVertex())
-		}
-
-		// Check that the entry vertex of a cell is the same as the entry vertex
-		// of its first child, and similarly for the exit vertex.
-		if id.IsLeaf() {
-			continue
-		}
-		if got := PaddedCellFromCellID(id.Children()[0], 0).EntryVertex(); unpadded.EntryVertex() != got {
-			t.Errorf("PaddedCellFromCellID(%v.Children()[0], 0).EntryVertex() = %v, want %v", id, got, unpadded.EntryVertex())
-		}
-		if got := PaddedCellFromCellID(id.Children()[3], 0).ExitVertex(); unpadded.ExitVertex() != got {
-			t.Errorf("PaddedCellFromCellID(%v.Children()[3], 0).ExitVertex() = %v, want %v", id, got, unpadded.ExitVertex())
-		}
-	}
-}
-
-func TestPaddedCellShrinkToFit(t *testing.T) {
-	for iter := 0; iter < 1000; iter++ {
-		// Start with the desired result and work backwards.
-		result := randomCellID()
-		resultUV := result.boundUV()
-		sizeUV := resultUV.Size()
-
-		// Find the biggest rectangle that fits in "result" after padding.
-		// (These calculations ignore numerical errors.)
-		maxPadding := 0.5 * math.Min(sizeUV.X, sizeUV.Y)
-		padding := maxPadding * randomFloat64()
-		maxRect := resultUV.ExpandedByMargin(-padding)
-
-		// Start with a random subset of the maximum rectangle.
-		a := r2.Point{
-			randomUniformFloat64(maxRect.X.Lo, maxRect.X.Hi),
-			randomUniformFloat64(maxRect.Y.Lo, maxRect.Y.Hi),
-		}
-		b := r2.Point{
-			randomUniformFloat64(maxRect.X.Lo, maxRect.X.Hi),
-			randomUniformFloat64(maxRect.Y.Lo, maxRect.Y.Hi),
-		}
-
-		if !result.IsLeaf() {
-			// If the result is not a leaf cell, we must ensure that no child of
-			// result also satisfies the conditions of ShrinkToFit().  We do this
-			// by ensuring that rect intersects at least two children of result
-			// (after padding).
-			useY := oneIn(2)
-			center := result.centerUV().X
-			if useY {
-				center = result.centerUV().Y
-			}
-
-			// Find the range of coordinates that are shared between child cells
-			// along that axis.
-			shared := r1.Interval{center - padding, center + padding}
-			if useY {
-				shared = shared.Intersection(maxRect.Y)
-			} else {
-				shared = shared.Intersection(maxRect.X)
-			}
-			mid := randomUniformFloat64(shared.Lo, shared.Hi)
-
-			if useY {
-				a.Y = randomUniformFloat64(maxRect.Y.Lo, mid)
-				b.Y = randomUniformFloat64(mid, maxRect.Y.Hi)
-			} else {
-				a.X = randomUniformFloat64(maxRect.X.Lo, mid)
-				b.X = randomUniformFloat64(mid, maxRect.X.Hi)
-			}
-		}
-		rect := r2.RectFromPoints(a, b)
-
-		// Choose an arbitrary ancestor as the PaddedCell.
-		initialID := result.Parent(randomUniformInt(result.Level() + 1))
-		pCell := PaddedCellFromCellID(initialID, padding)
-		if got := pCell.ShrinkToFit(rect); got != result {
-			t.Errorf("%v.ShrinkToFit(%v) = %v, want %v", pCell, rect, got, result)
-		}
-	}
-}

+ 157 - 25
vendor/github.com/golang/geo/s2/point.go

@@ -1,23 +1,24 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
+	"fmt"
+	"io"
 	"math"
 	"math"
+	"sort"
 
 
 	"github.com/golang/geo/r3"
 	"github.com/golang/geo/r3"
 	"github.com/golang/geo/s1"
 	"github.com/golang/geo/s1"
@@ -29,6 +30,18 @@ type Point struct {
 	r3.Vector
 	r3.Vector
 }
 }
 
 
+// sortPoints sorts the slice of Points in place.
+func sortPoints(e []Point) {
+	sort.Sort(points(e))
+}
+
+// points implements the Sort interface for slices of Point.
+type points []Point
+
+func (p points) Len() int           { return len(p) }
+func (p points) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 }
+
 // PointFromCoords creates a new normalized point from coordinates.
 // PointFromCoords creates a new normalized point from coordinates.
 //
 //
 // This always returns a valid point. If the given coordinates can not be normalized
 // This always returns a valid point. If the given coordinates can not be normalized
@@ -160,11 +173,7 @@ func PointArea(a, b, c Point) float64 {
 		dmin := s - math.Max(sa, math.Max(sb, sc))
 		dmin := s - math.Max(sa, math.Max(sb, sc))
 		if dmin < 1e-2*s*s*s*s*s {
 		if dmin < 1e-2*s*s*s*s*s {
 			// This triangle is skinny enough to use Girard's formula.
 			// This triangle is skinny enough to use Girard's formula.
-			ab := a.PointCross(b)
-			bc := b.PointCross(c)
-			ac := a.PointCross(c)
-			area := math.Max(0.0, float64(ab.Angle(ac.Vector)-ab.Angle(bc.Vector)+bc.Angle(ac.Vector)))
-
+			area := GirardArea(a, b, c)
 			if dmin < s*0.1*area {
 			if dmin < s*0.1*area {
 				return area
 				return area
 			}
 			}
@@ -176,6 +185,37 @@ func PointArea(a, b, c Point) float64 {
 		math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
 		math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
 }
 }
 
 
+// GirardArea returns the area of the triangle computed using Girard's formula.
+// All points should be unit length, and no two points should be antipodal.
+//
+// This method is about twice as fast as PointArea() but has poor relative
+// accuracy for small triangles. The maximum error is about 5e-15 (about
+// 0.25 square meters on the Earth's surface) and the average error is about
+// 1e-15. These bounds apply to triangles of any size, even as the maximum
+// edge length of the triangle approaches 180 degrees. But note that for
+// such triangles, tiny perturbations of the input points can change the
+// true mathematical area dramatically.
+func GirardArea(a, b, c Point) float64 {
+	// This is equivalent to the usual Girard's formula but is slightly more
+	// accurate, faster to compute, and handles a == b == c without a special
+	// case. PointCross is necessary to get good accuracy when two of
+	// the input points are very close together.
+	ab := a.PointCross(b)
+	bc := b.PointCross(c)
+	ac := a.PointCross(c)
+	area := float64(ab.Angle(ac.Vector) - ab.Angle(bc.Vector) + bc.Angle(ac.Vector))
+	if area < 0 {
+		area = 0
+	}
+	return area
+}
+
+// SignedArea returns a positive value for counterclockwise triangles and a negative
+// value otherwise (similar to PointArea).
+func SignedArea(a, b, c Point) float64 {
+	return float64(RobustSign(a, b, c)) * PointArea(a, b, c)
+}
+
 // TrueCentroid returns the true centroid of the spherical triangle ABC multiplied by the
 // TrueCentroid returns the true centroid of the spherical triangle ABC multiplied by the
 // signed area of spherical triangle ABC. The result is not normalized.
 // signed area of spherical triangle ABC. The result is not normalized.
 // The reasons for multiplying by the signed area are (1) this is the quantity
 // The reasons for multiplying by the signed area are (1) this is the quantity
@@ -301,11 +341,103 @@ func (p Point) IntersectsCell(c Cell) bool {
 	return c.ContainsPoint(p)
 	return c.ContainsPoint(p)
 }
 }
 
 
+// ContainsPoint reports if this Point contains the other Point.
+// (This method is named to satisfy the Region interface.)
+func (p Point) ContainsPoint(other Point) bool {
+	return p.Contains(other)
+}
+
+// CellUnionBound computes a covering of the Point.
+func (p Point) CellUnionBound() []CellID {
+	return p.CapBound().CellUnionBound()
+}
+
 // Contains reports if this Point contains the other Point.
 // Contains reports if this Point contains the other Point.
+// (This method matches all other s2 types where the reflexive Contains
+// method does not contain the type's name.)
 func (p Point) Contains(other Point) bool { return p == other }
 func (p Point) Contains(other Point) bool { return p == other }
 
 
-// TODO: Differences from C++
-// Rotate
-// Angle
-// TurnAngle
-// SignedArea
+// Encode encodes the Point.
+func (p Point) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	p.encode(e)
+	return e.err
+}
+
+func (p Point) encode(e *encoder) {
+	e.writeInt8(encodingVersion)
+	e.writeFloat64(p.X)
+	e.writeFloat64(p.Y)
+	e.writeFloat64(p.Z)
+}
+
+// Decode decodes the Point.
+func (p *Point) Decode(r io.Reader) error {
+	d := &decoder{r: asByteReader(r)}
+	p.decode(d)
+	return d.err
+}
+
+func (p *Point) decode(d *decoder) {
+	version := d.readInt8()
+	if d.err != nil {
+		return
+	}
+	if version != encodingVersion {
+		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
+		return
+	}
+	p.X = d.readFloat64()
+	p.Y = d.readFloat64()
+	p.Z = d.readFloat64()
+}
+
+// Angle returns the interior angle at the vertex B in the triangle ABC. The
+// return value is always in the range [0, pi]. All points should be
+// normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c.
+//
+// The angle is undefined if A or C is diametrically opposite from B, and
+// becomes numerically unstable as the length of edge AB or BC approaches
+// 180 degrees.
+func Angle(a, b, c Point) s1.Angle {
+	return a.PointCross(b).Angle(c.PointCross(b).Vector)
+}
+
+// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The
+// return value is positive if ABC is counterclockwise and negative otherwise.
+// If you imagine an ant walking from A to B to C, this is the angle that the
+// ant turns at vertex B (positive = left = CCW, negative = right = CW).
+// This quantity is also known as the "geodesic curvature" at B.
+//
+// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct
+// a,b,c. The result is undefined if (a == b || b == c), but is either
+// -Pi or Pi if (a == c). All points should be normalized.
+func TurnAngle(a, b, c Point) s1.Angle {
+	// We use PointCross to get good accuracy when two points are very
+	// close together, and RobustSign to ensure that the sign is correct for
+	// turns that are close to 180 degrees.
+	angle := a.PointCross(b).Angle(b.PointCross(c).Vector)
+
+	// Don't return RobustSign * angle because it is legal to have (a == c).
+	if RobustSign(a, b, c) == CounterClockwise {
+		return angle
+	}
+	return -angle
+}
+
+// Rotate the given point about the given axis by the given angle. p and
+// axis must be unit length; angle has no restrictions (e.g., it can be
+// positive, negative, greater than 360 degrees, etc).
+func Rotate(p, axis Point, angle s1.Angle) Point {
+	// Let M be the plane through P that is perpendicular to axis, and let
+	// center be the point where M intersects axis. We construct a
+	// right-handed orthogonal frame (dx, dy, center) such that dx is the
+	// vector from center to P, and dy has the same length as dx. The
+	// result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center).
+	center := axis.Mul(p.Dot(axis.Vector))
+	dx := p.Sub(center)
+	dy := axis.Cross(p.Vector)
+	// Mathematically the result is unit length, but normalization is necessary
+	// to ensure that numerical errors don't accumulate.
+	return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()}
+}

+ 0 - 384
vendor/github.com/golang/geo/s2/point_test.go

@@ -1,384 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-func TestOriginPoint(t *testing.T) {
-	if math.Abs(OriginPoint().Norm()-1) > 1e-15 {
-		t.Errorf("Origin point norm = %v, want 1", OriginPoint().Norm())
-	}
-
-	// The point chosen below is about 66km from the north pole towards the East
-	// Siberian Sea. The purpose of the stToUV(2/3) calculation is to keep the
-	// origin as far away as possible from the longitudinal edges of large
-	// Cells. (The line of longitude through the chosen point is always 1/3
-	// or 2/3 of the way across any Cell with longitudinal edges that it
-	// passes through.)
-	p := Point{r3.Vector{-0.01, 0.01 * stToUV(2.0/3), 1}}
-	if !p.ApproxEqual(OriginPoint()) {
-		t.Errorf("Origin point should fall in the Siberian Sea, but does not.")
-	}
-
-	// Check that the origin is not too close to either pole.
-	// The Earth's mean radius in kilometers (according to NASA).
-	const earthRadiusKm = 6371.01
-	if dist := math.Acos(OriginPoint().Z) * earthRadiusKm; dist <= 50 {
-		t.Errorf("Origin point is to close to the North Pole. Got %v, want >= 50km", dist)
-	}
-}
-
-func TestPointCross(t *testing.T) {
-	tests := []struct {
-		p1x, p1y, p1z, p2x, p2y, p2z, norm float64
-	}{
-		{1, 0, 0, 1, 0, 0, 1},
-		{1, 0, 0, 0, 1, 0, 2},
-		{0, 1, 0, 1, 0, 0, 2},
-		{1, 2, 3, -4, 5, -6, 2 * math.Sqrt(934)},
-	}
-	for _, test := range tests {
-		p1 := Point{r3.Vector{test.p1x, test.p1y, test.p1z}}
-		p2 := Point{r3.Vector{test.p2x, test.p2y, test.p2z}}
-		result := p1.PointCross(p2)
-		if !float64Eq(result.Norm(), test.norm) {
-			t.Errorf("|%v ⨯ %v| = %v, want %v", p1, p2, result.Norm(), test.norm)
-		}
-		if x := result.Dot(p1.Vector); !float64Eq(x, 0) {
-			t.Errorf("|(%v ⨯ %v) · %v| = %v, want 0", p1, p2, p1, x)
-		}
-		if x := result.Dot(p2.Vector); !float64Eq(x, 0) {
-			t.Errorf("|(%v ⨯ %v) · %v| = %v, want 0", p1, p2, p2, x)
-		}
-	}
-}
-
-func TestPointDistance(t *testing.T) {
-	tests := []struct {
-		x1, y1, z1 float64
-		x2, y2, z2 float64
-		want       float64 // radians
-	}{
-		{1, 0, 0, 1, 0, 0, 0},
-		{1, 0, 0, 0, 1, 0, math.Pi / 2},
-		{1, 0, 0, 0, 1, 1, math.Pi / 2},
-		{1, 0, 0, -1, 0, 0, math.Pi},
-		{1, 2, 3, 2, 3, -1, 1.2055891055045298},
-	}
-	for _, test := range tests {
-		p1 := Point{r3.Vector{test.x1, test.y1, test.z1}}
-		p2 := Point{r3.Vector{test.x2, test.y2, test.z2}}
-		if a := p1.Distance(p2).Radians(); !float64Eq(a, test.want) {
-			t.Errorf("%v.Distance(%v) = %v, want %v", p1, p2, a, test.want)
-		}
-		if a := p2.Distance(p1).Radians(); !float64Eq(a, test.want) {
-			t.Errorf("%v.Distance(%v) = %v, want %v", p2, p1, a, test.want)
-		}
-	}
-}
-
-func TestChordAngleBetweenPoints(t *testing.T) {
-	for iter := 0; iter < 10; iter++ {
-		m := randomFrame()
-		x := m.col(0)
-		y := m.col(1)
-		z := m.col(2)
-
-		if got := ChordAngleBetweenPoints(z, z).Angle(); got != 0 {
-			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want 0", z, z, got)
-		}
-		if got, want := ChordAngleBetweenPoints(Point{z.Mul(-1)}, z).Angle().Radians(), math.Pi; !float64Near(got, want, 1e-7) {
-			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", z.Mul(-1), z, got, want)
-		}
-		if got, want := ChordAngleBetweenPoints(x, z).Angle().Radians(), math.Pi/2; !float64Eq(got, want) {
-			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", x, z, got, want)
-		}
-		w := Point{y.Add(z.Vector).Normalize()}
-		if got, want := ChordAngleBetweenPoints(w, z).Angle().Radians(), math.Pi/4; !float64Eq(got, want) {
-			t.Errorf("ChordAngleBetweenPoints(%v, %v) = %v, want %v", w, z, got, want)
-		}
-	}
-}
-
-func TestPointApproxEqual(t *testing.T) {
-	tests := []struct {
-		x1, y1, z1 float64
-		x2, y2, z2 float64
-		want       bool
-	}{
-		{1, 0, 0, 1, 0, 0, true},
-		{1, 0, 0, 0, 1, 0, false},
-		{1, 0, 0, 0, 1, 1, false},
-		{1, 0, 0, -1, 0, 0, false},
-		{1, 2, 3, 2, 3, -1, false},
-		{1, 0, 0, 1 * (1 + epsilon), 0, 0, true},
-		{1, 0, 0, 1 * (1 - epsilon), 0, 0, true},
-		{1, 0, 0, 1 + epsilon, 0, 0, true},
-		{1, 0, 0, 1 - epsilon, 0, 0, true},
-		{1, 0, 0, 1, epsilon, 0, true},
-		{1, 0, 0, 1, epsilon, epsilon, false},
-		{1, epsilon, 0, 1, -epsilon, epsilon, false},
-	}
-	for _, test := range tests {
-		p1 := Point{r3.Vector{test.x1, test.y1, test.z1}}
-		p2 := Point{r3.Vector{test.x2, test.y2, test.z2}}
-		if got := p1.ApproxEqual(p2); got != test.want {
-			t.Errorf("%v.ApproxEqual(%v), got %v want %v", p1, p2, got, test.want)
-		}
-	}
-}
-
-var (
-	pz   = Point{r3.Vector{0, 0, 1}}
-	p000 = Point{r3.Vector{1, 0, 0}}
-	p045 = Point{r3.Vector{1, 1, 0}}
-	p090 = Point{r3.Vector{0, 1, 0}}
-	p180 = Point{r3.Vector{-1, 0, 0}}
-	// Degenerate triangles.
-	pr = Point{r3.Vector{0.257, -0.5723, 0.112}}
-	pq = Point{r3.Vector{-0.747, 0.401, 0.2235}}
-
-	// For testing the Girard area fall through case.
-	g1 = Point{r3.Vector{1, 1, 1}}
-	g2 = Point{g1.Add(pr.Mul(1e-15)).Normalize()}
-	g3 = Point{g1.Add(pq.Mul(1e-15)).Normalize()}
-)
-
-func TestPointArea(t *testing.T) {
-	epsilon := 1e-10
-	tests := []struct {
-		a, b, c  Point
-		want     float64
-		nearness float64
-	}{
-		{p000, p090, pz, math.Pi / 2.0, 0},
-		// This test case should give 0 as the epsilon, but either Go or C++'s value for Pi,
-		// or the accuracy of the multiplications along the way, cause a difference ~15 decimal
-		// places into the result, so it is not quite a difference of 0.
-		{p045, pz, p180, 3.0 * math.Pi / 4.0, 1e-14},
-		// Make sure that Area has good *relative* accuracy even for very small areas.
-		{Point{r3.Vector{epsilon, 0, 1}}, Point{r3.Vector{0, epsilon, 1}}, pz, 0.5 * epsilon * epsilon, 1e-14},
-		// Make sure that it can handle degenerate triangles.
-		{pr, pr, pr, 0.0, 0},
-		{pr, pq, pr, 0.0, 1e-15},
-		{p000, p045, p090, 0.0, 0},
-		// Try a very long and skinny triangle.
-		{p000, Point{r3.Vector{1, 1, epsilon}}, p090, 5.8578643762690495119753e-11, 1e-9},
-		// TODO(roberts):
-		// C++ includes a 10,000 loop of perterbations to test out the Girard area
-		// computation is less than some noise threshold.
-		// Do we need that many? Will one or two suffice?
-		{g1, g2, g3, 0.0, 1e-15},
-	}
-	for _, test := range tests {
-		if got := PointArea(test.a, test.b, test.c); !float64Near(got, test.want, test.nearness) {
-			t.Errorf("PointArea(%v, %v, %v), got %v want %v", test.a, test.b, test.c, got, test.want)
-		}
-	}
-}
-
-func TestPointAreaQuarterHemisphere(t *testing.T) {
-	tests := []struct {
-		a, b, c, d, e Point
-		want          float64
-	}{
-		// Triangles with near-180 degree edges that sum to a quarter-sphere.
-		{Point{r3.Vector{1, 0.1 * epsilon, epsilon}}, p000, p045, p180, pz, math.Pi},
-		// Four other triangles that sum to a quarter-sphere.
-		{Point{r3.Vector{1, 1, epsilon}}, p000, p045, p180, pz, math.Pi},
-		// TODO(roberts):
-		// C++ Includes a loop of 100 perturbations on a hemisphere for more tests.
-	}
-	for _, test := range tests {
-		area := PointArea(test.a, test.b, test.c) +
-			PointArea(test.a, test.c, test.d) +
-			PointArea(test.a, test.d, test.e) +
-			PointArea(test.a, test.e, test.b)
-
-		if !float64Eq(area, test.want) {
-			t.Errorf("Adding up 4 quarter hemispheres with PointArea(), got %v want %v", area, test.want)
-		}
-	}
-}
-
-func TestPointPlanarCentroid(t *testing.T) {
-	tests := []struct {
-		name             string
-		p0, p1, p2, want Point
-	}{
-		{
-			name: "xyz axis",
-			p0:   Point{r3.Vector{0, 0, 1}},
-			p1:   Point{r3.Vector{0, 1, 0}},
-			p2:   Point{r3.Vector{1, 0, 0}},
-			want: Point{r3.Vector{1. / 3, 1. / 3, 1. / 3}},
-		},
-		{
-			name: "Same point",
-			p0:   Point{r3.Vector{1, 0, 0}},
-			p1:   Point{r3.Vector{1, 0, 0}},
-			p2:   Point{r3.Vector{1, 0, 0}},
-			want: Point{r3.Vector{1, 0, 0}},
-		},
-	}
-
-	for _, test := range tests {
-		got := PlanarCentroid(test.p0, test.p1, test.p2)
-		if !got.ApproxEqual(test.want) {
-			t.Errorf("%s: PlanarCentroid(%v, %v, %v) = %v, want %v", test.name, test.p0, test.p1, test.p2, got, test.want)
-		}
-	}
-}
-
-func TestPointTrueCentroid(t *testing.T) {
-	// Test TrueCentroid with very small triangles. This test assumes that
-	// the triangle is small enough so that it is nearly planar.
-	// The centroid of a planar triangle is at the intersection of its
-	// medians, which is two-thirds of the way along each median.
-	for i := 0; i < 100; i++ {
-		f := randomFrame()
-		p := f.col(0)
-		x := f.col(1)
-		y := f.col(2)
-		d := 1e-4 * math.Pow(1e-4, randomFloat64())
-
-		// Make a triangle with two equal sides.
-		p0 := Point{p.Sub(x.Mul(d)).Normalize()}
-		p1 := Point{p.Add(x.Mul(d)).Normalize()}
-		p2 := Point{p.Add(y.Mul(d * 3)).Normalize()}
-		want := Point{p.Add(y.Mul(d)).Normalize()}
-
-		got := TrueCentroid(p0, p1, p2).Normalize()
-		if got.Distance(want.Vector) >= 2e-8 {
-			t.Errorf("TrueCentroid(%v, %v, %v).Normalize() = %v, want %v", p0, p1, p2, got, want)
-		}
-
-		// Make a triangle with a right angle.
-		p0 = p
-		p1 = Point{p.Add(x.Mul(d * 3)).Normalize()}
-		p2 = Point{p.Add(y.Mul(d * 6)).Normalize()}
-		want = Point{p.Add(x.Add(y.Mul(2)).Mul(d)).Normalize()}
-
-		got = TrueCentroid(p0, p1, p2).Normalize()
-		if got.Distance(want.Vector) >= 2e-8 {
-			t.Errorf("TrueCentroid(%v, %v, %v).Normalize() = %v, want %v", p0, p1, p2, got, want)
-		}
-	}
-}
-
-func TestPointRegularPoints(t *testing.T) {
-	// Conversion to/from degrees has a little more variability than the default epsilon.
-	const epsilon = 1e-13
-	center := PointFromLatLng(LatLngFromDegrees(80, 135))
-	radius := s1.Degree * 20
-	pts := regularPoints(center, radius, 4)
-
-	if len(pts) != 4 {
-		t.Errorf("regularPoints with 4 vertices should have 4 vertices, got %d", len(pts))
-	}
-
-	lls := []LatLng{
-		LatLngFromPoint(pts[0]),
-		LatLngFromPoint(pts[1]),
-		LatLngFromPoint(pts[2]),
-		LatLngFromPoint(pts[3]),
-	}
-	cll := LatLngFromPoint(center)
-
-	// Make sure that the radius is correct.
-	wantDist := 20.0
-	for i, ll := range lls {
-		if got := cll.Distance(ll).Degrees(); !float64Near(got, wantDist, epsilon) {
-			t.Errorf("Vertex %d distance from center = %v, want %v", i, got, wantDist)
-		}
-	}
-
-	// Make sure the angle between each point is correct.
-	wantAngle := math.Pi / 2
-	for i := 0; i < len(pts); i++ {
-		// Mod the index by 4 to wrap the values at each end.
-		v0, v1, v2 := pts[(4+i+1)%4], pts[(4+i)%4], pts[(4+i-1)%4]
-		if got := float64(v0.Sub(v1.Vector).Angle(v2.Sub(v1.Vector))); !float64Eq(got, wantAngle) {
-			t.Errorf("(%v-%v).Angle(%v-%v) = %v, want %v", v0, v1, v1, v2, got, wantAngle)
-		}
-	}
-
-	// Make sure that all edges of the polygon have the same length.
-	wantLength := 27.990890717782829
-	for i := 0; i < len(lls); i++ {
-		ll1, ll2 := lls[i], lls[(i+1)%4]
-		if got := ll1.Distance(ll2).Degrees(); !float64Near(got, wantLength, epsilon) {
-			t.Errorf("%v.Distance(%v) = %v, want %v", ll1, ll2, got, wantLength)
-		}
-	}
-
-	// Spot check an actual coordinate now that we know the points are spaced
-	// evenly apart at the same angles and radii.
-	if got, want := lls[0].Lat.Degrees(), 62.162880741097204; !float64Near(got, want, epsilon) {
-		t.Errorf("%v.Lat = %v, want %v", lls[0], got, want)
-	}
-	if got, want := lls[0].Lng.Degrees(), 103.11051028343407; !float64Near(got, want, epsilon) {
-		t.Errorf("%v.Lng = %v, want %v", lls[0], got, want)
-	}
-}
-
-func TestPointRegion(t *testing.T) {
-	p := Point{r3.Vector{1, 0, 0}}
-	r := Point{r3.Vector{1, 0, 0}}
-	if !r.Contains(p) {
-		t.Errorf("%v.Contains(%v) = false, want true", r, p)
-	}
-	if !r.Contains(r) {
-		t.Errorf("%v.Contains(%v) = false, want true", r, r)
-	}
-	if s := (Point{r3.Vector{1, 0, 1}}); r.Contains(s) {
-		t.Errorf("%v.Contains(%v) = true, want false", r, s)
-	}
-	if got, want := r.CapBound(), CapFromPoint(p); !got.ApproxEqual(want) {
-		t.Errorf("%v.CapBound() = %v, want %v", r, got, want)
-	}
-	if got, want := r.RectBound(), RectFromLatLng(LatLngFromPoint(p)); !rectsApproxEqual(got, want, epsilon, epsilon) {
-		t.Errorf("%v.RectBound() = %v, want %v", r, got, want)
-	}
-
-	// The leaf cell containing a point is still much larger than the point.
-	cell := CellFromPoint(p)
-	if r.ContainsCell(cell) {
-		t.Errorf("%v.ContainsCell(%v) = true, want false", r, cell)
-	}
-	if !r.IntersectsCell(cell) {
-		t.Errorf("%v.IntersectsCell(%v) = false, want true", r, cell)
-	}
-}
-
-func BenchmarkPointArea(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		PointArea(p000, p090, pz)
-	}
-}
-
-func BenchmarkPointAreaGirardCase(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		PointArea(g1, g2, g3)
-	}
-}

+ 319 - 0
vendor/github.com/golang/geo/s2/pointcompression.go

@@ -0,0 +1,319 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/golang/geo/r3"
+)
+
+// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded.
+// On decode, this defends against malicious encodings that try and have us exceed RAM.
+const maxEncodedVertices = 50000000
+
+// xyzFaceSiTi represents the XYZ and face,si,ti coordinates of a Point
+// and, if this point is equal to the center of a Cell, the level of this cell
+// (-1 otherwise). This is used for Loops and Polygons to store data in a more
+// compressed format.
+type xyzFaceSiTi struct {
+	xyz    Point
+	face   int
+	si, ti uint32
+	level  int
+}
+
+const derivativeEncodingOrder = 2
+
+func appendFace(faces []faceRun, face int) []faceRun {
+	if len(faces) == 0 || faces[len(faces)-1].face != face {
+		return append(faces, faceRun{face, 1})
+	}
+	faces[len(faces)-1].count++
+	return faces
+}
+
+// encodePointsCompressed uses an optimized compressed format to encode the given values.
+func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) {
+	var faces []faceRun
+	for _, v := range vertices {
+		faces = appendFace(faces, v.face)
+	}
+	encodeFaces(e, faces)
+
+	type piQi struct {
+		pi, qi uint32
+	}
+	verticesPiQi := make([]piQi, len(vertices))
+	for i, v := range vertices {
+		verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)}
+	}
+	piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder)
+	for i, v := range verticesPiQi {
+		f := encodePointCompressed
+		if i == 0 {
+			// The first point will be just the (pi, qi) coordinates
+			// of the Point. NthDerivativeCoder will not save anything
+			// in that case, so we encode in fixed format rather than varint
+			// to avoid the varint overhead.
+			f = encodeFirstPointFixedLength
+		}
+		f(e, v.pi, v.qi, level, piCoder, qiCoder)
+	}
+
+	var offCenter []int
+	for i, v := range vertices {
+		if v.level != level {
+			offCenter = append(offCenter, i)
+		}
+	}
+	e.writeUvarint(uint64(len(offCenter)))
+	for _, idx := range offCenter {
+		e.writeUvarint(uint64(idx))
+		e.writeFloat64(vertices[idx].xyz.X)
+		e.writeFloat64(vertices[idx].xyz.Y)
+		e.writeFloat64(vertices[idx].xyz.Z)
+	}
+}
+
+func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
+	// Do not ZigZagEncode the first point, since it cannot be negative.
+	codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi))
+	// Interleave to reduce overhead from two partial bytes to one.
+	interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi))
+
+	// Write as little endian.
+	bytesRequired := (level + 7) / 8 * 2
+	for i := 0; i < bytesRequired; i++ {
+		e.writeUint8(uint8(interleaved))
+		interleaved >>= 8
+	}
+}
+
+// encodePointCompressed encodes points into e.
+// Given a sequence of Points assumed to be the center of level-k cells,
+// compresses it into a stream using the following method:
+// - decompose the points into (face, si, ti) tuples.
+// - run-length encode the faces, combining face number and count into a
+//     varint32. See the faceRun struct.
+// - right shift the (si, ti) to remove the part that's constant for all cells
+//     of level-k. The result is called the (pi, qi) space.
+// - 2nd derivative encode the pi and qi sequences (linear prediction)
+// - zig-zag encode all derivative values but the first, which cannot be
+//     negative
+// - interleave the zig-zag encoded values
+// - encode the first interleaved value in a fixed length encoding
+//     (varint would make this value larger)
+// - encode the remaining interleaved values as varint64s, as the
+//     derivative encoding should make the values small.
+// In addition, provides a lossless method to compress a sequence of points even
+// if some points are not the center of level-k cells. These points are stored
+// exactly, using 3 double precision values, after the above encoded string,
+// together with their index in the sequence (this leads to some redundancy - it
+// is expected that only a small fraction of the points are not cell centers).
+//
+// To encode leaf cells, this requires 8 bytes for the first vertex plus
+// an average of 3.8 bytes for each additional vertex, when computed on
+// Google's geographic repository.
+func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
+	// ZigZagEncode, as varint requires the maximum number of bytes for
+	// negative numbers.
+	zzPi := zigzagEncode(piCoder.encode(int32(pi)))
+	zzQi := zigzagEncode(qiCoder.encode(int32(qi)))
+	// Interleave to reduce overhead from two partial bytes to one.
+	interleaved := interleaveUint32(zzPi, zzQi)
+	e.writeUvarint(interleaved)
+}
+
+type faceRun struct {
+	face, count int
+}
+
+func decodeFaceRun(d *decoder) faceRun {
+	faceAndCount := d.readUvarint()
+	ret := faceRun{
+		face:  int(faceAndCount % numFaces),
+		count: int(faceAndCount / numFaces),
+	}
+	if ret.count <= 0 && d.err == nil {
+		d.err = errors.New("non-positive count for face run")
+	}
+	return ret
+}
+
+func decodeFaces(numVertices int, d *decoder) []faceRun {
+	var frs []faceRun
+	for nparsed := 0; nparsed < numVertices; {
+		fr := decodeFaceRun(d)
+		if d.err != nil {
+			return nil
+		}
+		frs = append(frs, fr)
+		nparsed += fr.count
+	}
+	return frs
+}
+
+// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face.
+func encodeFaceRun(e *encoder, fr faceRun) {
+	// It isn't necessary to encode the number of faces left for the last run,
+	// but since this would only help if there were more than 21 faces, it will
+	// be a small overall savings, much smaller than the bound encoding.
+	coded := numFaces*uint64(fr.count) + uint64(fr.face)
+	e.writeUvarint(coded)
+}
+
+func encodeFaces(e *encoder, frs []faceRun) {
+	for _, fr := range frs {
+		encodeFaceRun(e, fr)
+	}
+}
+
+type facesIterator struct {
+	faces []faceRun
+	// Number of times the current face has been emitted so far.
+	numCurrentFaceShown int
+	curFace             int
+}
+
+func (fi *facesIterator) next() (ok bool) {
+	if len(fi.faces) == 0 {
+		return false
+	}
+	fi.curFace = fi.faces[0].face
+	fi.numCurrentFaceShown++
+
+	// Advance to the next face run if the current one is exhausted.
+	if fi.faces[0].count <= fi.numCurrentFaceShown {
+		fi.faces = fi.faces[1:]
+		fi.numCurrentFaceShown = 0
+	}
+
+	return true
+}
+
+func decodePointsCompressed(d *decoder, level int, target []Point) {
+	faces := decodeFaces(len(target), d)
+
+	piCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+	qiCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+
+	iter := facesIterator{faces: faces}
+	for i := range target {
+		decodeFn := decodePointCompressed
+		if i == 0 {
+			decodeFn = decodeFirstPointFixedLength
+		}
+		pi, qi := decodeFn(d, level, piCoder, qiCoder)
+		if ok := iter.next(); !ok && d.err == nil {
+			d.err = fmt.Errorf("ran out of faces at target %d", i)
+			return
+		}
+		target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)}
+	}
+
+	numOffCenter := int(d.readUvarint())
+	if d.err != nil {
+		return
+	}
+	if numOffCenter > len(target) {
+		d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target))
+		return
+	}
+	for i := 0; i < numOffCenter; i++ {
+		idx := int(d.readUvarint())
+		if d.err != nil {
+			return
+		}
+		if idx >= len(target) {
+			d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target))
+			return
+		}
+		target[idx].X = d.readFloat64()
+		target[idx].Y = d.readFloat64()
+		target[idx].Z = d.readFloat64()
+	}
+}
+
+func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+	bytesToRead := (level + 7) / 8 * 2
+	var interleaved uint64
+	for i := 0; i < bytesToRead; i++ {
+		rr := d.readUint8()
+		interleaved |= (uint64(rr) << uint(i*8))
+	}
+
+	piCoded, qiCoded := deinterleaveUint32(interleaved)
+
+	return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded)))
+}
+
+func zigzagEncode(x int32) uint32 {
+	return (uint32(x) << 1) ^ uint32(x>>31)
+}
+
+func zigzagDecode(x uint32) int32 {
+	return int32((x >> 1) ^ uint32((int32(x&1)<<31)>>31))
+}
+
+func decodePointCompressed(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+	interleavedZigZagEncodedDerivPiQi := d.readUvarint()
+	piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi)
+	return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag)))
+}
+
+// We introduce a new coordinate system (pi, qi), which is (si, ti)
+// with the bits that are constant for cells of that level shifted
+// off to the right.
+// si = round(s * 2^31)
+// pi = si >> (31 - level)
+//    = floor(s * 2^level)
+// If the point has been snapped to the level, the bits that are
+// shifted off will be a 1 in the msb, then 0s after that, so the
+// fractional part discarded by the cast is (close to) 0.5.
+
+// stToPiQi returns the value transformed to the PiQi coordinate space.
+func stToPiQi(s float64, level uint) uint32 {
+	return uint32(s * float64(int(1)<<level))
+}
+
+// siTitoPiQi returns the value transformed into the PiQi coordinate space.
+// encodeFirstPointFixedLength encodes the return value using level bits,
+// so we clamp si to the range [0, 2**level - 1] before trying to encode
+// it. This is okay because if si == maxSiTi, then it is not a cell center
+// anyway and will be encoded separately as an off-center point.
+func siTitoPiQi(siTi uint32, level int) uint32 {
+	s := uint(siTi)
+	const max = maxSiTi - 1
+	if s > max {
+		s = max
+	}
+
+	return uint32(s >> (maxLevel + 1 - uint(level)))
+}
+
+// piQiToST returns the value transformed to ST space.
+func piQiToST(pi uint32, level int) float64 {
+	// We want to recover the position at the center of the cell. If the point
+	// was snapped to the center of the cell, then math.Modf(s * 2^level) == 0.5.
+	// Inverting STtoPiQi gives:
+	// s = (pi + 0.5) / 2^level.
+	return (float64(pi) + 0.5) / float64(int(1)<<uint(level))
+}
+
+func facePiQitoXYZ(face int, pi, qi uint32, level int) r3.Vector {
+	return faceUVToXYZ(face, stToUV(piQiToST(pi, level)), stToUV(piQiToST(qi, level))).Normalize()
+}

File diff suppressed because it is too large
+ 794 - 97
vendor/github.com/golang/geo/s2/polygon.go


+ 0 - 342
vendor/github.com/golang/geo/s2/polygon_test.go

@@ -1,342 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"testing"
-)
-
-const (
-	// A set of nested loops around the LatLng point 0:0.
-	// Every vertex of nearLoop0 is also a vertex of nearLoop1.
-	nearPoint    = "0:0"
-	nearLoop0    = "-1:0, 0:1, 1:0, 0:-1;"
-	nearLoop1    = "-1:-1, -1:0, -1:1, 0:1, 1:1, 1:0, 1:-1, 0:-1;"
-	nearLoop2    = "-1:-2, -2:5, 5:-2;"
-	nearLoop3    = "-2:-2, -3:6, 6:-3;"
-	nearLoopHemi = "0:-90, -90:0, 0:90, 90:0;"
-
-	// A set of nested loops around the LatLng point 0:180. Every vertex of
-	// farLoop0 and farLoop2 belongs to farLoop1, and all the loops except
-	// farLoop2 are non-convex.
-	farPoint    = "0:180"
-	farLoop0    = "0:179, 1:180, 0:-179, 2:-180;"
-	farLoop1    = "0:179, -1:179, 1:180, -1:-179, 0:-179, 3:-178, 2:-180, 3:178;"
-	farLoop2    = "3:-178, 3:178, -1:179, -1:-179;"
-	farLoop3    = "-3:-178, 4:-177, 4:177, -3:178, -2:179;"
-	farLoopHemi = "0:-90, 60:90, -60:90;"
-
-	// A set of nested loops around the LatLng point -90:0.
-	southLoopPoint = "-89.9999:0.001"
-	southLoop0a    = "-90:0, -89.99:0.01, -89.99:0;"
-	southLoop0b    = "-90:0, -89.99:0.03, -89.99:0.02;"
-	southLoop0c    = "-90:0, -89.99:0.05, -89.99:0.04;"
-	southLoop1     = "-90:0, -89.9:0.1, -89.9:-0.1;"
-	southLoop2     = "-90:0, -89.8:0.2, -89.8:-0.2;"
-	southLoopHemi  = "0:-180, 0:60, 0:-60;"
-
-	// Two different loops that surround all the near and far loops except
-	// for the hemispheres.
-	nearFarLoop1 = "-1:-9, -9:-9, -9:9, 9:9, 9:-9, 1:-9, " +
-		"1:-175, 9:-175, 9:175, -9:175, -9:-175, -1:-175;"
-	nearFarLoop2 = "-2:15, -2:170, -8:-175, 8:-175, " +
-		"2:170, 2:15, 8:-4, -8:-4;"
-
-	// Loop that results from intersection of other loops.
-	farHemiSouthHemiLoop = "0:-180, 0:90, -60:90, 0:-90;"
-
-	// Rectangles that form a cross, with only shared vertices, no crossing edges.
-	// Optional holes outside the intersecting region. 1 is the horizontal rectangle,
-	// and 2 is the vertical. The intersections are shared vertices.
-	//       x---x
-	//       | 2 |
-	//   +---*---*---+
-	//   | 1 |1+2| 1 |
-	//   +---*---*---+
-	//       | 2 |
-	//       x---x
-	loopCross1          = "-2:1, -1:1, 1:1, 2:1, 2:-1, 1:-1, -1:-1, -2:-1;"
-	loopCross1SideHole  = "-1.5:0.5, -1.2:0.5, -1.2:-0.5, -1.5:-0.5;"
-	loopCrossCenterHole = "-0.5:0.5, 0.5:0.5, 0.5:-0.5, -0.5:-0.5;"
-	loopCross2SideHole  = "0.5:-1.5, 0.5:-1.2, -0.5:-1.2, -0.5:-1.5;"
-	loopCross2          = "1:-2, 1:-1, 1:1, 1:2, -1:2, -1:1, -1:-1, -1:-2;"
-
-	// Two rectangles that intersect, but no edges cross and there's always
-	// local containment (rather than crossing) at each shared vertex.
-	// In this ugly ASCII art, 1 is A+B, 2 is B+C:
-	//   +---+---+---+
-	//   | A | B | C |
-	//   +---+---+---+
-	loopOverlap1          = "0:1, 1:1, 2:1, 2:0, 1:0, 0:0;"
-	loopOverlap1SideHole  = "0.2:0.8, 0.8:0.8, 0.8:0.2, 0.2:0.2;"
-	loopOverlapCenterHole = "1.2:0.8, 1.8:0.8, 1.8:0.2, 1.2:0.2;"
-	loopOverlap2SideHole  = "2.2:0.8, 2.8:0.8, 2.8:0.2, 2.2:0.2;"
-	loopOverlap2          = "1:1, 2:1, 3:1, 3:0, 2:0, 1:0;"
-
-	// By symmetry, the intersection of the two polygons has almost half the area
-	// of either polygon.
-	//   +---+
-	//   | 3 |
-	//   +---+---+
-	//   |3+4| 4 |
-	//   +---+---+
-	loopOverlap3 = "-10:10, 0:10, 0:-10, -10:-10, -10:0"
-	loopOverlap4 = "-10:0, 10:0, 10:-10, -10:-10"
-)
-
-// Some shared polygons used in the tests.
-var (
-	emptyPolygon = &Polygon{}
-	fullPolygon  = FullPolygon()
-
-	// TODO(roberts): Uncomment once Polygons with multiple loops are supported.
-	/*
-		near0Polygon     = makePolygon(nearLoop0, true)
-		near01Polygon    = makePolygon(nearLoop0+nearLoop1, true)
-		near30Polygon    = makePolygon(nearLoop3+nearLoop0, true)
-		near23Polygon    = makePolygon(nearLoop2+nearLoop3, true)
-		near0231Polygon  = makePolygon(nearLoop0+nearLoop2+nearLoop3+nearLoop1, true)
-		near023H1Polygon = makePolygon(nearLoop0+nearLoop2+nearLoop3+nearLoopHemi+nearLoop1, true)
-
-		far01Polygon    = makePolygon(farLoop0+farLoop1, true)
-		far21Polygon    = makePolygon(farLoop2+farLoop1, true)
-		far231Polygon   = makePolygon(farLoop2+farLoop3+farLoop1, true)
-		far2H0Polygon   = makePolygon(farLoop2+farLoopHemi+farLoop0, true)
-		far2H013Polygon = makePolygon(farLoop2+farLoopHemi+farLoop0+farLoop1+farLoop3, true)
-
-		south0abPolygon     = makePolygon(southLoop0a+southLoop0b, true)
-		south2Polygon       = makePolygon(southLoop2, true)
-		south20b1Polygon    = makePolygon(southLoop2+southLoop0b+southLoop1, true)
-		south2H1Polygon     = makePolygon(southLoop2+southLoopHemi+southLoop1, true)
-		south20bH0acPolygon = makePolygon(southLoop2+southLoop0b+southLoopHemi+
-			southLoop0a+southLoop0c, true)
-
-		nf1N10F2S10abcPolygon = makePolygon(southLoop0c+farLoop2+nearLoop1+
-			nearFarLoop1+nearLoop0+southLoop1+southLoop0b+southLoop0a, true)
-
-		nf2N2F210S210abPolygon = makePolygon(farLoop2+southLoop0a+farLoop1+
-			southLoop1+farLoop0+southLoop0b+nearFarLoop2+southLoop2+nearLoop2, true)
-
-		f32n0Polygon  = makePolygon(farLoop2+nearLoop0+farLoop3, true)
-		n32s0bPolygon = makePolygon(nearLoop3+southLoop0b+nearLoop2, true)
-
-		cross1Polygon           = makePolygon(loopCross1, true)
-		cross1SideHolePolygon   = makePolygon(loopCross1+loopCross1SideHole, true)
-		cross1CenterHolePolygon = makePolygon(loopCross1+loopCrossCenterHole, true)
-		cross2Polygon           = makePolygon(loopCross2, true)
-		cross2SideHolePolygon   = makePolygon(loopCross2+loopCross2SideHole, true)
-		cross2CenterHolePolygon = makePolygon(loopCross2+loopCrossCenterHole, true)
-
-		overlap1Polygon           = makePolygon(loopOverlap1, true)
-		overlap1SideHolePolygon   = makePolygon(loopOverlap1+loopOverlap1SideHole, true)
-		overlap1CenterHolePolygon = makePolygon(loopOverlap1+loopOverlapCenterHole, true)
-		overlap2Polygon           = makePolygon(loopOverlap2, true)
-		overlap2SideHolePolygon   = makePolygon(loopOverlap2+loopOverlap2SideHole, true)
-		overlap2CenterHolePolygon = makePolygon(loopOverlap2+loopOverlapCenterHole, true)
-
-		overlap3Polygon = makePolygon(loopOverlap3, true)
-		overlap4Polygon = makePolygon(loopOverlap4, true)
-
-		farHemiPolygon      = makePolygon(farLoopHemi, true)
-		southHemiPolygon    = makePolygon(southLoopHemi, true)
-		farSouthHemiPolygon = makePolygon(farHemiSouthHemiLoop, true)
-	*/
-)
-
-func TestPolygonEmptyAndFull(t *testing.T) {
-	if !emptyPolygon.IsEmpty() {
-		t.Errorf("empty polygon should be empty")
-	}
-	if emptyPolygon.IsFull() {
-		t.Errorf("empty polygon should not be full")
-	}
-
-	if emptyPolygon.ContainsOrigin() {
-		t.Errorf("emptyPolygon.ContainsOrigin() = true, want false")
-	}
-	if got, want := emptyPolygon.NumEdges(), 0; got != want {
-		t.Errorf("emptyPolygon.NumEdges() = %v, want %v", got, want)
-	}
-
-	if got := emptyPolygon.dimension(); got != polygonGeometry {
-		t.Errorf("emptyPolygon.dimension() = %v, want %v", got, polygonGeometry)
-	}
-	if got, want := emptyPolygon.numChains(), 0; got != want {
-		t.Errorf("emptyPolygon.numChains() = %v, want %v", got, want)
-	}
-
-	if fullPolygon.IsEmpty() {
-		t.Errorf("full polygon should not be emtpy")
-	}
-	if !fullPolygon.IsFull() {
-		t.Errorf("full polygon should be full")
-	}
-
-	if !fullPolygon.ContainsOrigin() {
-		t.Errorf("fullPolygon.ContainsOrigin() = false, want true")
-	}
-	if got, want := fullPolygon.NumEdges(), 0; got != want {
-		t.Errorf("fullPolygon.NumEdges() = %v, want %v", got, want)
-	}
-
-	if got := fullPolygon.dimension(); got != polygonGeometry {
-		t.Errorf("emptyPolygon.dimension() = %v, want %v", got, polygonGeometry)
-	}
-	if got, want := fullPolygon.numChains(), 0; got != want {
-		t.Errorf("emptyPolygon.numChains() = %v, want %v", got, want)
-	}
-}
-
-func TestPolygonShape(t *testing.T) {
-	p := makePolygon("0:0, 1:0, 1:1, 2:1", true)
-	shape := Shape(p)
-
-	if got, want := shape.NumEdges(), 4; got != want {
-		t.Errorf("%v.NumEdges() = %v, want %d", shape, got, want)
-	}
-
-	if p.numVertices != shape.NumEdges() {
-		t.Errorf("the number of vertices in a polygon should equal the number of edges")
-	}
-	if p.NumLoops() != shape.numChains() {
-		t.Errorf("the number of loops in a polygon should equal the number of chains")
-	}
-	e := 0
-	v2, v3 := shape.Edge(2)
-	if want := PointFromLatLng(LatLngFromDegrees(1, 1)); !v2.ApproxEqual(want) {
-		t.Errorf("%v.Edge(%d) point A = %v  want %v", shape, 2, v2, want)
-	}
-	if want := PointFromLatLng(LatLngFromDegrees(2, 1)); !v3.ApproxEqual(want) {
-		t.Errorf("%v.Edge(%d) point B = %v  want %v", shape, 2, v3, want)
-	}
-	for i, l := range p.loops {
-		if e != shape.chainStart(i) {
-			t.Errorf("the edge id of the start of loop(%d) should equal the sum of vertices so far in the polygon. got %d, want %d", i, shape.chainStart(i), e)
-		}
-		for j := 0; j < len(l.Vertices()); j++ {
-			v0, v1 := shape.Edge(e)
-			// TODO(roberts): Update once Loop implements orientedVertex.
-			//if l.orientedVertex(j) != v0 {
-			if l.Vertex(j) != v0 {
-				t.Errorf("l.Vertex(%d) = %v, want %v", j, l.Vertex(j), v0)
-			}
-			// TODO(roberts): Update once Loop implements orientedVertex.
-			//if l.orientedVertex(j+1) != v1 {
-			if l.Vertex(j+1) != v1 {
-				t.Errorf("l.Vertex(%d) = %v, want %v", j+1, l.Vertex(j+1), v1)
-			}
-			e++
-		}
-		if e != shape.chainStart(i+1) {
-			t.Errorf("the edge id of the start of the next loop(%d+1) should equal the sum of vertices so far in the polygon. got %d, want %d", i, shape.chainStart(i+1), e)
-		}
-	}
-	if shape.dimension() != polygonGeometry {
-		t.Errorf("polygon.dimension() = %v, want %v", shape.dimension(), polygonGeometry)
-	}
-	if !shape.HasInterior() {
-		t.Errorf("polygons should always have interiors")
-	}
-	if !shape.ContainsOrigin() {
-		t.Errorf("polygon %v should contain the origin", shape)
-	}
-}
-
-func TestPolygonLoop(t *testing.T) {
-	if fullPolygon.NumLoops() != 1 {
-		t.Errorf("full polygon should have one loop")
-	}
-
-	l := &Loop{}
-	p1 := PolygonFromLoops([]*Loop{l})
-	if p1.NumLoops() != 1 {
-		t.Errorf("polygon with one loop should have one loop")
-	}
-	if p1.Loop(0) != l {
-		t.Errorf("polygon with one loop should return it")
-	}
-
-	// TODO: When multiple loops are supported, add more test cases.
-}
-
-func TestPolygonParent(t *testing.T) {
-	p1 := PolygonFromLoops([]*Loop{&Loop{}})
-	tests := []struct {
-		p    *Polygon
-		have int
-		want int
-		ok   bool
-	}{
-		{fullPolygon, 0, -1, false},
-		{p1, 0, -1, false},
-
-		// TODO: When multiple loops are supported, add more test cases to
-		// more fully show the parent levels.
-	}
-
-	for _, test := range tests {
-		if got, ok := test.p.Parent(test.have); ok != test.ok || got != test.want {
-			t.Errorf("%v.Parent(%d) = %d,%v, want %d,%v", test.p, test.have, got, ok, test.want, test.ok)
-		}
-	}
-}
-
-func TestPolygonLastDescendant(t *testing.T) {
-	p1 := PolygonFromLoops([]*Loop{&Loop{}})
-
-	tests := []struct {
-		p    *Polygon
-		have int
-		want int
-	}{
-		{fullPolygon, 0, 0},
-		{fullPolygon, -1, 0},
-
-		{p1, 0, 0},
-		{p1, -1, 0},
-
-		// TODO: When multiple loops are supported, add more test cases.
-	}
-
-	for _, test := range tests {
-		if got := test.p.LastDescendant(test.have); got != test.want {
-			t.Errorf("%v.LastDescendant(%d) = %d, want %d", test.p, test.have, got, test.want)
-		}
-	}
-}
-
-func TestPolygonLoopIsHoleAndLoopSign(t *testing.T) {
-	if fullPolygon.loopIsHole(0) {
-		t.Errorf("the full polygons only loop should not be a hole")
-	}
-	if fullPolygon.loopSign(0) != 1 {
-		t.Errorf("the full polygons only loop should be postitive")
-	}
-
-	loop := LoopFromPoints(parsePoints("30:20, 40:20, 39:43, 33:35"))
-	p := PolygonFromLoops([]*Loop{loop})
-
-	if p.loopIsHole(0) {
-		t.Errorf("first loop in a polygon should not start out as a hole")
-	}
-	if p.loopSign(0) != 1 {
-		t.Errorf("first loop in a polygon should start out as positive")
-	}
-
-	// TODO: When multiple loops are supported, add more test cases to
-	// more fully show the parent levels.
-}

+ 230 - 37
vendor/github.com/golang/geo/s2/polyline.go

@@ -1,22 +1,22 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
+	"fmt"
+	"io"
 	"math"
 	"math"
 
 
 	"github.com/golang/geo/s1"
 	"github.com/golang/geo/s1"
@@ -73,8 +73,8 @@ func (p *Polyline) Centroid() Point {
 	return centroid
 	return centroid
 }
 }
 
 
-// Equals reports whether the given Polyline is exactly the same as this one.
-func (p *Polyline) Equals(b *Polyline) bool {
+// Equal reports whether the given Polyline is exactly the same as this one.
+func (p *Polyline) Equal(b *Polyline) bool {
 	if len(*p) != len(*b) {
 	if len(*p) != len(*b) {
 		return false
 		return false
 	}
 	}
@@ -141,6 +141,16 @@ func (p *Polyline) IntersectsCell(cell Cell) bool {
 	return false
 	return false
 }
 }
 
 
+// ContainsPoint returns false since Polylines are not closed.
+func (p *Polyline) ContainsPoint(point Point) bool {
+	return false
+}
+
+// CellUnionBound computes a covering of the Polyline.
+func (p *Polyline) CellUnionBound() []CellID {
+	return p.CapBound().CellUnionBound()
+}
+
 // NumEdges returns the number of edges in this shape.
 // NumEdges returns the number of edges in this shape.
 func (p *Polyline) NumEdges() int {
 func (p *Polyline) NumEdges() int {
 	if len(*p) == 0 {
 	if len(*p) == 0 {
@@ -150,38 +160,222 @@ func (p *Polyline) NumEdges() int {
 }
 }
 
 
 // Edge returns endpoints for the given edge index.
 // Edge returns endpoints for the given edge index.
-func (p *Polyline) Edge(i int) (a, b Point) {
-	return (*p)[i], (*p)[i+1]
+func (p *Polyline) Edge(i int) Edge {
+	return Edge{(*p)[i], (*p)[i+1]}
+}
+
+// HasInterior returns false as Polylines are not closed.
+func (p *Polyline) HasInterior() bool {
+	return false
+}
+
+// ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
+func (p *Polyline) ReferencePoint() ReferencePoint {
+	return OriginReferencePoint(false)
+}
+
+// NumChains reports the number of contiguous edge chains in this Polyline.
+func (p *Polyline) NumChains() int {
+	return minInt(1, p.NumEdges())
+}
+
+// Chain returns the i-th edge Chain in the Shape.
+func (p *Polyline) Chain(chainID int) Chain {
+	return Chain{0, p.NumEdges()}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge Chain.
+func (p *Polyline) ChainEdge(chainID, offset int) Edge {
+	return Edge{(*p)[offset], (*p)[offset+1]}
+}
+
+// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge of the i-th edge Chain.
+func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
+	return ChainPosition{0, edgeID}
 }
 }
 
 
 // dimension returns the dimension of the geometry represented by this Polyline.
 // dimension returns the dimension of the geometry represented by this Polyline.
 func (p *Polyline) dimension() dimension { return polylineGeometry }
 func (p *Polyline) dimension() dimension { return polylineGeometry }
 
 
-// numChains reports the number of contiguous edge chains in this Polyline.
-func (p *Polyline) numChains() int {
-	if p.NumEdges() >= 1 {
-		return 1
+// findEndVertex reports the maximal end index such that the line segment
+// between the start index and this one passes within the given tolerance
+// of all interior vertices, in order.
+func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
+	// The basic idea is to keep track of the "pie wedge" of angles
+	// from the starting vertex such that a ray from the starting
+	// vertex at that angle will pass through the discs of radius
+	// tolerance centered around all vertices processed so far.
+	//
+	// First we define a coordinate frame for the tangent and normal
+	// spaces at the starting vertex. Essentially this means picking
+	// three orthonormal vectors X,Y,Z such that X and Y span the
+	// tangent plane at the starting vertex, and Z is up. We use
+	// the coordinate frame to define a mapping from 3D direction
+	// vectors to a one-dimensional ray angle in the range (-π,
+	// π]. The angle of a direction vector is computed by
+	// transforming it into the X,Y,Z basis, and then calculating
+	// atan2(y,x). This mapping allows us to represent a wedge of
+	// angles as a 1D interval. Since the interval wraps around, we
+	// represent it as an Interval, i.e. an interval on the unit
+	// circle.
+	origin := p[index]
+	frame := getFrame(origin)
+
+	// As we go along, we keep track of the current wedge of angles
+	// and the distance to the last vertex (which must be
+	// non-decreasing).
+	currentWedge := s1.FullInterval()
+	var lastDistance s1.Angle
+
+	for index++; index < len(p); index++ {
+		candidate := p[index]
+		distance := origin.Distance(candidate)
+
+		// We don't allow simplification to create edges longer than
+		// 90 degrees, to avoid numeric instability as lengths
+		// approach 180 degrees. We do need to allow for original
+		// edges longer than 90 degrees, though.
+		if distance > math.Pi/2 && lastDistance > 0 {
+			break
+		}
+
+		// Vertices must be in increasing order along the ray, except
+		// for the initial disc around the origin.
+		if distance < lastDistance && lastDistance > tolerance {
+			break
+		}
+
+		lastDistance = distance
+
+		// Points that are within the tolerance distance of the origin
+		// do not constrain the ray direction, so we can ignore them.
+		if distance <= tolerance {
+			continue
+		}
+
+		// If the current wedge of angles does not contain the angle
+		// to this vertex, then stop right now. Note that the wedge
+		// of possible ray angles is not necessarily empty yet, but we
+		// can't continue unless we are willing to backtrack to the
+		// last vertex that was contained within the wedge (since we
+		// don't create new vertices). This would be more complicated
+		// and also make the worst-case running time more than linear.
+		direction := toFrame(frame, candidate)
+		center := math.Atan2(direction.Y, direction.X)
+		if !currentWedge.Contains(center) {
+			break
+		}
+
+		// To determine how this vertex constrains the possible ray
+		// angles, consider the triangle ABC where A is the origin, B
+		// is the candidate vertex, and C is one of the two tangent
+		// points between A and the spherical cap of radius
+		// tolerance centered at B. Then from the spherical law of
+		// sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are
+		// the lengths of the edges opposite A and C. In our case C
+		// is a 90 degree angle, therefore A = asin(sin(a) / sin(c)).
+		// Angle A is the half-angle of the allowable wedge.
+		halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians()))
+		target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle)
+		currentWedge = currentWedge.Intersection(target)
 	}
 	}
-	return 0
+
+	// We break out of the loop when we reach a vertex index that
+	// can't be included in the line segment, so back up by one
+	// vertex.
+	return index - 1
 }
 }
 
 
-// chainStart returns the id of the first edge in the i-th edge chain in this Polyline.
-func (p *Polyline) chainStart(i int) int {
-	if i == 0 {
-		return 0
+// SubsampleVertices returns a subsequence of vertex indices such that the
+// polyline connecting these vertices is never further than the given tolerance from
+// the original polyline. Provided the first and last vertices are distinct,
+// they are always preserved; if they are not, the subsequence may contain
+// only a single index.
+//
+// Some useful properties of the algorithm:
+//
+//  - It runs in linear time.
+//
+//  - The output always represents a valid polyline. In particular, adjacent
+//    output vertices are never identical or antipodal.
+//
+//  - The method is not optimal, but it tends to produce 2-3% fewer
+//    vertices than the Douglas-Peucker algorithm with the same tolerance.
+//
+//  - The output is parametrically equivalent to the original polyline to
+//    within the given tolerance. For example, if a polyline backtracks on
+//    itself and then proceeds onwards, the backtracking will be preserved
+//    (to within the given tolerance). This is different than the
+//    Douglas-Peucker algorithm which only guarantees geometric equivalence.
+func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int {
+	var result []int
+
+	if len(*p) < 1 {
+		return result
+	}
+
+	result = append(result, 0)
+	clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0))
+
+	for index := 0; index+1 < len(*p); {
+		nextIndex := findEndVertex(*p, clampedTolerance, index)
+		// Don't create duplicate adjacent vertices.
+		if (*p)[nextIndex] != (*p)[index] {
+			result = append(result, nextIndex)
+		}
+		index = nextIndex
 	}
 	}
 
 
-	return p.NumEdges()
+	return result
 }
 }
 
 
-// HasInterior returns false as Polylines are not closed.
-func (p *Polyline) HasInterior() bool {
-	return false
+// Encode encodes the Polyline.
+func (p Polyline) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	p.encode(e)
+	return e.err
 }
 }
 
 
-// ContainsOrigin returns false because there is no interior to contain s2.Origin.
-func (p *Polyline) ContainsOrigin() bool {
-	return false
+func (p Polyline) encode(e *encoder) {
+	e.writeInt8(encodingVersion)
+	e.writeUint32(uint32(len(p)))
+	for _, v := range p {
+		e.writeFloat64(v.X)
+		e.writeFloat64(v.Y)
+		e.writeFloat64(v.Z)
+	}
+}
+
+// Decode decodes the polyline.
+func (p *Polyline) Decode(r io.Reader) error {
+	d := decoder{r: asByteReader(r)}
+	p.decode(d)
+	return d.err
+}
+
+func (p *Polyline) decode(d decoder) {
+	version := d.readInt8()
+	if d.err != nil {
+		return
+	}
+	if int(version) != int(encodingVersion) {
+		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
+		return
+	}
+	nvertices := d.readUint32()
+	if d.err != nil {
+		return
+	}
+	if nvertices > maxEncodedVertices {
+		d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
+		return
+	}
+	*p = make([]Point, nvertices)
+	for i := range *p {
+		(*p)[i].X = d.readFloat64()
+		(*p)[i].Y = d.readFloat64()
+		(*p)[i].Z = d.readFloat64()
+	}
 }
 }
 
 
 // TODO(roberts): Differences from C++.
 // TODO(roberts): Differences from C++.
@@ -190,8 +384,7 @@ func (p *Polyline) ContainsOrigin() bool {
 // Interpolate/UnInterpolate
 // Interpolate/UnInterpolate
 // Project
 // Project
 // IsPointOnRight
 // IsPointOnRight
-// Intersects
+// Intersects(Polyline)
 // Reverse
 // Reverse
-// SubsampleVertices
 // ApproxEqual
 // ApproxEqual
 // NearlyCoversPolyline
 // NearlyCoversPolyline

+ 0 - 144
vendor/github.com/golang/geo/s2/polyline_test.go

@@ -1,144 +0,0 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-)
-
-func TestPolylineBasics(t *testing.T) {
-	empty := Polyline{}
-	if empty.RectBound() != EmptyRect() {
-		t.Errorf("empty.RectBound() = %v, want %v", empty.RectBound(), EmptyRect())
-	}
-	if len(empty) != 0 {
-		t.Errorf("empty Polyline should have no vertices")
-	}
-	empty.Reverse()
-	if len(empty) != 0 {
-		t.Errorf("reveresed empty Polyline should have no vertices")
-	}
-
-	latlngs := []LatLng{
-		LatLngFromDegrees(0, 0),
-		LatLngFromDegrees(0, 90),
-		LatLngFromDegrees(0, 180),
-	}
-
-	semiEquator := PolylineFromLatLngs(latlngs)
-	//if got, want := semiEquator.Interpolate(0.5), Point{r3.Vector{0, 1, 0}}; !got.ApproxEqual(want) {
-	//	t.Errorf("semiEquator.Interpolate(0.5) = %v, want %v", got, want)
-	//}
-	semiEquator.Reverse()
-	if got, want := (*semiEquator)[2], (Point{r3.Vector{1, 0, 0}}); !got.ApproxEqual(want) {
-		t.Errorf("semiEquator[2] = %v, want %v", got, want)
-	}
-}
-
-func TestPolylineShape(t *testing.T) {
-	var shape Shape = makePolyline("0:0, 1:0, 1:1, 2:1")
-	if got, want := shape.NumEdges(), 3; got != want {
-		t.Errorf("%v.NumEdges() = %v, want %d", shape, got, want)
-	}
-
-	if got, want := shape.numChains(), 1; got != want {
-		t.Errorf("%v.numChains() = %d, want %d", shape, got, want)
-	}
-	if got, want := shape.chainStart(0), 0; got != want {
-		t.Errorf("%v.chainStart(0) = %d, want %d", shape, got, want)
-	}
-	if got, want := shape.chainStart(1), 3; got != want {
-		t.Errorf("%v.chainStart(1) = %d, want %d", shape, got, want)
-	}
-
-	v2, v3 := shape.Edge(2)
-	if want := PointFromLatLng(LatLngFromDegrees(1, 1)); !v2.ApproxEqual(want) {
-		t.Errorf("%v.Edge(%d) point A = %v  want %v", shape, 2, v2, want)
-	}
-	if want := PointFromLatLng(LatLngFromDegrees(2, 1)); !v3.ApproxEqual(want) {
-		t.Errorf("%v.Edge(%d) point B = %v  want %v", shape, 2, v3, want)
-	}
-
-	if shape.HasInterior() {
-		t.Errorf("polylines should not have an interior")
-	}
-	if shape.ContainsOrigin() {
-		t.Errorf("polylines should not contain the origin")
-	}
-
-	if shape.dimension() != polylineGeometry {
-		t.Errorf("polylines should have PolylineGeometry")
-	}
-
-	empty := &Polyline{}
-	if got, want := empty.NumEdges(), 0; got != want {
-		t.Errorf("%v.NumEdges() = %d, want %d", empty, got, want)
-	}
-	if got, want := empty.numChains(), 0; got != want {
-		t.Errorf("%v.numChains() = %d, want %d", empty, got, want)
-	}
-}
-
-func TestPolylineLengthAndCentroid(t *testing.T) {
-	// Construct random great circles and divide them randomly into segments.
-	// Then make sure that the length and centroid are correct.  Note that
-	// because of the way the centroid is computed, it does not matter how
-	// we split the great circle into segments.
-
-	for i := 0; i < 100; i++ {
-		// Choose a coordinate frame for the great circle.
-		f := randomFrame()
-
-		var line Polyline
-		for theta := 0.0; theta < 2*math.Pi; theta += math.Pow(randomFloat64(), 10) {
-			p := Point{f.row(0).Mul(math.Cos(theta)).Add(f.row(1).Mul(math.Sin(theta)))}
-			if len(line) == 0 || !p.ApproxEqual(line[len(line)-1]) {
-				line = append(line, p)
-			}
-		}
-
-		// Close the circle.
-		line = append(line, line[0])
-
-		length := line.Length()
-		if got, want := math.Abs(length.Radians()-2*math.Pi), 2e-14; got > want {
-			t.Errorf("%v.Length() = %v, want < %v", line, got, want)
-		}
-
-		centroid := line.Centroid()
-		if got, want := centroid.Norm(), 2e-14; got > want {
-			t.Errorf("%v.Norm() = %v, want < %v", centroid, got, want)
-		}
-	}
-}
-
-func TestPolylineIntersectsCell(t *testing.T) {
-	pline := Polyline{
-		Point{r3.Vector{1, -1.1, 0.8}.Normalize()},
-		Point{r3.Vector{1, -0.8, 1.1}.Normalize()},
-	}
-
-	for face := 0; face < 6; face++ {
-		cell := CellFromCellID(CellIDFromFace(face))
-		if got, want := pline.IntersectsCell(cell), face&1 == 0; got != want {
-			t.Errorf("%v.IntersectsCell(%v) = %v, want %v", pline, cell, got, want)
-		}
-	}
-}

+ 227 - 25
vendor/github.com/golang/geo/s2/predicates.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
@@ -27,6 +25,7 @@ package s2
 
 
 import (
 import (
 	"math"
 	"math"
+	"math/big"
 
 
 	"github.com/golang/geo/r3"
 	"github.com/golang/geo/r3"
 )
 )
@@ -219,20 +218,223 @@ func expensiveSign(a, b, c Point) Direction {
 	// the three points are truly collinear (e.g., three points on the equator).
 	// the three points are truly collinear (e.g., three points on the equator).
 	detSign := stableSign(a, b, c)
 	detSign := stableSign(a, b, c)
 	if detSign != Indeterminate {
 	if detSign != Indeterminate {
-		return detSign
+		return Direction(detSign)
 	}
 	}
 
 
 	// Otherwise fall back to exact arithmetic and symbolic permutations.
 	// Otherwise fall back to exact arithmetic and symbolic permutations.
-	return exactSign(a, b, c, false)
+	return exactSign(a, b, c, true)
 }
 }
 
 
-// exactSign reports the direction sign of the points using exact precision arithmetic.
+// exactSign reports the direction sign of the points computed using high-precision
+// arithmetic and/or symbolic perturbations.
 func exactSign(a, b, c Point, perturb bool) Direction {
 func exactSign(a, b, c Point, perturb bool) Direction {
-	// In the C++ version, the final computation is performed using OpenSSL's
-	// Bignum exact precision math library. The existence of an equivalent
-	// library in Go is indeterminate. In C++, using the exact precision library
-	// to solve this stage is ~300x slower than the above checks.
-	// TODO(roberts): Select and incorporate an appropriate Go exact precision
-	// floating point library for the remaining calculations.
-	return Indeterminate
+	// Sort the three points in lexicographic order, keeping track of the sign
+	// of the permutation. (Each exchange inverts the sign of the determinant.)
+	permSign := Direction(CounterClockwise)
+	pa := &a
+	pb := &b
+	pc := &c
+	if pa.Cmp(pb.Vector) > 0 {
+		pa, pb = pb, pa
+		permSign = -permSign
+	}
+	if pb.Cmp(pc.Vector) > 0 {
+		pb, pc = pc, pb
+		permSign = -permSign
+	}
+	if pa.Cmp(pb.Vector) > 0 {
+		pa, pb = pb, pa
+		permSign = -permSign
+	}
+
+	// Construct multiple-precision versions of the sorted points and compute
+	// their precise 3x3 determinant.
+	xa := r3.PreciseVectorFromVector(pa.Vector)
+	xb := r3.PreciseVectorFromVector(pb.Vector)
+	xc := r3.PreciseVectorFromVector(pc.Vector)
+	xbCrossXc := xb.Cross(xc)
+	det := xa.Dot(xbCrossXc)
+
+	// The precision of big.Float is high enough that the result should always
+	// be exact enough (no rounding was performed).
+
+	// If the exact determinant is non-zero, we're done.
+	detSign := Direction(det.Sign())
+	if detSign == Indeterminate && perturb {
+		// Otherwise, we need to resort to symbolic perturbations to resolve the
+		// sign of the determinant.
+		detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc)
+	}
+	return permSign * Direction(detSign)
+}
+
+// symbolicallyPerturbedSign reports the sign of the determinant of three points
+// A, B, C under a model where every possible Point is slightly perturbed by
+// a unique infinitesmal amount such that no three perturbed points are
+// collinear and no four points are coplanar. The perturbations are so small
+// that they do not change the sign of any determinant that was non-zero
+// before the perturbations, and therefore can be safely ignored unless the
+// determinant of three points is exactly zero (using multiple-precision
+// arithmetic). This returns CounterClockwise or Clockwise according to the
+// sign of the determinant after the symbolic perturbations are taken into account.
+//
+// Since the symbolic perturbation of a given point is fixed (i.e., the
+// perturbation is the same for all calls to this method and does not depend
+// on the other two arguments), the results of this method are always
+// self-consistent. It will never return results that would correspond to an
+// impossible configuration of non-degenerate points.
+//
+// This requires that the 3x3 determinant of A, B, C must be exactly zero.
+// And the points must be distinct, with A < B < C in lexicographic order.
+//
+// Reference:
+//   "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
+//   Graphics, 1990).
+//
+func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
+	// This method requires that the points are sorted in lexicographically
+	// increasing order. This is because every possible Point has its own
+	// symbolic perturbation such that if A < B then the symbolic perturbation
+	// for A is much larger than the perturbation for B.
+	//
+	// Alternatively, we could sort the points in this method and keep track of
+	// the sign of the permutation, but it is more efficient to do this before
+	// converting the inputs to the multi-precision representation, and this
+	// also lets us re-use the result of the cross product B x C.
+	//
+	// Every input coordinate x[i] is assigned a symbolic perturbation dx[i].
+	// We then compute the sign of the determinant of the perturbed points,
+	// i.e.
+	//               | a.X+da.X  a.Y+da.Y  a.Z+da.Z |
+	//               | b.X+db.X  b.Y+db.Y  b.Z+db.Z |
+	//               | c.X+dc.X  c.Y+dc.Y  c.Z+dc.Z |
+	//
+	// The perturbations are chosen such that
+	//
+	//   da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X
+	//
+	// where each perturbation is so much smaller than the previous one that we
+	// don't even need to consider it unless the coefficients of all previous
+	// perturbations are zero. In fact, it is so small that we don't need to
+	// consider it unless the coefficient of all products of the previous
+	// perturbations are zero. For example, we don't need to consider the
+	// coefficient of db.Y unless the coefficient of db.Z *da.X is zero.
+	//
+	// The follow code simply enumerates the coefficients of the perturbations
+	// (and products of perturbations) that appear in the determinant above, in
+	// order of decreasing perturbation magnitude. The first non-zero
+	// coefficient determines the sign of the result. The easiest way to
+	// enumerate the coefficients in the correct order is to pretend that each
+	// perturbation is some tiny value "eps" raised to a power of two:
+	//
+	// eps**     1      2      4      8     16     32     64    128    256
+	//        da.Z   da.Y   da.X   db.Z   db.Y   db.X   dc.Z   dc.Y   dc.X
+	//
+	// Essentially we can then just count in binary and test the corresponding
+	// subset of perturbations at each step. So for example, we must test the
+	// coefficient of db.Z*da.X before db.Y because eps**12 > eps**16.
+	//
+	// Of course, not all products of these perturbations appear in the
+	// determinant above, since the determinant only contains the products of
+	// elements in distinct rows and columns. Thus we don't need to consider
+	// da.Z*da.Y, db.Y *da.Y, etc. Furthermore, sometimes different pairs of
+	// perturbations have the same coefficient in the determinant; for example,
+	// da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore
+	// we only need to test this coefficient the first time we encounter it in
+	// the binary order above (which will be db.Y*da.X).
+	//
+	// The sequence of tests below also appears in Table 4-ii of the paper
+	// referenced above, if you just want to look it up, with the following
+	// translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that
+	// some of the signs are different because the opposite cross product is
+	// used (e.g., B x C rather than C x B).
+
+	detSign := bCrossC.Z.Sign() // da.Z
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = bCrossC.Y.Sign() // da.Y
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = bCrossC.X.Sign() // da.X
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+
+	detSign = new(big.Float).Sub(new(big.Float).Mul(c.X, a.Y), new(big.Float).Mul(c.Y, a.X)).Sign() // db.Z
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = c.X.Sign() // db.Z * da.Y
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = -(c.Y.Sign()) // db.Z * da.X
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+
+	detSign = new(big.Float).Sub(new(big.Float).Mul(c.Z, a.X), new(big.Float).Mul(c.X, a.Z)).Sign() // db.Y
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = c.Z.Sign() // db.Y * da.X
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+
+	// The following test is listed in the paper, but it is redundant because
+	// the previous tests guarantee that C == (0, 0, 0).
+	// (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
+
+	detSign = new(big.Float).Sub(new(big.Float).Mul(a.X, b.Y), new(big.Float).Mul(a.Y, b.X)).Sign() // dc.Z
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = -(b.X.Sign()) // dc.Z * da.Y
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = b.Y.Sign() // dc.Z * da.X
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	detSign = a.X.Sign() // dc.Z * db.Y
+	if detSign != 0 {
+		return Direction(detSign)
+	}
+	return CounterClockwise // dc.Z * db.Y * da.X
 }
 }
+
+// TODO(roberts): Differences from C++
+// CompareDistance(s)
+// CompareEdgeDistance
+// CompareEdgeDirections
+// EdgeCircumcenterSign
+// GetVoronoiSiteExclusion
+// GetCosDistance
+// GetSinDistance
+// GetSin2Distance
+// TriageCompareCosDistances
+// ExactCompareDistances
+// SymbolicCompareDistances
+// CompareSin2Distances
+// TriageCompareSin2Distance
+// GetClosestVertex
+// TriageCompareLineSin2Distance
+// TriageCompareLineCos2Distance
+// TriageCompareLineDistance
+// TriageCompareEdgeDistance
+// ExactCompareLineDistance
+// ExactCompareEdgeDistance
+// TriageCompareEdgeDirections
+// ExactCompareEdgeDirections
+// ArePointsAntipodal
+// ArePointsLinearlyDependent
+// GetCircumcenter
+// TriageEdgeCircumcenterSign
+// ExactEdgeCircumcenterSign
+// UnperturbedSign
+// SymbolicEdgeCircumcenterSign
+// ExactVoronoiSiteExclusion

+ 0 - 314
vendor/github.com/golang/geo/s2/predicates_test.go

@@ -1,314 +0,0 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-)
-
-func TestPredicatesSign(t *testing.T) {
-	tests := []struct {
-		p1x, p1y, p1z, p2x, p2y, p2z, p3x, p3y, p3z float64
-		want                                        bool
-	}{
-		{1, 0, 0, 0, 1, 0, 0, 0, 1, true},
-		{0, 1, 0, 0, 0, 1, 1, 0, 0, true},
-		{0, 0, 1, 1, 0, 0, 0, 1, 0, true},
-		{1, 1, 0, 0, 1, 1, 1, 0, 1, true},
-		{-3, -1, 4, 2, -1, -3, 1, -2, 0, true},
-
-		// All degenerate cases of Sign(). Let M_1, M_2, ... be the sequence of
-		// submatrices whose determinant sign is tested by that function. Then the
-		// i-th test below is a 3x3 matrix M (with rows A, B, C) such that:
-		//
-		//    det(M) = 0
-		//    det(M_j) = 0 for j < i
-		//    det(M_i) != 0
-		//    A < B < C in lexicographic order.
-		// det(M_1) = b0*c1 - b1*c0
-		{-3, -1, 0, -2, 1, 0, 1, -2, 0, false},
-		// det(M_2) = b2*c0 - b0*c2
-		{-6, 3, 3, -4, 2, -1, -2, 1, 4, false},
-		// det(M_3) = b1*c2 - b2*c1
-		{0, -1, -1, 0, 1, -2, 0, 2, 1, false},
-		// From this point onward, B or C must be zero, or B is proportional to C.
-		// det(M_4) = c0*a1 - c1*a0
-		{-1, 2, 7, 2, 1, -4, 4, 2, -8, false},
-		// det(M_5) = c0
-		{-4, -2, 7, 2, 1, -4, 4, 2, -8, false},
-		// det(M_6) = -c1
-		{0, -5, 7, 0, -4, 8, 0, -2, 4, false},
-		// det(M_7) = c2*a0 - c0*a2
-		{-5, -2, 7, 0, 0, -2, 0, 0, -1, false},
-		// det(M_8) = c2
-		{0, -2, 7, 0, 0, 1, 0, 0, 2, false},
-	}
-
-	for _, test := range tests {
-		p1 := Point{r3.Vector{test.p1x, test.p1y, test.p1z}}
-		p2 := Point{r3.Vector{test.p2x, test.p2y, test.p2z}}
-		p3 := Point{r3.Vector{test.p3x, test.p3y, test.p3z}}
-		result := Sign(p1, p2, p3)
-		if result != test.want {
-			t.Errorf("Sign(%v, %v, %v) = %v, want %v", p1, p2, p3, result, test.want)
-		}
-		if test.want {
-			// For these cases we can test the reversibility condition
-			result = Sign(p3, p2, p1)
-			if result == test.want {
-				t.Errorf("Sign(%v, %v, %v) = %v, want %v", p3, p2, p1, result, !test.want)
-			}
-		}
-	}
-}
-
-// Points used in the various RobustSign tests.
-var (
-	// The following points happen to be *exactly collinear* along a line that it
-	// approximate tangent to the surface of the unit sphere. In fact, C is the
-	// exact midpoint of the line segment AB. All of these points are close
-	// enough to unit length to satisfy r3.Vector.IsUnit().
-	poA = Point{r3.Vector{0.72571927877036835, 0.46058825605889098, 0.51106749730504852}}
-	poB = Point{r3.Vector{0.7257192746638208, 0.46058826573818168, 0.51106749441312738}}
-	poC = Point{r3.Vector{0.72571927671709457, 0.46058826089853633, 0.51106749585908795}}
-
-	// The points "x1" and "x2" are exactly proportional, i.e. they both lie
-	// on a common line through the origin. Both points are considered to be
-	// normalized, and in fact they both satisfy (x == x.Normalize()).
-	// Therefore the triangle (x1, x2, -x1) consists of three distinct points
-	// that all lie on a common line through the origin.
-	x1 = Point{r3.Vector{0.99999999999999989, 1.4901161193847655e-08, 0}}
-	x2 = Point{r3.Vector{1, 1.4901161193847656e-08, 0}}
-
-	// Here are two more points that are distinct, exactly proportional, and
-	// that satisfy (x == x.Normalize()).
-	x3 = Point{r3.Vector{1, 1, 1}.Normalize()}
-	x4 = Point{x3.Mul(0.99999999999999989)}
-
-	// The following three points demonstrate that Normalize() is not idempotent, i.e.
-	// y0.Normalize() != y0.Normalize().Normalize(). Both points are exactly proportional.
-	y0 = Point{r3.Vector{1, 1, 0}}
-	y1 = Point{y0.Normalize()}
-	y2 = Point{y1.Normalize()}
-)
-
-func TestPredicatesRobustSignEqualities(t *testing.T) {
-	tests := []struct {
-		p1, p2 Point
-		want   bool
-	}{
-		{Point{poC.Sub(poA.Vector)}, Point{poB.Sub(poC.Vector)}, true},
-		{x1, Point{x1.Normalize()}, true},
-		{x2, Point{x2.Normalize()}, true},
-		{x3, Point{x3.Normalize()}, true},
-		{x4, Point{x4.Normalize()}, true},
-		{x3, x4, false},
-		{y1, y2, false},
-		{y2, Point{y2.Normalize()}, true},
-	}
-
-	for _, test := range tests {
-		if got := test.p1.Vector == test.p2.Vector; got != test.want {
-			t.Errorf("Testing equality for RobustSign. %v = %v, got %v want %v", test.p1, test.p2, got, test.want)
-		}
-	}
-}
-
-func TestPredicatesRobustSign(t *testing.T) {
-	x := Point{r3.Vector{1, 0, 0}}
-	y := Point{r3.Vector{0, 1, 0}}
-	z := Point{r3.Vector{0, 0, 1}}
-
-	tests := []struct {
-		p1, p2, p3 Point
-		want       Direction
-	}{
-		// Simple collinear points test cases.
-		// a == b != c
-		{x, x, z, Indeterminate},
-		// a != b == c
-		{x, y, y, Indeterminate},
-		// c == a != b
-		{z, x, z, Indeterminate},
-		// CCW
-		{x, y, z, CounterClockwise},
-		// CW
-		{z, y, x, Clockwise},
-
-		// Edge cases:
-		// The following points happen to be *exactly collinear* along a line that it
-		// approximate tangent to the surface of the unit sphere. In fact, C is the
-		// exact midpoint of the line segment AB. All of these points are close
-		// enough to unit length to satisfy S2::IsUnitLength().
-		{
-			// Until we get ExactSign, this will only return Indeterminate.
-			// It should be Clockwise.
-			poA, poB, poC, Indeterminate,
-		},
-
-		// The points "x1" and "x2" are exactly proportional, i.e. they both lie
-		// on a common line through the origin. Both points are considered to be
-		// normalized, and in fact they both satisfy (x == x.Normalize()).
-		// Therefore the triangle (x1, x2, -x1) consists of three distinct points
-		// that all lie on a common line through the origin.
-		{
-			// Until we get ExactSign, this will only return Indeterminate.
-			// It should be CounterClockwise.
-			x1, x2, Point{x1.Mul(-1.0)}, Indeterminate,
-		},
-
-		// Here are two more points that are distinct, exactly proportional, and
-		// that satisfy (x == x.Normalize()).
-		{
-			// Until we get ExactSign, this will only return Indeterminate.
-			// It should be Clockwise.
-			x3, x4, Point{x3.Mul(-1.0)}, Indeterminate,
-		},
-
-		// The following points demonstrate that Normalize() is not idempotent,
-		// i.e. y0.Normalize() != y0.Normalize().Normalize(). Both points satisfy
-		// S2::IsNormalized(), though, and the two points are exactly proportional.
-		{
-			// Until we get ExactSign, this will only return Indeterminate.
-			// It should be CounterClockwise.
-			y1, y2, Point{y1.Mul(-1.0)}, Indeterminate,
-		},
-	}
-
-	for _, test := range tests {
-		result := RobustSign(test.p1, test.p2, test.p3)
-		if result != test.want {
-			t.Errorf("RobustSign(%v, %v, %v) got %v, want %v",
-				test.p1, test.p2, test.p3, result, test.want)
-		}
-		// Test RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
-		rotated := RobustSign(test.p2, test.p3, test.p1)
-		if rotated != result {
-			t.Errorf("RobustSign(%v, %v, %v) vs Rotated RobustSign(%v, %v, %v) got %v, want %v",
-				test.p1, test.p2, test.p3, test.p2, test.p3, test.p1, rotated, result)
-		}
-		// Test RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
-		want := Clockwise
-		if result == Clockwise {
-			want = CounterClockwise
-		} else if result == Indeterminate {
-			want = Indeterminate
-		}
-		reversed := RobustSign(test.p3, test.p2, test.p1)
-		if reversed != want {
-			t.Errorf("RobustSign(%v, %v, %v) vs Reversed RobustSign(%v, %v, %v) got %v, want %v",
-				test.p1, test.p2, test.p3, test.p3, test.p2, test.p1, reversed, -1*result)
-		}
-	}
-
-	// Test cases that should not be indeterminate.
-	/*
-		Uncomment these tests once RobustSign is completed.
-		if got := RobustSign(poA, poB, poC); got == Indeterminate {
-			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", poA, poA, poA, got)
-		}
-		if got := RobustSign(x1, x2, Point{x1.Mul(-1)}); got == Indeterminate {
-			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x1, x2, x1.Mul(-1), got)
-		}
-		if got := RobustSign(x3, x4, Point{x3.Mul(-1)}); got == Indeterminate {
-			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x3, x4, x3.Mul(-1), got)
-		}
-		if got := RobustSign(y1, y2, Point{y1.Mul(-1)}); got == Indeterminate {
-			t.Errorf("RobustSign(%v,%v,%v) = %v, want not Indeterminate", x1, x2, y1.Mul(-1), got)
-		}
-	*/
-}
-
-func TestPredicatesStableSignFailureRate(t *testing.T) {
-	const earthRadiusKm = 6371.01
-	const iters = 1000
-
-	// Verify that stableSign is able to handle most cases where the three
-	// points are as collinear as possible. (For reference, triageSign fails
-	// almost 100% of the time on this test.)
-	//
-	// Note that the failure rate *decreases* as the points get closer together,
-	// and the decrease is approximately linear. For example, the failure rate
-	// is 0.4% for collinear points spaced 1km apart, but only 0.0004% for
-	// collinear points spaced 1 meter apart.
-	//
-	//  1km spacing: <  1% (actual is closer to 0.4%)
-	// 10km spacing: < 10% (actual is closer to 4%)
-	want := 0.01
-	spacing := 1.0
-
-	// Estimate the probability that stableSign will not be able to compute
-	// the determinant sign of a triangle A, B, C consisting of three points
-	// that are as collinear as possible and spaced the given distance apart
-	// by counting up the times it returns Indeterminate.
-	failureCount := 0
-	m := math.Tan(spacing / earthRadiusKm)
-	for iter := 0; iter < iters; iter++ {
-		f := randomFrame()
-		a := f.col(0)
-		x := f.col(1)
-
-		b := Point{a.Sub(x.Mul(m)).Normalize()}
-		c := Point{a.Add(x.Mul(m)).Normalize()}
-		sign := stableSign(a, b, c)
-		if sign != Indeterminate {
-			// TODO(roberts): Once exactSign is implemented, uncomment this case.
-			//if got := exactSign(a, b, c, true); got != sign {
-			//	t.Errorf("exactSign(%v, %v, %v, true) = %v, want %v", a, b, c, got, sign)
-			//}
-		} else {
-			failureCount++
-		}
-	}
-
-	rate := float64(failureCount) / float64(iters)
-	if rate >= want {
-		t.Errorf("stableSign failure rate for spacing %v km = %v, want %v", spacing, rate, want)
-	}
-}
-
-func BenchmarkSign(b *testing.B) {
-	p1 := Point{r3.Vector{-3, -1, 4}}
-	p2 := Point{r3.Vector{2, -1, -3}}
-	p3 := Point{r3.Vector{1, -2, 0}}
-	for i := 0; i < b.N; i++ {
-		Sign(p1, p2, p3)
-	}
-}
-
-// BenchmarkRobustSignSimple runs the benchmark for points that satisfy the first
-// checks in RobustSign to compare the performance to that of Sign().
-func BenchmarkRobustSignSimple(b *testing.B) {
-	p1 := Point{r3.Vector{-3, -1, 4}}
-	p2 := Point{r3.Vector{2, -1, -3}}
-	p3 := Point{r3.Vector{1, -2, 0}}
-	for i := 0; i < b.N; i++ {
-		RobustSign(p1, p2, p3)
-	}
-}
-
-// BenchmarkRobustSignNearCollinear runs the benchmark for points that are almost but not
-// quite collinear, so the tests have to use most of the calculations of RobustSign
-// before getting to an answer.
-func BenchmarkRobustSignNearCollinear(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		RobustSign(poA, poB, poC)
-	}
-}

+ 55 - 17
vendor/github.com/golang/geo/s2/rect.go

@@ -1,23 +1,22 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"io"
 	"math"
 	"math"
 
 
 	"github.com/golang/geo/r1"
 	"github.com/golang/geo/r1"
@@ -292,6 +291,11 @@ func (r Rect) ContainsPoint(p Point) bool {
 	return r.ContainsLatLng(LatLngFromPoint(p))
 	return r.ContainsLatLng(LatLngFromPoint(p))
 }
 }
 
 
+// CellUnionBound computes a covering of the Rect.
+func (r Rect) CellUnionBound() []CellID {
+	return r.CapBound().CellUnionBound()
+}
+
 // intersectsLatEdge reports whether the edge AB intersects the given edge of constant
 // intersectsLatEdge reports whether the edge AB intersects the given edge of constant
 // latitude. Requires the points to have unit length.
 // latitude. Requires the points to have unit length.
 func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
 func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
@@ -354,8 +358,8 @@ func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
 func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
 func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
 	// The nice thing about edges of constant longitude is that
 	// The nice thing about edges of constant longitude is that
 	// they are straight lines on the sphere (geodesics).
 	// they are straight lines on the sphere (geodesics).
-	return SimpleCrossing(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
-		PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng}))
+	return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
+		PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross
 }
 }
 
 
 // IntersectsCell reports whether this rectangle intersects the given cell. This is an
 // IntersectsCell reports whether this rectangle intersects the given cell. This is an
@@ -423,5 +427,39 @@ func (r Rect) IntersectsCell(c Cell) bool {
 	return false
 	return false
 }
 }
 
 
+// Encode encodes the Rect.
+func (r Rect) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	r.encode(e)
+	return e.err
+}
+
+func (r Rect) encode(e *encoder) {
+	e.writeInt8(encodingVersion)
+	e.writeFloat64(r.Lat.Lo)
+	e.writeFloat64(r.Lat.Hi)
+	e.writeFloat64(r.Lng.Lo)
+	e.writeFloat64(r.Lng.Hi)
+}
+
+// Decode decodes a rectangle.
+func (r *Rect) Decode(rd io.Reader) error {
+	d := &decoder{r: asByteReader(rd)}
+	r.decode(d)
+	return d.err
+}
+
+func (r *Rect) decode(d *decoder) {
+	if version := d.readUint8(); int(version) != int(encodingVersion) && d.err == nil {
+		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
+		return
+	}
+	r.Lat.Lo = d.readFloat64()
+	r.Lat.Hi = d.readFloat64()
+	r.Lng.Lo = d.readFloat64()
+	r.Lng.Hi = d.readFloat64()
+	return
+}
+
 // BUG: The major differences from the C++ version are:
 // BUG: The major differences from the C++ version are:
 //   - GetCentroid, Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)
 //   - GetCentroid, Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)

+ 352 - 0
vendor/github.com/golang/geo/s2/rect_bounder.go

@@ -0,0 +1,352 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
// RectBounder is used to compute a bounding rectangle that contains all edges
// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
// Note that the bounding rectangle of an edge can be larger than the bounding
// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
//
// The bounds are calculated conservatively to account for numerical errors
// when points are converted to LatLngs. More precisely, this function
// guarantees the following:
// Let L be a closed edge chain (Loop) such that the interior of the loop does
// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
type RectBounder struct {
	// a is the previous vertex in the chain.
	a Point
	// aLL is the previous vertex in latitude/longitude form, cached so
	// AddPoint does not have to reconvert it for every edge.
	aLL LatLng
	// bound is the bounding rectangle accumulated so far.
	bound Rect
}
+
+// NewRectBounder returns a new instance of a RectBounder.
+func NewRectBounder() *RectBounder {
+	return &RectBounder{
+		bound: EmptyRect(),
+	}
+}
+
+// maxErrorForTests returns the maximum error in RectBound provided that the
+// result does not include either pole. It is only used for testing purposes
+func (r *RectBounder) maxErrorForTests() LatLng {
+	// The maximum error in the latitude calculation is
+	//    3.84 * dblEpsilon   for the PointCross calculation
+	//    0.96 * dblEpsilon   for the Latitude calculation
+	//    5    * dblEpsilon   added by AddPoint/RectBound to compensate for error
+	//    -----------------
+	//    9.80 * dblEpsilon   maximum error in result
+	//
+	// The maximum error in the longitude calculation is dblEpsilon. RectBound
+	// does not do any expansion because this isn't necessary in order to
+	// bound the *rounded* longitudes of contained points.
+	return LatLng{10 * dblEpsilon * s1.Radian, 1 * dblEpsilon * s1.Radian}
+}
+
// AddPoint adds the given point to the chain. The Point must be unit length.
func (r *RectBounder) AddPoint(b Point) {
	bLL := LatLngFromPoint(b)

	// The first vertex only seeds the bound; there is no edge yet.
	if r.bound.IsEmpty() {
		r.a = b
		r.aLL = bLL
		r.bound = r.bound.AddPoint(bLL)
		return
	}

	// First compute the cross product N = A x B robustly. This is the normal
	// to the great circle through A and B. We don't use RobustSign
	// since that method returns an arbitrary vector orthogonal to A if the two
	// vectors are proportional, and we want the zero vector in that case.
	n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)

	// The relative error in N gets large as its norm gets very small (i.e.,
	// when the two points are nearly identical or antipodal). We handle this
	// by choosing a maximum allowable error, and if the error is greater than
	// this we fall back to a different technique. Since it turns out that
	// the other sources of error in converting the normal to a maximum
	// latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
	// have the total error be a multiple of dblEpsilon, we have chosen to
	// limit the maximum error in the normal to be 3.84 * dblEpsilon.
	// It is possible to show that the error is less than this when
	//
	// n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
	//          = 1.91346e-15 (about 8.618 * dblEpsilon)
	nNorm := n.Norm()
	if nNorm < 1.91346e-15 {
		// A and B are either nearly identical or nearly antipodal (to within
		// 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
		if r.a.Dot(b.Vector) < 0 {
			// The two points are nearly antipodal. The easiest solution is to
			// assume that the edge between A and B could go in any direction
			// around the sphere.
			r.bound = FullRect()
		} else {
			// The two points are nearly identical (to within 4.309 * dblEpsilon).
			// In this case we can just use the bounding rectangle of the points,
			// since after the expansion done by GetBound this Rect is
			// guaranteed to include the (lat,lng) values of all points along AB.
			r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
		}
		r.a = b
		r.aLL = bLL
		return
	}

	// Compute the longitude range spanned by AB.
	lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
	if lngAB.Length() >= math.Pi-2*dblEpsilon {
		// The points lie on nearly opposite lines of longitude to within the
		// maximum error of the calculation. The easiest solution is to assume
		// that AB could go on either side of the pole.
		lngAB = s1.FullInterval()
	}

	// Next we compute the latitude range spanned by the edge AB. We start
	// with the range spanning the two endpoints of the edge:
	latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())

	// This is the desired range unless the edge AB crosses the plane
	// through N and the Z-axis (which is where the great circle through A
	// and B attains its minimum and maximum latitudes). To test whether AB
	// crosses this plane, we compute a vector M perpendicular to this
	// plane and then project A and B onto it.
	m := n.Cross(r3.Vector{0, 0, 1})
	mA := m.Dot(r.a.Vector)
	mB := m.Dot(b.Vector)

	// We want to test the signs of "mA" and "mB", so we need to bound
	// the error in these calculations. It is possible to show that the
	// total error is bounded by
	//
	// (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
	//   = 6.06638e-16 * nNorm + 6.83174e-31

	mError := 6.06638e-16*nNorm + 6.83174e-31
	if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
		// Minimum/maximum latitude *may* occur in the edge interior.
		//
		// The maximum latitude is 90 degrees minus the latitude of N. We
		// compute this directly using atan2 in order to get maximum accuracy
		// near the poles.
		//
		// Our goal is compute a bound that contains the computed latitudes of
		// all S2Points P that pass the point-in-polygon containment test.
		// There are three sources of error we need to consider:
		// - the directional error in N (at most 3.84 * dblEpsilon)
		// - converting N to a maximum latitude
		// - computing the latitude of the test point P
		// The latter two sources of error are at most 0.955 * dblEpsilon
		// individually, but it is possible to show by a more complex analysis
		// that together they can add up to at most 1.16 * dblEpsilon, for a
		// total error of 5 * dblEpsilon.
		//
		// We add 3 * dblEpsilon to the bound here, and GetBound() will pad
		// the bound by another 2 * dblEpsilon.
		maxLat := math.Min(
			math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
			math.Pi/2)

		// In order to get tight bounds when the two points are close together,
		// we also bound the min/max latitude relative to the latitudes of the
		// endpoints A and B. First we compute the distance between A and B,
		// and then we compute the maximum change in latitude between any two
		// points along the great circle that are separated by this distance.
		// This gives us a latitude change "budget". Some of this budget must
		// be spent getting from A to B; the remainder bounds the round-trip
		// distance (in latitude) from A or B to the min or max latitude
		// attained along the edge AB.
		latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
		maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon

		// Test whether AB passes through the point of maximum latitude or
		// minimum latitude. If the dot product(s) are small enough then the
		// result may be ambiguous.
		if mA <= mError && mB >= -mError {
			latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
		}
		if mB <= mError && mA >= -mError {
			latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
		}
	}
	// Advance the chain: b becomes the previous vertex, and the bound of
	// the edge AB is folded into the accumulated bound.
	r.a = b
	r.aLL = bLL
	r.bound = r.bound.Union(Rect{latAB, lngAB})
}
+
+// RectBound returns the bounding rectangle of the edge chain that connects the
+// vertices defined so far. This bound satisfies the guarantee made
+// above, i.e. if the edge chain defines a Loop, then the bound contains
+// the LatLng coordinates of all Points contained by the loop.
+func (r *RectBounder) RectBound() Rect {
+	return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
+}
+
// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
// contain the bounds of any subregion whose bounds are computed using
// ComputeRectBound. For example, consider a loop L that defines a square.
// GetBound ensures that if a point P is contained by this square, then
// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
// shaped loop S contained by L. It is possible that GetBound returns a
// *larger* bound for S than it does for L, due to rounding errors. This
// method expands the bound for L so that it is guaranteed to contain the
// bounds of any subregion S.
//
// More precisely, if L is a loop that does not contain either pole, and S
// is a loop such that L.Contains(S), then
//
//   ExpandForSubregions(L.RectBound).Contains(S.RectBound).
//
func ExpandForSubregions(bound Rect) Rect {
	// Empty bounds don't need expansion.
	if bound.IsEmpty() {
		return bound
	}

	// First we need to check whether the bound B contains any nearly-antipodal
	// points (to within 4.309 * dblEpsilon). If so then we need to return
	// FullRect, since the subregion might have an edge between two
	// such points, and AddPoint returns Full for such edges. Note that
	// this can happen even if B is not Full; for example, consider a loop
	// that defines a 10km strip straddling the equator extending from
	// longitudes -100 to +100 degrees.
	//
	// It is easy to check whether B contains any antipodal points, but checking
	// for nearly-antipodal points is trickier. Essentially we consider the
	// original bound B and its reflection through the origin B', and then test
	// whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.

	// lngGap is a lower bound on the longitudinal distance between B and its
	// reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
	// endpoint longitude calculations and the Length call.)
	lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)

	// minAbsLat is the minimum distance from B to the equator (if zero or
	// negative, then B straddles the equator).
	minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)

	// latGapSouth and latGapNorth measure the minimum distance from B to the
	// south and north poles respectively.
	latGapSouth := math.Pi/2 + bound.Lat.Lo
	latGapNorth := math.Pi/2 - bound.Lat.Hi

	if minAbsLat >= 0 {
		// The bound B does not straddle the equator. In this case the minimum
		// distance is between one endpoint of the latitude edge in B closest to
		// the equator and the other endpoint of that edge in B'. The latitude
		// distance between these two points is 2*minAbsLat, and the longitude
		// distance is lngGap. We could compute the distance exactly using the
		// Haversine formula, but then we would need to bound the errors in that
		// calculation. Since we only need accuracy when the distance is very
		// small (close to 4.309 * dblEpsilon), we substitute the Euclidean
		// distance instead. This gives us a right triangle XYZ with two edges of
		// length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
		// length of the third edge z, and we have
		//
		//         z  ~=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
		//
		// Therefore the region may contain nearly antipodal points only if
		//
		//  2*minAbsLat + lngGap  <  sqrt(2) * 4.309 * dblEpsilon
		//                        ~= 1.354e-15
		//
		// Note that because the given bound B is conservative, minAbsLat and
		// lngGap are both lower bounds on their true values so we do not need
		// to make any adjustments for their errors.
		if 2*minAbsLat+lngGap < 1.354e-15 {
			return FullRect()
		}
	} else if lngGap >= math.Pi/2 {
		// B spans at most Pi/2 in longitude. The minimum distance is always
		// between one corner of B and the diagonally opposite corner of B'. We
		// use the same distance approximation that we used above; in this case
		// we have an obtuse triangle XYZ with two edges of length x = latGapSouth
		// and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
		//
		//         z  >=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
		//
		// Unlike the case above, latGapSouth and latGapNorth are not lower bounds
		// (because of the extra addition operation, and because math.Pi/2 is not
		// exactly equal to Pi/2); they can exceed their true values by up to
		// 0.75 * dblEpsilon. Putting this all together, the region may contain
		// nearly antipodal points only if
		//
		//   latGapSouth + latGapNorth  <  (sqrt(2) * 4.309 + 1.5) * dblEpsilon
		//                              ~= 1.687e-15
		if latGapSouth+latGapNorth < 1.687e-15 {
			return FullRect()
		}
	} else {
		// Otherwise we know that (1) the bound straddles the equator and (2) its
		// width in longitude is at least Pi/2. In this case the minimum
		// distance can occur either between a corner of B and the diagonally
		// opposite corner of B' (as in the case above), or between a corner of B
		// and the opposite longitudinal edge reflected in B'. It is sufficient
		// to only consider the corner-edge case, since this distance is also a
		// lower bound on the corner-corner distance when that case applies.

		// Consider the spherical triangle XYZ where X is a corner of B with
		// minimum absolute latitude, Y is the closest pole to X, and Z is the
		// point closest to X on the opposite longitudinal edge of B'. This is a
		// right triangle (Z = Pi/2), and from the spherical law of sines we have
		//
		//     sin(z) / sin(Z)  =  sin(y) / sin(Y)
		//     sin(maxLatGap) / 1  =  sin(dMin) / sin(lngGap)
		//     sin(dMin)  =  sin(maxLatGap) * sin(lngGap)
		//
		// where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
		// desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
		// for 0 <= t <= Pi/2, that we only need an accurate approximation when
		// at least one of "maxLatGap" or lngGap is extremely small (in which
		// case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
		// to 0.75 * dblEpsilon, we want to test whether
		//
		//   maxLatGap * lngGap  <  (4.309 + 0.75) * (Pi/2) * dblEpsilon
		//                       ~= 1.765e-15
		if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
			return FullRect()
		}
	}
	// Next we need to check whether the subregion might contain any edges that
	// span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
	// sets the longitude bound to Full in that case. This corresponds to
	// testing whether (lngGap <= 0) in lngExpansion below.

	// Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
	// In the worst case, the errors when computing the latitude bound for a
	// subregion could go in the opposite direction as the errors when computing
	// the bound for the original region, so we need to double this value.
	// (More analysis shows that it's okay to round down to a multiple of
	// dblEpsilon.)
	//
	// For longitude, we rely on the fact that atan2 is correctly rounded and
	// therefore no additional bounds expansion is necessary.

	latExpansion := 9 * dblEpsilon
	lngExpansion := 0.0
	if lngGap <= 0 {
		lngExpansion = math.Pi
	}
	return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
}

+ 0 - 862
vendor/github.com/golang/geo/s2/rect_test.go

@@ -1,862 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-func TestRectEmptyAndFull(t *testing.T) {
-	tests := []struct {
-		rect  Rect
-		valid bool
-		empty bool
-		full  bool
-		point bool
-	}{
-		{EmptyRect(), true, true, false, false},
-		{FullRect(), true, false, true, false},
-	}
-
-	for _, test := range tests {
-		if got := test.rect.IsValid(); got != test.valid {
-			t.Errorf("%v.IsValid() = %v, want %v", test.rect, got, test.valid)
-		}
-		if got := test.rect.IsEmpty(); got != test.empty {
-			t.Errorf("%v.IsEmpty() = %v, want %v", test.rect, got, test.empty)
-		}
-		if got := test.rect.IsFull(); got != test.full {
-			t.Errorf("%v.IsFull() = %v, want %v", test.rect, got, test.full)
-		}
-		if got := test.rect.IsPoint(); got != test.point {
-			t.Errorf("%v.IsPoint() = %v, want %v", test.rect, got, test.point)
-		}
-	}
-}
-
-func TestRectArea(t *testing.T) {
-	tests := []struct {
-		rect Rect
-		want float64
-	}{
-		{Rect{}, 0},
-		{FullRect(), 4 * math.Pi},
-		{Rect{r1.Interval{0, math.Pi / 2}, s1.Interval{0, math.Pi / 2}}, math.Pi / 2},
-	}
-	for _, test := range tests {
-		if got := test.rect.Area(); !float64Eq(got, test.want) {
-			t.Errorf("%v.Area() = %v, want %v", test.rect, got, test.want)
-		}
-	}
-}
-
-func TestRectString(t *testing.T) {
-	const want = "[Lo[-90.0000000, -180.0000000], Hi[90.0000000, 180.0000000]]"
-	if s := FullRect().String(); s != want {
-		t.Errorf("FullRect().String() = %q, want %q", s, want)
-	}
-}
-
-func TestRectFromLatLng(t *testing.T) {
-	ll := LatLngFromDegrees(23, 47)
-	got := RectFromLatLng(ll)
-	if got.Center() != ll {
-		t.Errorf("RectFromLatLng(%v).Center() = %v, want %v", ll, got.Center(), ll)
-	}
-	if !got.IsPoint() {
-		t.Errorf("RectFromLatLng(%v) = %v, want a point", ll, got)
-	}
-}
-
-func rectFromDegrees(latLo, lngLo, latHi, lngHi float64) Rect {
-	// Convenience method to construct a rectangle. This method is
-	// intentionally *not* in the S2LatLngRect interface because the
-	// argument order is ambiguous, but is fine for the test.
-	return Rect{
-		Lat: r1.Interval{
-			Lo: (s1.Angle(latLo) * s1.Degree).Radians(),
-			Hi: (s1.Angle(latHi) * s1.Degree).Radians(),
-		},
-		Lng: s1.IntervalFromEndpoints(
-			(s1.Angle(lngLo) * s1.Degree).Radians(),
-			(s1.Angle(lngHi) * s1.Degree).Radians(),
-		),
-	}
-}
-
-func TestRectFromCenterSize(t *testing.T) {
-	tests := []struct {
-		center, size LatLng
-		want         Rect
-	}{
-		{
-			LatLngFromDegrees(80, 170),
-			LatLngFromDegrees(40, 60),
-			rectFromDegrees(60, 140, 90, -160),
-		},
-		{
-			LatLngFromDegrees(10, 40),
-			LatLngFromDegrees(210, 400),
-			FullRect(),
-		},
-		{
-			LatLngFromDegrees(-90, 180),
-			LatLngFromDegrees(20, 50),
-			rectFromDegrees(-90, 155, -80, -155),
-		},
-	}
-	for _, test := range tests {
-		if got := RectFromCenterSize(test.center, test.size); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
-			t.Errorf("RectFromCenterSize(%v,%v) was %v, want %v", test.center, test.size, got, test.want)
-		}
-	}
-}
-
-func TestRectAddPoint(t *testing.T) {
-	tests := []struct {
-		input Rect
-		point LatLng
-		want  Rect
-	}{
-		{
-			Rect{r1.EmptyInterval(), s1.EmptyInterval()},
-			LatLngFromDegrees(0, 0),
-			rectFromDegrees(0, 0, 0, 0),
-		},
-		{
-			rectFromDegrees(0, 0, 0, 0),
-			LatLng{0 * s1.Radian, (-math.Pi / 2) * s1.Radian},
-			rectFromDegrees(0, -90, 0, 0),
-		},
-		{
-			rectFromDegrees(0, -90, 0, 0),
-			LatLng{(math.Pi / 4) * s1.Radian, (-math.Pi) * s1.Radian},
-			rectFromDegrees(0, -180, 45, 0),
-		},
-		{
-			rectFromDegrees(0, -180, 45, 0),
-			LatLng{(math.Pi / 2) * s1.Radian, 0 * s1.Radian},
-			rectFromDegrees(0, -180, 90, 0),
-		},
-	}
-	for _, test := range tests {
-		if got, want := test.input.AddPoint(test.point), test.want; !rectsApproxEqual(got, want, epsilon, epsilon) {
-			t.Errorf("%v.AddPoint(%v) was %v, want %v", test.input, test.point, got, want)
-		}
-	}
-}
-func TestRectVertex(t *testing.T) {
-	r1 := Rect{r1.Interval{0, math.Pi / 2}, s1.IntervalFromEndpoints(-math.Pi, 0)}
-	tests := []struct {
-		r    Rect
-		i    int
-		want LatLng
-	}{
-		{r1, 0, LatLng{0, math.Pi}},
-		{r1, 1, LatLng{0, 0}},
-		{r1, 2, LatLng{math.Pi / 2, 0}},
-		{r1, 3, LatLng{math.Pi / 2, math.Pi}},
-	}
-
-	for _, test := range tests {
-		if got := test.r.Vertex(test.i); got != test.want {
-			t.Errorf("%v.Vertex(%d) = %v, want %v", test.r, test.i, got, test.want)
-		}
-	}
-}
-func TestRectVertexCCWOrder(t *testing.T) {
-	for i := 0; i < 4; i++ {
-		lat := math.Pi / 4 * float64(i-2)
-		lng := math.Pi/2*float64(i-2) + 0.2
-		r := Rect{
-			r1.Interval{lat, lat + math.Pi/4},
-			s1.Interval{
-				math.Remainder(lng, 2*math.Pi),
-				math.Remainder(lng+math.Pi/2, 2*math.Pi),
-			},
-		}
-
-		for k := 0; k < 4; k++ {
-			if !Sign(PointFromLatLng(r.Vertex((k-1)&3)), PointFromLatLng(r.Vertex(k)), PointFromLatLng(r.Vertex((k+1)&3))) {
-				t.Errorf("%v.Vertex(%v), vertices were not in CCW order", r, k)
-			}
-		}
-	}
-}
-
-func TestRectContainsLatLng(t *testing.T) {
-	tests := []struct {
-		input Rect
-		ll    LatLng
-		want  bool
-	}{
-		{
-			rectFromDegrees(0, -180, 90, 0),
-			LatLngFromDegrees(30, -45),
-			true,
-		},
-		{
-			rectFromDegrees(0, -180, 90, 0),
-			LatLngFromDegrees(30, 45),
-			false,
-		},
-		{
-			rectFromDegrees(0, -180, 90, 0),
-			LatLngFromDegrees(0, -180),
-			true,
-		},
-		{
-			rectFromDegrees(0, -180, 90, 0),
-			LatLngFromDegrees(90, 0),
-			true,
-		},
-	}
-	for _, test := range tests {
-		if got, want := test.input.ContainsLatLng(test.ll), test.want; got != want {
-			t.Errorf("%v.ContainsLatLng(%v) was %v, want %v", test.input, test.ll, got, want)
-		}
-	}
-}
-
-func TestRectExpanded(t *testing.T) {
-	tests := []struct {
-		input  Rect
-		margin LatLng
-		want   Rect
-	}{
-		{
-			rectFromDegrees(70, 150, 80, 170),
-			LatLngFromDegrees(20, 30),
-			rectFromDegrees(50, 120, 90, -160),
-		},
-		{
-			EmptyRect(),
-			LatLngFromDegrees(20, 30),
-			EmptyRect(),
-		},
-		{
-			FullRect(),
-			LatLngFromDegrees(500, 500),
-			FullRect(),
-		},
-		{
-			rectFromDegrees(-90, 170, 10, 20),
-			LatLngFromDegrees(30, 80),
-			rectFromDegrees(-90, -180, 40, 180),
-		},
-
-		// Negative margins.
-		{
-			rectFromDegrees(10, -50, 60, 70),
-			LatLngFromDegrees(-10, -10),
-			rectFromDegrees(20, -40, 50, 60),
-		},
-		{
-			rectFromDegrees(-20, -180, 20, 180),
-			LatLngFromDegrees(-10, -10),
-			rectFromDegrees(-10, -180, 10, 180),
-		},
-		{
-			rectFromDegrees(-20, -180, 20, 180),
-			LatLngFromDegrees(-30, -30),
-			EmptyRect(),
-		},
-		{
-			rectFromDegrees(-90, 10, 90, 11),
-			LatLngFromDegrees(-10, -10),
-			EmptyRect(),
-		},
-		{
-			rectFromDegrees(-90, 10, 90, 100),
-			LatLngFromDegrees(-10, -10),
-			rectFromDegrees(-80, 20, 80, 90),
-		},
-		{
-			EmptyRect(),
-			LatLngFromDegrees(-50, -500),
-			EmptyRect(),
-		},
-		{
-			FullRect(),
-			LatLngFromDegrees(-50, -50),
-			rectFromDegrees(-40, -180, 40, 180),
-		},
-
-		// Mixed margins.
-		{
-			rectFromDegrees(10, -50, 60, 70),
-			LatLngFromDegrees(-10, 30),
-			rectFromDegrees(20, -80, 50, 100),
-		},
-		{
-			rectFromDegrees(-20, -180, 20, 180),
-			LatLngFromDegrees(10, -500),
-			rectFromDegrees(-30, -180, 30, 180),
-		},
-		{
-			rectFromDegrees(-90, -180, 80, 180),
-			LatLngFromDegrees(-30, 500),
-			rectFromDegrees(-60, -180, 50, 180),
-		},
-		{
-			rectFromDegrees(-80, -100, 80, 150),
-			LatLngFromDegrees(30, -50),
-			rectFromDegrees(-90, -50, 90, 100),
-		},
-		{
-			rectFromDegrees(0, -180, 50, 180),
-			LatLngFromDegrees(-30, 500),
-			EmptyRect(),
-		},
-		{
-			rectFromDegrees(-80, 10, 70, 20),
-			LatLngFromDegrees(30, -200),
-			EmptyRect(),
-		},
-		{
-			EmptyRect(),
-			LatLngFromDegrees(100, -100),
-			EmptyRect(),
-		},
-		{
-			FullRect(),
-			LatLngFromDegrees(100, -100),
-			FullRect(),
-		},
-	}
-	for _, test := range tests {
-		if got, want := test.input.expanded(test.margin), test.want; !rectsApproxEqual(got, want, epsilon, epsilon) {
-			t.Errorf("%v.Expanded(%v) was %v, want %v", test.input, test.margin, got, want)
-		}
-	}
-}
-
-func TestRectPolarClosure(t *testing.T) {
-	tests := []struct {
-		r    Rect
-		want Rect
-	}{
-		{
-			rectFromDegrees(-89, 0, 89, 1),
-			rectFromDegrees(-89, 0, 89, 1),
-		},
-		{
-			rectFromDegrees(-90, -30, -45, 100),
-			rectFromDegrees(-90, -180, -45, 180),
-		},
-		{
-			rectFromDegrees(89, 145, 90, 146),
-			rectFromDegrees(89, -180, 90, 180),
-		},
-		{
-			rectFromDegrees(-90, -145, 90, -144),
-			FullRect(),
-		},
-	}
-	for _, test := range tests {
-		if got := test.r.PolarClosure(); !rectsApproxEqual(got, test.want, epsilon, epsilon) {
-			t.Errorf("%v.PolarClosure() was %v, want %v", test.r, got, test.want)
-		}
-	}
-}
-
-func TestRectCapBound(t *testing.T) {
-	tests := []struct {
-		r    Rect
-		want Cap
-	}{
-		{ // Bounding cap at center is smaller.
-			rectFromDegrees(-45, -45, 45, 45),
-			CapFromCenterHeight(Point{r3.Vector{1, 0, 0}}, 0.5),
-		},
-		{ // Bounding cap at north pole is smaller.
-			rectFromDegrees(88, -80, 89, 80),
-			CapFromCenterAngle(Point{r3.Vector{0, 0, 1}}, s1.Angle(2)*s1.Degree),
-		},
-		{ // Longitude span > 180 degrees.
-			rectFromDegrees(-30, -150, -10, 50),
-			CapFromCenterAngle(Point{r3.Vector{0, 0, -1}}, s1.Angle(80)*s1.Degree),
-		},
-	}
-	for _, test := range tests {
-		if got := test.r.CapBound(); !test.want.ApproxEqual(got) {
-			t.Errorf("%v.CapBound() was %v, want %v", test.r, got, test.want)
-		}
-	}
-}
-
-func TestRectIntervalOps(t *testing.T) {
-	// Rectangle that covers one-quarter of the sphere.
-	rect := rectFromDegrees(0, -180, 90, 0)
-
-	// Test operations where one rectangle consists of a single point.
-	rectMid := rectFromDegrees(45, -90, 45, -90)
-	rect180 := rectFromDegrees(0, -180, 0, -180)
-	northPole := rectFromDegrees(90, 0, 90, 0)
-
-	tests := []struct {
-		rect         Rect
-		other        Rect
-		contains     bool
-		intersects   bool
-		union        Rect
-		intersection Rect
-	}{
-		{
-			rect:         rect,
-			other:        rectMid,
-			contains:     true,
-			intersects:   true,
-			union:        rect,
-			intersection: rectMid,
-		},
-		{
-			rect:         rect,
-			other:        rect180,
-			contains:     true,
-			intersects:   true,
-			union:        rect,
-			intersection: rect180,
-		},
-		{
-			rect:         rect,
-			other:        northPole,
-			contains:     true,
-			intersects:   true,
-			union:        rect,
-			intersection: northPole,
-		},
-		{
-			rect:         rect,
-			other:        rectFromDegrees(-10, -1, 1, 20),
-			contains:     false,
-			intersects:   true,
-			union:        rectFromDegrees(-10, 180, 90, 20),
-			intersection: rectFromDegrees(0, -1, 1, 0),
-		},
-		{
-			rect:         rect,
-			other:        rectFromDegrees(-10, -1, 0, 20),
-			contains:     false,
-			intersects:   true,
-			union:        rectFromDegrees(-10, 180, 90, 20),
-			intersection: rectFromDegrees(0, -1, 0, 0),
-		},
-		{
-			rect:         rect,
-			other:        rectFromDegrees(-10, 0, 1, 20),
-			contains:     false,
-			intersects:   true,
-			union:        rectFromDegrees(-10, 180, 90, 20),
-			intersection: rectFromDegrees(0, 0, 1, 0),
-		},
-		{
-			rect:         rectFromDegrees(-15, -160, -15, -150),
-			other:        rectFromDegrees(20, 145, 25, 155),
-			contains:     false,
-			intersects:   false,
-			union:        rectFromDegrees(-15, 145, 25, -150),
-			intersection: EmptyRect(),
-		},
-		{
-			rect:         rectFromDegrees(70, -10, 90, -140),
-			other:        rectFromDegrees(60, 175, 80, 5),
-			contains:     false,
-			intersects:   true,
-			union:        rectFromDegrees(60, -180, 90, 180),
-			intersection: rectFromDegrees(70, 175, 80, 5),
-		},
-
-		// Check that the intersection of two rectangles that overlap in latitude
-		// but not longitude is valid, and vice versa.
-		{
-			rect:         rectFromDegrees(12, 30, 60, 60),
-			other:        rectFromDegrees(0, 0, 30, 18),
-			contains:     false,
-			intersects:   false,
-			union:        rectFromDegrees(0, 0, 60, 60),
-			intersection: EmptyRect(),
-		},
-		{
-			rect:         rectFromDegrees(0, 0, 18, 42),
-			other:        rectFromDegrees(30, 12, 42, 60),
-			contains:     false,
-			intersects:   false,
-			union:        rectFromDegrees(0, 0, 42, 60),
-			intersection: EmptyRect(),
-		},
-	}
-	for _, test := range tests {
-		if got := test.rect.Contains(test.other); got != test.contains {
-			t.Errorf("%v.Contains(%v) = %t, want %t", test.rect, test.other, got, test.contains)
-		}
-
-		if got := test.rect.Intersects(test.other); got != test.intersects {
-			t.Errorf("%v.Intersects(%v) = %t, want %t", test.rect, test.other, got, test.intersects)
-		}
-
-		if got := test.rect.Union(test.other) == test.rect; test.rect.Contains(test.other) != got {
-			t.Errorf("%v.Union(%v) == %v = %t, want %t",
-				test.rect, test.other, test.other, got, test.rect.Contains(test.other),
-			)
-		}
-
-		if got := test.rect.Intersection(test.other).IsEmpty(); test.rect.Intersects(test.other) == got {
-			t.Errorf("%v.Intersection(%v).IsEmpty() = %t, want %t",
-				test.rect, test.other, got, test.rect.Intersects(test.other))
-		}
-
-		if got := test.rect.Union(test.other); got != test.union {
-			t.Errorf("%v.Union(%v) = %v, want %v", test.rect, test.other, got, test.union)
-		}
-
-		if got := test.rect.Intersection(test.other); got != test.intersection {
-			t.Errorf("%v.Intersection(%v) = %v, want %v", test.rect, test.other, got, test.intersection)
-		}
-	}
-}
-
-func TestRectCellOps(t *testing.T) {
-	cell0 := CellFromPoint(Point{r3.Vector{1 + 1e-12, 1, 1}})
-	v0 := LatLngFromPoint(cell0.Vertex(0))
-
-	cell202 := CellFromCellID(CellIDFromFacePosLevel(2, 0, 2))
-	bound202 := cell202.RectBound()
-
-	tests := []struct {
-		r          Rect
-		c          Cell
-		contains   bool
-		intersects bool
-	}{
-		// Special cases
-		{
-			r:          EmptyRect(),
-			c:          CellFromCellID(CellIDFromFacePosLevel(3, 0, 0)),
-			contains:   false,
-			intersects: false,
-		},
-		{
-			r:          FullRect(),
-			c:          CellFromCellID(CellIDFromFacePosLevel(2, 0, 0)),
-			contains:   true,
-			intersects: true,
-		},
-		{
-			r:          FullRect(),
-			c:          CellFromCellID(CellIDFromFacePosLevel(5, 0, 25)),
-			contains:   true,
-			intersects: true,
-		},
-		// This rectangle includes the first quadrant of face 0.  It's expanded
-		// slightly because cell bounding rectangles are slightly conservative.
-		{
-			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
-			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 0)),
-			contains:   false,
-			intersects: true,
-		},
-		{
-			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
-			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 1)),
-			contains:   true,
-			intersects: true,
-		},
-		{
-			r:          rectFromDegrees(-45.1, -45.1, 0.1, 0.1),
-			c:          CellFromCellID(CellIDFromFacePosLevel(1, 0, 1)),
-			contains:   false,
-			intersects: false,
-		},
-		// This rectangle intersects the first quadrant of face 0.
-		{
-			r:          rectFromDegrees(-10, -45, 10, 0),
-			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 0)),
-			contains:   false,
-			intersects: true,
-		},
-		{
-			r:          rectFromDegrees(-10, -45, 10, 0),
-			c:          CellFromCellID(CellIDFromFacePosLevel(0, 0, 1)),
-			contains:   false,
-			intersects: true,
-		},
-		{
-			r:          rectFromDegrees(-10, -45, 10, 0),
-			c:          CellFromCellID(CellIDFromFacePosLevel(1, 0, 1)),
-			contains:   false,
-			intersects: false,
-		},
-		// Rectangle consisting of a single point.
-		{
-			r:          rectFromDegrees(4, 4, 4, 4),
-			c:          CellFromCellID(CellIDFromFace(0)),
-			contains:   false,
-			intersects: true,
-		},
-		// Rectangles that intersect the bounding rectangle of a face
-		// but not the face itself.
-		{
-			r:          rectFromDegrees(41, -87, 42, -79),
-			c:          CellFromCellID(CellIDFromFace(2)),
-			contains:   false,
-			intersects: false,
-		},
-		{
-			r:          rectFromDegrees(-41, 160, -40, -160),
-			c:          CellFromCellID(CellIDFromFace(5)),
-			contains:   false,
-			intersects: false,
-		},
-		{
-			// This is the leaf cell at the top right hand corner of face 0.
-			// It has two angles of 60 degrees and two of 120 degrees.
-			r: rectFromDegrees(v0.Lat.Degrees()-1e-8,
-				v0.Lng.Degrees()-1e-8,
-				v0.Lat.Degrees()-2e-10,
-				v0.Lng.Degrees()+1e-10),
-			c:          cell0,
-			contains:   false,
-			intersects: false,
-		},
-		{
-			// Rectangles that intersect a face but where no vertex of one region
-			// is contained by the other region.  The first one passes through
-			// a corner of one of the face cells.
-			r:          rectFromDegrees(-37, -70, -36, -20),
-			c:          CellFromCellID(CellIDFromFace(5)),
-			contains:   false,
-			intersects: true,
-		},
-		{
-			// These two intersect like a diamond and a square.
-			r: rectFromDegrees(bound202.Lo().Lat.Degrees()+3,
-				bound202.Lo().Lng.Degrees()+3,
-				bound202.Hi().Lat.Degrees()-3,
-				bound202.Hi().Lng.Degrees()-3),
-			c:          cell202,
-			contains:   false,
-			intersects: true,
-		},
-		{
-			// from a bug report
-			r:          rectFromDegrees(34.2572864, 135.2673642, 34.2707907, 135.2995742),
-			c:          CellFromCellID(0x6007500000000000),
-			contains:   false,
-			intersects: true,
-		},
-	}
-
-	for _, test := range tests {
-		if got := test.r.ContainsCell(test.c); got != test.contains {
-			t.Errorf("%v.ContainsCell(%v) = %t, want %t", test.r, test.c, got, test.contains)
-		}
-
-		if got := test.r.IntersectsCell(test.c); got != test.intersects {
-			t.Errorf("%v.IntersectsCell(%v) = %t, want %t", test.r, test.c, got, test.intersects)
-		}
-	}
-
-}
-
-func TestRectContainsPoint(t *testing.T) {
-	r1 := rectFromDegrees(0, -180, 90, 0)
-
-	tests := []struct {
-		r    Rect
-		p    Point
-		want bool
-	}{
-		{r1, Point{r3.Vector{0.5, -0.3, 0.1}}, true},
-		{r1, Point{r3.Vector{0.5, 0.2, 0.1}}, false},
-	}
-	for _, test := range tests {
-		if got, want := test.r.ContainsPoint(test.p), test.want; got != want {
-			t.Errorf("%v.ContainsPoint(%v) was %v, want %v", test.r, test.p, got, want)
-		}
-	}
-}
-
-func TestRectIntersectsLatEdge(t *testing.T) {
-	tests := []struct {
-		a, b  Point
-		lat   s1.Angle
-		lngLo s1.Angle
-		lngHi s1.Angle
-		want  bool
-	}{
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			lat:   41 * s1.Degree,
-			lngLo: -87 * s1.Degree,
-			lngHi: -79 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			lat:   42 * s1.Degree,
-			lngLo: -87 * s1.Degree,
-			lngHi: -79 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, -1}},
-			b:     Point{r3.Vector{1, 1, 0}},
-			lat:   -3 * s1.Degree,
-			lngLo: -1 * s1.Degree,
-			lngHi: 23 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{1, 0, 1}},
-			b:     Point{r3.Vector{1, -1, 0}},
-			lat:   -28 * s1.Degree,
-			lngLo: 69 * s1.Degree,
-			lngHi: 115 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{0, 1, 0}},
-			b:     Point{r3.Vector{1, -1, -1}},
-			lat:   44 * s1.Degree,
-			lngLo: 60 * s1.Degree,
-			lngHi: 177 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{0, 1, 1}},
-			b:     Point{r3.Vector{0, 1, -1}},
-			lat:   -25 * s1.Degree,
-			lngLo: -74 * s1.Degree,
-			lngHi: -165 * s1.Degree,
-			want:  true,
-		},
-		{
-			a:     Point{r3.Vector{1, 0, 0}},
-			b:     Point{r3.Vector{0, 0, -1}},
-			lat:   -4 * s1.Degree,
-			lngLo: -152 * s1.Degree,
-			lngHi: 171 * s1.Degree,
-			want:  true,
-		},
-		// from a bug report
-		{
-			a:     Point{r3.Vector{-0.589375791872893683986945, 0.583248451588733285433364, 0.558978908075738245564423}},
-			b:     Point{r3.Vector{-0.587388131301997518107783, 0.581281455376392863776402, 0.563104832905072516524569}},
-			lat:   34.2572864 * s1.Degree,
-			lngLo: 2.3608609 * s1.Radian,
-			lngHi: 2.3614230 * s1.Radian,
-			want:  true,
-		},
-	}
-
-	for _, test := range tests {
-		if got := intersectsLatEdge(test.a, test.b, test.lat, s1.Interval{float64(test.lngLo), float64(test.lngHi)}); got != test.want {
-			t.Errorf("intersectsLatEdge(%v, %v, %v, {%v, %v}) = %t, want %t",
-				test.a, test.b, test.lat, test.lngLo, test.lngHi, got, test.want)
-		}
-	}
-}
-
-func TestRectIntersectsLngEdge(t *testing.T) {
-	tests := []struct {
-		a, b  Point
-		latLo s1.Angle
-		latHi s1.Angle
-		lng   s1.Angle
-		want  bool
-	}{
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			latLo: 41 * s1.Degree,
-			latHi: 42 * s1.Degree,
-			lng:   -79 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			latLo: 41 * s1.Degree,
-			latHi: 42 * s1.Degree,
-			lng:   -87 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			latLo: 42 * s1.Degree,
-			latHi: 41 * s1.Degree,
-			lng:   79 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, 1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			latLo: 41 * s1.Degree,
-			latHi: 42 * s1.Degree,
-			lng:   87 * s1.Degree,
-			want:  false,
-		},
-		{
-			a:     Point{r3.Vector{0, -1, -1}},
-			b:     Point{r3.Vector{-1, 0, -1}},
-			latLo: -87 * s1.Degree,
-			latHi: 13 * s1.Degree,
-			lng:   -143 * s1.Degree,
-			want:  true,
-		},
-		{
-			a:     Point{r3.Vector{1, 1, -1}},
-			b:     Point{r3.Vector{1, -1, 1}},
-			latLo: -64 * s1.Degree,
-			latHi: 13 * s1.Degree,
-			lng:   40 * s1.Degree,
-			want:  true,
-		},
-		{
-			a:     Point{r3.Vector{1, 1, 0}},
-			b:     Point{r3.Vector{-1, 0, -1}},
-			latLo: -64 * s1.Degree,
-			latHi: 56 * s1.Degree,
-			lng:   151 * s1.Degree,
-			want:  true,
-		},
-		{
-			a:     Point{r3.Vector{-1, -1, 0}},
-			b:     Point{r3.Vector{1, -1, -1}},
-			latLo: -50 * s1.Degree,
-			latHi: 18 * s1.Degree,
-			lng:   -84 * s1.Degree,
-			want:  true,
-		},
-	}
-
-	for _, test := range tests {
-		if got := intersectsLngEdge(test.a, test.b, r1.Interval{float64(test.latLo), float64(test.latHi)}, test.lng); got != test.want {
-			t.Errorf("intersectsLngEdge(%v, %v, {%v, %v}, %v) = %v, want %v",
-				test.a, test.b, test.latLo, test.latHi, test.lng, got, test.want)
-		}
-	}
-}

+ 37 - 17
vendor/github.com/golang/geo/s2/region.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
@@ -37,15 +35,37 @@ type Region interface {
 	// if intersection could not be determined. It returns false if the region
 	// if intersection could not be determined. It returns false if the region
 	// does not intersect.
 	// does not intersect.
 	IntersectsCell(c Cell) bool
 	IntersectsCell(c Cell) bool
+
+	// ContainsPoint reports whether the region contains the given point or not.
+	// The point should be unit length, although some implementations may relax
+	// this restriction.
+	ContainsPoint(p Point) bool
+
+	// CellUnionBound returns a small collection of CellIDs whose union covers
+	// the region. The cells are not sorted, may have redundancies (such as cells
+	// that contain other cells), and may cover much more area than necessary.
+	//
+	// This method is not intended for direct use by client code. Clients
+	// should typically use Covering, which has options to control the size and
+	// accuracy of the covering. Alternatively, if you want a fast covering and
+	// don't care about accuracy, consider calling FastCovering (which returns a
+	// cleaned-up version of the covering computed by this method).
+	//
+	// CellUnionBound implementations should attempt to return a small
+	// covering (ideally 4 cells or fewer) that covers the region and can be
+	// computed quickly. The result is used by RegionCoverer as a starting
+	// point for further refinement.
+	CellUnionBound() []CellID
 }
 }
 
 
-// Enforce interface satisfaction.
+// Enforce Region interface satisfaction.
 var (
 var (
 	_ Region = Cap{}
 	_ Region = Cap{}
 	_ Region = Cell{}
 	_ Region = Cell{}
 	_ Region = (*CellUnion)(nil)
 	_ Region = (*CellUnion)(nil)
+	_ Region = (*Loop)(nil)
 	_ Region = Point{}
 	_ Region = Point{}
-	//_ Region = (*Polygon)(nil)
+	_ Region = (*Polygon)(nil)
 	_ Region = (*Polyline)(nil)
 	_ Region = (*Polyline)(nil)
 	_ Region = Rect{}
 	_ Region = Rect{}
 )
 )

+ 23 - 57
vendor/github.com/golang/geo/s2/regioncoverer.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
@@ -99,20 +97,6 @@ type candidate struct {
 	priority    int          // Priority of the candiate.
 	priority    int          // Priority of the candiate.
 }
 }
 
 
-func min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
-
-func max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
-
 type priorityQueue []*candidate
 type priorityQueue []*candidate
 
 
 func (pq priorityQueue) Len() int {
 func (pq priorityQueue) Len() int {
@@ -273,9 +257,9 @@ func (c *coverer) adjustCellLevels(cells *CellUnion) {
 // initialCandidates computes a set of initial candidates that cover the given region.
 // initialCandidates computes a set of initial candidates that cover the given region.
 func (c *coverer) initialCandidates() {
 func (c *coverer) initialCandidates() {
 	// Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
 	// Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
-	temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: min(4, c.maxCells)}
+	temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
 
 
-	cells := temp.FastCovering(c.region.CapBound())
+	cells := temp.FastCovering(c.region)
 	c.adjustCellLevels(&cells)
 	c.adjustCellLevels(&cells)
 	for _, ci := range cells {
 	for _, ci := range cells {
 		c.addCandidate(c.newCandidate(CellFromCellID(ci)))
 		c.addCandidate(c.newCandidate(CellFromCellID(ci)))
@@ -329,9 +313,9 @@ func (c *coverer) coveringInternal(region Region) {
 // newCoverer returns an instance of coverer.
 // newCoverer returns an instance of coverer.
 func (rc *RegionCoverer) newCoverer() *coverer {
 func (rc *RegionCoverer) newCoverer() *coverer {
 	return &coverer{
 	return &coverer{
-		minLevel: max(0, min(maxLevel, rc.MinLevel)),
-		maxLevel: max(0, min(maxLevel, rc.MaxLevel)),
-		levelMod: max(1, min(3, rc.LevelMod)),
+		minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)),
+		maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)),
+		levelMod: maxInt(1, minInt(3, rc.LevelMod)),
 		maxCells: rc.MaxCells,
 		maxCells: rc.MaxCells,
 	}
 	}
 }
 }
@@ -339,14 +323,14 @@ func (rc *RegionCoverer) newCoverer() *coverer {
 // Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
 // Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
 func (rc *RegionCoverer) Covering(region Region) CellUnion {
 func (rc *RegionCoverer) Covering(region Region) CellUnion {
 	covering := rc.CellUnion(region)
 	covering := rc.CellUnion(region)
-	covering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
+	covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
 	return covering
 	return covering
 }
 }
 
 
 // InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
 // InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
 func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
 func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
 	intCovering := rc.InteriorCellUnion(region)
 	intCovering := rc.InteriorCellUnion(region)
-	intCovering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
+	intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
 	return intCovering
 	return intCovering
 }
 }
 
 
@@ -387,31 +371,13 @@ func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion {
 //
 //
 // This function is useful as a starting point for algorithms that
 // This function is useful as a starting point for algorithms that
 // recursively subdivide cells.
 // recursively subdivide cells.
-func (rc *RegionCoverer) FastCovering(cap Cap) CellUnion {
+func (rc *RegionCoverer) FastCovering(region Region) CellUnion {
 	c := rc.newCoverer()
 	c := rc.newCoverer()
-	cu := c.rawFastCovering(cap)
+	cu := CellUnion(region.CellUnionBound())
 	c.normalizeCovering(&cu)
 	c.normalizeCovering(&cu)
 	return cu
 	return cu
 }
 }
 
 
-// rawFastCovering computes a covering of the given cap. In general the covering consists of
-// at most 4 cells (except for very large caps, which may need up to 6 cells).
-// The output is not sorted.
-func (c *coverer) rawFastCovering(cap Cap) CellUnion {
-	var covering CellUnion
-	// Find the maximum level such that the cap contains at most one cell vertex
-	// and such that CellId.VertexNeighbors() can be called.
-	level := min(MinWidthMetric.MaxLevel(2*cap.Radius().Radians()), maxLevel-1)
-	if level == 0 {
-		for face := 0; face < 6; face++ {
-			covering = append(covering, CellIDFromFace(face))
-		}
-	} else {
-		covering = append(covering, cellIDFromPoint(cap.center).VertexNeighbors(level)...)
-	}
-	return covering
-}
-
 // normalizeCovering normalizes the "covering" so that it conforms to the current covering
 // normalizeCovering normalizes the "covering" so that it conforms to the current covering
 // parameters (MaxCells, minLevel, maxLevel, and levelMod).
 // parameters (MaxCells, minLevel, maxLevel, and levelMod).
 // This method makes no attempt to be optimal. In particular, if
 // This method makes no attempt to be optimal. In particular, if
@@ -425,7 +391,7 @@ func (c *coverer) normalizeCovering(covering *CellUnion) {
 	if c.maxLevel < maxLevel || c.levelMod > 1 {
 	if c.maxLevel < maxLevel || c.levelMod > 1 {
 		for i, ci := range *covering {
 		for i, ci := range *covering {
 			level := ci.Level()
 			level := ci.Level()
-			newLevel := c.adjustLevel(min(level, c.maxLevel))
+			newLevel := c.adjustLevel(minInt(level, c.maxLevel))
 			if newLevel != level {
 			if newLevel != level {
 				(*covering)[i] = ci.Parent(newLevel)
 				(*covering)[i] = ci.Parent(newLevel)
 			}
 			}

+ 0 - 151
vendor/github.com/golang/geo/s2/regioncoverer_test.go

@@ -1,151 +0,0 @@
-/*
-Copyright 2015 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"math/rand"
-	"reflect"
-	"testing"
-)
-
-func TestCovererRandomCells(t *testing.T) {
-	rc := &RegionCoverer{MinLevel: 0, MaxLevel: 30, LevelMod: 1, MaxCells: 1}
-
-	// Test random cell ids at all levels.
-	for i := 0; i < 10000; i++ {
-		id := CellID(randomUint64())
-		for !id.IsValid() {
-			id = CellID(randomUint64())
-		}
-		covering := rc.Covering(Region(CellFromCellID(id)))
-		if len(covering) != 1 {
-			t.Errorf("Iteration %d, cell ID token %s, got covering size = %d, want covering size = 1", i, id.ToToken(), len(covering))
-		}
-		if (covering)[0] != id {
-			t.Errorf("Iteration %d, cell ID token %s, got covering = %v, want covering = %v", i, id.ToToken(), covering, id)
-		}
-	}
-}
-
-// checkCovering reports whether covering is a valid cover for the region.
-func checkCovering(t *testing.T, rc *RegionCoverer, r Region, covering CellUnion, interior bool) {
-	// Keep track of how many cells have the same rc.MinLevel ancestor.
-	minLevelCells := map[CellID]int{}
-	var tempCover CellUnion
-	for _, ci := range covering {
-		level := ci.Level()
-		if level < rc.MinLevel {
-			t.Errorf("CellID(%s).Level() = %d, want >= %d", ci.ToToken(), level, rc.MinLevel)
-		}
-		if level > rc.MaxLevel {
-			t.Errorf("CellID(%s).Level() = %d, want <= %d", ci.ToToken(), level, rc.MaxLevel)
-		}
-		if rem := (level - rc.MinLevel) % rc.LevelMod; rem != 0 {
-			t.Errorf("(CellID(%s).Level() - MinLevel) mod LevelMod = %d, want = %d", ci.ToToken(), rem, 0)
-		}
-		tempCover = append(tempCover, ci)
-		minLevelCells[ci.Parent(rc.MinLevel)]++
-	}
-	if len(covering) > rc.MaxCells {
-		// If the covering has more than the requested number of cells, then check
-		// that the cell count cannot be reduced by using the parent of some cell.
-		for ci, count := range minLevelCells {
-			if count > 1 {
-				t.Errorf("Min level CellID %s, count = %d, want = %d", ci.ToToken(), count, 1)
-			}
-		}
-	}
-	if interior {
-		for _, ci := range covering {
-			if !r.ContainsCell(CellFromCellID(ci)) {
-				t.Errorf("Region(%v).ContainsCell(%v) = %t, want = %t", r, CellFromCellID(ci), false, true)
-			}
-		}
-	} else {
-		tempCover.Normalize()
-		checkCoveringTight(t, r, tempCover, true, 0)
-	}
-}
-
-// checkCoveringTight checks that "cover" completely covers the given region.
-// If "checkTight" is true, also checks that it does not contain any cells that
-// do not intersect the given region. ("id" is only used internally.)
-func checkCoveringTight(t *testing.T, r Region, cover CellUnion, checkTight bool, id CellID) {
-	if !id.IsValid() {
-		for f := 0; f < 6; f++ {
-			checkCoveringTight(t, r, cover, checkTight, CellIDFromFace(f))
-		}
-		return
-	}
-
-	if !r.IntersectsCell(CellFromCellID(id)) {
-		// If region does not intersect id, then neither should the covering.
-		if got := cover.IntersectsCellID(id); checkTight && got {
-			t.Errorf("CellUnion(%v).IntersectsCellID(%s) = %t; want = %t", cover, id.ToToken(), got, false)
-		}
-	} else if !cover.ContainsCellID(id) {
-		// The region may intersect id, but we can't assert that the covering
-		// intersects id because we may discover that the region does not actually
-		// intersect upon further subdivision.  (IntersectsCell is not exact.)
-		if got := r.ContainsCell(CellFromCellID(id)); got {
-			t.Errorf("Region(%v).ContainsCell(%v) = %t; want = %t", r, CellFromCellID(id), got, false)
-		}
-		if got := id.IsLeaf(); got {
-			t.Errorf("CellID(%s).IsLeaf() = %t; want = %t", id.ToToken(), got, false)
-		}
-
-		for child := id.ChildBegin(); child != id.ChildEnd(); child = child.Next() {
-			checkCoveringTight(t, r, cover, checkTight, child)
-		}
-	}
-}
-
-func TestCovererRandomCaps(t *testing.T) {
-	rc := &RegionCoverer{}
-	for i := 0; i < 1000; i++ {
-		rc.MinLevel = int(rand.Int31n(maxLevel + 1))
-		rc.MaxLevel = int(rand.Int31n(maxLevel + 1))
-		for rc.MinLevel > rc.MaxLevel {
-			rc.MinLevel = int(rand.Int31n(maxLevel + 1))
-			rc.MaxLevel = int(rand.Int31n(maxLevel + 1))
-		}
-		rc.LevelMod = int(1 + rand.Int31n(3))
-		rc.MaxCells = int(skewedInt(10))
-
-		maxArea := math.Min(4*math.Pi, float64(3*rc.MaxCells+1)*AvgAreaMetric.Value(rc.MinLevel))
-		r := Region(randomCap(0.1*AvgAreaMetric.Value(maxLevel), maxArea))
-
-		covering := rc.Covering(r)
-		checkCovering(t, rc, r, covering, false)
-		interior := rc.InteriorCovering(r)
-		checkCovering(t, rc, r, interior, true)
-
-		// Check that Covering is deterministic.
-		covering2 := rc.Covering(r)
-		if !reflect.DeepEqual(covering, covering2) {
-			t.Errorf("Iteration %d, got covering = %v, want covering = %v", i, covering2, covering)
-		}
-
-		// Also check Denormalize. The denormalized covering
-		// may still be different and smaller than "covering" because
-		// s2.RegionCoverer does not guarantee that it will not output all four
-		// children of the same parent.
-		covering.Denormalize(rc.MinLevel, rc.LevelMod)
-		checkCovering(t, rc, r, covering, false)
-	}
-}

+ 0 - 414
vendor/github.com/golang/geo/s2/s2_test.go

@@ -1,414 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"fmt"
-	"math"
-	"math/rand"
-	"strconv"
-	"strings"
-
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// float64Eq reports whether the two values are within the default epsilon.
-func float64Eq(x, y float64) bool { return float64Near(x, y, epsilon) }
-
-// float64Near reports whether the two values are within the given epsilon.
-func float64Near(x, y, ε float64) bool {
-	return math.Abs(x-y) <= ε
-}
-
-// TODO(roberts): Add in flag to allow specifying the random seed for repeatable tests.
-
-// kmToAngle converts a distance on the Earth's surface to an angle.
-func kmToAngle(km float64) s1.Angle {
-	// The Earth's mean radius in kilometers (according to NASA).
-	const earthRadiusKm = 6371.01
-	return s1.Angle(km / earthRadiusKm)
-}
-
-// randomBits returns a 64-bit random unsigned integer whose lowest "num" are random, and
-// whose other bits are zero.
-func randomBits(num uint32) uint64 {
-	// Make sure the request is for not more than 63 bits.
-	if num > 63 {
-		num = 63
-	}
-	return uint64(rand.Int63()) & ((1 << num) - 1)
-}
-
-// Return a uniformly distributed 64-bit unsigned integer.
-func randomUint64() uint64 {
-	return uint64(rand.Int63() | (rand.Int63() << 63))
-}
-
-// Return a uniformly distributed 32-bit unsigned integer.
-func randomUint32() uint32 {
-	return uint32(randomBits(32))
-}
-
-// randomFloat64 returns a uniformly distributed value in the range [0,1).
-// Note that the values returned are all multiples of 2**-53, which means that
-// not all possible values in this range are returned.
-func randomFloat64() float64 {
-	const randomFloatBits = 53
-	return math.Ldexp(float64(randomBits(randomFloatBits)), -randomFloatBits)
-}
-
-// randomUniformInt returns a uniformly distributed integer in the range [0,n).
-// NOTE: This is replicated here to stay in sync with how the C++ code generates
-// uniform randoms. (instead of using Go's math/rand package directly).
-func randomUniformInt(n int) int {
-	return int(randomFloat64() * float64(n))
-}
-
-// randomUniformFloat64 returns a uniformly distributed value in the range [min, max).
-func randomUniformFloat64(min, max float64) float64 {
-	return min + randomFloat64()*(max-min)
-}
-
-// oneIn returns true with a probability of 1/n.
-func oneIn(n int) bool {
-	return randomUniformInt(n) == 0
-}
-
-// randomPoint returns a random unit-length vector.
-func randomPoint() Point {
-	return PointFromCoords(randomUniformFloat64(-1, 1),
-		randomUniformFloat64(-1, 1), randomUniformFloat64(-1, 1))
-}
-
-// randomFrame returns a right-handed coordinate frame (three orthonormal vectors) for
-// a randomly generated point.
-func randomFrame() *matrix3x3 {
-	return randomFrameAtPoint(randomPoint())
-}
-
-// randomFrameAtPoint returns a right-handed coordinate frame using the given
-// point as the z-axis. The x- and y-axes are computed such that (x,y,z) is a
-// right-handed coordinate frame (three orthonormal vectors).
-func randomFrameAtPoint(z Point) *matrix3x3 {
-	x := Point{z.Cross(randomPoint().Vector).Normalize()}
-	y := Point{z.Cross(x.Vector).Normalize()}
-
-	m := &matrix3x3{}
-	m.setCol(0, x)
-	m.setCol(1, y)
-	m.setCol(2, z)
-	return m
-}
-
-// randomCellIDForLevel returns a random CellID at the given level.
-// The distribution is uniform over the space of cell ids, but only
-// approximately uniform over the surface of the sphere.
-func randomCellIDForLevel(level int) CellID {
-	face := randomUniformInt(numFaces)
-	pos := randomUint64() & uint64((1<<posBits)-1)
-	return CellIDFromFacePosLevel(face, pos, level)
-}
-
-// randomCellID returns a random CellID at a randomly chosen
-// level. The distribution is uniform over the space of cell ids,
-// but only approximately uniform over the surface of the sphere.
-func randomCellID() CellID {
-	return randomCellIDForLevel(randomUniformInt(maxLevel + 1))
-}
-
-// parsePoint returns an Point from the latitude-longitude coordinate in degrees
-// in the given string, or the origin if the string was invalid.
-// e.g., "-20:150"
-func parsePoint(s string) Point {
-	p := parsePoints(s)
-	if len(p) > 0 {
-		return p[0]
-	}
-
-	return Point{r3.Vector{0, 0, 0}}
-}
-
-// parseRect returns the minimal bounding Rect that contains the one or more
-// latitude-longitude coordinates in degrees in the given string.
-// Examples of input:
-//   "-20:150"                     // one point
-//   "-20:150, -20:151, -19:150"   // three points
-func parseRect(s string) Rect {
-	var rect Rect
-	lls := parseLatLngs(s)
-	if len(lls) > 0 {
-		rect = RectFromLatLng(lls[0])
-	}
-
-	for _, ll := range lls[1:] {
-		rect = rect.AddPoint(ll)
-	}
-
-	return rect
-}
-
-// parseLatLngs splits up a string of lat:lng points and returns the list of parsed
-// entries.
-func parseLatLngs(s string) []LatLng {
-	pieces := strings.Split(s, ",")
-	var lls []LatLng
-	for _, piece := range pieces {
-		piece = strings.TrimSpace(piece)
-
-		// Skip empty strings.
-		if piece == "" {
-			continue
-		}
-
-		p := strings.Split(piece, ":")
-		if len(p) != 2 {
-			panic(fmt.Sprintf("invalid input string for parseLatLngs: %q", piece))
-		}
-
-		lat, err := strconv.ParseFloat(p[0], 64)
-		if err != nil {
-			panic(fmt.Sprintf("invalid float in parseLatLngs: %q, err: %v", p[0], err))
-		}
-
-		lng, err := strconv.ParseFloat(p[1], 64)
-		if err != nil {
-			panic(fmt.Sprintf("invalid float in parseLatLngs: %q, err: %v", p[1], err))
-		}
-
-		lls = append(lls, LatLngFromDegrees(lat, lng))
-	}
-	return lls
-}
-
-// parsePoints takes a string of lat:lng points and returns the set of Points it defines.
-func parsePoints(s string) []Point {
-	lls := parseLatLngs(s)
-	points := make([]Point, len(lls))
-	for i, ll := range lls {
-		points[i] = PointFromLatLng(ll)
-	}
-	return points
-}
-
-// makeLoop constructs a loop from a comma separated string of lat:lng
-// coordinates in degrees. Example of the input format:
-//   "-20:150, 10:-120, 0.123:-170.652"
-// The special strings "empty" or "full" create an empty or full loop respectively.
-func makeLoop(s string) *Loop {
-	if s == "full" {
-		return FullLoop()
-	}
-	if s == "empty" {
-		return EmptyLoop()
-	}
-
-	return LoopFromPoints(parsePoints(s))
-}
-
-// makePolygon constructs a polygon from the set of semicolon separated CSV
-// strings of lat:lng points defining each loop in the polygon. If the normalize
-// flag is set to true, loops are normalized by inverting them
-// if necessary so that they enclose at most half of the unit sphere.
-//
-// Examples of the input format:
-//     "10:20, 90:0, 20:30"                                  // one loop
-//     "10:20, 90:0, 20:30; 5.5:6.5, -90:-180, -15.2:20.3"   // two loops
-//     ""       // the empty polygon (consisting of no loops)
-//     "full"   // the full polygon (consisting of one full loop)
-//     "empty"  // **INVALID** (a polygon consisting of one empty loop)
-func makePolygon(s string, normalize bool) *Polygon {
-	strs := strings.Split(s, ";")
-	var loops []*Loop
-	for _, str := range strs {
-		if str == "" {
-			continue
-		}
-		loop := makeLoop(strings.TrimSpace(str))
-		if normalize {
-			// TODO(roberts): Uncomment once Normalize is implemented.
-			// loop.Normalize()
-		}
-		loops = append(loops, loop)
-	}
-	return PolygonFromLoops(loops)
-}
-
-// makePolyline constructs a Polyline from the given string of lat:lng values.
-func makePolyline(s string) *Polyline {
-	p := Polyline(parsePoints(s))
-	return &p
-}
-
-// concentricLoopsPolygon constructs a polygon with the specified center as a
-// number of concentric loops and vertices per loop.
-func concentricLoopsPolygon(center Point, numLoops, verticesPerLoop int) *Polygon {
-	var loops []*Loop
-	for li := 0; li < numLoops; li++ {
-		radius := s1.Angle(0.005 * float64(li+1) / float64(numLoops))
-		loops = append(loops, RegularLoop(center, radius, verticesPerLoop))
-	}
-	return PolygonFromLoops(loops)
-}
-
-// skewedInt returns a number in the range [0,2^max_log-1] with bias towards smaller numbers.
-func skewedInt(maxLog int) int {
-	base := uint32(rand.Int31n(int32(maxLog + 1)))
-	return int(randomBits(31) & ((1 << base) - 1))
-}
-
-// randomCap returns a cap with a random axis such that the log of its area is
-// uniformly distributed between the logs of the two given values. The log of
-// the cap angle is also approximately uniformly distributed.
-func randomCap(minArea, maxArea float64) Cap {
-	capArea := maxArea * math.Pow(minArea/maxArea, randomFloat64())
-	return CapFromCenterArea(randomPoint(), capArea)
-}
-
-// pointsApproxEquals reports whether the two points are within the given distance
-// of each other. This is the same as Point.ApproxEquals but permits specifying
-// the epsilon.
-func pointsApproxEquals(a, b Point, epsilon float64) bool {
-	return float64(a.Vector.Angle(b.Vector)) <= epsilon
-}
-
-var (
-	rectErrorLat = 10 * dblEpsilon
-	rectErrorLng = dblEpsilon
-)
-
-// r2PointsApproxEqual reports whether the two points are within the given epsilon.
-func r2PointsApproxEquals(a, b r2.Point, epsilon float64) bool {
-	return float64Near(a.X, b.X, epsilon) && float64Near(a.Y, b.Y, epsilon)
-}
-
-// rectsApproxEqual reports whether the two rect are within the given tolerances
-// at each corner from each other. The tolerances are specific to each axis.
-func rectsApproxEqual(a, b Rect, tolLat, tolLng float64) bool {
-	return math.Abs(a.Lat.Lo-b.Lat.Lo) < tolLat &&
-		math.Abs(a.Lat.Hi-b.Lat.Hi) < tolLat &&
-		math.Abs(a.Lng.Lo-b.Lng.Lo) < tolLng &&
-		math.Abs(a.Lng.Hi-b.Lng.Hi) < tolLng
-}
-
-// matricesApproxEqual reports whether all cells in both matrices are equal within
-// the default floating point epsilon.
-func matricesApproxEqual(m1, m2 *matrix3x3) bool {
-	return float64Eq(m1[0][0], m2[0][0]) &&
-		float64Eq(m1[0][1], m2[0][1]) &&
-		float64Eq(m1[0][2], m2[0][2]) &&
-
-		float64Eq(m1[1][0], m2[1][0]) &&
-		float64Eq(m1[1][1], m2[1][1]) &&
-		float64Eq(m1[1][2], m2[1][2]) &&
-
-		float64Eq(m1[2][0], m2[2][0]) &&
-		float64Eq(m1[2][1], m2[2][1]) &&
-		float64Eq(m1[2][2], m2[2][2])
-}
-
-// samplePointFromRect returns a point chosen uniformly at random (with respect
-// to area on the sphere) from the given rectangle.
-func samplePointFromRect(rect Rect) Point {
-	// First choose a latitude uniformly with respect to area on the sphere.
-	sinLo := math.Sin(rect.Lat.Lo)
-	sinHi := math.Sin(rect.Lat.Hi)
-	lat := math.Asin(randomUniformFloat64(sinLo, sinHi))
-
-	// Now choose longitude uniformly within the given range.
-	lng := rect.Lng.Lo + randomFloat64()*rect.Lng.Length()
-
-	return PointFromLatLng(LatLng{s1.Angle(lat), s1.Angle(lng)}.Normalized())
-}
-
-// samplePointFromCap returns a point chosen uniformly at random (with respect
-// to area) from the given cap.
-func samplePointFromCap(c Cap) Point {
-	// We consider the cap axis to be the "z" axis. We choose two other axes to
-	// complete the coordinate frame.
-	m := getFrame(c.Center())
-
-	// The surface area of a spherical cap is directly proportional to its
-	// height. First we choose a random height, and then we choose a random
-	// point along the circle at that height.
-	h := randomFloat64() * c.Height()
-	theta := 2 * math.Pi * randomFloat64()
-	r := math.Sqrt(h * (2 - h))
-
-	// The result should already be very close to unit-length, but we might as
-	// well make it accurate as possible.
-	return Point{fromFrame(m, PointFromCoords(math.Cos(theta)*r, math.Sin(theta)*r, 1-h)).Normalize()}
-}
-
-// perturbATowardsB returns a point that has been shifted some distance towards the
-// second point based on a random number.
-func perturbATowardsB(a, b Point) Point {
-	choice := randomFloat64()
-	if choice < 0.1 {
-		return a
-	}
-	if choice < 0.3 {
-		// Return a point that is exactly proportional to A and that still
-		// satisfies IsUnitLength().
-		for {
-			b := Point{a.Mul(2 - a.Norm() + 5*(randomFloat64()-0.5)*dblEpsilon)}
-			if !b.ApproxEqual(a) && b.IsUnit() {
-				return b
-			}
-		}
-	}
-	if choice < 0.5 {
-		// Return a point such that the distance squared to A will underflow.
-		return InterpolateAtDistance(1e-300, a, b)
-	}
-	// Otherwise return a point whose distance from A is near dblEpsilon such
-	// that the log of the pdf is uniformly distributed.
-	distance := dblEpsilon * 1e-5 * math.Pow(1e6, randomFloat64())
-	return InterpolateAtDistance(s1.Angle(distance), a, b)
-}
-
-// perturbedCornerOrMidpoint returns a Point from a line segment whose endpoints are
-// difficult to handle correctly. Given two adjacent cube vertices P and Q,
-// it returns either an edge midpoint, face midpoint, or corner vertex that is
-// in the plane of PQ and that has been perturbed slightly. It also sometimes
-// returns a random point from anywhere on the sphere.
-func perturbedCornerOrMidpoint(p, q Point) Point {
-	a := p.Mul(float64(randomUniformInt(3) - 1)).Add(q.Mul(float64(randomUniformInt(3) - 1)))
-	if oneIn(10) {
-		// This perturbation often has no effect except on coordinates that are
-		// zero, in which case the perturbed value is so small that operations on
-		// it often result in underflow.
-		a = a.Add(randomPoint().Mul(math.Pow(1e-300, randomFloat64())))
-	} else if oneIn(2) {
-		// For coordinates near 1 (say > 0.5), this perturbation yields values
-		// that are only a few representable values away from the initial value.
-		a = a.Add(randomPoint().Mul(4 * dblEpsilon))
-	} else {
-		// A perturbation whose magnitude is in the range [1e-25, 1e-10].
-		a = a.Add(randomPoint().Mul(1e-10 * math.Pow(1e-15, randomFloat64())))
-	}
-
-	if a.Norm2() < math.SmallestNonzeroFloat64 {
-		// If a.Norm2() is denormalized, Normalize() loses too much precision.
-		return perturbedCornerOrMidpoint(p, q)
-	}
-	return Point{a}
-}
-
-// TODO:
-// Most of the other s2 testing methods.

+ 0 - 196
vendor/github.com/golang/geo/s2/s2_test_test.go

@@ -1,196 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-func TestKmToAngle(t *testing.T) {
-	const earthRadiusKm = 6371.01
-
-	tests := []struct {
-		have float64
-		want s1.Angle
-	}{
-		{0.0, 0.0},
-		{1.0, 0.00015696098420815537 * s1.Radian},
-		{earthRadiusKm, 1.0 * s1.Radian},
-		{-1.0, -0.00015696098420815537 * s1.Radian},
-		{-10000.0, -1.5696098420815536300 * s1.Radian},
-		{1e9, 156960.984208155363007 * s1.Radian},
-	}
-	for _, test := range tests {
-		if got := kmToAngle(test.have); !float64Eq(float64(got), float64(test.want)) {
-			t.Errorf("kmToAngle(%f) = %0.20f, want %0.20f", test.have, got, test.want)
-		}
-	}
-
-}
-
-func TestParsePoint(t *testing.T) {
-	tests := []struct {
-		have string
-		want Point
-	}{
-		{"0:0", Point{r3.Vector{1, 0, 0}}},
-		{"90:0", Point{r3.Vector{6.123233995736757e-17, 0, 1}}},
-		{"91:0", Point{r3.Vector{-0.017452406437283473, -0, 0.9998476951563913}}},
-		{"179.99:0", Point{r3.Vector{-0.9999999847691292, -0, 0.00017453292431344843}}},
-		{"180:0", Point{r3.Vector{-1, -0, 1.2246467991473515e-16}}},
-		{"181.0:0", Point{r3.Vector{-0.9998476951563913, -0, -0.017452406437283637}}},
-		{"-45:0", Point{r3.Vector{0.7071067811865476, 0, -0.7071067811865475}}},
-		{"0:0.01", Point{r3.Vector{0.9999999847691292, 0.00017453292431333684, 0}}},
-		{"0:30", Point{r3.Vector{0.8660254037844387, 0.49999999999999994, 0}}},
-		{"0:45", Point{r3.Vector{0.7071067811865476, 0.7071067811865475, 0}}},
-		{"0:90", Point{r3.Vector{6.123233995736757e-17, 1, 0}}},
-		{"30:30", Point{r3.Vector{0.7500000000000001, 0.4330127018922193, 0.49999999999999994}}},
-		{"-30:30", Point{r3.Vector{0.7500000000000001, 0.4330127018922193, -0.49999999999999994}}},
-		{"180:90", Point{r3.Vector{-6.123233995736757e-17, -1, 1.2246467991473515e-16}}},
-		{"37.4210:-122.0866, 37.4231:-122.0819", Point{r3.Vector{-0.4218751185559026, -0.6728760966593905, 0.6076669670863027}}},
-	}
-	for _, test := range tests {
-		if got := parsePoint(test.have); !got.ApproxEqual(test.want) {
-			t.Errorf("parsePoint(%s) = %v, want %v", test.have, got, test.want)
-		}
-	}
-}
-
-func TestParseRect(t *testing.T) {
-	tests := []struct {
-		have string
-		want Rect
-	}{
-		{"0:0", Rect{}},
-		{
-			"1:1",
-			Rect{
-				r1.Interval{float64(s1.Degree), float64(s1.Degree)},
-				s1.Interval{float64(s1.Degree), float64(s1.Degree)},
-			},
-		},
-		{
-			"1:1, 2:2, 3:3",
-			Rect{
-				r1.Interval{float64(s1.Degree), 3 * float64(s1.Degree)},
-				s1.Interval{float64(s1.Degree), 3 * float64(s1.Degree)},
-			},
-		},
-		{
-			"-90:-180, 90:180",
-			Rect{
-				r1.Interval{-90 * float64(s1.Degree), 90 * float64(s1.Degree)},
-				s1.Interval{180 * float64(s1.Degree), -180 * float64(s1.Degree)},
-			},
-		},
-		{
-			"-89.99:0, 89.99:179.99",
-			Rect{
-				r1.Interval{-89.99 * float64(s1.Degree), 89.99 * float64(s1.Degree)},
-				s1.Interval{0, 179.99 * float64(s1.Degree)},
-			},
-		},
-		{
-			"-89.99:-179.99, 89.99:179.99",
-			Rect{
-				r1.Interval{-89.99 * float64(s1.Degree), 89.99 * float64(s1.Degree)},
-				s1.Interval{179.99 * float64(s1.Degree), -179.99 * float64(s1.Degree)},
-			},
-		},
-		{
-			"37.4210:-122.0866, 37.4231:-122.0819",
-			Rect{
-				r1.Interval{float64(s1.Degree * 37.4210), float64(s1.Degree * 37.4231)},
-				s1.Interval{float64(s1.Degree * -122.0866), float64(s1.Degree * -122.0819)},
-			},
-		},
-		{
-			"-876.54:-654.43, 963.84:2468.35",
-			Rect{
-				r1.Interval{-876.54 * float64(s1.Degree), -876.54 * float64(s1.Degree)},
-				s1.Interval{-654.43 * float64(s1.Degree), -654.43 * float64(s1.Degree)},
-			},
-		},
-	}
-	for _, test := range tests {
-		if got := parseRect(test.have); got != test.want {
-			t.Errorf("parseRect(%s) = %v, want %v", test.have, got, test.want)
-		}
-	}
-}
-
-func TestParseLatLngs(t *testing.T) {
-	tests := []struct {
-		have string
-		want []LatLng
-	}{
-		{"0:0", []LatLng{{0, 0}}},
-		{
-			"37.4210:-122.0866, 37.4231:-122.0819",
-			[]LatLng{
-				{s1.Degree * 37.4210, s1.Degree * -122.0866},
-				{s1.Degree * 37.4231, s1.Degree * -122.0819},
-			},
-		},
-	}
-	for _, test := range tests {
-		got := parseLatLngs(test.have)
-		if !reflect.DeepEqual(got, test.want) {
-			t.Errorf("parseLatLngs(%s) = %v, want %v", test.have, got, test.want)
-		}
-	}
-}
-
-func TestParsePoints(t *testing.T) {
-	tests := []struct {
-		have string
-		want []Point
-	}{
-		{"0:0", []Point{{r3.Vector{1, 0, 0}}}},
-		{"      0:0,    ", []Point{{r3.Vector{1, 0, 0}}}},
-		{
-			"90:0,-90:0",
-			[]Point{
-				{r3.Vector{6.123233995736757e-17, 0, 1}},
-				{r3.Vector{6.123233995736757e-17, 0, -1}},
-			},
-		},
-		{
-			"90:0, 0:90, -90:0, 0:-90",
-			[]Point{
-				{r3.Vector{6.123233995736757e-17, 0, 1}},
-				{r3.Vector{6.123233995736757e-17, 1, 0}},
-				{r3.Vector{6.123233995736757e-17, 0, -1}},
-				{r3.Vector{6.123233995736757e-17, -1, 0}},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		got := parsePoints(test.have)
-		for i := range got { // assume we at least get the same number of points
-			if !got[i].ApproxEqual(test.want[i]) {
-				t.Errorf("parsePoints(%s): [%d]: got %v, want %v", test.have, i, got[i], test.want[i])
-			}
-		}
-	}
-}

+ 194 - 0
vendor/github.com/golang/geo/s2/shape.go

@@ -0,0 +1,194 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+)
+
+// dimension defines the types of geometry dimensions that a Shape supports.
+type dimension int
+
+const (
+	pointGeometry dimension = iota
+	polylineGeometry
+	polygonGeometry
+)
+
+// Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
+// allowed, and can be used to represent points.
+type Edge struct {
+	V0, V1 Point
+}
+
+// Cmp compares the two edges using the underlying Points Cmp method and returns
+//
+//   -1 if e <  other
+//    0 if e == other
+//   +1 if e >  other
+//
+// The two edges are compared by first vertex, and then by the second vertex.
+func (e Edge) Cmp(other Edge) int {
+	if v0cmp := e.V0.Cmp(other.V0.Vector); v0cmp != 0 {
+		return v0cmp
+	}
+	return e.V1.Cmp(other.V1.Vector)
+}
+
+// sortEdges sorts the slice of Edges in place.
+func sortEdges(e []Edge) {
+	sort.Sort(edges(e))
+}
+
+// edges implements the Sort interface for slices of Edge.
+type edges []Edge
+
+func (e edges) Len() int           { return len(e) }
+func (e edges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 }
+
+// Chain represents a range of edge IDs corresponding to a chain of connected
+// edges, specified as a (start, length) pair. The chain is defined to consist of
+// edge IDs {start, start + 1, ..., start + length - 1}.
+type Chain struct {
+	Start, Length int
+}
+
+// ChainPosition represents the position of an edge within a given edge chain,
+// specified as a (chainID, offset) pair. Chains are numbered sequentially
+// starting from zero, and offsets are measured from the start of each chain.
+type ChainPosition struct {
+	ChainID, Offset int
+}
+
+// A ReferencePoint consists of a point and a boolean indicating whether the point
+// is contained by a particular shape.
+type ReferencePoint struct {
+	Point     Point
+	Contained bool
+}
+
+// OriginReferencePoint returns a ReferencePoint with the given value for
+// contained and the origin point. It should be used when all points or no
+// points are contained.
+func OriginReferencePoint(contained bool) ReferencePoint {
+	return ReferencePoint{Point: OriginPoint(), Contained: contained}
+}
+
+// Shape represents polygonal geometry in a flexible way. It is organized as a
+// collection of edges that optionally defines an interior. All geometry
+// represented by a given Shape must have the same dimension, which means that
+// an Shape can represent either a set of points, a set of polylines, or a set
+// of polygons.
+//
+// Shape is defined as an interface in order to give clients control over the
+// underlying data representation. Sometimes an Shape does not have any data of
+// its own, but instead wraps some other type.
+//
+// Shape operations are typically defined on a ShapeIndex rather than
+// individual shapes. An ShapeIndex is simply a collection of Shapes,
+// possibly of different dimensions (e.g. 10 points and 3 polygons), organized
+// into a data structure for efficient edge access.
+//
+// The edges of a Shape are indexed by a contiguous range of edge IDs
+// starting at 0. The edges are further subdivided into chains, where each
+// chain consists of a sequence of edges connected end-to-end (a polyline).
+// For example, a Shape representing two polylines AB and CDE would have
+// three edges (AB, CD, DE) grouped into two chains: (AB) and (CD, DE).
+// Similarly, an Shape representing 5 points would have 5 chains consisting
+// of one edge each.
+//
+// Shape has methods that allow edges to be accessed either using the global
+// numbering (edge ID) or within a particular chain. The global numbering is
+// sufficient for most purposes, but the chain representation is useful for
+// certain algorithms such as intersection (see BooleanOperation).
+type Shape interface {
+	// NumEdges returns the number of edges in this shape.
+	NumEdges() int
+
+	// Edge returns the edge for the given edge index.
+	Edge(i int) Edge
+
+	// HasInterior reports whether this shape has an interior.
+	HasInterior() bool
+
+	// ReferencePoint returns an arbitrary reference point for the shape. (The
+	// containment boolean value must be false for shapes that do not have an interior.)
+	//
+	// This reference point may then be used to compute the containment of other
+	// points by counting edge crossings.
+	ReferencePoint() ReferencePoint
+
+	// NumChains reports the number of contiguous edge chains in the shape.
+	// For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
+	// of two chains (AB,BC,CD and AE,EF). Every chain is assigned a chain Id
+	// numbered sequentially starting from zero.
+	//
+	// Note that it is always acceptable to implement this method by returning
+	// NumEdges, i.e. every chain consists of a single edge, but this may
+	// reduce the efficiency of some algorithms.
+	NumChains() int
+
+	// Chain returns the range of edge IDs corresponding to the given edge chain.
+	// Edge chains must form contiguous, non-overlapping ranges that cover
+	// the entire range of edge IDs. This is spelled out more formally below:
+	//
+	//  0 <= i < NumChains()
+	//  Chain(i).length > 0, for all i
+	//  Chain(0).start == 0
+	//  Chain(i).start + Chain(i).length == Chain(i+1).start, for i < NumChains()-1
+	//  Chain(i).start + Chain(i).length == NumEdges(), for i == NumChains()-1
+	Chain(chainID int) Chain
+
+	// ChainEdgeReturns the edge at offset "offset" within edge chain "chainID".
+	// Equivalent to "shape.Edge(shape.Chain(chainID).start + offset)"
+	// but more efficient.
+	ChainEdge(chainID, offset int) Edge
+
+	// ChainPosition finds the chain containing the given edge, and returns the
+	// position of that edge as a ChainPosition(chainID, offset) pair.
+	//
+	//  shape.Chain(pos.chainID).start + pos.offset == edgeID
+	//  shape.Chain(pos.chainID+1).start > edgeID
+	//
+	// where pos == shape.ChainPosition(edgeID).
+	ChainPosition(edgeID int) ChainPosition
+
+	// dimension returns the dimension of the geometry represented by this shape.
+	//
+	//  pointGeometry: Each point is represented as a degenerate edge.
+	//
+	//  polylineGeometry:  Polyline edges may be degenerate. A shape may
+	//      represent any number of polylines. Polylines edges may intersect.
+	//
+	//  polygonGeometry:  Edges should be oriented such that the polygon
+	//      interior is always on the left. In theory the edges may be returned
+	//      in any order, but typically the edges are organized as a collection
+	//      of edge chains where each chain represents one polygon loop.
+	//      Polygons may have degeneracies (e.g., degenerate edges or sibling
+	//      pairs consisting of an edge and its corresponding reversed edge).
+	//
+	// Note that this method allows degenerate geometry of different dimensions
+	// to be distinguished, e.g. it allows a point to be distinguished from a
+	// polyline or polygon that has been simplified to a single point.
+	dimension() dimension
+}
+
+// A minimal check for types that should satisfy the Shape interface.
+var (
+	_ Shape = &Loop{}
+	_ Shape = &Polygon{}
+	_ Shape = &Polyline{}
+)

+ 1338 - 108
vendor/github.com/golang/geo/s2/shapeindex.go

@@ -1,89 +1,26 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
 import (
 import (
-	"github.com/golang/geo/r2"
-)
-
-// dimension defines the types of geometry dimensions that a Shape supports.
-type dimension int
-
-const (
-	pointGeometry dimension = iota
-	polylineGeometry
-	polygonGeometry
-)
-
-// Shape defines an interface for any S2 type that needs to be indexable. A shape
-// is a collection of edges that optionally defines an interior. It can be used to
-// represent a set of points, a set of polylines, or a set of polygons.
-type Shape interface {
-	// NumEdges returns the number of edges in this shape.
-	NumEdges() int
-
-	// Edge returns endpoints for the given edge index.
-	// Zero-length edges are allowed, and can be used to represent points.
-	Edge(i int) (a, b Point)
-
-	// numChains reports the number of contiguous edge chains in the shape.
-	// For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
-	// of two chains (AB,BC,CD and AE,EF). This method allows some algorithms
-	// to be optimized by skipping over edge chains that do not affect the output.
-	//
-	// Note that it is always acceptable to implement this method by returning
-	// NumEdges, i.e. every chain consists of a single edge.
-	numChains() int
+	"math"
+	"sync"
+	"sync/atomic"
 
 
-	// chainStart returns the id of the first edge in the i-th edge chain,
-	// and returns NumEdges when i == numChains. For example, if there are
-	// two chains AB,BC,CD and AE,EF, the chain starts would be [0, 3, 5].
-	//
-	// This requires the following:
-	// 0 <= i <= numChains()
-	// chainStart(0) == 0
-	// chainStart(i) < chainStart(i+1)
-	// chainStart(numChains()) == NumEdges()
-	chainStart(i int) int
-
-	// dimension returns the dimension of the geometry represented by this shape.
-	//
-	// Note that this method allows degenerate geometry of different dimensions
-	// to be distinguished, e.g. it allows a point to be distinguished from a
-	// polyline or polygon that has been simplified to a single point.
-	dimension() dimension
-
-	// HasInterior reports whether this shape has an interior. If so, it must be possible
-	// to assemble the edges into a collection of non-crossing loops.  Edges may
-	// be returned in any order, and edges may be oriented arbitrarily with
-	// respect to the shape interior.  (However, note that some Shape types
-	// may have stronger requirements.)
-	HasInterior() bool
-
-	// ContainsOrigin returns true if this shape contains s2.Origin.
-	// Shapes that do not have an interior will return false.
-	ContainsOrigin() bool
-}
-
-// A minimal check for types that should satisfy the Shape interface.
-var (
-	_ Shape = &Loop{}
-	_ Shape = &Polygon{}
-	_ Shape = &Polyline{}
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
 )
 )
 
 
 // CellRelation describes the possible relationships between a target cell
 // CellRelation describes the possible relationships between a target cell
@@ -99,7 +36,7 @@ const (
 	Disjoint
 	Disjoint
 )
 )
 
 
-var (
+const (
 	// cellPadding defines the total error when clipping an edge which comes
 	// cellPadding defines the total error when clipping an edge which comes
 	// from two sources:
 	// from two sources:
 	// (1) Clipping the original spherical edge to a cube face (the face edge).
 	// (1) Clipping the original spherical edge to a cube face (the face edge).
@@ -110,8 +47,27 @@ var (
 	// double the total error so that we only need to pad edges during indexing
 	// double the total error so that we only need to pad edges during indexing
 	// and not at query time.
 	// and not at query time.
 	cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord)
 	cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord)
+
+	// cellSizeToLongEdgeRatio defines the cell size relative to the length of an
+	// edge at which it is first considered to be long. Long edges do not
+	// contribute toward the decision to subdivide a cell further. For example,
+	// a value of 2.0 means that the cell must be at least twice the size of the
+	// edge in order for that edge to be counted. There are two reasons for not
+	// counting long edges: (1) such edges typically need to be propagated to
+	// several children, which increases time and memory costs without much benefit,
+	// and (2) in pathological cases, many long edges close together could force
+	// subdivision to continue all the way to the leaf cell level.
+	cellSizeToLongEdgeRatio = 1.0
 )
 )
 
 
+// clippedShape represents the part of a shape that intersects a Cell.
+// It consists of the set of edge IDs that intersect that cell and a boolean
+// indicating whether the center of the cell is inside the shape (for shapes
+// that have an interior).
+//
+// Note that the edges themselves are not clipped; we always use the original
+// edges for intersection tests so that the results will be the same as the
+// original shape.
 type clippedShape struct {
 type clippedShape struct {
 	// shapeID is the index of the shape this clipped shape is a part of.
 	// shapeID is the index of the shape this clipped shape is a part of.
 	shapeID int32
 	shapeID int32
@@ -121,12 +77,12 @@ type clippedShape struct {
 	// have an interior.
 	// have an interior.
 	containsCenter bool
 	containsCenter bool
 
 
-	// edges is the ordered set of ShapeIndex original edge ids. Edges
-	// are stored in increasing order of edge id.
+	// edges is the ordered set of ShapeIndex original edge IDs. Edges
+	// are stored in increasing order of edge ID.
 	edges []int
 	edges []int
 }
 }
 
 
-// init initializes this shape for the given shapeID and number of expected edges.
+// newClippedShape returns a new clipped shape for the given shapeID and number of expected edges.
 func newClippedShape(id int32, numEdges int) *clippedShape {
 func newClippedShape(id int32, numEdges int) *clippedShape {
 	return &clippedShape{
 	return &clippedShape{
 		shapeID: id,
 		shapeID: id,
@@ -134,19 +90,52 @@ func newClippedShape(id int32, numEdges int) *clippedShape {
 	}
 	}
 }
 }
 
 
-// shapeIndexCell stores the index contents for a particular CellID.
-type shapeIndexCell struct {
+// numEdges returns the number of edges that intersect the CellID of the Cell this was clipped to.
+func (c *clippedShape) numEdges() int {
+	return len(c.edges)
+}
+
+// containsEdge reports if this clipped shape contains the given edge ID.
+func (c *clippedShape) containsEdge(id int) bool {
+	// Linear search is fast because the number of edges per shape is typically
+	// very small (less than 10).
+	for _, e := range c.edges {
+		if e == id {
+			return true
+		}
+	}
+	return false
+}
+
+// ShapeIndexCell stores the index contents for a particular CellID.
+type ShapeIndexCell struct {
 	shapes []*clippedShape
 	shapes []*clippedShape
 }
 }
 
 
+// NewShapeIndexCell creates a new cell that is sized to hold the given number of shapes.
+func NewShapeIndexCell(numShapes int) *ShapeIndexCell {
+	return &ShapeIndexCell{
+		shapes: make([]*clippedShape, numShapes),
+	}
+}
+
+// numEdges reports the total number of edges in all clipped shapes in this cell.
+func (s *ShapeIndexCell) numEdges() int {
+	var e int
+	for _, cs := range s.shapes {
+		e += cs.numEdges()
+	}
+	return e
+}
+
 // add adds the given clipped shape to this index cell.
 // add adds the given clipped shape to this index cell.
-func (s *shapeIndexCell) add(c *clippedShape) {
+func (s *ShapeIndexCell) add(c *clippedShape) {
 	s.shapes = append(s.shapes, c)
 	s.shapes = append(s.shapes, c)
 }
 }
 
 
-// findByID returns the clipped shape that contains the given shapeID,
+// findByShapeID returns the clipped shape that contains the given shapeID,
 // or nil if none of the clipped shapes contain it.
 // or nil if none of the clipped shapes contain it.
-func (s *shapeIndexCell) findByID(shapeID int32) *clippedShape {
+func (s *ShapeIndexCell) findByShapeID(shapeID int32) *clippedShape {
 	// Linear search is fine because the number of shapes per cell is typically
 	// Linear search is fine because the number of shapes per cell is typically
 	// very small (most often 1), and is large only for pathological inputs
 	// very small (most often 1), and is large only for pathological inputs
 	// (e.g. very deeply nested loops).
 	// (e.g. very deeply nested loops).
@@ -181,7 +170,7 @@ type faceEdge struct {
 	maxLevel    int      // Not desirable to subdivide this edge beyond this level
 	maxLevel    int      // Not desirable to subdivide this edge beyond this level
 	hasInterior bool     // Belongs to a shape that has an interior
 	hasInterior bool     // Belongs to a shape that has an interior
 	a, b        r2.Point // The edge endpoints, clipped to a given face
 	a, b        r2.Point // The edge endpoints, clipped to a given face
-	va, vb      Point    // The original Loop vertices of this edge.
+	edge        Edge     // The original edge.
 }
 }
 
 
 // clippedEdge represents the portion of that edge that has been clipped to a given Cell.
 // clippedEdge represents the portion of that edge that has been clipped to a given Cell.
@@ -190,44 +179,483 @@ type clippedEdge struct {
 	bound    r2.Rect   // Bounding box for the clipped portion
 	bound    r2.Rect   // Bounding box for the clipped portion
 }
 }
 
 
// ShapeIndexIteratorPos defines the set of possible iterator starting positions. By
// default iterators are unpositioned, since this avoids an extra seek in the common
// situation where one of the seek methods (such as Locate) is immediately called.
type ShapeIndexIteratorPos int

const (
	// IteratorBegin specifies the iterator should be positioned at the beginning of the index.
	IteratorBegin ShapeIndexIteratorPos = iota
	// IteratorEnd specifies the iterator should be positioned at the end of the index.
	IteratorEnd
)
+
// ShapeIndexIterator is an iterator that provides low-level access to
// the cells of the index. Cells are returned in increasing order of CellID.
//
//   for it := index.Iterator(); !it.Done(); it.Next() {
//     fmt.Print(it.CellID())
//   }
//
type ShapeIndexIterator struct {
	index    *ShapeIndex     // the index being iterated over
	position int             // current offset into index.cells
	id       CellID          // cell ID at the current position; SentinelCellID when past the end
	cell     *ShapeIndexCell // cell contents at the current position; nil when past the end
}
+
+// NewShapeIndexIterator creates a new iterator for the given index. If a starting
+// position is specified, the iterator is positioned at the given spot.
+func NewShapeIndexIterator(index *ShapeIndex, pos ...ShapeIndexIteratorPos) *ShapeIndexIterator {
+	s := &ShapeIndexIterator{
+		index: index,
+	}
+
+	if len(pos) > 0 {
+		if len(pos) > 1 {
+			panic("too many ShapeIndexIteratorPos arguments")
+		}
+		switch pos[0] {
+		case IteratorBegin:
+			s.Begin()
+		case IteratorEnd:
+			s.End()
+		default:
+			panic("unknown ShapeIndexIteratorPos value")
+		}
+	}
+
+	return s
+}
+
// CellID returns the CellID of the current index cell.
// If s.Done() is true, a value larger than any valid CellID is returned.
func (s *ShapeIndexIterator) CellID() CellID {
	return s.id
}
+
// IndexCell returns the current index cell, or nil if the iterator is past the end.
func (s *ShapeIndexIterator) IndexCell() *ShapeIndexCell {
	// TODO(roberts): C++ has this call a virtual method to allow subclasses
	// of ShapeIndexIterator to do other work before returning the cell. Do
	// we need such a thing?
	return s.cell
}
+
// Center returns the Point at the center of the current position of the iterator,
// i.e. the center of the current cell's CellID.
func (s *ShapeIndexIterator) Center() Point {
	return s.CellID().Point()
}
+
// Begin positions the iterator at the beginning of the index.
func (s *ShapeIndexIterator) Begin() {
	// Apply any pending updates first so the cell list is complete.
	if !s.index.IsFresh() {
		s.index.maybeApplyUpdates()
	}
	s.position = 0
	s.refresh()
}
+
// Next positions the iterator at the next index cell. Advancing past the
// last cell leaves the iterator in the Done state (see refresh).
func (s *ShapeIndexIterator) Next() {
	s.position++
	s.refresh()
}
+
+// Prev advances the iterator to the previous cell in the index and returns true to
+// indicate it was not yet at the beginning of the index. If the iterator is at the
+// first cell the call does nothing and returns false.
+func (s *ShapeIndexIterator) Prev() bool {
+	if s.position <= 0 {
+		return false
+	}
+
+	s.position--
+	s.refresh()
+	return true
+}
+
// End positions the iterator at the end of the index (one past the last cell).
func (s *ShapeIndexIterator) End() {
	s.position = len(s.index.cells)
	s.refresh()
}
+
// Done reports if the iterator is positioned at or after the last index cell.
// refresh sets id to SentinelCellID whenever position is past the cell list.
func (s *ShapeIndexIterator) Done() bool {
	return s.id == SentinelCellID
}
+
+// refresh updates the stored internal iterator values.
+func (s *ShapeIndexIterator) refresh() {
+	if s.position < len(s.index.cells) {
+		s.id = s.index.cells[s.position]
+		s.cell = s.index.cellMap[s.CellID()]
+	} else {
+		s.id = SentinelCellID
+		s.cell = nil
+	}
+}
+
+// seek positions the iterator at the first cell whose ID >= target, or at the
+// end of the index if no such cell exists.
+func (s *ShapeIndexIterator) seek(target CellID) {
+	s.position = 0
+	// In C++, this relies on the lower_bound method of the underlying btree_map.
+	// TODO(roberts): Convert this to a binary search since the list of cells is ordered.
+	for k, v := range s.index.cells {
+		// We've passed the cell that is after us, so we are done.
+		if v >= target {
+			s.position = k
+			break
+		}
+		// Otherwise, advance the position.
+		s.position++
+	}
+	s.refresh()
+}
+
// LocatePoint positions the iterator at the cell that contains the given Point.
// If no such cell exists, the iterator position is unspecified, and false is returned.
// The cell at the matched position is guaranteed to contain all edges that might
// intersect the line segment between target and the cell's center.
func (s *ShapeIndexIterator) LocatePoint(p Point) bool {
	// Let I = cellMap.LowerBound(T), where T is the leaf cell containing
	// point P. Then if T is contained by an index cell, then the
	// containing cell is either I or I'. We test for containment by comparing
	// the ranges of leaf cells spanned by T, I, and I'.
	target := cellIDFromPoint(p)
	s.seek(target)
	// seek leaves us at the first cell >= target, so that cell contains
	// target iff its leaf-cell range starts at or before target.
	if !s.Done() && s.CellID().RangeMin() <= target {
		return true
	}

	// Otherwise try the predecessor cell, which contains target iff its
	// leaf-cell range extends at least to target.
	if s.Prev() && s.CellID().RangeMax() >= target {
		return true
	}
	return false
}
+
// LocateCellID attempts to position the iterator at the first matching index cell
// in the index that has some relation to the given CellID. Let T be the target CellID.
// If T is contained by (or equal to) some index cell I, then the iterator is positioned
// at I and returns Indexed. Otherwise if T contains one or more (smaller) index cells,
// then the iterator is positioned at the first such cell I and return Subdivided.
// Otherwise Disjoint is returned and the iterator position is undefined.
func (s *ShapeIndexIterator) LocateCellID(target CellID) CellRelation {
	// Let T be the target, let I = cellMap.LowerBound(T.RangeMin()), and
	// let I' be the predecessor of I. If T contains any index cells, then T
	// contains I. Similarly, if T is contained by an index cell, then the
	// containing cell is either I or I'. We test for containment by comparing
	// the ranges of leaf cells spanned by T, I, and I'.
	s.seek(target.RangeMin())
	if !s.Done() {
		// I >= T.RangeMin(); if I also starts at or before T, then I contains T.
		if s.CellID() >= target && s.CellID().RangeMin() <= target {
			return Indexed
		}
		// Otherwise, if I still falls within T's leaf range, T contains I.
		if s.CellID() <= target.RangeMax() {
			return Subdivided
		}
	}
	// Finally, check whether the predecessor I' contains T.
	if s.Prev() && s.CellID().RangeMax() >= target {
		return Indexed
	}
	return Disjoint
}
+
// tracker keeps track of which shapes in a given set contain a particular point
// (the focus). It provides an efficient way to move the focus from one point
// to another and incrementally update the set of shapes which contain it. We use
// this to compute which shapes contain the center of every CellID in the index,
// by advancing the focus from one cell center to the next.
//
// Initially the focus is at the start of the CellID space-filling curve. We then
// visit all the cells that are being added to the ShapeIndex in increasing order
// of CellID. For each cell, we draw two edges: one from the entry vertex to the
// center, and another from the center to the exit vertex (where entry and exit
// refer to the points where the space-filling curve enters and exits the cell).
// By counting edge crossings we can incrementally compute which shapes contain
// the cell center. Note that the same set of shapes will always contain the exit
// point of one cell and the entry point of the next cell in the index, because
// either (a) these two points are actually the same, or (b) the intervening
// cells in CellID order are all empty, and therefore there are no edge crossings
// if we follow this path from one cell to the other.
//
// In C++, this is S2ShapeIndex::InteriorTracker.
type tracker struct {
	isActive   bool         // set once any shape with an interior is added
	a          Point        // the previous focus point
	b          Point        // the current focus point
	nextCellID CellID       // expected next cell (stored as its RangeMin leaf cell)
	crosser    *EdgeCrosser // crosser for the edge from a to b (set by drawTo)
	shapeIDs   []int32      // ids of shapes containing the focus, kept sorted by toggleShape

	// Shape ids saved by saveAndClearStateBefore. The state is never saved
	// recursively so we don't need to worry about maintaining a stack.
	savedIDs []int32
}
+
+// newTracker returns a new tracker with the appropriate defaults.
+func newTracker() *tracker {
+	// As shapes are added, we compute which ones contain the start of the
+	// CellID space-filling curve by drawing an edge from OriginPoint to this
+	// point and counting how many shape edges cross this edge.
+	t := &tracker{
+		isActive:   false,
+		b:          trackerOrigin(),
+		nextCellID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+	}
+	t.drawTo(Point{faceUVToXYZ(0, -1, -1).Normalize()}) // CellID curve start
+
+	return t
+}
+
// trackerOrigin returns the initial focus point when the tracker is created
// (corresponding to the start of the CellID space-filling curve).
func trackerOrigin() Point {
	// The start of the S2CellId space-filling curve.
	return Point{faceUVToXYZ(0, -1, -1).Normalize()}
}
+
// focus returns the current focus point of the tracker.
func (t *tracker) focus() Point { return t.b }
+
// addShape adds a shape whose interior should be tracked. containsFocus indicates
// whether the current focus point is inside the shape. Alternatively, if
// the focus point is in the process of being moved (via moveTo/drawTo), you
// can also specify containsFocus at the old focus point and call testEdge
// for every edge of the shape that might cross the current drawTo line.
// This updates the state to correspond to the new focus point.
//
// This requires shape.HasInterior
func (t *tracker) addShape(shapeID int32, containsFocus bool) {
	t.isActive = true
	if containsFocus {
		t.toggleShape(shapeID)
	}
}
+
// moveTo moves the focus of the tracker to the given point. This method should
// only be used when it is known that there are no edge crossings between the old
// and new focus locations; otherwise use drawTo.
func (t *tracker) moveTo(b Point) { t.b = b }
+
// drawTo moves the focus of the tracker to the given point. After this method is
// called, testEdge should be called with all edges that may cross the line
// segment between the old and new focus locations.
func (t *tracker) drawTo(b Point) {
	t.a = t.b
	t.b = b
	// TODO: the edge crosser may need an in-place Init method if this gets expensive
	t.crosser = NewEdgeCrosser(t.a, t.b)
}
+
// testEdge checks if the given edge crosses the current edge, and if so, then
// toggle the state of the given shapeID.
// This requires shape to have an interior.
func (t *tracker) testEdge(shapeID int32, edge Edge) {
	if t.crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) {
		t.toggleShape(shapeID)
	}
}
+
// setNextCellID is used to indicate that the last argument to moveTo or drawTo
// was the entry vertex of the given CellID, i.e. the tracker is positioned at the
// start of this cell. By using this method together with atCellID, the caller
// can avoid calling moveTo in cases where the exit vertex of the previous cell
// is the same as the entry vertex of the current cell.
func (t *tracker) setNextCellID(nextCellID CellID) {
	// Store the cell's first leaf cell, which atCellID compares against.
	t.nextCellID = nextCellID.RangeMin()
}
+
// atCellID reports if the focus is already at the entry vertex of the given
// CellID (provided that the caller calls setNextCellID as each cell is processed).
func (t *tracker) atCellID(cellid CellID) bool {
	return cellid.RangeMin() == t.nextCellID
}
+
// toggleShape adds or removes the given shapeID from the set of IDs it is tracking.
// The shapeIDs slice is kept in sorted ascending order by the insertion cases below.
func (t *tracker) toggleShape(shapeID int32) {
	// Most shapeIDs slices are small, so special case the common steps.

	// If there is nothing here, add it.
	if len(t.shapeIDs) == 0 {
		t.shapeIDs = append(t.shapeIDs, shapeID)
		return
	}

	// If it's the first element, drop it from the slice.
	if t.shapeIDs[0] == shapeID {
		t.shapeIDs = t.shapeIDs[1:]
		return
	}

	for i, s := range t.shapeIDs {
		if s < shapeID {
			continue
		}

		// If it's in the set, cut it out.
		if s == shapeID {
			copy(t.shapeIDs[i:], t.shapeIDs[i+1:]) // overwrite the ith element
			t.shapeIDs = t.shapeIDs[:len(t.shapeIDs)-1]
			return
		}

		// We've got to a point in the slice where we should be inserted.
		// (the given shapeID is now less than the current positions id.)
		// The inner append allocates a fresh slice (cap of the literal is 1
		// and t.shapeIDs[i:] is non-empty), so the outer append cannot
		// clobber the tail it is copying from.
		t.shapeIDs = append(t.shapeIDs[0:i],
			append([]int32{shapeID}, t.shapeIDs[i:len(t.shapeIDs)]...)...)
		return
	}

	// We got to the end and didn't find it, so add it to the list.
	t.shapeIDs = append(t.shapeIDs, shapeID)
}
+
// saveAndClearStateBefore makes an internal copy of the state for shape ids below
// the given limit, and then clear the state for those shapes. This is used during
// incremental updates to track the state of added and removed shapes separately.
func (t *tracker) saveAndClearStateBefore(limitShapeID int32) {
	// lowerBound gives the count of tracked ids below limitShapeID, since
	// shapeIDs is sorted.
	limit := t.lowerBound(limitShapeID)
	t.savedIDs = append([]int32(nil), t.shapeIDs[:limit]...)
	t.shapeIDs = t.shapeIDs[limit:]
}
+
// restoreStateBefore restores the state previously saved by saveAndClearStateBefore.
// This only affects the state for shapeIDs below "limitShapeID".
func (t *tracker) restoreStateBefore(limitShapeID int32) {
	limit := t.lowerBound(limitShapeID)
	// Prepend the saved ids to the surviving tail, preserving sorted order.
	t.shapeIDs = append(append([]int32(nil), t.savedIDs...), t.shapeIDs[limit:]...)
	t.savedIDs = nil
}
+
+// lowerBound returns the shapeID of the first entry x where x >= shapeID.
+func (t *tracker) lowerBound(shapeID int32) int32 {
+	panic("not implemented")
+}
+
// removedShape represents a set of edges from the given shape that is queued for removal.
// The edges are snapshotted at removal time since the Shape itself is deleted
// from the index's shape map immediately.
type removedShape struct {
	shapeID               int32  // id the shape had in the index
	hasInterior           bool   // whether the shape had an interior
	containsTrackerOrigin bool   // whether the shape contained its reference point
	edges                 []Edge // copy of all the shape's edges
}
+
// There are three basic states the index can be in. The status field of
// ShapeIndex holds one of these values and is accessed atomically.
const (
	stale    int32 = iota // There are pending updates.
	updating              // Updates are currently being applied.
	fresh                 // There are no pending updates.
)
+
// ShapeIndex indexes a set of Shapes, where a Shape is some collection of edges
// that optionally defines an interior. It can be used to represent a set of
// points, a set of polylines, or a set of polygons. For Shapes that have
// interiors, the index makes it very fast to determine which Shape(s) contain
// a given point or region.
//
// The index can be updated incrementally by adding or removing shapes. It is
// designed to handle up to hundreds of millions of edges. All data structures
// are designed to be small, so the index is compact; generally it is smaller
// than the underlying data being indexed. The index is also fast to construct.
//
// Polygon, Loop, and Polyline implement Shape which allows these objects to
// be indexed easily. You can find useful query methods in CrossingEdgeQuery
// and ClosestEdgeQuery (Not yet implemented in Go).
//
// Example showing how to build an index of Polylines:
//
//   index := NewShapeIndex()
//   for _, polyline := range polylines {
//       index.Add(polyline);
//   }
//   // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here.
//
type ShapeIndex struct {
	// shapes is a map of shape ID to shape.
	shapes map[int32]Shape

	// The maximum number of edges per cell.
	// TODO(roberts): Update the comments when the usage of this is implemented.
	maxEdgesPerCell int

	// nextID tracks the next ID to hand out. IDs are not reused when shapes
	// are removed from the index.
	nextID int32

	// cellMap is a map from CellID to the set of clipped shapes that intersect that
	// cell. The cell IDs cover a set of non-overlapping regions on the sphere.
	// In C++, this is a BTree, so the cells are ordered naturally by the data structure.
	cellMap map[CellID]*ShapeIndexCell
	// cells is the ordered list of the cell IDs present in cellMap.
	cells []CellID

	// The current status of the index; accessed atomically.
	status int32

	// Additions and removals are queued and processed on the first subsequent
	// query. There are several reasons to do this:
	//
	//  - It is significantly more efficient to process updates in batches if
	//    the amount of entities added grows.
	//  - Often the index will never be queried, in which case we can save both
	//    the time and memory required to build it. Examples:
	//     + Loops that are created simply to pass to an Polygon. (We don't
	//       need the Loop index, because Polygon builds its own index.)
	//     + Applications that load a database of geometry and then query only
	//       a small fraction of it.
	//
	// The main drawback is that we need to go to some extra work to ensure that
	// some methods are still thread-safe. Note that the goal is *not* to
	// make this thread-safe in general, but simply to hide the fact that
	// we defer some of the indexing work until query time.
	//
	// This mutex protects all of following fields in the index.
	mu sync.RWMutex

	// pendingAdditionsPos is the index of the first entry that has not been processed
	// via applyUpdatesInternal.
	pendingAdditionsPos int32

	// The set of shapes that have been queued for removal but not processed yet by
	// applyUpdatesInternal.
	pendingRemovals []*removedShape
}
 
 // NewShapeIndex creates a new ShapeIndex.
 // NewShapeIndex creates a new ShapeIndex.
 func NewShapeIndex() *ShapeIndex {
 func NewShapeIndex() *ShapeIndex {
 	return &ShapeIndex{
 	return &ShapeIndex{
 		maxEdgesPerCell: 10,
 		maxEdgesPerCell: 10,
-		shapes:          make(map[int]Shape),
+		shapes:          make(map[int32]Shape),
+		cellMap:         make(map[CellID]*ShapeIndexCell),
+		cells:           nil,
+		status:          fresh,
 	}
 	}
 }
 }
 
 
// Iterator returns an iterator for this index, positioned at the first cell
// (IteratorBegin). Any pending updates are applied first.
func (s *ShapeIndex) Iterator() *ShapeIndexIterator {
	s.maybeApplyUpdates()
	return NewShapeIndexIterator(s, IteratorBegin)
}
 
 
// Begin positions the iterator at the first cell in the index.
// Any pending updates are applied first.
func (s *ShapeIndex) Begin() *ShapeIndexIterator {
	s.maybeApplyUpdates()
	return NewShapeIndexIterator(s, IteratorBegin)
}
+
// End positions the iterator at the last cell in the index.
func (s *ShapeIndex) End() *ShapeIndexIterator {
	// TODO(roberts): It's possible that updates could happen to the index between
	// the time this is called and the time the iterators position is used and this
	// will be invalid or not the end. For now, things will be undefined if this
	// happens. See about referencing the IsFresh to guard for this in the future.
	s.maybeApplyUpdates()
	return NewShapeIndexIterator(s, IteratorEnd)
}
 
 
 // Len reports the number of Shapes in this index.
 // Len reports the number of Shapes in this index.
@@ -235,10 +663,13 @@ func (s *ShapeIndex) Len() int {
 	return len(s.shapes)
 	return len(s.shapes)
 }
 }
 
 
// Reset resets the index to its original state, discarding all shapes,
// cells, and pending updates. nextID is also reset, so IDs will be reissued.
func (s *ShapeIndex) Reset() {
	s.shapes = make(map[int32]Shape)
	s.nextID = 0
	s.cellMap = make(map[CellID]*ShapeIndexCell)
	s.cells = nil
	atomic.StoreInt32(&s.status, fresh)
}
 
 
 // NumEdges returns the number of edges in this index.
 // NumEdges returns the number of edges in this index.
@@ -249,3 +680,802 @@ func (s *ShapeIndex) NumEdges() int {
 	}
 	}
 	return numEdges
 	return numEdges
 }
 }
+
// Shape returns the shape with the given ID, or nil if the shape has been removed from the index.
func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] }
+
+// Add adds the given shape to the index and returns the assigned ID..
+func (s *ShapeIndex) Add(shape Shape) int32 {
+	s.shapes[s.nextID] = shape
+	s.nextID++
+	atomic.StoreInt32(&s.status, stale)
+	return s.nextID - 1
+}
+
+// Remove removes the given shape from the index.
+func (s *ShapeIndex) Remove(shape Shape) {
+	// The index updates itself lazily because it is much more efficient to
+	// process additions and removals in batches.
+	// Lookup the id of this shape in the index.
+	id := int32(-1)
+	for k, v := range s.shapes {
+		if v == shape {
+			id = k
+		}
+	}
+
+	// If the shape wasn't found, it's already been removed or was not in the index.
+	if s.shapes[id] == nil {
+		return
+	}
+
+	// Remove the shape from the shapes map.
+	delete(s.shapes, id)
+
+	// We are removing a shape that has not yet been added to the index,
+	// so there is nothing else to do.
+	if id >= s.pendingAdditionsPos {
+		return
+	}
+
+	numEdges := shape.NumEdges()
+	removed := &removedShape{
+		shapeID:               id,
+		hasInterior:           shape.HasInterior(),
+		containsTrackerOrigin: shape.ReferencePoint().Contained,
+		edges: make([]Edge, numEdges),
+	}
+
+	for e := 0; e < numEdges; e++ {
+		removed.edges[e] = shape.Edge(e)
+	}
+
+	s.pendingRemovals = append(s.pendingRemovals, removed)
+	atomic.StoreInt32(&s.status, stale)
+}
+
// IsFresh reports if there are no pending updates that need to be applied.
// This can be useful to avoid building the index unnecessarily, or for
// choosing between two different algorithms depending on whether the index
// is available.
//
// The returned index status may be slightly out of date if the index was
// built in a different thread. This is fine for the intended use (as an
// efficiency hint), but it should not be used by internal methods.
func (s *ShapeIndex) IsFresh() bool {
	return atomic.LoadInt32(&s.status) == fresh
}
+
// isFirstUpdate reports if this is the first update to the index.
func (s *ShapeIndex) isFirstUpdate() bool {
	// Note that it is not sufficient to check whether cellMap is empty, since
	// entries are added to it during the update process.
	return s.pendingAdditionsPos == 0
}
+
// isShapeBeingRemoved reports if the shape with the given ID is currently slated for removal.
func (s *ShapeIndex) isShapeBeingRemoved(shapeID int32) bool {
	// All shape ids being removed fall below the index position of shapes being added.
	return shapeID < s.pendingAdditionsPos
}
+
+// maybeApplyUpdates checks if the index pieces have changed, and if so, applies pending updates.
+func (s *ShapeIndex) maybeApplyUpdates() {
+	// TODO(roberts): To avoid acquiring and releasing the mutex on every
+	// query, we should use atomic operations when testing whether the status
+	// is fresh and when updating the status to be fresh. This guarantees
+	// that any thread that sees a status of fresh will also see the
+	// corresponding index updates.
+	if atomic.LoadInt32(&s.status) != fresh {
+		s.mu.Lock()
+		s.applyUpdatesInternal()
+		atomic.StoreInt32(&s.status, fresh)
+		s.mu.Unlock()
+	}
+}
+
// applyUpdatesInternal does the actual work of updating the index by applying all
// pending additions and removals. It does *not* update the indexes status.
// The caller is expected to hold the index mutex (see maybeApplyUpdates).
func (s *ShapeIndex) applyUpdatesInternal() {
	// TODO(roberts): Building the index can use up to 20x as much memory per
	// edge as the final index memory size. If this causes issues, add in
	// batched updating to limit the amount of items per batch to a
	// configurable memory footprint overhead.
	t := newTracker()

	// allEdges maps a Face to a collection of faceEdges.
	allEdges := make([][]faceEdge, 6)

	// Process queued removals first, then any shapes added since the last update.
	for _, p := range s.pendingRemovals {
		s.removeShapeInternal(p, allEdges, t)
	}

	for id := s.pendingAdditionsPos; id < int32(len(s.shapes)); id++ {
		s.addShapeInternal(id, allEdges, t)
	}

	// Rebuild each of the six cube faces from its accumulated edges.
	for face := 0; face < 6; face++ {
		s.updateFaceEdges(face, allEdges[face], t)
	}

	s.pendingRemovals = s.pendingRemovals[:0]
	s.pendingAdditionsPos = int32(len(s.shapes))
	// It is the caller's responsibility to update the index status.
}
+
+// addShapeInternal clips all edges of the given shape to the six cube faces,
+// adds the clipped edges to the set of allEdges, and starts tracking its
+// interior if necessary.
+func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *tracker) {
+	shape, ok := s.shapes[shapeID]
+	if !ok {
+		// This shape has already been removed.
+		return
+	}
+
+	faceEdge := faceEdge{
+		shapeID:     shapeID,
+		hasInterior: shape.HasInterior(),
+	}
+
+	if faceEdge.hasInterior {
+		t.addShape(shapeID, containsBruteForce(shape, t.focus()))
+	}
+
+	numEdges := shape.NumEdges()
+	for e := 0; e < numEdges; e++ {
+		edge := shape.Edge(e)
+
+		faceEdge.edgeID = e
+		faceEdge.edge = edge
+		faceEdge.maxLevel = maxLevelForEdge(edge)
+		s.addFaceEdge(faceEdge, allEdges)
+	}
+}
+
+// addFaceEdge adds the given faceEdge into the collection of all edges.
+func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) {
+	aFace := face(fe.edge.V0.Vector)
+	// See if both endpoints are on the same face, and are far enough from
+	// the edge of the face that they don't intersect any (padded) adjacent face.
+	if aFace == face(fe.edge.V1.Vector) {
+		x, y := validFaceXYZToUV(aFace, fe.edge.V0.Vector)
+		fe.a = r2.Point{x, y}
+		x, y = validFaceXYZToUV(aFace, fe.edge.V1.Vector)
+		fe.b = r2.Point{x, y}
+
+		maxUV := 1 - cellPadding
+		if math.Abs(fe.a.X) <= maxUV && math.Abs(fe.a.Y) <= maxUV &&
+			math.Abs(fe.b.X) <= maxUV && math.Abs(fe.b.Y) <= maxUV {
+			allEdges[aFace] = append(allEdges[aFace], fe)
+			return
+		}
+	}
+
+	// Otherwise, we simply clip the edge to all six faces.
+	for face := 0; face < 6; face++ {
+		if aClip, bClip, intersects := ClipToPaddedFace(fe.edge.V0, fe.edge.V1, face, cellPadding); intersects {
+			fe.a = aClip
+			fe.b = bClip
+			allEdges[face] = append(allEdges[face], fe)
+		}
+	}
+	return
+}
+
// updateFaceEdges adds or removes the various edges from the index.
// An edge is added if shapes[id] is not nil, and removed otherwise.
func (s *ShapeIndex) updateFaceEdges(face int, faceEdges []faceEdge, t *tracker) {
	numEdges := len(faceEdges)
	// Nothing to index and no tracked interiors: this face contributes no cells.
	if numEdges == 0 && len(t.shapeIDs) == 0 {
		return
	}

	// Create the initial clippedEdge for each faceEdge. Additional clipped
	// edges are created when edges are split between child cells. We create
	// two arrays, one containing the edge data and another containing pointers
	// to those edges, so that during the recursion we only need to copy
	// pointers in order to propagate an edge to the correct child.
	clippedEdges := make([]*clippedEdge, numEdges)
	bound := r2.EmptyRect()
	for e := 0; e < numEdges; e++ {
		clipped := &clippedEdge{
			faceEdge: &faceEdges[e],
		}
		clipped.bound = r2.RectFromPoints(faceEdges[e].a, faceEdges[e].b)
		clippedEdges[e] = clipped
		bound = bound.AddRect(clipped.bound)
	}

	// Construct the initial face cell containing all the edges, and then update
	// all the edges in the index recursively.
	faceID := CellIDFromFace(face)
	pcell := PaddedCellFromCellID(faceID, cellPadding)

	disjointFromIndex := s.isFirstUpdate()
	if numEdges > 0 {
		shrunkID := s.shrinkToFit(pcell, bound)
		if shrunkID != pcell.id {
			// All the edges are contained by some descendant of the face cell. We
			// can save a lot of work by starting directly with that cell, but if we
			// are in the interior of at least one shape then we need to create
			// index entries for the cells we are skipping over.
			s.skipCellRange(faceID.RangeMin(), shrunkID.RangeMin(), t, disjointFromIndex)
			pcell = PaddedCellFromCellID(shrunkID, cellPadding)
			s.updateEdges(pcell, clippedEdges, t, disjointFromIndex)
			s.skipCellRange(shrunkID.RangeMax().Next(), faceID.RangeMax().Next(), t, disjointFromIndex)
			return
		}
	}

	// Otherwise (no edges, or no shrinking is possible), subdivide normally.
	s.updateEdges(pcell, clippedEdges, t, disjointFromIndex)
}
+
// shrinkToFit shrinks the PaddedCell to fit within the given bounds, returning
// the CellID of the smallest cell that still covers them.
func (s *ShapeIndex) shrinkToFit(pcell *PaddedCell, bound r2.Rect) CellID {
	shrunkID := pcell.ShrinkToFit(bound)

	if !s.isFirstUpdate() && shrunkID != pcell.CellID() {
		// Don't shrink any smaller than the existing index cells, since we need
		// to combine the new edges with those cells.
		iter := s.Iterator()
		if iter.LocateCellID(shrunkID) == Indexed {
			shrunkID = iter.CellID()
		}
	}
	return shrunkID
}
+
+// skipCellRange skips over the cells in the given range, creating index cells if we are
+// currently in the interior of at least one shape.
+func (s *ShapeIndex) skipCellRange(begin, end CellID, t *tracker, disjointFromIndex bool) {
+	// If we aren't in the interior of a shape, then skipping over cells is easy.
+	if len(t.shapeIDs) == 0 {
+		return
+	}
+
+	// Otherwise generate the list of cell ids that we need to visit, and create
+	// an index entry for each one.
+	skipped := CellUnionFromRange(begin, end)
+	for _, cell := range skipped {
+		var clippedEdges []*clippedEdge
+		s.updateEdges(PaddedCellFromCellID(cell, cellPadding), clippedEdges, t, disjointFromIndex)
+	}
+}
+
+// updateEdges adds or removes the given edges whose bounding boxes intersect a
+// given cell. disjointFromIndex is an optimization hint indicating that cellMap
+// does not contain any entries that overlap the given cell.
+func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tracker, disjointFromIndex bool) {
+	// This function is recursive with a maximum recursion depth of 30 (maxLevel).
+
+	// Incremental updates are handled as follows. All edges being added or
+	// removed are combined together in edges, and all shapes with interiors
+	// are tracked using tracker. We subdivide recursively as usual until we
+	// encounter an existing index cell. At this point we absorb the index
+	// cell as follows:
+	//
+	//   - Edges and shapes that are being removed are deleted from edges and
+	//     tracker.
+	//   - All remaining edges and shapes from the index cell are added to
+	//     edges and tracker.
+	//   - Continue subdividing recursively, creating new index cells as needed.
+	//   - When the recursion gets back to the cell that was absorbed, we
+	//     restore edges and tracker to their previous state.
+	//
+	// Note that the only reason that we include removed shapes in the recursive
+	// subdivision process is so that we can find all of the index cells that
+	// contain those shapes efficiently, without maintaining an explicit list of
+	// index cells for each shape (which would be expensive in terms of memory).
+	indexCellAbsorbed := false
+	if !disjointFromIndex {
+		// There may be existing index cells contained inside pcell. If we
+		// encounter such a cell, we need to combine the edges being updated with
+		// the existing cell contents by absorbing the cell.
+		iter := s.Iterator()
+		r := iter.LocateCellID(pcell.id)
+		if r == Disjoint {
+			disjointFromIndex = true
+		} else if r == Indexed {
+			// Absorb the index cell by transferring its contents to edges and
+			// deleting it. We also start tracking the interior of any new shapes.
+			s.absorbIndexCell(pcell, iter, edges, t)
+			indexCellAbsorbed = true
+			disjointFromIndex = true
+		} else {
+			// DCHECK_EQ(SUBDIVIDED, r)
+		}
+	}
+
+	// If there are existing index cells below us, then we need to keep
+	// subdividing so that we can merge with those cells. Otherwise,
+	// makeIndexCell checks if the number of edges is small enough, and creates
+	// an index cell if possible (returning true when it does so).
+	if !disjointFromIndex || !s.makeIndexCell(pcell, edges, t) {
+		// TODO(roberts): If it turns out to have memory problems when there
+		// are 10M+ edges in the index, look into pre-allocating space so we
+		// are not always appending.
+		childEdges := [2][2][]*clippedEdge{} // [i][j]
+
+		// Compute the middle of the padded cell, defined as the rectangle in
+		// (u,v)-space that belongs to all four (padded) children. By comparing
+		// against the four boundaries of middle we can determine which children
+		// each edge needs to be propagated to.
+		middle := pcell.Middle()
+
+		// Build up a vector edges to be passed to each child cell. The (i,j)
+		// directions are left (i=0), right (i=1), lower (j=0), and upper (j=1).
+		// Note that the vast majority of edges are propagated to a single child.
+		for _, edge := range edges {
+			if edge.bound.X.Hi <= middle.X.Lo {
+				// Edge is entirely contained in the two left children.
+				a, b := s.clipVAxis(edge, middle.Y)
+				if a != nil {
+					childEdges[0][0] = append(childEdges[0][0], a)
+				}
+				if b != nil {
+					childEdges[0][1] = append(childEdges[0][1], b)
+				}
+			} else if edge.bound.X.Lo >= middle.X.Hi {
+				// Edge is entirely contained in the two right children.
+				a, b := s.clipVAxis(edge, middle.Y)
+				if a != nil {
+					childEdges[1][0] = append(childEdges[1][0], a)
+				}
+				if b != nil {
+					childEdges[1][1] = append(childEdges[1][1], b)
+				}
+			} else if edge.bound.Y.Hi <= middle.Y.Lo {
+				// Edge is entirely contained in the two lower children.
+				if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil {
+					childEdges[0][0] = append(childEdges[0][0], a)
+				}
+				if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil {
+					childEdges[1][0] = append(childEdges[1][0], b)
+				}
+			} else if edge.bound.Y.Lo >= middle.Y.Hi {
+				// Edge is entirely contained in the two upper children.
+				if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil {
+					childEdges[0][1] = append(childEdges[0][1], a)
+				}
+				if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil {
+					childEdges[1][1] = append(childEdges[1][1], b)
+				}
+			} else {
+				// The edge bound spans all four children. The edge
+				// itself intersects either three or four padded children.
+				left := s.clipUBound(edge, 1, middle.X.Hi)
+				a, b := s.clipVAxis(left, middle.Y)
+				if a != nil {
+					childEdges[0][0] = append(childEdges[0][0], a)
+				}
+				if b != nil {
+					childEdges[0][1] = append(childEdges[0][1], b)
+				}
+				right := s.clipUBound(edge, 0, middle.X.Lo)
+				a, b = s.clipVAxis(right, middle.Y)
+				if a != nil {
+					childEdges[1][0] = append(childEdges[1][0], a)
+				}
+				if b != nil {
+					childEdges[1][1] = append(childEdges[1][1], b)
+				}
+			}
+		}
+
+		// Now recursively update the edges in each child. We call the children in
+		// increasing order of CellID so that when the index is first constructed,
+		// all insertions into cellMap are at the end (which is much faster).
+		for pos := 0; pos < 4; pos++ {
+			i, j := pcell.ChildIJ(pos)
+			if len(childEdges[i][j]) > 0 || len(t.shapeIDs) > 0 {
+				s.updateEdges(PaddedCellFromParentIJ(pcell, i, j), childEdges[i][j],
+					t, disjointFromIndex)
+			}
+		}
+	}
+
+	if indexCellAbsorbed {
+		// Restore the state for any edges being removed that we are tracking.
+		t.restoreStateBefore(s.pendingAdditionsPos)
+	}
+}
+
+// makeIndexCell builds an indexCell from the given padded cell and set of edges and adds
+// it to the index. It reports true when an index cell was created or none was
+// needed, and false when the cell has too many unsubdivided edges and must be
+// subdivided further.
+func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *tracker) bool {
+	// If the cell is empty, no index cell is needed. (In most cases this
+	// situation is detected before we get to this point, but this can happen
+	// when all shapes in a cell are removed.)
+	if len(edges) == 0 && len(t.shapeIDs) == 0 {
+		return true
+	}
+
+	// Count the number of edges that have not reached their maximum level yet.
+	// Return false if there are too many such edges.
+	count := 0
+	for _, ce := range edges {
+		if p.Level() < ce.faceEdge.maxLevel {
+			count++
+		}
+
+		if count > s.maxEdgesPerCell {
+			return false
+		}
+	}
+
+	// Possible optimization: Continue subdividing as long as exactly one child
+	// of the padded cell intersects the given edges. This can be done by finding
+	// the bounding box of all the edges and calling ShrinkToFit:
+	//
+	// cellID = p.ShrinkToFit(RectBound(edges));
+	//
+	// Currently this is not beneficial; it slows down construction by 4-25%
+	// (mainly computing the union of the bounding rectangles) and also slows
+	// down queries (since more recursive clipping is required to get down to
+	// the level of a spatial index cell). But it may be worth trying again
+	// once containsCenter is computed and all algorithms are modified to
+	// take advantage of it.
+
+	// We update the InteriorTracker as follows. For every Cell in the index
+	// we construct two edges: one edge from entry vertex of the cell to its
+	// center, and one from the cell center to its exit vertex. Here entry
+	// and exit refer to the CellID ordering, i.e. the order in which points
+	// are encountered along the S2 space-filling curve. The exit vertex then
+	// becomes the entry vertex for the next cell in the index, unless there are
+	// one or more empty intervening cells, in which case the InteriorTracker
+	// state is unchanged because the intervening cells have no edges.
+
+	// Shift the InteriorTracker focus point to the center of the current cell.
+	if t.isActive && len(edges) != 0 {
+		if !t.atCellID(p.id) {
+			t.moveTo(p.EntryVertex())
+		}
+		t.drawTo(p.Center())
+		s.testAllEdges(edges, t)
+	}
+
+	// Allocate and fill a new index cell. To get the total number of shapes we
+	// need to merge the shapes associated with the intersecting edges together
+	// with the shapes that happen to contain the cell center.
+	cshapeIDs := t.shapeIDs
+	numShapes := s.countShapes(edges, cshapeIDs)
+	cell := NewShapeIndexCell(numShapes)
+
+	// To fill the index cell we merge the two sources of shapes: edge shapes
+	// (those that have at least one edge that intersects this cell), and
+	// containing shapes (those that contain the cell center). We keep track
+	// of the index of the next intersecting edge and the next containing shape
+	// as we go along. Both sets of shape ids are already sorted.
+	eNext := 0
+	cNextIdx := 0
+	for i := 0; i < numShapes; i++ {
+		var clipped *clippedShape
+		// Initialize both candidate ids to one past the largest valid shape
+		// id, so that an exhausted source always sorts after the other one.
+		eshapeID := int32(s.Len())
+		cshapeID := int32(eshapeID) // Sentinels
+
+		if eNext != len(edges) {
+			eshapeID = edges[eNext].faceEdge.shapeID
+		}
+		if cNextIdx != len(cshapeIDs) {
+			cshapeID = cshapeIDs[cNextIdx]
+		}
+		eBegin := eNext
+		if cshapeID < eshapeID {
+			// The entire cell is in the shape interior.
+			clipped = newClippedShape(cshapeID, 0)
+			clipped.containsCenter = true
+			cNextIdx++
+		} else {
+			// Count the number of edges for this shape and allocate space for them.
+			for eNext < len(edges) && edges[eNext].faceEdge.shapeID == eshapeID {
+				eNext++
+			}
+			clipped = newClippedShape(eshapeID, eNext-eBegin)
+			for e := eBegin; e < eNext; e++ {
+				clipped.edges[e-eBegin] = edges[e].faceEdge.edgeID
+			}
+			if cshapeID == eshapeID {
+				clipped.containsCenter = true
+				cNextIdx++
+			}
+		}
+		cell.shapes[i] = clipped
+	}
+
+	// Add this cell to the map.
+	s.cellMap[p.id] = cell
+	s.cells = append(s.cells, p.id)
+
+	// Shift the tracker focus point to the exit vertex of this cell.
+	if t.isActive && len(edges) != 0 {
+		t.drawTo(p.ExitVertex())
+		s.testAllEdges(edges, t)
+		t.setNextCellID(p.id.Next())
+	}
+	return true
+}
+
+// updateBound updates the specified endpoint of the given clipped edge and returns the
+// resulting clipped edge.
+func (s *ShapeIndex) updateBound(edge *clippedEdge, uEnd int, u float64, vEnd int, v float64) *clippedEdge {
+	// Start from a copy of the existing bound and then overwrite the single
+	// endpoint on each axis that is being clipped.
+	updated := &clippedEdge{faceEdge: edge.faceEdge}
+	updated.bound = edge.bound
+
+	if uEnd == 0 {
+		updated.bound.X.Lo = u
+	} else {
+		updated.bound.X.Hi = u
+	}
+
+	if vEnd == 0 {
+		updated.bound.Y.Lo = v
+	} else {
+		updated.bound.Y.Hi = v
+	}
+
+	return updated
+}
+
+// clipUBound clips the given endpoint (lo=0, hi=1) of the u-axis so that
+// it does not extend past the given value of the given edge.
+func (s *ShapeIndex) clipUBound(edge *clippedEdge, uEnd int, u float64) *clippedEdge {
+	// First check whether the edge actually requires any clipping. (Sometimes
+	// this method is called when clipping is not necessary, e.g. when one edge
+	// endpoint is in the overlap area between two padded child cells.)
+	if uEnd == 0 {
+		if edge.bound.X.Lo >= u {
+			return edge
+		}
+	} else {
+		if edge.bound.X.Hi <= u {
+			return edge
+		}
+	}
+	// We interpolate the new v-value from the endpoints of the original edge.
+	// This has two advantages: (1) we don't need to store the clipped endpoints
+	// at all, just their bounding box; and (2) it avoids the accumulation of
+	// roundoff errors due to repeated interpolations. The result needs to be
+	// clamped to ensure that it is in the appropriate range.
+	e := edge.faceEdge
+	v := edge.bound.Y.ClampPoint(interpolateFloat64(u, e.a.X, e.b.X, e.a.Y, e.b.Y))
+
+	// Determine which endpoint of the v-axis bound to update. If the edge
+	// slope is positive we update the same endpoint, otherwise we update the
+	// opposite endpoint. vEnd stays 0 (the lo endpoint) in the other case.
+	var vEnd int
+	positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+	if (uEnd == 1) == positiveSlope {
+		vEnd = 1
+	}
+	return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVBound clips the given endpoint (lo=0, hi=1) of the v-axis so that
+// it does not extend past the given value of the given edge.
+func (s *ShapeIndex) clipVBound(edge *clippedEdge, vEnd int, v float64) *clippedEdge {
+	// First check whether the edge actually requires any clipping.
+	if vEnd == 0 {
+		if edge.bound.Y.Lo >= v {
+			return edge
+		}
+	} else {
+		if edge.bound.Y.Hi <= v {
+			return edge
+		}
+	}
+
+	// We interpolate the new u-value from the endpoints of the original edge.
+	// This has two advantages: (1) we don't need to store the clipped endpoints
+	// at all, just their bounding box; and (2) it avoids the accumulation of
+	// roundoff errors due to repeated interpolations. The result needs to be
+	// clamped to ensure that it is in the appropriate range.
+	e := edge.faceEdge
+	u := edge.bound.X.ClampPoint(interpolateFloat64(v, e.a.Y, e.b.Y, e.a.X, e.b.X))
+
+	// Determine which endpoint of the u-axis bound to update. If the edge
+	// slope is positive we update the same endpoint, otherwise we update the
+	// opposite endpoint.
+	var uEnd int
+	positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+	if (vEnd == 1) == positiveSlope {
+		uEnd = 1
+	}
+	return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVAxis returns the given edge clipped to within the boundaries of the
+// middle interval along the v-axis. The first return value is the piece that
+// belongs to the lower child (or nil if none), and the second is the piece
+// that belongs to the upper child (or nil if none).
+func (s *ShapeIndex) clipVAxis(edge *clippedEdge, middle r1.Interval) (a, b *clippedEdge) {
+	if edge.bound.Y.Hi <= middle.Lo {
+		// Edge is entirely contained in the lower child.
+		return edge, nil
+	} else if edge.bound.Y.Lo >= middle.Hi {
+		// Edge is entirely contained in the upper child.
+		return nil, edge
+	}
+	// The edge bound spans both children.
+	return s.clipVBound(edge, 1, middle.Hi), s.clipVBound(edge, 0, middle.Lo)
+}
+
+// absorbIndexCell absorbs an index cell by transferring its contents to edges
+// and/or "tracker", and then delete this cell from the index. If edges includes
+// any edges that are being removed, this method also updates their
+// InteriorTracker state to correspond to the exit vertex of this cell.
+func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, edges []*clippedEdge, t *tracker) {
+	// When we absorb a cell, we erase all the edges that are being removed.
+	// However when we are finished with this cell, we want to restore the state
+	// of those edges (since that is how we find all the index cells that need
+	// to be updated).  The edges themselves are restored automatically when
+	// UpdateEdges returns from its recursive call, but the InteriorTracker
+	// state needs to be restored explicitly.
+	//
+	// Here we first update the InteriorTracker state for removed edges to
+	// correspond to the exit vertex of this cell, and then save the
+	// InteriorTracker state.  This state will be restored by UpdateEdges when
+	// it is finished processing the contents of this cell.
+	if t.isActive && len(edges) != 0 && s.isShapeBeingRemoved(edges[0].faceEdge.shapeID) {
+		// We probably need to update the tracker. ("Probably" because
+		// it's possible that all shapes being removed do not have interiors.)
+		if !t.atCellID(p.id) {
+			t.moveTo(p.EntryVertex())
+		}
+		t.drawTo(p.ExitVertex())
+		t.setNextCellID(p.id.Next())
+		for _, edge := range edges {
+			fe := edge.faceEdge
+			if !s.isShapeBeingRemoved(fe.shapeID) {
+				break // All shapes being removed come first.
+			}
+			if fe.hasInterior {
+				t.testEdge(fe.shapeID, fe.edge)
+			}
+		}
+	}
+
+	// Save the state of the edges being removed, so that it can be restored
+	// when we are finished processing this cell and its children.  We don't
+	// need to save the state of the edges being added because they aren't being
+	// removed from "edges" and will therefore be updated normally as we visit
+	// this cell and its children.
+	t.saveAndClearStateBefore(s.pendingAdditionsPos)
+
+	// Create a faceEdge for each edge in this cell that isn't being removed.
+	var faceEdges []*faceEdge
+	trackerMoved := false
+
+	cell := iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		shapeID := clipped.shapeID
+		shape := s.Shape(shapeID)
+		if shape == nil {
+			continue // This shape is being removed.
+		}
+
+		numClipped := clipped.numEdges()
+
+		// If this shape has an interior, start tracking whether we are inside the
+		// shape. updateEdges wants to know whether the entry vertex of this
+		// cell is inside the shape, but we only know whether the center of the
+		// cell is inside the shape, so we need to test all the edges against the
+		// line segment from the cell center to the entry vertex.
+		edge := &faceEdge{
+			shapeID:     shapeID,
+			hasInterior: shape.HasInterior(),
+		}
+
+		if edge.hasInterior {
+			t.addShape(shapeID, clipped.containsCenter)
+			// There might not be any edges in this entire cell (i.e., it might be
+			// in the interior of all shapes), so we delay updating the tracker
+			// until we see the first edge.
+			if !trackerMoved && numClipped > 0 {
+				t.moveTo(p.Center())
+				t.drawTo(p.EntryVertex())
+				t.setNextCellID(p.id)
+				trackerMoved = true
+			}
+		}
+		for i := 0; i < numClipped; i++ {
+			edgeID := clipped.edges[i]
+			edge.edgeID = edgeID
+			edge.edge = shape.Edge(edgeID)
+			edge.maxLevel = maxLevelForEdge(edge.edge)
+			if edge.hasInterior {
+				t.testEdge(shapeID, edge.edge)
+			}
+			var ok bool
+			edge.a, edge.b, ok = ClipToPaddedFace(edge.edge.V0, edge.edge.V1, p.id.Face(), cellPadding)
+			if !ok {
+				panic("invariant failure in ShapeIndex")
+			}
+			// Append a copy of the current state. Appending edge itself would
+			// make every entry for this shape alias the same struct, so all of
+			// them would end up holding the values of the shape's last edge.
+			fe := *edge
+			faceEdges = append(faceEdges, &fe)
+		}
+	}
+	// Now create a clippedEdge for each faceEdge, and put them in "new_edges".
+	var newEdges []*clippedEdge
+	for _, faceEdge := range faceEdges {
+		clipped := &clippedEdge{
+			faceEdge: faceEdge,
+			bound:    clippedEdgeBound(faceEdge.a, faceEdge.b, p.bound),
+		}
+		newEdges = append(newEdges, clipped)
+	}
+
+	// Discard any edges from "edges" that are being removed, and append the
+	// remainder to "newEdges". (This keeps the edges sorted by shape id.)
+	for i, clipped := range edges {
+		if !s.isShapeBeingRemoved(clipped.faceEdge.shapeID) {
+			newEdges = append(newEdges, edges[i:]...)
+			break
+		}
+	}
+
+	// Update the edge list and delete this cell from the index.
+	// NOTE(review): this swap only exchanges the local slice headers; unlike
+	// the C++ edges->swap(new_edges), the caller's slice is not updated.
+	// Propagating the merged edges would require a signature change (e.g.
+	// passing *[]*clippedEdge) — TODO confirm against updateEdges.
+	edges, newEdges = newEdges, edges
+	delete(s.cellMap, p.id)
+	// TODO(roberts): delete from s.Cells
+}
+
+// testAllEdges calls the tracker's testEdge on every edge whose shape has an interior.
+func (s *ShapeIndex) testAllEdges(edges []*clippedEdge, t *tracker) {
+	for _, ce := range edges {
+		fe := ce.faceEdge
+		if fe.hasInterior {
+			t.testEdge(fe.shapeID, fe.edge)
+		}
+	}
+}
+
+// countShapes reports the number of distinct shapes that are either associated with the
+// given edges, or that are currently stored in the InteriorTracker.
+//
+// Both sources are expected in increasing shape id order (edges grouped by
+// shapeID, shapeIDs sorted), allowing a single merging pass.
+func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
+	count := 0
+	lastShapeID := int32(-1)
+	// cNext is the index of the next unprocessed entry in shapeIDs.
+	cNext := int32(0)
+	for _, edge := range edges {
+		if edge.faceEdge.shapeID == lastShapeID {
+			continue
+		}
+
+		count++
+		lastShapeID = edge.faceEdge.shapeID
+
+		// Skip over any containing shapes up to and including this one,
+		// updating count as appropriate. The previous code compared the
+		// index cNext itself against the shape id, which miscounted; the
+		// comparison must be against the id stored at that index.
+		for ; cNext < int32(len(shapeIDs)); cNext++ {
+			if shapeIDs[cNext] > lastShapeID {
+				break
+			}
+			if shapeIDs[cNext] < lastShapeID {
+				// A containing shape with no edges in this cell still
+				// counts as a distinct shape.
+				count++
+			}
+		}
+	}
+
+	// Count any remaining containing shapes.
+	count += len(shapeIDs) - int(cNext)
+	return count
+}
+
+// maxLevelForEdge reports the maximum level for a given edge.
+func maxLevelForEdge(edge Edge) int {
+	// Compute the maximum cell size for which this edge is considered long;
+	// Norm is used instead of Angle because it is faster and the calculation
+	// does not need to be perfectly accurate.
+	longCellSize := cellSizeToLongEdgeRatio * edge.V0.Sub(edge.V1.Vector).Norm()
+	// The answer is the first subdivision level at which the average cell
+	// size drops to longCellSize or below.
+	return AvgEdgeMetric.MinLevel(longCellSize)
+}
+
+// removeShapeInternal does the actual work for removing a given shape from the index.
+// allEdges presumably holds the per-face edge lists of the shape being removed, and
+// t tracks interior containment state during the update — TODO confirm once implemented.
+func (s *ShapeIndex) removeShapeInternal(removed *removedShape, allEdges [][]faceEdge, t *tracker) {
+	// TODO(roberts): finish the implementation of this.
+}
+
+// TODO(roberts): Differences from C++.
+// ShapeContainsPoint
+// FindContainingShapes

+ 0 - 84
vendor/github.com/golang/geo/s2/shapeindex_test.go

@@ -1,84 +0,0 @@
-/*
-Copyright 2016 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"testing"
-)
-
-// testShape is a minimal implementation of the Shape interface for use in testing
-// until such time as there are other s2 types that implement it.
-type testShape struct {
-	a, b  Point
-	edges int
-}
-
-func newTestShape() *testShape                { return &testShape{} }
-func (s *testShape) NumEdges() int            { return s.edges }
-func (s *testShape) Edge(id int) (a, b Point) { return s.a, s.b }
-func (s *testShape) dimension() dimension     { return pointGeometry }
-func (s *testShape) numChains() int           { return 0 }
-func (s *testShape) chainStart(i int) int     { return 0 }
-func (s *testShape) HasInterior() bool        { return false }
-func (s *testShape) ContainsOrigin() bool     { return false }
-
-func TestShapeIndexBasics(t *testing.T) {
-	si := NewShapeIndex()
-	s := newTestShape()
-
-	if si.Len() != 0 {
-		t.Errorf("initial index should be empty after creation")
-	}
-	si.Add(s)
-
-	if si.Len() == 0 {
-		t.Errorf("index should not be empty after adding shape")
-	}
-
-	si.Reset()
-	if si.Len() != 0 {
-		t.Errorf("index should be empty after reset")
-	}
-}
-
-func TestShapeIndexCellBasics(t *testing.T) {
-	s := &shapeIndexCell{}
-
-	if len(s.shapes) != 0 {
-		t.Errorf("len(s.shapes) = %v, want %d", len(s.shapes), 0)
-	}
-
-	// create some clipped shapes to add.
-	c1 := &clippedShape{}
-	s.add(c1)
-
-	c2 := newClippedShape(7, 1)
-	s.add(c2)
-
-	c3 := &clippedShape{}
-	s.add(c3)
-
-	// look up the element at a given index
-	if got := s.shapes[1]; got != c2 {
-		t.Errorf("%v.shapes[%d] = %v, want %v", s, 1, got, c2)
-	}
-
-	// look for the clipped shape that is part of the given shape.
-	if got := s.findByID(7); got != c2 {
-		t.Errorf("%v.findByID(%v) = %v, want %v", s, 7, got, c2)
-	}
-}

+ 227 - 0
vendor/github.com/golang/geo/s2/shapeutil.go

@@ -0,0 +1,227 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// CrossingType defines different ways of reporting edge intersections.
+// (NOTE(review): mirrors the C++ s2shapeutil CrossingType options — verify
+// against the queries that consume it.)
+type CrossingType int
+
+const (
+	// CrossingTypeInterior reports intersections that occur at a point
+	// interior to both edges (i.e., not at a vertex).
+	CrossingTypeInterior CrossingType = iota
+
+	// CrossingTypeAll reports all intersections, even those where two edges
+	// intersect only because they share a common vertex.
+	CrossingTypeAll
+
+	// CrossingTypeNonAdjacent reports all intersections except for pairs of
+	// the form (AB, BC) where both edges are from the same ShapeIndex.
+	CrossingTypeNonAdjacent
+)
+
+// rangeIterator is a wrapper over ShapeIndexIterator with extra methods
+// that are useful for merging the contents of two or more ShapeIndexes.
+type rangeIterator struct {
+	it *ShapeIndexIterator
+	// rangeMin and rangeMax cache the min and max leaf cell ids covered by
+	// the current cell. Once done() is true they hold values larger than any
+	// valid cell id.
+	rangeMin CellID
+	rangeMax CellID
+}
+
+// newRangeIterator creates a new rangeIterator positioned at the first cell of the given index.
+func newRangeIterator(index *ShapeIndex) *rangeIterator {
+	ri := &rangeIterator{it: index.Iterator()}
+	ri.refresh()
+	return ri
+}
+
+// cellID returns the CellID of the iterator's current cell.
+func (r *rangeIterator) cellID() CellID             { return r.it.CellID() }
+
+// indexCell returns the index cell at the iterator's current position.
+func (r *rangeIterator) indexCell() *ShapeIndexCell { return r.it.IndexCell() }
+
+// next advances the underlying iterator and refreshes the cached range.
+func (r *rangeIterator) next()                      { r.it.Next(); r.refresh() }
+
+// done reports whether the underlying iterator is exhausted.
+func (r *rangeIterator) done() bool                 { return r.it.Done() }
+
+// seekTo positions the iterator at the first cell that overlaps or follows
+// the current range minimum of the target iterator, i.e. such that its
+// rangeMax >= target.rangeMin.
+func (r *rangeIterator) seekTo(target *rangeIterator) {
+	r.it.seek(target.rangeMin)
+	// If the current cell does not overlap target, it is possible that the
+	// previous cell is the one we are looking for. This can only happen when
+	// the previous cell contains target but has a smaller CellID.
+	if r.it.Done() || r.it.CellID().RangeMin() > target.rangeMax {
+		// Step back one cell; if that cell ends before the target it was not
+		// a container after all, so return to the original position.
+		if r.it.Prev() && r.it.CellID().RangeMax() < target.cellID() {
+			r.it.Next()
+		}
+	}
+	r.refresh()
+}
+
+// seekBeyond positions the iterator at the first cell that follows the current
+// range maximum of the target iterator, i.e. the first cell such that its
+// rangeMin > target.rangeMax.
+func (r *rangeIterator) seekBeyond(target *rangeIterator) {
+	r.it.seek(target.rangeMax.Next())
+	// seek may land on a cell that still overlaps the target's range; if so,
+	// advance once more.
+	if !r.it.Done() && r.it.CellID().RangeMin() <= target.rangeMax {
+		r.it.Next()
+	}
+	r.refresh()
+}
+
+// refresh updates the iterator's cached rangeMin and rangeMax values from the
+// current cell id.
+func (r *rangeIterator) refresh() {
+	r.rangeMin = r.cellID().RangeMin()
+	r.rangeMax = r.cellID().RangeMax()
+}
+
+// referencePointForShape is a helper function for implementing various Shapes
+// ReferencePoint functions.
+//
+// Given a shape consisting of closed polygonal loops, the interior of the
+// shape is defined as the region to the left of all edges (which must be
+// oriented consistently). This function then chooses an arbitrary point and
+// returns a ReferencePoint recording whether that point is contained by the
+// shape.
+//
+// Unlike Loop and Polygon, this method allows duplicate vertices and
+// edges, which requires some extra care with definitions. The rule that we
+// apply is that an edge and its reverse edge cancel each other: the result
+// is the same as if that edge pair were not present. Therefore shapes that
+// consist only of degenerate loop(s) are either empty or full; by convention,
+// the shape is considered full if and only if it contains an empty loop (see
+// laxPolygon for details).
+//
+// Determining whether a loop on the sphere contains a point is harder than
+// the corresponding problem in 2D plane geometry. It cannot be implemented
+// just by counting edge crossings because there is no such thing as a point
+// at infinity that is guaranteed to be outside the loop.
+//
+// This function requires that the given Shape have an interior.
+func referencePointForShape(shape Shape) ReferencePoint {
+	if shape.NumEdges() == 0 {
+		// A shape with no edges is defined to be full if and only if it
+		// contains an empty loop.
+		return OriginReferencePoint(shape.NumChains() > 0)
+	}
+	// Define a "matched" edge as one that can be paired with a corresponding
+	// reversed edge. Define a vertex as "balanced" if all of its edges are
+	// matched. In order to determine containment, we must find an unbalanced
+	// vertex. Often every vertex is unbalanced, so we start by trying an
+	// arbitrary vertex.
+	edge := shape.Edge(0)
+
+	if ref, ok := referencePointAtVertex(shape, edge.V0); ok {
+		return ref
+	}
+
+	// That didn't work, so now we do some extra work to find an unbalanced
+	// vertex (if any). Essentially we gather a list of edges and a list of
+	// reversed edges, and then sort them. The first edge that appears in one
+	// list but not the other is guaranteed to be unmatched.
+	n := shape.NumEdges()
+	var edges = make([]Edge, n)
+	var revEdges = make([]Edge, n)
+	for i := 0; i < n; i++ {
+		edge := shape.Edge(i)
+		edges[i] = edge
+		revEdges[i] = Edge{V0: edge.V1, V1: edge.V0}
+	}
+
+	sortEdges(edges)
+	sortEdges(revEdges)
+
+	// After sorting, a mismatch at position i identifies an edge that has no
+	// reversed partner, and hence an unbalanced vertex.
+	for i := 0; i < n; i++ {
+		if edges[i].Cmp(revEdges[i]) == -1 { // edges[i] is unmatched
+			if ref, ok := referencePointAtVertex(shape, edges[i].V0); ok {
+				return ref
+			}
+		}
+		if revEdges[i].Cmp(edges[i]) == -1 { // revEdges[i] is unmatched
+			if ref, ok := referencePointAtVertex(shape, revEdges[i].V0); ok {
+				return ref
+			}
+		}
+	}
+
+	// All vertices are balanced, so this polygon is either empty or full. By
+	// convention it is defined to be full if it contains any empty loop.
+	for i := 0; i < shape.NumChains(); i++ {
+		if shape.Chain(i).Length == 0 {
+			return OriginReferencePoint(true)
+		}
+	}
+
+	return OriginReferencePoint(false)
+}
+
+// referencePointAtVertex checks whether the given vertex is unbalanced, and if
+// so returns a ReferencePoint at that vertex together with true. If the vertex
+// is balanced (every incident edge is matched by a reversed edge) it returns a
+// zero ReferencePoint and false.
+func referencePointAtVertex(shape Shape, vTest Point) (ReferencePoint, bool) {
+	var ref ReferencePoint
+
+	// Let P be an unbalanced vertex. Vertex P is defined to be inside the
+	// region if the region contains a particular direction vector starting from
+	// P, namely the direction p.Ortho(). This can be calculated using
+	// ContainsVertexQuery.
+
+	containsQuery := NewContainsVertexQuery(vTest)
+	n := shape.NumEdges()
+	for e := 0; e < n; e++ {
+		edge := shape.Edge(e)
+		if edge.V0 == vTest {
+			containsQuery.AddEdge(edge.V1, 1)
+		}
+		if edge.V1 == vTest {
+			containsQuery.AddEdge(edge.V0, -1)
+		}
+	}
+	containsSign := containsQuery.ContainsVertex()
+	if containsSign == 0 {
+		return ref, false // There are no unmatched edges incident to this vertex.
+	}
+	ref.Point = vTest
+	ref.Contained = containsSign > 0
+
+	return ref, true
+}
+
+// containsBruteForce reports whether the given shape contains the given point.
+// Most clients should not use this method, since its running time is linear in
+// the number of shape edges. Instead clients should create a ShapeIndex and use
+// ContainsPointQuery, since this strategy is much more efficient when many
+// points need to be tested.
+//
+// Polygon boundaries are treated as being semi-open (see ContainsPointQuery
+// and VertexModel for other options).
+func containsBruteForce(shape Shape, point Point) bool {
+	if !shape.HasInterior() {
+		return false
+	}
+
+	ref := shape.ReferencePoint()
+	if ref.Point == point {
+		return ref.Contained
+	}
+
+	// Walk every edge, toggling containment each time the segment from the
+	// reference point to the query point is crossed.
+	crosser := NewEdgeCrosser(ref.Point, point)
+	inside := ref.Contained
+	numEdges := shape.NumEdges()
+	for i := 0; i < numEdges; i++ {
+		e := shape.Edge(i)
+		if crosser.EdgeOrVertexCrossing(e.V0, e.V1) {
+			inside = !inside
+		}
+	}
+	return inside
+}

+ 155 - 38
vendor/github.com/golang/geo/s2/stuv.go

@@ -1,18 +1,16 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 
 package s2
 package s2
 
 
@@ -22,15 +20,139 @@ import (
 	"github.com/golang/geo/r3"
 	"github.com/golang/geo/r3"
 )
 )
 
 
+//
+// This file contains documentation of the various coordinate systems used
+// throughout the library. Most importantly, S2 defines a framework for
+// decomposing the unit sphere into a hierarchy of "cells". Each cell is a
+// quadrilateral bounded by four geodesics. The top level of the hierarchy is
+// obtained by projecting the six faces of a cube onto the unit sphere, and
+// lower levels are obtained by subdividing each cell into four children
+// recursively. Cells are numbered such that sequentially increasing cells
+// follow a continuous space-filling curve over the entire sphere. The
+// transformation is designed to make the cells at each level fairly uniform
+// in size.
+//
+////////////////////////// S2 Cell Decomposition /////////////////////////
+//
+// The following methods define the cube-to-sphere projection used by
+// the Cell decomposition.
+//
+// In the process of converting a latitude-longitude pair to a 64-bit cell
+// id, the following coordinate systems are used:
+//
+//  (id)
+//    A CellID is a 64-bit encoding of a face and a Hilbert curve position
+//    on that face. The Hilbert curve position implicitly encodes both the
+//    position of a cell and its subdivision level (see s2cellid.go).
+//
+//  (face, i, j)
+//    Leaf-cell coordinates. "i" and "j" are integers in the range
+//    [0,(2**30)-1] that identify a particular leaf cell on the given face.
+//    The (i, j) coordinate system is right-handed on each face, and the
+//    faces are oriented such that Hilbert curves connect continuously from
+//    one face to the next.
+//
+//  (face, s, t)
+//    Cell-space coordinates. "s" and "t" are real numbers in the range
+//    [0,1] that identify a point on the given face. For example, the point
+//    (s, t) = (0.5, 0.5) corresponds to the center of the top-level face
+//    cell. This point is also a vertex of exactly four cells at each
+//    subdivision level greater than zero.
+//
+//  (face, si, ti)
+//    Discrete cell-space coordinates. These are obtained by multiplying
+//    "s" and "t" by 2**31 and rounding to the nearest unsigned integer.
+//    Discrete coordinates lie in the range [0,2**31]. This coordinate
+//    system can represent the edge and center positions of all cells with
+//    no loss of precision (including non-leaf cells). In binary, each
+//    coordinate of a level-k cell center ends with a 1 followed by
+//    (30 - k) 0s. The coordinates of its edges end with (at least)
+//    (31 - k) 0s.
+//
+//  (face, u, v)
+//    Cube-space coordinates in the range [-1,1]. To make the cells at each
+//    level more uniform in size after they are projected onto the sphere,
+//    we apply a nonlinear transformation of the form u=f(s), v=f(t).
+//    The (u, v) coordinates after this transformation give the actual
+//    coordinates on the cube face (modulo some 90 degree rotations) before
+//    it is projected onto the unit sphere.
+//
+//  (face, u, v, w)
+//    Per-face coordinate frame. This is an extension of the (face, u, v)
+//    cube-space coordinates that adds a third axis "w" in the direction of
+//    the face normal. It is always a right-handed 3D coordinate system.
+//    Cube-space coordinates can be converted to this frame by setting w=1,
+//    while (u,v,w) coordinates can be projected onto the cube face by
+//    dividing by w, i.e. (face, u/w, v/w).
+//
+//  (x, y, z)
+//    Direction vector (Point). Direction vectors are not necessarily unit
+//    length, and are often chosen to be points on the biunit cube
+//    [-1,+1]x[-1,+1]x[-1,+1]. They can be normalized to obtain the
+//    corresponding point on the unit sphere.
+//
+//  (lat, lng)
+//    Latitude and longitude (LatLng). Latitudes must be between -90 and
+//    90 degrees inclusive, and longitudes must be between -180 and 180
+//    degrees inclusive.
+//
+// Note that the (i, j), (s, t), (si, ti), and (u, v) coordinate systems are
+// right-handed on all six faces.
+//
+//
+// There are a number of different projections from cell-space (s,t) to
+// cube-space (u,v): linear, quadratic, and tangent. They have the following
+// tradeoffs:
+//
+//   Linear - This is the fastest transformation, but also produces the least
+//   uniform cell sizes. Cell areas vary by a factor of about 5.2, with the
+//   largest cells at the center of each face and the smallest cells in
+//   the corners.
+//
+//   Tangent - Transforming the coordinates via Atan makes the cell sizes
+//   more uniform. The areas vary by a maximum ratio of 1.4 as opposed to a
+//   maximum ratio of 5.2. However, each call to Atan is about as expensive
+//   as all of the other calculations combined when converting from points to
+//   cell ids, i.e. it reduces performance by a factor of 3.
+//
+//   Quadratic - This is an approximation of the tangent projection that
+//   is much faster and produces cells that are almost as uniform in size.
+//   It is about 3 times faster than the tangent projection for converting
+//   cell ids to points or vice versa. Cell areas vary by a maximum ratio of
+//   about 2.1.
+//
+// Here is a table comparing the cell uniformity using each projection. Area
+// Ratio is the maximum ratio over all subdivision levels of the largest cell
+// area to the smallest cell area at that level, Edge Ratio is the maximum
+// ratio of the longest edge of any cell to the shortest edge of any cell at
+// the same level, and Diag Ratio is the ratio of the longest diagonal of
+// any cell to the shortest diagonal of any cell at the same level.
+//
+//               Area    Edge    Diag
+//              Ratio   Ratio   Ratio
+// -----------------------------------
+// Linear:      5.200   2.117   2.959
+// Tangent:     1.414   1.414   1.704
+// Quadratic:   2.082   1.802   1.932
+//
+// The worst-case cell aspect ratios are about the same with all three
+// projections. The maximum ratio of the longest edge to the shortest edge
+// within the same cell is about 1.4 and the maximum ratio of the diagonals
+// within the same cell is about 1.7.
+//
+// For Go we have chosen to use only the Quadratic approach. Other language
+// implementations may offer other choices.
+
 const (
 const (
 	// maxSiTi is the maximum value of an si- or ti-coordinate.
 	// maxSiTi is the maximum value of an si- or ti-coordinate.
-	// It is one shift more than maxSize.
+	// It is one shift more than maxSize. The range of valid (si,ti)
+	// values is [0..maxSiTi].
 	maxSiTi = maxSize << 1
 	maxSiTi = maxSize << 1
 )
 )
 
 
 // siTiToST converts an si- or ti-value to the corresponding s- or t-value.
 // siTiToST converts an si- or ti-value to the corresponding s- or t-value.
 // Value is capped at 1.0 because there is no DCHECK in Go.
 // Value is capped at 1.0 because there is no DCHECK in Go.
-func siTiToST(si uint64) float64 {
+func siTiToST(si uint32) float64 {
 	if si > maxSiTi {
 	if si > maxSiTi {
 		return 1.0
 		return 1.0
 	}
 	}
@@ -40,11 +162,11 @@ func siTiToST(si uint64) float64 {
 // stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate.
 // stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate.
 // The result may be outside the range of valid (si,ti)-values. Value of
 // The result may be outside the range of valid (si,ti)-values. Value of
 // 0.49999999999999994 (math.NextAfter(0.5, -1)), will be incorrectly rounded up.
 // 0.49999999999999994 (math.NextAfter(0.5, -1)), will be incorrectly rounded up.
-func stToSiTi(s float64) uint64 {
+func stToSiTi(s float64) uint32 {
 	if s < 0 {
 	if s < 0 {
-		return uint64(s*maxSiTi - 0.5)
+		return uint32(s*maxSiTi - 0.5)
 	}
 	}
-	return uint64(s*maxSiTi + 0.5)
+	return uint32(s*maxSiTi + 0.5)
 }
 }
 
 
 // stToUV converts an s or t value to the corresponding u or v value.
 // stToUV converts an s or t value to the corresponding u or v value.
@@ -71,21 +193,16 @@ func uvToST(u float64) float64 {
 // face returns face ID from 0 to 5 containing the r. For points on the
 // face returns face ID from 0 to 5 containing the r. For points on the
 // boundary between faces, the result is arbitrary but deterministic.
 // boundary between faces, the result is arbitrary but deterministic.
 func face(r r3.Vector) int {
 func face(r r3.Vector) int {
-	abs := r.Abs()
-	id := 0
-	value := r.X
-	if abs.Y > abs.X {
-		id = 1
-		value = r.Y
-	}
-	if abs.Z > math.Abs(value) {
-		id = 2
-		value = r.Z
-	}
-	if value < 0 {
-		id += 3
+	f := r.LargestComponent()
+	switch {
+	case f == r3.XAxis && r.X < 0:
+		f += 3
+	case f == r3.YAxis && r.Y < 0:
+		f += 3
+	case f == r3.ZAxis && r.Z < 0:
+		f += 3
 	}
 	}
-	return id
+	return int(f)
 }
 }
 
 
 // validFaceXYZToUV given a valid face for the given point r (meaning that
 // validFaceXYZToUV given a valid face for the given point r (meaning that
@@ -190,13 +307,13 @@ func faceXYZtoUVW(face int, p Point) Point {
 
 
 // faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily
 // faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily
 // unit length) Point on the given face.
 // unit length) Point on the given face.
-func faceSiTiToXYZ(face int, si, ti uint64) Point {
+func faceSiTiToXYZ(face int, si, ti uint32) Point {
 	return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))}
 	return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))}
 }
 }
 
 
 // xyzToFaceSiTi transforms the (not necessarily unit length) Point to
 // xyzToFaceSiTi transforms the (not necessarily unit length) Point to
 // (face, si, ti) coordinates and the level the Point is at.
 // (face, si, ti) coordinates and the level the Point is at.
-func xyzToFaceSiTi(p Point) (face int, si, ti uint64, level int) {
+func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) {
 	face, u, v := xyzToFaceUV(p.Vector)
 	face, u, v := xyzToFaceUV(p.Vector)
 	si = stToSiTi(uvToST(u))
 	si = stToSiTi(uvToST(u))
 	ti = stToSiTi(uvToST(v))
 	ti = stToSiTi(uvToST(v))
@@ -205,8 +322,8 @@ func xyzToFaceSiTi(p Point) (face int, si, ti uint64, level int) {
 	// center. The si,ti values of 0 and maxSiTi need to be handled specially
 	// center. The si,ti values of 0 and maxSiTi need to be handled specially
 	// because they do not correspond to cell centers at any valid level; they
 	// because they do not correspond to cell centers at any valid level; they
 	// are mapped to level -1 by the code at the end.
 	// are mapped to level -1 by the code at the end.
-	level = maxLevel - findLSBSetNonZero64(si|maxSiTi)
-	if level < 0 || level != maxLevel-findLSBSetNonZero64(ti|maxSiTi) {
+	level = maxLevel - findLSBSetNonZero64(uint64(si|maxSiTi))
+	if level < 0 || level != maxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) {
 		return face, si, ti, -1
 		return face, si, ti, -1
 	}
 	}
 
 

+ 0 - 321
vendor/github.com/golang/geo/s2/stuv_test.go

@@ -1,321 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package s2
-
-import (
-	"math"
-	"testing"
-
-	"github.com/golang/geo/r3"
-)
-
-func TestSTUV(t *testing.T) {
-	if x := stToUV(uvToST(.125)); x != .125 {
-		t.Error("stToUV(uvToST(.125) == ", x)
-	}
-	if x := uvToST(stToUV(.125)); x != .125 {
-		t.Error("uvToST(stToUV(.125) == ", x)
-	}
-}
-
-func TestUVNorms(t *testing.T) {
-	step := 1 / 1024.0
-	for face := 0; face < 6; face++ {
-		for x := -1.0; x <= 1; x += step {
-			if !float64Eq(float64(faceUVToXYZ(face, x, -1).Cross(faceUVToXYZ(face, x, 1)).Angle(uNorm(face, x))), 0.0) {
-				t.Errorf("UNorm not orthogonal to the face(%d)", face)
-			}
-			if !float64Eq(float64(faceUVToXYZ(face, -1, x).Cross(faceUVToXYZ(face, 1, x)).Angle(vNorm(face, x))), 0.0) {
-				t.Errorf("VNorm not orthogonal to the face(%d)", face)
-			}
-		}
-	}
-}
-
-func TestFaceUVToXYZ(t *testing.T) {
-	// Check that each face appears exactly once.
-	var sum r3.Vector
-	for face := 0; face < 6; face++ {
-		center := faceUVToXYZ(face, 0, 0)
-		if !center.ApproxEqual(unitNorm(face).Vector) {
-			t.Errorf("faceUVToXYZ(%d, 0, 0) != unitNorm(%d), should be equal", face, face)
-		}
-		switch center.LargestComponent() {
-		case r3.XAxis:
-			if math.Abs(center.X) != 1 {
-				t.Errorf("%v.X = %v, want %v", center, math.Abs(center.X), 1)
-			}
-		case r3.YAxis:
-			if math.Abs(center.Y) != 1 {
-				t.Errorf("%v.Y = %v, want %v", center, math.Abs(center.Y), 1)
-			}
-		default:
-			if math.Abs(center.Z) != 1 {
-				t.Errorf("%v.Z = %v, want %v", center, math.Abs(center.Z), 1)
-			}
-		}
-		sum = sum.Add(center.Abs())
-
-		// Check that each face has a right-handed coordinate system.
-		if got := uAxis(face).Vector.Cross(vAxis(face).Vector).Dot(unitNorm(face).Vector); got != 1 {
-			t.Errorf("right-handed check failed. uAxis(%d).Cross(vAxis(%d)).Dot(unitNorm%v) = %d, want 1", face, face, face, got)
-		}
-
-		// Check that the Hilbert curves on each face combine to form a
-		// continuous curve over the entire cube.
-		// The Hilbert curve on each face starts at (-1,-1) and terminates
-		// at either (1,-1) (if axes not swapped) or (-1,1) (if swapped).
-		var sign float64 = 1
-		if face&swapMask == 1 {
-			sign = -1
-		}
-		if faceUVToXYZ(face, sign, -sign) != faceUVToXYZ((face+1)%6, -1, -1) {
-			t.Errorf("faceUVToXYZ(%v, %v, %v) != faceUVToXYZ(%v, -1, -1)", face, sign, -sign, (face+1)%6)
-		}
-	}
-
-	// Adding up the absolute value all all the face normals should equal 2 on each axis.
-	if !sum.ApproxEqual(r3.Vector{2, 2, 2}) {
-		t.Errorf("sum of the abs of the 6 face norms should = %v, got %v", r3.Vector{2, 2, 2}, sum)
-	}
-}
-
-func TestFaceXYZToUV(t *testing.T) {
-	var (
-		point    = Point{r3.Vector{1.1, 1.2, 1.3}}
-		pointNeg = Point{r3.Vector{-1.1, -1.2, -1.3}}
-	)
-
-	tests := []struct {
-		face  int
-		point Point
-		u     float64
-		v     float64
-		ok    bool
-	}{
-		{0, point, 1 + (1.0 / 11), 1 + (2.0 / 11), true},
-		{0, pointNeg, 0, 0, false},
-		{1, point, -11.0 / 12, 1 + (1.0 / 12), true},
-		{1, pointNeg, 0, 0, false},
-		{2, point, -11.0 / 13, -12.0 / 13, true},
-		{2, pointNeg, 0, 0, false},
-		{3, point, 0, 0, false},
-		{3, pointNeg, 1 + (2.0 / 11), 1 + (1.0 / 11), true},
-		{4, point, 0, 0, false},
-		{4, pointNeg, 1 + (1.0 / 12), -(11.0 / 12), true},
-		{5, point, 0, 0, false},
-		{5, pointNeg, -12.0 / 13, -11.0 / 13, true},
-	}
-
-	for _, test := range tests {
-		if u, v, ok := faceXYZToUV(test.face, test.point); !float64Eq(u, test.u) || !float64Eq(v, test.v) || ok != test.ok {
-			t.Errorf("faceXYZToUV(%d, %v) = %f, %f, %t, want %f, %f, %t", test.face, test.point, u, v, ok, test.u, test.v, test.ok)
-		}
-	}
-}
-
-func TestFaceXYZtoUVW(t *testing.T) {
-	var (
-		origin = Point{r3.Vector{0, 0, 0}}
-		posX   = Point{r3.Vector{1, 0, 0}}
-		negX   = Point{r3.Vector{-1, 0, 0}}
-		posY   = Point{r3.Vector{0, 1, 0}}
-		negY   = Point{r3.Vector{0, -1, 0}}
-		posZ   = Point{r3.Vector{0, 0, 1}}
-		negZ   = Point{r3.Vector{0, 0, -1}}
-	)
-
-	for face := 0; face < 6; face++ {
-		if got := faceXYZtoUVW(face, origin); got != origin {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, origin, got, origin)
-		}
-
-		if got := faceXYZtoUVW(face, uAxis(face)); got != posX {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, uAxis(face), got, posX)
-		}
-
-		if got := faceXYZtoUVW(face, Point{uAxis(face).Mul(-1)}); got != negX {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, uAxis(face).Mul(-1), got, negX)
-		}
-
-		if got := faceXYZtoUVW(face, vAxis(face)); got != posY {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, vAxis(face), got, posY)
-		}
-
-		if got := faceXYZtoUVW(face, Point{vAxis(face).Mul(-1)}); got != negY {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, vAxis(face).Mul(-1), got, negY)
-		}
-
-		if got := faceXYZtoUVW(face, unitNorm(face)); got != posZ {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, unitNorm(face), got, posZ)
-		}
-
-		if got := faceXYZtoUVW(face, Point{unitNorm(face).Mul(-1)}); got != negZ {
-			t.Errorf("faceXYZtoUVW(%d, %v) = %v, want %v", face, unitNorm(face).Mul(-1), got, negZ)
-		}
-	}
-}
-
-func TestUVWAxis(t *testing.T) {
-	for face := 0; face < 6; face++ {
-		// Check that the axes are consistent with faceUVtoXYZ.
-		if faceUVToXYZ(face, 1, 0).Sub(faceUVToXYZ(face, 0, 0)) != uAxis(face).Vector {
-			t.Errorf("face 1,0 - face 0,0 should equal uAxis")
-		}
-		if faceUVToXYZ(face, 0, 1).Sub(faceUVToXYZ(face, 0, 0)) != vAxis(face).Vector {
-			t.Errorf("faceUVToXYZ(%d, 0, 1).Sub(faceUVToXYZ(%d, 0, 0)) != vAxis(%d), should be equal.", face, face, face)
-		}
-		if faceUVToXYZ(face, 0, 0) != unitNorm(face).Vector {
-			t.Errorf("faceUVToXYZ(%d, 0, 0) != unitNorm(%d), should be equal", face, face)
-		}
-
-		// Check that every face coordinate frame is right-handed.
-		if got := uAxis(face).Vector.Cross(vAxis(face).Vector).Dot(unitNorm(face).Vector); got != 1 {
-			t.Errorf("right-handed check failed. got %d, want 1", got)
-		}
-
-		// Check that GetUVWAxis is consistent with GetUAxis, GetVAxis, GetNorm.
-		if uAxis(face) != uvwAxis(face, 0) {
-			t.Errorf("uAxis(%d) != uvwAxis(%d, 0), should be equal", face, face)
-		}
-		if vAxis(face) != uvwAxis(face, 1) {
-			t.Errorf("vAxis(%d) != uvwAxis(%d, 1), should be equal", face, face)
-		}
-		if unitNorm(face) != uvwAxis(face, 2) {
-			t.Errorf("unitNorm(%d) != uvwAxis(%d, 2), should be equal", face, face)
-		}
-	}
-}
-
-func TestSiTiSTRoundtrip(t *testing.T) {
-	// test int -> float -> int direction.
-	for i := 0; i < 1000; i++ {
-		si := uint64(randomUniformInt(maxSiTi))
-		if got := stToSiTi(siTiToST(si)); got != si {
-			t.Errorf("stToSiTi(siTiToST(%v)) = %v, want %v", si, got, si)
-		}
-	}
-	// test float -> int -> float direction.
-	for i := 0; i < 1000; i++ {
-		st := randomUniformFloat64(0, 1.0)
-		// this uses near not exact because there is some loss in precision
-		// when scaling down to the nearest 1/maxLevel and back.
-		if got := siTiToST(stToSiTi(st)); !float64Near(got, st, 1e-8) {
-			t.Errorf("siTiToST(stToSiTi(%v)) = %v, want %v", st, got, st)
-		}
-	}
-}
-
-func TestUVWFace(t *testing.T) {
-	// Check that uvwFace is consistent with uvwAxis.
-	for f := 0; f < 6; f++ {
-		for axis := 0; axis < 3; axis++ {
-			if got, want := face(uvwAxis(f, axis).Mul(-1)), uvwFace(f, axis, 0); got != want {
-				t.Errorf("face(%v) in positive direction = %v, want %v", uvwAxis(f, axis).Mul(-1), got, want)
-			}
-			if got, want := face(uvwAxis(f, axis).Vector), uvwFace(f, axis, 1); got != want {
-				t.Errorf("face(%v) in negative direction = %v, want %v", uvwAxis(f, axis), got, want)
-			}
-		}
-	}
-}
-
-func TestXYZToFaceSiTi(t *testing.T) {
-	for level := 0; level < maxLevel; level++ {
-		for i := 0; i < 1000; i++ {
-			ci := randomCellIDForLevel(level)
-			f, si, ti, gotLevel := xyzToFaceSiTi(ci.Point())
-			if gotLevel != level {
-				t.Errorf("level of CellID %v = %v, want %v", ci, gotLevel, level)
-			}
-			gotID := cellIDFromFaceIJ(f, int(si/2), int(ti/2)).Parent(level)
-			if gotID != ci {
-				t.Errorf("CellID = %b, want %b", gotID, ci)
-			}
-
-			// Test a point near the cell center but not equal to it.
-			pMoved := ci.Point().Add(r3.Vector{1e-13, 1e-13, 1e-13})
-			fMoved, siMoved, tiMoved, gotLevel := xyzToFaceSiTi(Point{pMoved})
-
-			if gotLevel != -1 {
-				t.Errorf("level of %v = %v, want %v", pMoved, gotLevel, -1)
-			}
-
-			if f != fMoved {
-				t.Errorf("face of %v = %v, want %v", pMoved, fMoved, f)
-			}
-
-			if si != siMoved {
-				t.Errorf("si of %v = %v, want %v", pMoved, siMoved, si)
-			}
-
-			if ti != tiMoved {
-				t.Errorf("ti of %v = %v, want %v", pMoved, tiMoved, ti)
-			}
-
-			// Finally, test some random (si,ti) values that may be at different
-			// levels, or not at a valid level at all (for example, si == 0).
-			faceRandom := randomUniformInt(numFaces)
-			mask := -1 << uint64(maxLevel-level)
-			siRandom := uint64(randomUint32() & uint32(mask))
-			tiRandom := uint64(randomUint32() & uint32(mask))
-			for siRandom > maxSiTi || tiRandom > maxSiTi {
-				siRandom = uint64(randomUint32() & uint32(mask))
-				tiRandom = uint64(randomUint32() & uint32(mask))
-			}
-
-			pRandom := faceSiTiToXYZ(faceRandom, siRandom, tiRandom)
-			f, si, ti, gotLevel = xyzToFaceSiTi(pRandom)
-
-			// The chosen point is on the edge of a top-level face cell.
-			if f != faceRandom {
-				if gotLevel != -1 {
-					t.Errorf("level of random CellID = %v, want %v", gotLevel, -1)
-				}
-				if !(si == 0 || si == maxSiTi || ti == 0 || ti == maxSiTi) {
-					t.Errorf("face %d, si = %v, ti = %v, want 0 or %v for both", f, si, ti, maxSiTi)
-				}
-				continue
-			}
-
-			if siRandom != si {
-				t.Errorf("xyzToFaceSiTi(%v).si = %v, want %v", pRandom, siRandom, si)
-			}
-			if tiRandom != ti {
-				t.Errorf("xyzToFaceSiTi(%v).ti = %v, want %v", pRandom, tiRandom, ti)
-			}
-			if gotLevel >= 0 {
-				if got := cellIDFromFaceIJ(f, int(si/2), int(ti/2)).Parent(gotLevel).Point(); !pRandom.ApproxEqual(got) {
-					t.Errorf("cellIDFromFaceIJ(%d, %d, %d).Parent(%d) = %v, want %v", f, si/2, ti/2, gotLevel, got, pRandom)
-				}
-			}
-		}
-	}
-}
-
-func TestXYZFaceSiTiRoundtrip(t *testing.T) {
-	for level := 0; level < maxLevel; level++ {
-		for i := 0; i < 1000; i++ {
-			ci := randomCellIDForLevel(level)
-			f, si, ti, _ := xyzToFaceSiTi(ci.Point())
-			op := faceSiTiToXYZ(f, si, ti)
-			if !ci.Point().ApproxEqual(op) {
-				t.Errorf("faceSiTiToXYZ(xyzToFaceSiTi(%v)) = %v, want %v", ci.Point(), op, ci.Point())
-			}
-		}
-	}
-}

+ 125 - 0
vendor/github.com/golang/geo/s2/util.go

@@ -0,0 +1,125 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import "github.com/golang/geo/s1"
+
+// roundAngle returns the value rounded to nearest as an int32.
+// This does not match C++ exactly for the case of x.5.
+func roundAngle(val s1.Angle) int32 {
+	if val < 0 {
+		return int32(val - 0.5)
+	}
+	return int32(val + 0.5)
+}
+
+// minAngle returns the smallest of the given values.
+func minAngle(x s1.Angle, others ...s1.Angle) s1.Angle {
+	min := x
+	for _, y := range others {
+		if y < min {
+			min = y
+		}
+	}
+	return min
+}
+
+// maxAngle returns the largest of the given values.
+func maxAngle(x s1.Angle, others ...s1.Angle) s1.Angle {
+	max := x
+	for _, y := range others {
+		if y > max {
+			max = y
+		}
+	}
+	return max
+}
+
+// minChordAngle returns the smallest of the given values.
+func minChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle {
+	min := x
+	for _, y := range others {
+		if y < min {
+			min = y
+		}
+	}
+	return min
+}
+
+// maxChordAngle returns the largest of the given values.
+func maxChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle {
+	max := x
+	for _, y := range others {
+		if y > max {
+			max = y
+		}
+	}
+	return max
+}
+
+// minFloat64 returns the smallest of the given values.
+func minFloat64(x float64, others ...float64) float64 {
+	min := x
+	for _, y := range others {
+		if y < min {
+			min = y
+		}
+	}
+	return min
+}
+
+// maxFloat64 returns the largest of the given values.
+func maxFloat64(x float64, others ...float64) float64 {
+	max := x
+	for _, y := range others {
+		if y > max {
+			max = y
+		}
+	}
+	return max
+}
+
+// minInt returns the smallest of the given values.
+func minInt(x int, others ...int) int {
+	min := x
+	for _, y := range others {
+		if y < min {
+			min = y
+		}
+	}
+	return min
+}
+
+// maxInt returns the largest of the given values.
+func maxInt(x int, others ...int) int {
+	max := x
+	for _, y := range others {
+		if y > max {
+			max = y
+		}
+	}
+	return max
+}
+
+// clampInt returns the number closest to x within the range min..max.
+func clampInt(x, min, max int) int {
+	if x < min {
+		return min
+	}
+	if x > max {
+		return max
+	}
+	return x
+}

+ 97 - 0
vendor/github.com/golang/geo/s2/wedge_relations.go

@@ -0,0 +1,97 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// WedgeRel enumerates the possible relation between two wedges A and B.
+type WedgeRel int
+
+// Define the different possible relationships between two wedges.
+//
+// Given an edge chain (x0, x1, x2), the wedge at x1 is the region to the
+// left of the edges. More precisely, it is the set of all rays from x1x0
+// (inclusive) to x1x2 (exclusive) in the *clockwise* direction.
+const (
+	WedgeEquals              WedgeRel = iota // A and B are equal.
+	WedgeProperlyContains                    // A is a strict superset of B.
+	WedgeIsProperlyContained                 // A is a strict subset of B.
+	WedgeProperlyOverlaps                    // A-B, B-A, and A intersect B are non-empty.
+	WedgeIsDisjoint                          // A and B are disjoint.
+)
+
+// WedgeRelation reports the relation between two non-empty wedges
+// A=(a0, ab1, a2) and B=(b0, ab1, b2).
+func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel {
+	// There are 6 possible edge orderings at a shared vertex (all
+	// of these orderings are circular, i.e. abcd == bcda):
+	//
+	//  (1) a2 b2 b0 a0: A contains B
+	//  (2) a2 a0 b0 b2: B contains A
+	//  (3) a2 a0 b2 b0: A and B are disjoint
+	//  (4) a2 b0 a0 b2: A and B intersect in one wedge
+	//  (5) a2 b2 a0 b0: A and B intersect in one wedge
+	//  (6) a2 b0 b2 a0: A and B intersect in two wedges
+	//
+	// We do not distinguish between 4, 5, and 6.
+	// We pay extra attention when some of the edges overlap.  When edges
+	// overlap, several of these orderings can be satisfied, and we take
+	// the most specific.
+	if a0 == b0 && a2 == b2 {
+		return WedgeEquals
+	}
+
+	// Cases 1, 2, 5, and 6
+	if OrderedCCW(a0, a2, b2, ab1) {
+		// The cases with this vertex ordering are 1, 5, and 6,
+		if OrderedCCW(b2, b0, a0, ab1) {
+			return WedgeProperlyContains
+		}
+
+		// We are in case 5 or 6, or case 2 if a2 == b2.
+		if a2 == b2 {
+			return WedgeIsProperlyContained
+		}
+		return WedgeProperlyOverlaps
+
+	}
+	// We are in case 2, 3, or 4.
+	if OrderedCCW(a0, b0, b2, ab1) {
+		return WedgeIsProperlyContained
+	}
+
+	if OrderedCCW(a0, b0, a2, ab1) {
+		return WedgeIsDisjoint
+	}
+	return WedgeProperlyOverlaps
+}
+
+// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2).
+// Equivalent to WedgeRelation == WedgeProperlyContains || WedgeEquals.
+func WedgeContains(a0, ab1, a2, b0, b2 Point) bool {
+	// For A to contain B (where each loop interior is defined to be its left
+	// side), the CCW edge order around ab1 must be a2 b2 b0 a0.  We split
+	// this test into two parts that test three vertices each.
+	return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1)
+}
+
+// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2).
+// Equivalent but faster than WedgeRelation != WedgeIsDisjoint
+func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool {
+	// For A not to intersect B (where each loop interior is defined to be
+	// its left side), the CCW edge order around ab1 must be a0 b2 b0 a2.
+	// Note that it's important to write these conditions as negatives
+	// (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct
+	// results when two vertices are the same.
+	return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1))
+}

+ 0 - 3
vendor/github.com/hpcloud/tail/.gitignore

@@ -1,3 +0,0 @@
-.test
-.go
-

+ 0 - 18
vendor/github.com/hpcloud/tail/.travis.yml

@@ -1,18 +0,0 @@
-language: go
-
-script:
-  - go test -race -v ./...
-
-go:
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
-
-install:
-  - go get gopkg.in/fsnotify.v1
-  - go get gopkg.in/tomb.v1

+ 0 - 63
vendor/github.com/hpcloud/tail/CHANGES.md

@@ -1,63 +0,0 @@
-# API v1 (gopkg.in/hpcloud/tail.v1)
-
-## April, 2016
-
-* Migrated to godep, as depman is not longer supported
-* Introduced golang vendoring feature
-* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file 
-
-## July, 2015
-
-* Fix inotify watcher leak; remove `Cleanup` (#51)
-
-# API v0 (gopkg.in/hpcloud/tail.v0)
-
-## June, 2015
-
-* Don't return partial lines (PR #40)
-* Use stable version of fsnotify (#46)
-
-## July, 2014
-
-* Fix tail for Windows (PR #36)
-
-## May, 2014
-
-* Improved rate limiting using leaky bucket (PR #29)
-* Fix odd line splitting (PR #30)
-
-## Apr, 2014
-
-* LimitRate now discards read buffer (PR #28)
-* allow reading of longer lines if MaxLineSize is unset (PR #24)
-* updated deps.json to latest fsnotify (441bbc86b1)
-
-## Feb, 2014
-
-* added `Config.Logger` to suppress library logging
-
-## Nov, 2013
-
-* add Cleanup to remove leaky inotify watches (PR #20)
-
-## Aug, 2013
-
-* redesigned Location field (PR #12)
-* add tail.Tell (PR #14)
-
-## July, 2013
-
-* Rate limiting (PR #10)
-
-## May, 2013
-
-* Detect file deletions/renames in polling file watcher (PR #1)
-* Detect file truncation
-* Fix potential race condition when reopening the file (issue 5)
-* Fix potential blocking of `tail.Stop` (issue 4)
-* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
-* Support Follow=false
-
-## Feb, 2013
-
-* Initial open source release

+ 0 - 19
vendor/github.com/hpcloud/tail/Dockerfile

@@ -1,19 +0,0 @@
-FROM golang
-
-RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
-ADD . $GOPATH/src/github.com/hpcloud/tail/
-
-# expecting to fetch dependencies successfully.
-RUN go get -v github.com/hpcloud/tail
-
-# expecting to run the test successfully.
-RUN go test -v github.com/hpcloud/tail
-
-# expecting to install successfully
-RUN go install -v github.com/hpcloud/tail
-RUN go install -v github.com/hpcloud/tail/cmd/gotail
-
-RUN $GOPATH/bin/gotail -h || true
-
-ENV PATH $GOPATH/bin:$PATH
-CMD ["gotail"]

+ 0 - 15
vendor/github.com/hpcloud/tail/Godeps/Godeps.json

@@ -1,15 +0,0 @@
-{
-	"ImportPath": "github.com/hpcloud/tail",
-	"GoVersion": "go1.5.1",
-	"Deps": [
-		{
-			"ImportPath": "gopkg.in/fsnotify.v1",
-			"Comment": "v1.2.1",
-			"Rev": "7be54206639f256967dd82fa767397ba5f8f48f5"
-		},
-		{
-			"ImportPath": "gopkg.in/tomb.v1",
-			"Rev": "c131134a1947e9afd9cecfe11f4c6dff0732ae58"
-		}
-	]
-}

+ 0 - 5
vendor/github.com/hpcloud/tail/Godeps/Readme

@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.

Some files were not shown because too many files changed in this diff