Browse Source

Remove vendor dir (yay go modules)

Ask Bjørn Hansen 4 years ago
parent
commit
b0f106fc0f
100 changed files with 0 additions and 25517 deletions
  1. 0 37
      vendor/github.com/abh/errorutil/README.md
  2. 0 58
      vendor/github.com/abh/errorutil/highlight.go
  3. 0 20
      vendor/github.com/beorn7/perks/LICENSE
  4. 0 2388
      vendor/github.com/beorn7/perks/quantile/exampledata.txt
  5. 0 316
      vendor/github.com/beorn7/perks/quantile/stream.go
  6. 0 8
      vendor/github.com/cespare/xxhash/v2/.travis.yml
  7. 0 22
      vendor/github.com/cespare/xxhash/v2/LICENSE.txt
  8. 0 67
      vendor/github.com/cespare/xxhash/v2/README.md
  9. 0 3
      vendor/github.com/cespare/xxhash/v2/go.mod
  10. 0 0
      vendor/github.com/cespare/xxhash/v2/go.sum
  11. 0 236
      vendor/github.com/cespare/xxhash/v2/xxhash.go
  12. 0 13
      vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
  13. 0 215
      vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
  14. 0 76
      vendor/github.com/cespare/xxhash/v2/xxhash_other.go
  15. 0 15
      vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
  16. 0 46
      vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
  17. 0 15
      vendor/github.com/davecgh/go-spew/LICENSE
  18. 0 145
      vendor/github.com/davecgh/go-spew/spew/bypass.go
  19. 0 38
      vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
  20. 0 341
      vendor/github.com/davecgh/go-spew/spew/common.go
  21. 0 306
      vendor/github.com/davecgh/go-spew/spew/config.go
  22. 0 211
      vendor/github.com/davecgh/go-spew/spew/doc.go
  23. 0 509
      vendor/github.com/davecgh/go-spew/spew/dump.go
  24. 0 419
      vendor/github.com/davecgh/go-spew/spew/format.go
  25. 0 148
      vendor/github.com/davecgh/go-spew/spew/spew.go
  26. 0 12
      vendor/github.com/fsnotify/fsnotify/.editorconfig
  27. 0 1
      vendor/github.com/fsnotify/fsnotify/.gitattributes
  28. 0 6
      vendor/github.com/fsnotify/fsnotify/.gitignore
  29. 0 36
      vendor/github.com/fsnotify/fsnotify/.travis.yml
  30. 0 52
      vendor/github.com/fsnotify/fsnotify/AUTHORS
  31. 0 317
      vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
  32. 0 77
      vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
  33. 0 28
      vendor/github.com/fsnotify/fsnotify/LICENSE
  34. 0 130
      vendor/github.com/fsnotify/fsnotify/README.md
  35. 0 37
      vendor/github.com/fsnotify/fsnotify/fen.go
  36. 0 68
      vendor/github.com/fsnotify/fsnotify/fsnotify.go
  37. 0 5
      vendor/github.com/fsnotify/fsnotify/go.mod
  38. 0 2
      vendor/github.com/fsnotify/fsnotify/go.sum
  39. 0 337
      vendor/github.com/fsnotify/fsnotify/inotify.go
  40. 0 187
      vendor/github.com/fsnotify/fsnotify/inotify_poller.go
  41. 0 521
      vendor/github.com/fsnotify/fsnotify/kqueue.go
  42. 0 11
      vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
  43. 0 12
      vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
  44. 0 561
      vendor/github.com/fsnotify/fsnotify/windows.go
  45. 0 202
      vendor/github.com/golang/geo/LICENSE
  46. 0 20
      vendor/github.com/golang/geo/r1/doc.go
  47. 0 177
      vendor/github.com/golang/geo/r1/interval.go
  48. 0 20
      vendor/github.com/golang/geo/r2/doc.go
  49. 0 255
      vendor/github.com/golang/geo/r2/rect.go
  50. 0 20
      vendor/github.com/golang/geo/r3/doc.go
  51. 0 198
      vendor/github.com/golang/geo/r3/precisevector.go
  52. 0 183
      vendor/github.com/golang/geo/r3/vector.go
  53. 0 120
      vendor/github.com/golang/geo/s1/angle.go
  54. 0 250
      vendor/github.com/golang/geo/s1/chordangle.go
  55. 0 20
      vendor/github.com/golang/geo/s1/doc.go
  56. 0 462
      vendor/github.com/golang/geo/s1/interval.go
  57. 0 53
      vendor/github.com/golang/geo/s2/bits_go18.go
  58. 0 39
      vendor/github.com/golang/geo/s2/bits_go19.go
  59. 0 519
      vendor/github.com/golang/geo/s2/cap.go
  60. 0 698
      vendor/github.com/golang/geo/s2/cell.go
  61. 0 942
      vendor/github.com/golang/geo/s2/cellid.go
  62. 0 590
      vendor/github.com/golang/geo/s2/cellunion.go
  63. 0 133
      vendor/github.com/golang/geo/s2/centroids.go
  64. 0 190
      vendor/github.com/golang/geo/s2/contains_point_query.go
  65. 0 63
      vendor/github.com/golang/geo/s2/contains_vertex_query.go
  66. 0 239
      vendor/github.com/golang/geo/s2/convex_hull_query.go
  67. 0 409
      vendor/github.com/golang/geo/s2/crossing_edge_query.go
  68. 0 149
      vendor/github.com/golang/geo/s2/distance_target.go
  69. 0 29
      vendor/github.com/golang/geo/s2/doc.go
  70. 0 672
      vendor/github.com/golang/geo/s2/edge_clipping.go
  71. 0 227
      vendor/github.com/golang/geo/s2/edge_crosser.go
  72. 0 396
      vendor/github.com/golang/geo/s2/edge_crossings.go
  73. 0 408
      vendor/github.com/golang/geo/s2/edge_distances.go
  74. 0 512
      vendor/github.com/golang/geo/s2/edge_query.go
  75. 0 167
      vendor/github.com/golang/geo/s2/edge_tessellator.go
  76. 0 237
      vendor/github.com/golang/geo/s2/encode.go
  77. 0 143
      vendor/github.com/golang/geo/s2/interleave.go
  78. 0 101
      vendor/github.com/golang/geo/s2/latlng.go
  79. 0 175
      vendor/github.com/golang/geo/s2/lexicon.go
  80. 0 1816
      vendor/github.com/golang/geo/s2/loop.go
  81. 0 127
      vendor/github.com/golang/geo/s2/matrix3x3.go
  82. 0 306
      vendor/github.com/golang/geo/s2/max_distance_targets.go
  83. 0 164
      vendor/github.com/golang/geo/s2/metric.go
  84. 0 362
      vendor/github.com/golang/geo/s2/min_distance_targets.go
  85. 0 88
      vendor/github.com/golang/geo/s2/nthderivative.go
  86. 0 252
      vendor/github.com/golang/geo/s2/paddedcell.go
  87. 0 258
      vendor/github.com/golang/geo/s2/point.go
  88. 0 149
      vendor/github.com/golang/geo/s2/point_measures.go
  89. 0 42
      vendor/github.com/golang/geo/s2/point_vector.go
  90. 0 319
      vendor/github.com/golang/geo/s2/pointcompression.go
  91. 0 1212
      vendor/github.com/golang/geo/s2/polygon.go
  92. 0 589
      vendor/github.com/golang/geo/s2/polyline.go
  93. 0 53
      vendor/github.com/golang/geo/s2/polyline_measures.go
  94. 0 701
      vendor/github.com/golang/geo/s2/predicates.go
  95. 0 203
      vendor/github.com/golang/geo/s2/projections.go
  96. 0 196
      vendor/github.com/golang/geo/s2/query_options.go
  97. 0 710
      vendor/github.com/golang/geo/s2/rect.go
  98. 0 352
      vendor/github.com/golang/geo/s2/rect_bounder.go
  99. 0 71
      vendor/github.com/golang/geo/s2/region.go
  100. 0 477
      vendor/github.com/golang/geo/s2/regioncoverer.go

+ 0 - 37
vendor/github.com/abh/errorutil/README.md

@@ -1,37 +0,0 @@
-errorutil
-=========
-
-Errorutil is a small go package to help show syntax errors in for example JSON documents.
-
-It was forked from [Camlistore](http://camlistore.org) to make a smaller dependency.
-
-
-Example
--------
-
-An example of how to use the package to show errors when decoding with
-[encoding/json](http://golang.org/pkg/encoding/json/).
-
-    if err = decoder.Decode(&objmap); err != nil {
-            extra := ""
-
-            // if it's a syntax error, add more information
-            if serr, ok := err.(*json.SyntaxError); ok {
-                    if _, serr := fh.Seek(0, os.SEEK_SET); serr != nil {
-                            log.Fatalf("seek error: %v", serr)
-                    }
-                    line, col, highlight := errorutil.HighlightBytePosition(fh, serr.Offset)
-                    extra = fmt.Sprintf(":\nError at line %d, column %d (file offset %d):\n%s",
-                            line, col, serr.Offset, highlight)
-            }
-
-            return nil, fmt.Errorf("error parsing JSON object in config file %s%s\n%v",
-                    fh.Name(), extra, err)
-    }
-
-
-License
--------
-
-This package is licesed under the Apache License, version 2.0. It was developed
-by Brad Fitzpatrick as part of the Camlistore project.

+ 0 - 58
vendor/github.com/abh/errorutil/highlight.go

@@ -1,58 +0,0 @@
-/*
-Copyright 2011 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package errorutil helps make better error messages.
-package errorutil
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-)
-
-// HighlightBytePosition takes a reader and the location in bytes of a parse
-// error (for instance, from json.SyntaxError.Offset) and returns the line, column,
-// and pretty-printed context around the error with an arrow indicating the exact
-// position of the syntax error.
-func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) {
-	line = 1
-	br := bufio.NewReader(f)
-	lastLine := ""
-	thisLine := new(bytes.Buffer)
-	for n := int64(0); n < pos; n++ {
-		b, err := br.ReadByte()
-		if err != nil {
-			break
-		}
-		if b == '\n' {
-			lastLine = thisLine.String()
-			thisLine.Reset()
-			line++
-			col = 1
-		} else {
-			col++
-			thisLine.WriteByte(b)
-		}
-	}
-	if line > 1 {
-		highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine)
-	}
-	highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String())
-	highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5))
-	return
-}

+ 0 - 20
vendor/github.com/beorn7/perks/LICENSE

@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt

@@ -1,2388 +0,0 @@
-8
-5
-26
-12
-5
-235
-13
-6
-28
-30
-3
-3
-3
-3
-5
-2
-33
-7
-2
-4
-7
-12
-14
-5
-8
-3
-10
-4
-5
-3
-6
-6
-209
-20
-3
-10
-14
-3
-4
-6
-8
-5
-11
-7
-3
-2
-3
-3
-212
-5
-222
-4
-10
-10
-5
-6
-3
-8
-3
-10
-254
-220
-2
-3
-5
-24
-5
-4
-222
-7
-3
-3
-223
-8
-15
-12
-14
-14
-3
-2
-2
-3
-13
-3
-11
-4
-4
-6
-5
-7
-13
-5
-3
-5
-2
-5
-3
-5
-2
-7
-15
-17
-14
-3
-6
-6
-3
-17
-5
-4
-7
-6
-4
-4
-8
-6
-8
-3
-9
-3
-6
-3
-4
-5
-3
-3
-660
-4
-6
-10
-3
-6
-3
-2
-5
-13
-2
-4
-4
-10
-4
-8
-4
-3
-7
-9
-9
-3
-10
-37
-3
-13
-4
-12
-3
-6
-10
-8
-5
-21
-2
-3
-8
-3
-2
-3
-3
-4
-12
-2
-4
-8
-8
-4
-3
-2
-20
-1
-6
-32
-2
-11
-6
-18
-3
-8
-11
-3
-212
-3
-4
-2
-6
-7
-12
-11
-3
-2
-16
-10
-6
-4
-6
-3
-2
-7
-3
-2
-2
-2
-2
-5
-6
-4
-3
-10
-3
-4
-6
-5
-3
-4
-4
-5
-6
-4
-3
-4
-4
-5
-7
-5
-5
-3
-2
-7
-2
-4
-12
-4
-5
-6
-2
-4
-4
-8
-4
-15
-13
-7
-16
-5
-3
-23
-5
-5
-7
-3
-2
-9
-8
-7
-5
-8
-11
-4
-10
-76
-4
-47
-4
-3
-2
-7
-4
-2
-3
-37
-10
-4
-2
-20
-5
-4
-4
-10
-10
-4
-3
-7
-23
-240
-7
-13
-5
-5
-3
-3
-2
-5
-4
-2
-8
-7
-19
-2
-23
-8
-7
-2
-5
-3
-8
-3
-8
-13
-5
-5
-5
-2
-3
-23
-4
-9
-8
-4
-3
-3
-5
-220
-2
-3
-4
-6
-14
-3
-53
-6
-2
-5
-18
-6
-3
-219
-6
-5
-2
-5
-3
-6
-5
-15
-4
-3
-17
-3
-2
-4
-7
-2
-3
-3
-4
-4
-3
-2
-664
-6
-3
-23
-5
-5
-16
-5
-8
-2
-4
-2
-24
-12
-3
-2
-3
-5
-8
-3
-5
-4
-3
-14
-3
-5
-8
-2
-3
-7
-9
-4
-2
-3
-6
-8
-4
-3
-4
-6
-5
-3
-3
-6
-3
-19
-4
-4
-6
-3
-6
-3
-5
-22
-5
-4
-4
-3
-8
-11
-4
-9
-7
-6
-13
-4
-4
-4
-6
-17
-9
-3
-3
-3
-4
-3
-221
-5
-11
-3
-4
-2
-12
-6
-3
-5
-7
-5
-7
-4
-9
-7
-14
-37
-19
-217
-16
-3
-5
-2
-2
-7
-19
-7
-6
-7
-4
-24
-5
-11
-4
-7
-7
-9
-13
-3
-4
-3
-6
-28
-4
-4
-5
-5
-2
-5
-6
-4
-4
-6
-10
-5
-4
-3
-2
-3
-3
-6
-5
-5
-4
-3
-2
-3
-7
-4
-6
-18
-16
-8
-16
-4
-5
-8
-6
-9
-13
-1545
-6
-215
-6
-5
-6
-3
-45
-31
-5
-2
-2
-4
-3
-3
-2
-5
-4
-3
-5
-7
-7
-4
-5
-8
-5
-4
-749
-2
-31
-9
-11
-2
-11
-5
-4
-4
-7
-9
-11
-4
-5
-4
-7
-3
-4
-6
-2
-15
-3
-4
-3
-4
-3
-5
-2
-13
-5
-5
-3
-3
-23
-4
-4
-5
-7
-4
-13
-2
-4
-3
-4
-2
-6
-2
-7
-3
-5
-5
-3
-29
-5
-4
-4
-3
-10
-2
-3
-79
-16
-6
-6
-7
-7
-3
-5
-5
-7
-4
-3
-7
-9
-5
-6
-5
-9
-6
-3
-6
-4
-17
-2
-10
-9
-3
-6
-2
-3
-21
-22
-5
-11
-4
-2
-17
-2
-224
-2
-14
-3
-4
-4
-2
-4
-4
-4
-4
-5
-3
-4
-4
-10
-2
-6
-3
-3
-5
-7
-2
-7
-5
-6
-3
-218
-2
-2
-5
-2
-6
-3
-5
-222
-14
-6
-33
-3
-2
-5
-3
-3
-3
-9
-5
-3
-3
-2
-7
-4
-3
-4
-3
-5
-6
-5
-26
-4
-13
-9
-7
-3
-221
-3
-3
-4
-4
-4
-4
-2
-18
-5
-3
-7
-9
-6
-8
-3
-10
-3
-11
-9
-5
-4
-17
-5
-5
-6
-6
-3
-2
-4
-12
-17
-6
-7
-218
-4
-2
-4
-10
-3
-5
-15
-3
-9
-4
-3
-3
-6
-29
-3
-3
-4
-5
-5
-3
-8
-5
-6
-6
-7
-5
-3
-5
-3
-29
-2
-31
-5
-15
-24
-16
-5
-207
-4
-3
-3
-2
-15
-4
-4
-13
-5
-5
-4
-6
-10
-2
-7
-8
-4
-6
-20
-5
-3
-4
-3
-12
-12
-5
-17
-7
-3
-3
-3
-6
-10
-3
-5
-25
-80
-4
-9
-3
-2
-11
-3
-3
-2
-3
-8
-7
-5
-5
-19
-5
-3
-3
-12
-11
-2
-6
-5
-5
-5
-3
-3
-3
-4
-209
-14
-3
-2
-5
-19
-4
-4
-3
-4
-14
-5
-6
-4
-13
-9
-7
-4
-7
-10
-2
-9
-5
-7
-2
-8
-4
-6
-5
-5
-222
-8
-7
-12
-5
-216
-3
-4
-4
-6
-3
-14
-8
-7
-13
-4
-3
-3
-3
-3
-17
-5
-4
-3
-33
-6
-6
-33
-7
-5
-3
-8
-7
-5
-2
-9
-4
-2
-233
-24
-7
-4
-8
-10
-3
-4
-15
-2
-16
-3
-3
-13
-12
-7
-5
-4
-207
-4
-2
-4
-27
-15
-2
-5
-2
-25
-6
-5
-5
-6
-13
-6
-18
-6
-4
-12
-225
-10
-7
-5
-2
-2
-11
-4
-14
-21
-8
-10
-3
-5
-4
-232
-2
-5
-5
-3
-7
-17
-11
-6
-6
-23
-4
-6
-3
-5
-4
-2
-17
-3
-6
-5
-8
-3
-2
-2
-14
-9
-4
-4
-2
-5
-5
-3
-7
-6
-12
-6
-10
-3
-6
-2
-2
-19
-5
-4
-4
-9
-2
-4
-13
-3
-5
-6
-3
-6
-5
-4
-9
-6
-3
-5
-7
-3
-6
-6
-4
-3
-10
-6
-3
-221
-3
-5
-3
-6
-4
-8
-5
-3
-6
-4
-4
-2
-54
-5
-6
-11
-3
-3
-4
-4
-4
-3
-7
-3
-11
-11
-7
-10
-6
-13
-223
-213
-15
-231
-7
-3
-7
-228
-2
-3
-4
-4
-5
-6
-7
-4
-13
-3
-4
-5
-3
-6
-4
-6
-7
-2
-4
-3
-4
-3
-3
-6
-3
-7
-3
-5
-18
-5
-6
-8
-10
-3
-3
-3
-2
-4
-2
-4
-4
-5
-6
-6
-4
-10
-13
-3
-12
-5
-12
-16
-8
-4
-19
-11
-2
-4
-5
-6
-8
-5
-6
-4
-18
-10
-4
-2
-216
-6
-6
-6
-2
-4
-12
-8
-3
-11
-5
-6
-14
-5
-3
-13
-4
-5
-4
-5
-3
-28
-6
-3
-7
-219
-3
-9
-7
-3
-10
-6
-3
-4
-19
-5
-7
-11
-6
-15
-19
-4
-13
-11
-3
-7
-5
-10
-2
-8
-11
-2
-6
-4
-6
-24
-6
-3
-3
-3
-3
-6
-18
-4
-11
-4
-2
-5
-10
-8
-3
-9
-5
-3
-4
-5
-6
-2
-5
-7
-4
-4
-14
-6
-4
-4
-5
-5
-7
-2
-4
-3
-7
-3
-3
-6
-4
-5
-4
-4
-4
-3
-3
-3
-3
-8
-14
-2
-3
-5
-3
-2
-4
-5
-3
-7
-3
-3
-18
-3
-4
-4
-5
-7
-3
-3
-3
-13
-5
-4
-8
-211
-5
-5
-3
-5
-2
-5
-4
-2
-655
-6
-3
-5
-11
-2
-5
-3
-12
-9
-15
-11
-5
-12
-217
-2
-6
-17
-3
-3
-207
-5
-5
-4
-5
-9
-3
-2
-8
-5
-4
-3
-2
-5
-12
-4
-14
-5
-4
-2
-13
-5
-8
-4
-225
-4
-3
-4
-5
-4
-3
-3
-6
-23
-9
-2
-6
-7
-233
-4
-4
-6
-18
-3
-4
-6
-3
-4
-4
-2
-3
-7
-4
-13
-227
-4
-3
-5
-4
-2
-12
-9
-17
-3
-7
-14
-6
-4
-5
-21
-4
-8
-9
-2
-9
-25
-16
-3
-6
-4
-7
-8
-5
-2
-3
-5
-4
-3
-3
-5
-3
-3
-3
-2
-3
-19
-2
-4
-3
-4
-2
-3
-4
-4
-2
-4
-3
-3
-3
-2
-6
-3
-17
-5
-6
-4
-3
-13
-5
-3
-3
-3
-4
-9
-4
-2
-14
-12
-4
-5
-24
-4
-3
-37
-12
-11
-21
-3
-4
-3
-13
-4
-2
-3
-15
-4
-11
-4
-4
-3
-8
-3
-4
-4
-12
-8
-5
-3
-3
-4
-2
-220
-3
-5
-223
-3
-3
-3
-10
-3
-15
-4
-241
-9
-7
-3
-6
-6
-23
-4
-13
-7
-3
-4
-7
-4
-9
-3
-3
-4
-10
-5
-5
-1
-5
-24
-2
-4
-5
-5
-6
-14
-3
-8
-2
-3
-5
-13
-13
-3
-5
-2
-3
-15
-3
-4
-2
-10
-4
-4
-4
-5
-5
-3
-5
-3
-4
-7
-4
-27
-3
-6
-4
-15
-3
-5
-6
-6
-5
-4
-8
-3
-9
-2
-6
-3
-4
-3
-7
-4
-18
-3
-11
-3
-3
-8
-9
-7
-24
-3
-219
-7
-10
-4
-5
-9
-12
-2
-5
-4
-4
-4
-3
-3
-19
-5
-8
-16
-8
-6
-22
-3
-23
-3
-242
-9
-4
-3
-3
-5
-7
-3
-3
-5
-8
-3
-7
-5
-14
-8
-10
-3
-4
-3
-7
-4
-6
-7
-4
-10
-4
-3
-11
-3
-7
-10
-3
-13
-6
-8
-12
-10
-5
-7
-9
-3
-4
-7
-7
-10
-8
-30
-9
-19
-4
-3
-19
-15
-4
-13
-3
-215
-223
-4
-7
-4
-8
-17
-16
-3
-7
-6
-5
-5
-4
-12
-3
-7
-4
-4
-13
-4
-5
-2
-5
-6
-5
-6
-6
-7
-10
-18
-23
-9
-3
-3
-6
-5
-2
-4
-2
-7
-3
-3
-2
-5
-5
-14
-10
-224
-6
-3
-4
-3
-7
-5
-9
-3
-6
-4
-2
-5
-11
-4
-3
-3
-2
-8
-4
-7
-4
-10
-7
-3
-3
-18
-18
-17
-3
-3
-3
-4
-5
-3
-3
-4
-12
-7
-3
-11
-13
-5
-4
-7
-13
-5
-4
-11
-3
-12
-3
-6
-4
-4
-21
-4
-6
-9
-5
-3
-10
-8
-4
-6
-4
-4
-6
-5
-4
-8
-6
-4
-6
-4
-4
-5
-9
-6
-3
-4
-2
-9
-3
-18
-2
-4
-3
-13
-3
-6
-6
-8
-7
-9
-3
-2
-16
-3
-4
-6
-3
-2
-33
-22
-14
-4
-9
-12
-4
-5
-6
-3
-23
-9
-4
-3
-5
-5
-3
-4
-5
-3
-5
-3
-10
-4
-5
-5
-8
-4
-4
-6
-8
-5
-4
-3
-4
-6
-3
-3
-3
-5
-9
-12
-6
-5
-9
-3
-5
-3
-2
-2
-2
-18
-3
-2
-21
-2
-5
-4
-6
-4
-5
-10
-3
-9
-3
-2
-10
-7
-3
-6
-6
-4
-4
-8
-12
-7
-3
-7
-3
-3
-9
-3
-4
-5
-4
-4
-5
-5
-10
-15
-4
-4
-14
-6
-227
-3
-14
-5
-216
-22
-5
-4
-2
-2
-6
-3
-4
-2
-9
-9
-4
-3
-28
-13
-11
-4
-5
-3
-3
-2
-3
-3
-5
-3
-4
-3
-5
-23
-26
-3
-4
-5
-6
-4
-6
-3
-5
-5
-3
-4
-3
-2
-2
-2
-7
-14
-3
-6
-7
-17
-2
-2
-15
-14
-16
-4
-6
-7
-13
-6
-4
-5
-6
-16
-3
-3
-28
-3
-6
-15
-3
-9
-2
-4
-6
-3
-3
-22
-4
-12
-6
-7
-2
-5
-4
-10
-3
-16
-6
-9
-2
-5
-12
-7
-5
-5
-5
-5
-2
-11
-9
-17
-4
-3
-11
-7
-3
-5
-15
-4
-3
-4
-211
-8
-7
-5
-4
-7
-6
-7
-6
-3
-6
-5
-6
-5
-3
-4
-4
-26
-4
-6
-10
-4
-4
-3
-2
-3
-3
-4
-5
-9
-3
-9
-4
-4
-5
-5
-8
-2
-4
-2
-3
-8
-4
-11
-19
-5
-8
-6
-3
-5
-6
-12
-3
-2
-4
-16
-12
-3
-4
-4
-8
-6
-5
-6
-6
-219
-8
-222
-6
-16
-3
-13
-19
-5
-4
-3
-11
-6
-10
-4
-7
-7
-12
-5
-3
-3
-5
-6
-10
-3
-8
-2
-5
-4
-7
-2
-4
-4
-2
-12
-9
-6
-4
-2
-40
-2
-4
-10
-4
-223
-4
-2
-20
-6
-7
-24
-5
-4
-5
-2
-20
-16
-6
-5
-13
-2
-3
-3
-19
-3
-2
-4
-5
-6
-7
-11
-12
-5
-6
-7
-7
-3
-5
-3
-5
-3
-14
-3
-4
-4
-2
-11
-1
-7
-3
-9
-6
-11
-12
-5
-8
-6
-221
-4
-2
-12
-4
-3
-15
-4
-5
-226
-7
-218
-7
-5
-4
-5
-18
-4
-5
-9
-4
-4
-2
-9
-18
-18
-9
-5
-6
-6
-3
-3
-7
-3
-5
-4
-4
-4
-12
-3
-6
-31
-5
-4
-7
-3
-6
-5
-6
-5
-11
-2
-2
-11
-11
-6
-7
-5
-8
-7
-10
-5
-23
-7
-4
-3
-5
-34
-2
-5
-23
-7
-3
-6
-8
-4
-4
-4
-2
-5
-3
-8
-5
-4
-8
-25
-2
-3
-17
-8
-3
-4
-8
-7
-3
-15
-6
-5
-7
-21
-9
-5
-6
-6
-5
-3
-2
-3
-10
-3
-6
-3
-14
-7
-4
-4
-8
-7
-8
-2
-6
-12
-4
-213
-6
-5
-21
-8
-2
-5
-23
-3
-11
-2
-3
-6
-25
-2
-3
-6
-7
-6
-6
-4
-4
-6
-3
-17
-9
-7
-6
-4
-3
-10
-7
-2
-3
-3
-3
-11
-8
-3
-7
-6
-4
-14
-36
-3
-4
-3
-3
-22
-13
-21
-4
-2
-7
-4
-4
-17
-15
-3
-7
-11
-2
-4
-7
-6
-209
-6
-3
-2
-2
-24
-4
-9
-4
-3
-3
-3
-29
-2
-2
-4
-3
-3
-5
-4
-6
-3
-3
-2
-4

+ 0 - 316
vendor/github.com/beorn7/perks/quantile/stream.go

@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
-	"math"
-	"sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
-	Value float64 `json:",string"`
-	Width float64 `json:",string"`
-	Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int           { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * r
-	}
-	return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * (s.n - r)
-	}
-	return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targetMap map[float64]float64) *Stream {
-	// Convert map to slice to avoid slow iterations on a map.
-	// ƒ is called on the hot path, so converting the map to a slice
-	// beforehand results in significant CPU savings.
-	targets := targetMapToSlice(targetMap)
-
-	ƒ := func(s *stream, r float64) float64 {
-		var m = math.MaxFloat64
-		var f float64
-		for _, t := range targets {
-			if t.quantile*s.n <= r {
-				f = (2 * t.epsilon * r) / t.quantile
-			} else {
-				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
-			}
-			if f < m {
-				m = f
-			}
-		}
-		return m
-	}
-	return newStream(ƒ)
-}
-
-type target struct {
-	quantile float64
-	epsilon  float64
-}
-
-func targetMapToSlice(targetMap map[float64]float64) []target {
-	targets := make([]target, 0, len(targetMap))
-
-	for quantile, epsilon := range targetMap {
-		t := target{
-			quantile: quantile,
-			epsilon:  epsilon,
-		}
-		targets = append(targets, t)
-	}
-
-	return targets
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
-	*stream
-	b      Samples
-	sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
-	x := &stream{ƒ: ƒ}
-	return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
-	s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
-	s.b = append(s.b, sample)
-	s.sorted = false
-	if len(s.b) == cap(s.b) {
-		s.flush()
-	}
-}
-
-// Query returns the computed qth percentiles value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
-	if !s.flushed() {
-		// Fast path when there hasn't been enough data for a flush;
-		// this also yields better accuracy for small sets of data.
-		l := len(s.b)
-		if l == 0 {
-			return 0
-		}
-		i := int(math.Ceil(float64(l) * q))
-		if i > 0 {
-			i -= 1
-		}
-		s.maybeSort()
-		return s.b[i].Value
-	}
-	s.flush()
-	return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
-	sort.Sort(samples)
-	s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
-	s.stream.reset()
-	s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
-	if !s.flushed() {
-		return s.b
-	}
-	s.flush()
-	return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
-	return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
-	s.maybeSort()
-	s.stream.merge(s.b)
-	s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
-	if !s.sorted {
-		s.sorted = true
-		sort.Sort(s.b)
-	}
-}
-
-func (s *Stream) flushed() bool {
-	return len(s.stream.l) > 0
-}
-
-type stream struct {
-	n float64
-	l []Sample
-	ƒ invariant
-}
-
-func (s *stream) reset() {
-	s.l = s.l[:0]
-	s.n = 0
-}
-
-func (s *stream) insert(v float64) {
-	s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
-	// TODO(beorn7): This tries to merge not only individual samples, but
-	// whole summaries. The paper doesn't mention merging summaries at
-	// all. Unittests show that the merging is inaccurate. Find out how to
-	// do merges properly.
-	var r float64
-	i := 0
-	for _, sample := range samples {
-		for ; i < len(s.l); i++ {
-			c := s.l[i]
-			if c.Value > sample.Value {
-				// Insert at position i.
-				s.l = append(s.l, Sample{})
-				copy(s.l[i+1:], s.l[i:])
-				s.l[i] = Sample{
-					sample.Value,
-					sample.Width,
-					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
-					// TODO(beorn7): How to calculate delta correctly?
-				}
-				i++
-				goto inserted
-			}
-			r += c.Width
-		}
-		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
-		i++
-	inserted:
-		s.n += sample.Width
-		r += sample.Width
-	}
-	s.compress()
-}
-
-func (s *stream) count() int {
-	return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
-	t := math.Ceil(q * s.n)
-	t += math.Ceil(s.ƒ(s, t) / 2)
-	p := s.l[0]
-	var r float64
-	for _, c := range s.l[1:] {
-		r += p.Width
-		if r+c.Width+c.Delta > t {
-			return p.Value
-		}
-		p = c
-	}
-	return p.Value
-}
-
-func (s *stream) compress() {
-	if len(s.l) < 2 {
-		return
-	}
-	x := s.l[len(s.l)-1]
-	xi := len(s.l) - 1
-	r := s.n - 1 - x.Width
-
-	for i := len(s.l) - 2; i >= 0; i-- {
-		c := s.l[i]
-		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
-			x.Width += c.Width
-			s.l[xi] = x
-			// Remove element at i.
-			copy(s.l[i:], s.l[i+1:])
-			s.l = s.l[:len(s.l)-1]
-			xi -= 1
-		} else {
-			x = c
-			xi = i
-		}
-		r -= c.Width
-	}
-}
-
-func (s *stream) samples() Samples {
-	samples := make(Samples, len(s.l))
-	copy(samples, s.l)
-	return samples
-}

+ 0 - 8
vendor/github.com/cespare/xxhash/v2/.travis.yml

@@ -1,8 +0,0 @@
-language: go
-go:
-  - "1.x"
-  - master
-env:
-  - TAGS=""
-  - TAGS="-tags purego"
-script: go test $TAGS -v ./...

+ 0 - 22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt

@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 67
vendor/github.com/cespare/xxhash/v2/README.md

@@ -1,67 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
-    func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
-| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [FreeCache](https://github.com/coocood/freecache)

+ 0 - 3
vendor/github.com/cespare/xxhash/v2/go.mod

@@ -1,3 +0,0 @@
-module github.com/cespare/xxhash/v2
-
-go 1.11

+ 0 - 0
vendor/github.com/cespare/xxhash/v2/go.sum


+ 0 - 236
vendor/github.com/cespare/xxhash/v2/xxhash.go

@@ -1,236 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
-	"encoding/binary"
-	"errors"
-	"math/bits"
-)
-
-const (
-	prime1 uint64 = 11400714785074694791
-	prime2 uint64 = 14029467366897019727
-	prime3 uint64 = 1609587929392839161
-	prime4 uint64 = 9650029242287828579
-	prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
-	v1    uint64
-	v2    uint64
-	v3    uint64
-	v4    uint64
-	total uint64
-	mem   [32]byte
-	n     int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
-	var d Digest
-	d.Reset()
-	return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
-	d.v2 = prime2
-	d.v3 = 0
-	d.v4 = -prime1v
-	d.total = 0
-	d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
-	n = len(b)
-	d.total += uint64(n)
-
-	if d.n+n < 32 {
-		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
-		d.n += n
-		return
-	}
-
-	if d.n > 0 {
-		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
-		d.v1 = round(d.v1, u64(d.mem[0:8]))
-		d.v2 = round(d.v2, u64(d.mem[8:16]))
-		d.v3 = round(d.v3, u64(d.mem[16:24]))
-		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
-		d.n = 0
-	}
-
-	if len(b) >= 32 {
-		// One or more full blocks left.
-		nw := writeBlocks(d, b)
-		b = b[nw:]
-	}
-
-	// Store any remaining partial block.
-	copy(d.mem[:], b)
-	d.n = len(b)
-
-	return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
-	s := d.Sum64()
-	return append(
-		b,
-		byte(s>>56),
-		byte(s>>48),
-		byte(s>>40),
-		byte(s>>32),
-		byte(s>>24),
-		byte(s>>16),
-		byte(s>>8),
-		byte(s),
-	)
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
-	var h uint64
-
-	if d.total >= 32 {
-		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
-		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-		h = mergeRound(h, v1)
-		h = mergeRound(h, v2)
-		h = mergeRound(h, v3)
-		h = mergeRound(h, v4)
-	} else {
-		h = d.v3 + prime5
-	}
-
-	h += d.total
-
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
-		h ^= k1
-		h = rol27(h)*prime1 + prime4
-	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
-		h = rol23(h)*prime2 + prime3
-		i += 4
-	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
-		h = rol11(h) * prime1
-		i++
-	}
-
-	h ^= h >> 33
-	h *= prime2
-	h ^= h >> 29
-	h *= prime3
-	h ^= h >> 32
-
-	return h
-}
-
-const (
-	magic         = "xxh\x06"
-	marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
-	b := make([]byte, 0, marshaledSize)
-	b = append(b, magic...)
-	b = appendUint64(b, d.v1)
-	b = appendUint64(b, d.v2)
-	b = appendUint64(b, d.v3)
-	b = appendUint64(b, d.v4)
-	b = appendUint64(b, d.total)
-	b = append(b, d.mem[:d.n]...)
-	b = b[:len(b)+len(d.mem)-d.n]
-	return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
-	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
-		return errors.New("xxhash: invalid hash state identifier")
-	}
-	if len(b) != marshaledSize {
-		return errors.New("xxhash: invalid hash state size")
-	}
-	b = b[len(magic):]
-	b, d.v1 = consumeUint64(b)
-	b, d.v2 = consumeUint64(b)
-	b, d.v3 = consumeUint64(b)
-	b, d.v4 = consumeUint64(b)
-	b, d.total = consumeUint64(b)
-	copy(d.mem[:], b)
-	b = b[len(d.mem):]
-	d.n = int(d.total % uint64(len(d.mem)))
-	return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
-	var a [8]byte
-	binary.LittleEndian.PutUint64(a[:], x)
-	return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
-	x := u64(b)
-	return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
-	acc += input * prime2
-	acc = rol31(acc)
-	acc *= prime1
-	return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
-	val = round(0, val)
-	acc ^= val
-	acc = acc*prime1 + prime4
-	return acc
-}
-
-func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

+ 0 - 13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go

@@ -1,13 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(d *Digest, b []byte) int

+ 0 - 215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s

@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX	h
-// CX	pointer to advance through b
-// DX	n
-// BX	loop end
-// R8	v1, k1
-// R9	v2
-// R10	v3
-// R11	v4
-// R12	tmp
-// R13	prime1v
-// R14	prime2v
-// R15	prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
-	MOVQ  (CX), R12 \
-	ADDQ  $8, CX    \
-	IMULQ R14, R12  \
-	ADDQ  R12, r    \
-	ROLQ  $31, r    \
-	IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
-	IMULQ R14, val \
-	ROLQ  $31, val \
-	IMULQ R13, val \
-	XORQ  val, acc \
-	IMULQ R13, acc \
-	ADDQ  R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
-	// Load fixed primes.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), R15
-
-	// Load slice.
-	MOVQ b_base+0(FP), CX
-	MOVQ b_len+8(FP), DX
-	LEAQ (CX)(DX*1), BX
-
-	// The first loop limit will be len(b)-32.
-	SUBQ $32, BX
-
-	// Check whether we have at least one block.
-	CMPQ DX, $32
-	JLT  noBlocks
-
-	// Set up initial state (v1, v2, v3, v4).
-	MOVQ R13, R8
-	ADDQ R14, R8
-	MOVQ R14, R9
-	XORQ R10, R10
-	XORQ R11, R11
-	SUBQ R13, R11
-
-	// Loop until CX > BX.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ CX, BX
-	JLE  blockLoop
-
-	MOVQ R8, AX
-	ROLQ $1, AX
-	MOVQ R9, R12
-	ROLQ $7, R12
-	ADDQ R12, AX
-	MOVQ R10, R12
-	ROLQ $12, R12
-	ADDQ R12, AX
-	MOVQ R11, R12
-	ROLQ $18, R12
-	ADDQ R12, AX
-
-	mergeRound(AX, R8)
-	mergeRound(AX, R9)
-	mergeRound(AX, R10)
-	mergeRound(AX, R11)
-
-	JMP afterBlocks
-
-noBlocks:
-	MOVQ ·prime5v(SB), AX
-
-afterBlocks:
-	ADDQ DX, AX
-
-	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
-	ADDQ $24, BX
-
-	CMPQ CX, BX
-	JG   fourByte
-
-wordLoop:
-	// Calculate k1.
-	MOVQ  (CX), R8
-	ADDQ  $8, CX
-	IMULQ R14, R8
-	ROLQ  $31, R8
-	IMULQ R13, R8
-
-	XORQ  R8, AX
-	ROLQ  $27, AX
-	IMULQ R13, AX
-	ADDQ  R15, AX
-
-	CMPQ CX, BX
-	JLE  wordLoop
-
-fourByte:
-	ADDQ $4, BX
-	CMPQ CX, BX
-	JG   singles
-
-	MOVL  (CX), R8
-	ADDQ  $4, CX
-	IMULQ R13, R8
-	XORQ  R8, AX
-
-	ROLQ  $23, AX
-	IMULQ R14, AX
-	ADDQ  ·prime3v(SB), AX
-
-singles:
-	ADDQ $4, BX
-	CMPQ CX, BX
-	JGE  finalize
-
-singlesLoop:
-	MOVBQZX (CX), R12
-	ADDQ    $1, CX
-	IMULQ   ·prime5v(SB), R12
-	XORQ    R12, AX
-
-	ROLQ  $11, AX
-	IMULQ R13, AX
-
-	CMPQ CX, BX
-	JL   singlesLoop
-
-finalize:
-	MOVQ  AX, R12
-	SHRQ  $33, R12
-	XORQ  R12, AX
-	IMULQ R14, AX
-	MOVQ  AX, R12
-	SHRQ  $29, R12
-	XORQ  R12, AX
-	IMULQ ·prime3v(SB), AX
-	MOVQ  AX, R12
-	SHRQ  $32, R12
-	XORQ  R12, AX
-
-	MOVQ AX, ret+24(FP)
-	RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
-	// Load fixed primes needed for round.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-
-	// Load slice.
-	MOVQ b_base+8(FP), CX
-	MOVQ b_len+16(FP), DX
-	LEAQ (CX)(DX*1), BX
-	SUBQ $32, BX
-
-	// Load vN from d.
-	MOVQ d+0(FP), AX
-	MOVQ 0(AX), R8   // v1
-	MOVQ 8(AX), R9   // v2
-	MOVQ 16(AX), R10 // v3
-	MOVQ 24(AX), R11 // v4
-
-	// We don't need to check the loop condition here; this function is
-	// always called with at least one block of data to process.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ CX, BX
-	JLE  blockLoop
-
-	// Copy vN back to d.
-	MOVQ R8, 0(AX)
-	MOVQ R9, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R11, 24(AX)
-
-	// The number of bytes written is CX minus the old base pointer.
-	SUBQ b_base+8(FP), CX
-	MOVQ CX, ret+32(FP)
-
-	RET

+ 0 - 76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go

@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
-	// A simpler version would be
-	//   d := New()
-	//   d.Write(b)
-	//   return d.Sum64()
-	// but this is faster, particularly for small inputs.
-
-	n := len(b)
-	var h uint64
-
-	if n >= 32 {
-		v1 := prime1v + prime2
-		v2 := prime2
-		v3 := uint64(0)
-		v4 := -prime1v
-		for len(b) >= 32 {
-			v1 = round(v1, u64(b[0:8:len(b)]))
-			v2 = round(v2, u64(b[8:16:len(b)]))
-			v3 = round(v3, u64(b[16:24:len(b)]))
-			v4 = round(v4, u64(b[24:32:len(b)]))
-			b = b[32:len(b):len(b)]
-		}
-		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-		h = mergeRound(h, v1)
-		h = mergeRound(h, v2)
-		h = mergeRound(h, v3)
-		h = mergeRound(h, v4)
-	} else {
-		h = prime5
-	}
-
-	h += uint64(n)
-
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
-		h ^= k1
-		h = rol27(h)*prime1 + prime4
-	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
-		h = rol23(h)*prime2 + prime3
-		i += 4
-	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
-		h = rol11(h) * prime1
-	}
-
-	h ^= h >> 33
-	h *= prime2
-	h ^= h >> 29
-	h *= prime3
-	h ^= h >> 32
-
-	return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
-	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
-	n := len(b)
-	for len(b) >= 32 {
-		v1 = round(v1, u64(b[0:8:len(b)]))
-		v2 = round(v2, u64(b[8:16:len(b)]))
-		v3 = round(v3, u64(b[16:24:len(b)]))
-		v4 = round(v4, u64(b[24:32:len(b)]))
-		b = b[32:len(b):len(b)]
-	}
-	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
-	return n - len(b)
-}

+ 0 - 15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go

@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
-	return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
-	return d.Write([]byte(s))
-}

+ 0 - 46
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go

@@ -1,46 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
-//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
-}

+ 0 - 15
vendor/github.com/davecgh/go-spew/LICENSE

@@ -1,15 +0,0 @@
-ISC License
-
-Copyright (c) 2012-2016 Dave Collins <[email protected]>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 0 - 145
vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -1,145 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <[email protected]>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line.  The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// Go versions prior to 1.4 are disabled because they use a different layout
-// for interfaces which make the implementation of unsafeReflectValue more complex.
-// +build !js,!appengine,!safe,!disableunsafe,go1.4
-
-package spew
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-const (
-	// UnsafeDisabled is a build-time constant which specifies whether or
-	// not access to the unsafe package is available.
-	UnsafeDisabled = false
-
-	// ptrSize is the size of a pointer on the current arch.
-	ptrSize = unsafe.Sizeof((*byte)(nil))
-)
-
-type flag uintptr
-
-var (
-	// flagRO indicates whether the value field of a reflect.Value
-	// is read-only.
-	flagRO flag
-
-	// flagAddr indicates whether the address of the reflect.Value's
-	// value may be taken.
-	flagAddr flag
-)
-
-// flagKindMask holds the bits that make up the kind
-// part of the flags field. In all the supported versions,
-// it is in the lower 5 bits.
-const flagKindMask = flag(0x1f)
-
-// Different versions of Go have used different
-// bit layouts for the flags type. This table
-// records the known combinations.
-var okFlags = []struct {
-	ro, addr flag
-}{{
-	// From Go 1.4 to 1.5
-	ro:   1 << 5,
-	addr: 1 << 7,
-}, {
-	// Up to Go tip.
-	ro:   1<<5 | 1<<6,
-	addr: 1 << 8,
-}}
-
-var flagValOffset = func() uintptr {
-	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
-	if !ok {
-		panic("reflect.Value has no flag field")
-	}
-	return field.Offset
-}()
-
-// flagField returns a pointer to the flag field of a reflect.Value.
-func flagField(v *reflect.Value) *flag {
-	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
-}
-
-// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
-// the typical safety restrictions preventing access to unaddressable and
-// unexported data.  It works by digging the raw pointer to the underlying
-// value out of the protected value and generating a new unprotected (unsafe)
-// reflect.Value to it.
-//
-// This allows us to check for implementations of the Stringer and error
-// interfaces to be used for pretty printing ordinarily unaddressable and
-// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
-	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
-		return v
-	}
-	flagFieldPtr := flagField(&v)
-	*flagFieldPtr &^= flagRO
-	*flagFieldPtr |= flagAddr
-	return v
-}
-
-// Sanity checks against future reflect package changes
-// to the type or semantics of the Value.flag field.
-func init() {
-	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
-	if !ok {
-		panic("reflect.Value has no flag field")
-	}
-	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
-		panic("reflect.Value flag field has changed kind")
-	}
-	type t0 int
-	var t struct {
-		A t0
-		// t0 will have flagEmbedRO set.
-		t0
-		// a will have flagStickyRO set
-		a t0
-	}
-	vA := reflect.ValueOf(t).FieldByName("A")
-	va := reflect.ValueOf(t).FieldByName("a")
-	vt0 := reflect.ValueOf(t).FieldByName("t0")
-
-	// Infer flagRO from the difference between the flags
-	// for the (otherwise identical) fields in t.
-	flagPublic := *flagField(&vA)
-	flagWithRO := *flagField(&va) | *flagField(&vt0)
-	flagRO = flagPublic ^ flagWithRO
-
-	// Infer flagAddr from the difference between a value
-	// taken from a pointer and not.
-	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
-	flagNoPtr := *flagField(&vA)
-	flagPtr := *flagField(&vPtrA)
-	flagAddr = flagNoPtr ^ flagPtr
-
-	// Check that the inferred flags tally with one of the known versions.
-	for _, f := range okFlags {
-		if flagRO == f.ro && flagAddr == f.addr {
-			return
-		}
-	}
-	panic("reflect.Value read-only flag has changed semantics")
-}

+ 0 - 38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -1,38 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <[email protected]>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is running on Google App Engine, compiled by GopherJS, or
-// "-tags safe" is added to the go build command line.  The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe !go1.4
-
-package spew
-
-import "reflect"
-
-const (
-	// UnsafeDisabled is a build-time constant which specifies whether or
-	// not access to the unsafe package is available.
-	UnsafeDisabled = true
-)
-
-// unsafeReflectValue typically converts the passed reflect.Value into a one
-// that bypasses the typical safety restrictions preventing access to
-// unaddressable and unexported data.  However, doing this relies on access to
-// the unsafe package.  This is a stub version which simply returns the passed
-// reflect.Value when the unsafe package is not available.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
-	return v
-}

+ 0 - 341
vendor/github.com/davecgh/go-spew/spew/common.go

@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-	"strconv"
-)
-
-// Some constants in the form of bytes to avoid string overhead.  This mirrors
-// the technique used in the fmt package.
-var (
-	panicBytes            = []byte("(PANIC=")
-	plusBytes             = []byte("+")
-	iBytes                = []byte("i")
-	trueBytes             = []byte("true")
-	falseBytes            = []byte("false")
-	interfaceBytes        = []byte("(interface {})")
-	commaNewlineBytes     = []byte(",\n")
-	newlineBytes          = []byte("\n")
-	openBraceBytes        = []byte("{")
-	openBraceNewlineBytes = []byte("{\n")
-	closeBraceBytes       = []byte("}")
-	asteriskBytes         = []byte("*")
-	colonBytes            = []byte(":")
-	colonSpaceBytes       = []byte(": ")
-	openParenBytes        = []byte("(")
-	closeParenBytes       = []byte(")")
-	spaceBytes            = []byte(" ")
-	pointerChainBytes     = []byte("->")
-	nilAngleBytes         = []byte("<nil>")
-	maxNewlineBytes       = []byte("<max depth reached>\n")
-	maxShortBytes         = []byte("<max>")
-	circularBytes         = []byte("<already shown>")
-	circularShortBytes    = []byte("<shown>")
-	invalidAngleBytes     = []byte("<invalid>")
-	openBracketBytes      = []byte("[")
-	closeBracketBytes     = []byte("]")
-	percentBytes          = []byte("%")
-	precisionBytes        = []byte(".")
-	openAngleBytes        = []byte("<")
-	closeAngleBytes       = []byte(">")
-	openMapBytes          = []byte("map[")
-	closeMapBytes         = []byte("]")
-	lenEqualsBytes        = []byte("len=")
-	capEqualsBytes        = []byte("cap=")
-)
-
-// hexDigits is used to map a decimal value to a hex digit.
-var hexDigits = "0123456789abcdef"
-
-// catchPanic handles any panics that might occur during the handleMethods
-// calls.
-func catchPanic(w io.Writer, v reflect.Value) {
-	if err := recover(); err != nil {
-		w.Write(panicBytes)
-		fmt.Fprintf(w, "%v", err)
-		w.Write(closeParenBytes)
-	}
-}
-
-// handleMethods attempts to call the Error and String methods on the underlying
-// type the passed reflect.Value represents and outputes the result to Writer w.
-//
-// It handles panics in any called methods by catching and displaying the error
-// as the formatted value.
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
-	// We need an interface to check if the type implements the error or
-	// Stringer interface.  However, the reflect package won't give us an
-	// interface on certain things like unexported struct fields in order
-	// to enforce visibility rules.  We use unsafe, when it's available,
-	// to bypass these restrictions since this package does not mutate the
-	// values.
-	if !v.CanInterface() {
-		if UnsafeDisabled {
-			return false
-		}
-
-		v = unsafeReflectValue(v)
-	}
-
-	// Choose whether or not to do error and Stringer interface lookups against
-	// the base type or a pointer to the base type depending on settings.
-	// Technically calling one of these methods with a pointer receiver can
-	// mutate the value, however, types which choose to satisify an error or
-	// Stringer interface with a pointer receiver should not be mutating their
-	// state inside these interface methods.
-	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
-		v = unsafeReflectValue(v)
-	}
-	if v.CanAddr() {
-		v = v.Addr()
-	}
-
-	// Is it an error or Stringer?
-	switch iface := v.Interface().(type) {
-	case error:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.Error()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-
-		w.Write([]byte(iface.Error()))
-		return true
-
-	case fmt.Stringer:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.String()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-		w.Write([]byte(iface.String()))
-		return true
-	}
-	return false
-}
-
-// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) {
-	if val {
-		w.Write(trueBytes)
-	} else {
-		w.Write(falseBytes)
-	}
-}
-
-// printInt outputs a signed integer value to Writer w.
-func printInt(w io.Writer, val int64, base int) {
-	w.Write([]byte(strconv.FormatInt(val, base)))
-}
-
-// printUint outputs an unsigned integer value to Writer w.
-func printUint(w io.Writer, val uint64, base int) {
-	w.Write([]byte(strconv.FormatUint(val, base)))
-}
-
-// printFloat outputs a floating point value using the specified precision,
-// which is expected to be 32 or 64bit, to Writer w.
-func printFloat(w io.Writer, val float64, precision int) {
-	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
-}
-
-// printComplex outputs a complex value using the specified float precision
-// for the real and imaginary parts to Writer w.
-func printComplex(w io.Writer, c complex128, floatPrecision int) {
-	r := real(c)
-	w.Write(openParenBytes)
-	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
-	i := imag(c)
-	if i >= 0 {
-		w.Write(plusBytes)
-	}
-	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
-	w.Write(iBytes)
-	w.Write(closeParenBytes)
-}
-
-// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
-// prefix to Writer w.
-func printHexPtr(w io.Writer, p uintptr) {
-	// Null pointer.
-	num := uint64(p)
-	if num == 0 {
-		w.Write(nilAngleBytes)
-		return
-	}
-
-	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
-	buf := make([]byte, 18)
-
-	// It's simpler to construct the hex string right to left.
-	base := uint64(16)
-	i := len(buf) - 1
-	for num >= base {
-		buf[i] = hexDigits[num%base]
-		num /= base
-		i--
-	}
-	buf[i] = hexDigits[num]
-
-	// Add '0x' prefix.
-	i--
-	buf[i] = 'x'
-	i--
-	buf[i] = '0'
-
-	// Strip unused leading bytes.
-	buf = buf[i:]
-	w.Write(buf)
-}
-
-// valuesSorter implements sort.Interface to allow a slice of reflect.Value
-// elements to be sorted.
-type valuesSorter struct {
-	values  []reflect.Value
-	strings []string // either nil or same len and values
-	cs      *ConfigState
-}
-
-// newValuesSorter initializes a valuesSorter instance, which holds a set of
-// surrogate keys on which the data should be sorted.  It uses flags in
-// ConfigState to decide if and how to populate those surrogate keys.
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
-	vs := &valuesSorter{values: values, cs: cs}
-	if canSortSimply(vs.values[0].Kind()) {
-		return vs
-	}
-	if !cs.DisableMethods {
-		vs.strings = make([]string, len(values))
-		for i := range vs.values {
-			b := bytes.Buffer{}
-			if !handleMethods(cs, &b, vs.values[i]) {
-				vs.strings = nil
-				break
-			}
-			vs.strings[i] = b.String()
-		}
-	}
-	if vs.strings == nil && cs.SpewKeys {
-		vs.strings = make([]string, len(values))
-		for i := range vs.values {
-			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
-		}
-	}
-	return vs
-}
-
-// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
-// directly, or whether it should be considered for sorting by surrogate keys
-// (if the ConfigState allows it).
-func canSortSimply(kind reflect.Kind) bool {
-	// This switch parallels valueSortLess, except for the default case.
-	switch kind {
-	case reflect.Bool:
-		return true
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		return true
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		return true
-	case reflect.Float32, reflect.Float64:
-		return true
-	case reflect.String:
-		return true
-	case reflect.Uintptr:
-		return true
-	case reflect.Array:
-		return true
-	}
-	return false
-}
-
-// Len returns the number of values in the slice.  It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Len() int {
-	return len(s.values)
-}
-
-// Swap swaps the values at the passed indices.  It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Swap(i, j int) {
-	s.values[i], s.values[j] = s.values[j], s.values[i]
-	if s.strings != nil {
-		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
-	}
-}
-
-// valueSortLess returns whether the first value should sort before the second
-// value.  It is used by valueSorter.Less as part of the sort.Interface
-// implementation.
-func valueSortLess(a, b reflect.Value) bool {
-	switch a.Kind() {
-	case reflect.Bool:
-		return !a.Bool() && b.Bool()
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		return a.Int() < b.Int()
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		return a.Uint() < b.Uint()
-	case reflect.Float32, reflect.Float64:
-		return a.Float() < b.Float()
-	case reflect.String:
-		return a.String() < b.String()
-	case reflect.Uintptr:
-		return a.Uint() < b.Uint()
-	case reflect.Array:
-		// Compare the contents of both arrays.
-		l := a.Len()
-		for i := 0; i < l; i++ {
-			av := a.Index(i)
-			bv := b.Index(i)
-			if av.Interface() == bv.Interface() {
-				continue
-			}
-			return valueSortLess(av, bv)
-		}
-	}
-	return a.String() < b.String()
-}
-
-// Less returns whether the value at index i should sort before the
-// value at index j.  It is part of the sort.Interface implementation.
-func (s *valuesSorter) Less(i, j int) bool {
-	if s.strings == nil {
-		return valueSortLess(s.values[i], s.values[j])
-	}
-	return s.strings[i] < s.strings[j]
-}
-
-// sortValues is a sort function that handles both native types and any type that
-// can be converted to error or Stringer.  Other inputs are sorted according to
-// their Value.String() value to ensure display stability.
-func sortValues(values []reflect.Value, cs *ConfigState) {
-	if len(values) == 0 {
-		return
-	}
-	sort.Sort(newValuesSorter(values, cs))
-}

+ 0 - 306
vendor/github.com/davecgh/go-spew/spew/config.go

@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-)
-
-// ConfigState houses the configuration options used by spew to format and
-// display values.  There is a global instance, Config, that is used to control
-// all top-level Formatter and Dump functionality.  Each ConfigState instance
-// provides methods equivalent to the top-level functions.
-//
-// The zero value for ConfigState provides no indentation.  You would typically
-// want to set it to a space or a tab.
-//
-// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
-// with default settings.  See the documentation of NewDefaultConfig for default
-// values.
-type ConfigState struct {
-	// Indent specifies the string to use for each indentation level.  The
-	// global config instance that all top-level functions use set this to a
-	// single space by default.  If you would like more indentation, you might
-	// set this to a tab with "\t" or perhaps two spaces with "  ".
-	Indent string
-
-	// MaxDepth controls the maximum number of levels to descend into nested
-	// data structures.  The default, 0, means there is no limit.
-	//
-	// NOTE: Circular data structures are properly detected, so it is not
-	// necessary to set this value unless you specifically want to limit deeply
-	// nested data structures.
-	MaxDepth int
-
-	// DisableMethods specifies whether or not error and Stringer interfaces are
-	// invoked for types that implement them.
-	DisableMethods bool
-
-	// DisablePointerMethods specifies whether or not to check for and invoke
-	// error and Stringer interfaces on types which only accept a pointer
-	// receiver when the current type is not a pointer.
-	//
-	// NOTE: This might be an unsafe action since calling one of these methods
-	// with a pointer receiver could technically mutate the value, however,
-	// in practice, types which choose to satisify an error or Stringer
-	// interface with a pointer receiver should not be mutating their state
-	// inside these interface methods.  As a result, this option relies on
-	// access to the unsafe package, so it will not have any effect when
-	// running in environments without access to the unsafe package such as
-	// Google App Engine or with the "safe" build tag specified.
-	DisablePointerMethods bool
-
-	// DisablePointerAddresses specifies whether to disable the printing of
-	// pointer addresses. This is useful when diffing data structures in tests.
-	DisablePointerAddresses bool
-
-	// DisableCapacities specifies whether to disable the printing of capacities
-	// for arrays, slices, maps and channels. This is useful when diffing
-	// data structures in tests.
-	DisableCapacities bool
-
-	// ContinueOnMethod specifies whether or not recursion should continue once
-	// a custom error or Stringer interface is invoked.  The default, false,
-	// means it will print the results of invoking the custom error or Stringer
-	// interface and return immediately instead of continuing to recurse into
-	// the internals of the data type.
-	//
-	// NOTE: This flag does not have any effect if method invocation is disabled
-	// via the DisableMethods or DisablePointerMethods options.
-	ContinueOnMethod bool
-
-	// SortKeys specifies map keys should be sorted before being printed. Use
-	// this to have a more deterministic, diffable output.  Note that only
-	// native types (bool, int, uint, floats, uintptr and string) and types
-	// that support the error or Stringer interfaces (if methods are
-	// enabled) are supported, with other types sorted according to the
-	// reflect.Value.String() output which guarantees display stability.
-	SortKeys bool
-
-	// SpewKeys specifies that, as a last resort attempt, map keys should
-	// be spewed to strings and sorted by those strings.  This is only
-	// considered if SortKeys is true.
-	SpewKeys bool
-}
-
-// Config is the active configuration of the top-level functions.
-// The configuration can be changed by modifying the contents of spew.Config.
-var Config = ConfigState{Indent: " "}
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the formatted string as a value that satisfies error.  See NewFormatter
-// for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
-	return fmt.Errorf(format, c.convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprint(w, c.convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
-	return fmt.Fprintf(w, format, c.convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a Formatter interface returned by c.NewFormatter.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprintln(w, c.convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
-	return fmt.Print(c.convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
-	return fmt.Printf(format, c.convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
-	return fmt.Println(c.convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprint(a ...interface{}) string {
-	return fmt.Sprint(c.convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, c.convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a Formatter interface returned by c.NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintln(a ...interface{}) string {
-	return fmt.Sprintln(c.convertArgs(a)...)
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface.  As a result, it integrates cleanly with standard fmt package
-printing functions.  The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly.  It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-c.Printf, c.Println, or c.Printf.
-*/
-func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
-	return newFormatter(c, v)
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w.  It formats
-// exactly the same as Dump.
-func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
-	fdump(c, w, a...)
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value.  It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by modifying the public members
-of c.  See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func (c *ConfigState) Dump(a ...interface{}) {
-	fdump(c, os.Stdout, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func (c *ConfigState) Sdump(a ...interface{}) string {
-	var buf bytes.Buffer
-	fdump(c, &buf, a...)
-	return buf.String()
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a spew Formatter interface using
-// the ConfigState associated with s.
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
-	formatters = make([]interface{}, len(args))
-	for index, arg := range args {
-		formatters[index] = newFormatter(c, arg)
-	}
-	return formatters
-}
-
-// NewDefaultConfig returns a ConfigState with the following default settings.
-//
-// 	Indent: " "
-// 	MaxDepth: 0
-// 	DisableMethods: false
-// 	DisablePointerMethods: false
-// 	ContinueOnMethod: false
-// 	SortKeys: false
-func NewDefaultConfig() *ConfigState {
-	return &ConfigState{Indent: " "}
-}

+ 0 - 211
vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Package spew implements a deep pretty printer for Go data structures to aid in
-debugging.
-
-A quick overview of the additional features spew provides over the built-in
-printing facilities for Go data types are as follows:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output (only when using
-	  Dump style)
-
-There are two different approaches spew allows for dumping Go data structures:
-
-	* Dump style which prints with newlines, customizable indentation,
-	  and additional debug information such as types and all pointer addresses
-	  used to indirect to the final value
-	* A custom Formatter interface that integrates cleanly with the standard fmt
-	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
-	  similar to the default %v while providing the additional functionality
-	  outlined above and passing unsupported format verbs such as %x and %q
-	  along to fmt
-
-Quick Start
-
-This section demonstrates how to quickly get started with spew.  See the
-sections below for further details on formatting and configuration options.
-
-To dump a variable with full newlines, indentation, type, and pointer
-information use Dump, Fdump, or Sdump:
-	spew.Dump(myVar1, myVar2, ...)
-	spew.Fdump(someWriter, myVar1, myVar2, ...)
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Alternatively, if you would prefer to use format strings with a compacted inline
-printing style, use the convenience wrappers Printf, Fprintf, etc with
-%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
-%#+v (adds types and pointer addresses):
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-Configuration Options
-
-Configuration of spew is handled by fields in the ConfigState type.  For
-convenience, all of the top-level functions use a global state available
-via the spew.Config global.
-
-It is also possible to create a ConfigState instance that provides methods
-equivalent to the top-level functions.  This allows concurrent configuration
-options.  See the ConfigState documentation for more details.
-
-The following configuration options are available:
-	* Indent
-		String to use for each indentation level for Dump functions.
-		It is a single space by default.  A popular alternative is "\t".
-
-	* MaxDepth
-		Maximum number of levels to descend into nested data structures.
-		There is no limit by default.
-
-	* DisableMethods
-		Disables invocation of error and Stringer interface methods.
-		Method invocation is enabled by default.
-
-	* DisablePointerMethods
-		Disables invocation of error and Stringer interface methods on types
-		which only accept pointer receivers from non-pointer variables.
-		Pointer method invocation is enabled by default.
-
-	* DisablePointerAddresses
-		DisablePointerAddresses specifies whether to disable the printing of
-		pointer addresses. This is useful when diffing data structures in tests.
-
-	* DisableCapacities
-		DisableCapacities specifies whether to disable the printing of
-		capacities for arrays, slices, maps and channels. This is useful when
-		diffing data structures in tests.
-
-	* ContinueOnMethod
-		Enables recursion into types after invoking error and Stringer interface
-		methods. Recursion after method invocation is disabled by default.
-
-	* SortKeys
-		Specifies map keys should be sorted before being printed. Use
-		this to have a more deterministic, diffable output.  Note that
-		only native types (bool, int, uint, floats, uintptr and string)
-		and types which implement error or Stringer interfaces are
-		supported with other types sorted according to the
-		reflect.Value.String() output which guarantees display
-		stability.  Natural map order is used by default.
-
-	* SpewKeys
-		Specifies that, as a last resort attempt, map keys should be
-		spewed to strings and sorted by those strings.  This is only
-		considered if SortKeys is true.
-
-Dump Usage
-
-Simply call spew.Dump with a list of variables you want to dump:
-
-	spew.Dump(myVar1, myVar2, ...)
-
-You may also call spew.Fdump if you would prefer to output to an arbitrary
-io.Writer.  For example, to dump to standard error:
-
-	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
-
-A third option is to call spew.Sdump to get the formatted output as a string:
-
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Sample Dump Output
-
-See the Dump example for details on the setup of the types and variables being
-shown here.
-
-	(main.Foo) {
-	 unexportedField: (*main.Bar)(0xf84002e210)({
-	  flag: (main.Flag) flagTwo,
-	  data: (uintptr) <nil>
-	 }),
-	 ExportedField: (map[interface {}]interface {}) (len=1) {
-	  (string) (len=3) "one": (bool) true
-	 }
-	}
-
-Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
-command as shown.
-	([]uint8) (len=32 cap=32) {
-	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
-	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
-	 00000020  31 32                                             |12|
-	}
-
-Custom Formatter
-
-Spew provides a custom formatter that implements the fmt.Formatter interface
-so that it integrates cleanly with standard fmt package printing functions. The
-formatter is useful for inline printing of smaller data types similar to the
-standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Custom Formatter Usage
-
-The simplest way to make use of the spew custom formatter is to call one of the
-convenience functions such as spew.Printf, spew.Println, or spew.Printf.  The
-functions have syntax you are most likely already familiar with:
-
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Println(myVar, myVar2)
-	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list convenience functions.
-
-Sample Formatter Output
-
-Double pointer to a uint8:
-	  %v: <**>5
-	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
-	 %#v: (**uint8)5
-	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
-	  %v: <*>{1 <*><shown>}
-	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
-	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
-	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-
-See the Printf example for details on the setup of variables being shown
-here.
-
-Errors
-
-Since it is possible for custom Stringer/error interfaces to panic, spew
-detects them and handles them internally by printing the panic information
-inline with the output.  Since spew is intended to provide deep pretty printing
-capabilities on structures, it intentionally does not return any errors.
-*/
-package spew

+ 0 - 509
vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-var (
-	// uint8Type is a reflect.Type representing a uint8.  It is used to
-	// convert cgo types to uint8 slices for hexdumping.
-	uint8Type = reflect.TypeOf(uint8(0))
-
-	// cCharRE is a regular expression that matches a cgo char.
-	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
-
-	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
-	// char.  It is used to detect unsigned character arrays to hexdump
-	// them.
-	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
-
-	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
-	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
-)
-
-// dumpState contains information about the state of a dump operation.
-type dumpState struct {
-	w                io.Writer
-	depth            int
-	pointers         map[uintptr]int
-	ignoreNextType   bool
-	ignoreNextIndent bool
-	cs               *ConfigState
-}
-
-// indent performs indentation according to the depth level and cs.Indent
-// option.
-func (d *dumpState) indent() {
-	if d.ignoreNextIndent {
-		d.ignoreNextIndent = false
-		return
-	}
-	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface && !v.IsNil() {
-		v = v.Elem()
-	}
-	return v
-}
-
-// dumpPtr handles formatting of pointers by indirecting them as necessary.
-func (d *dumpState) dumpPtr(v reflect.Value) {
-	// Remove pointers at or below the current depth from map used to detect
-	// circular refs.
-	for k, depth := range d.pointers {
-		if depth >= d.depth {
-			delete(d.pointers, k)
-		}
-	}
-
-	// Keep list of all dereferenced pointers to show later.
-	pointerChain := make([]uintptr, 0)
-
-	// Figure out how many levels of indirection there are by dereferencing
-	// pointers and unpacking interfaces down the chain while detecting circular
-	// references.
-	nilFound := false
-	cycleFound := false
-	indirects := 0
-	ve := v
-	for ve.Kind() == reflect.Ptr {
-		if ve.IsNil() {
-			nilFound = true
-			break
-		}
-		indirects++
-		addr := ve.Pointer()
-		pointerChain = append(pointerChain, addr)
-		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
-			cycleFound = true
-			indirects--
-			break
-		}
-		d.pointers[addr] = d.depth
-
-		ve = ve.Elem()
-		if ve.Kind() == reflect.Interface {
-			if ve.IsNil() {
-				nilFound = true
-				break
-			}
-			ve = ve.Elem()
-		}
-	}
-
-	// Display type information.
-	d.w.Write(openParenBytes)
-	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
-	d.w.Write([]byte(ve.Type().String()))
-	d.w.Write(closeParenBytes)
-
-	// Display pointer information.
-	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
-		d.w.Write(openParenBytes)
-		for i, addr := range pointerChain {
-			if i > 0 {
-				d.w.Write(pointerChainBytes)
-			}
-			printHexPtr(d.w, addr)
-		}
-		d.w.Write(closeParenBytes)
-	}
-
-	// Display dereferenced value.
-	d.w.Write(openParenBytes)
-	switch {
-	case nilFound:
-		d.w.Write(nilAngleBytes)
-
-	case cycleFound:
-		d.w.Write(circularBytes)
-
-	default:
-		d.ignoreNextType = true
-		d.dump(ve)
-	}
-	d.w.Write(closeParenBytes)
-}
-
-// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
-// reflection) arrays and slices are dumped in hexdump -C fashion.
-func (d *dumpState) dumpSlice(v reflect.Value) {
-	// Determine whether this type should be hex dumped or not.  Also,
-	// for types which should be hexdumped, try to use the underlying data
-	// first, then fall back to trying to convert them to a uint8 slice.
-	var buf []uint8
-	doConvert := false
-	doHexDump := false
-	numEntries := v.Len()
-	if numEntries > 0 {
-		vt := v.Index(0).Type()
-		vts := vt.String()
-		switch {
-		// C types that need to be converted.
-		case cCharRE.MatchString(vts):
-			fallthrough
-		case cUnsignedCharRE.MatchString(vts):
-			fallthrough
-		case cUint8tCharRE.MatchString(vts):
-			doConvert = true
-
-		// Try to use existing uint8 slices and fall back to converting
-		// and copying if that fails.
-		case vt.Kind() == reflect.Uint8:
-			// We need an addressable interface to convert the type
-			// to a byte slice.  However, the reflect package won't
-			// give us an interface on certain things like
-			// unexported struct fields in order to enforce
-			// visibility rules.  We use unsafe, when available, to
-			// bypass these restrictions since this package does not
-			// mutate the values.
-			vs := v
-			if !vs.CanInterface() || !vs.CanAddr() {
-				vs = unsafeReflectValue(vs)
-			}
-			if !UnsafeDisabled {
-				vs = vs.Slice(0, numEntries)
-
-				// Use the existing uint8 slice if it can be
-				// type asserted.
-				iface := vs.Interface()
-				if slice, ok := iface.([]uint8); ok {
-					buf = slice
-					doHexDump = true
-					break
-				}
-			}
-
-			// The underlying data needs to be converted if it can't
-			// be type asserted to a uint8 slice.
-			doConvert = true
-		}
-
-		// Copy and convert the underlying type if needed.
-		if doConvert && vt.ConvertibleTo(uint8Type) {
-			// Convert and copy each element into a uint8 byte
-			// slice.
-			buf = make([]uint8, numEntries)
-			for i := 0; i < numEntries; i++ {
-				vv := v.Index(i)
-				buf[i] = uint8(vv.Convert(uint8Type).Uint())
-			}
-			doHexDump = true
-		}
-	}
-
-	// Hexdump the entire slice as needed.
-	if doHexDump {
-		indent := strings.Repeat(d.cs.Indent, d.depth)
-		str := indent + hex.Dump(buf)
-		str = strings.Replace(str, "\n", "\n"+indent, -1)
-		str = strings.TrimRight(str, d.cs.Indent)
-		d.w.Write([]byte(str))
-		return
-	}
-
-	// Recursively call dump for each item.
-	for i := 0; i < numEntries; i++ {
-		d.dump(d.unpackValue(v.Index(i)))
-		if i < (numEntries - 1) {
-			d.w.Write(commaNewlineBytes)
-		} else {
-			d.w.Write(newlineBytes)
-		}
-	}
-}
-
-// dump is the main workhorse for dumping a value.  It uses the passed reflect
-// value to figure out what kind of object we are dealing with and formats it
-// appropriately.  It is a recursive function, however circular data structures
-// are detected and handled properly.
-func (d *dumpState) dump(v reflect.Value) {
-	// Handle invalid reflect values immediately.
-	kind := v.Kind()
-	if kind == reflect.Invalid {
-		d.w.Write(invalidAngleBytes)
-		return
-	}
-
-	// Handle pointers specially.
-	if kind == reflect.Ptr {
-		d.indent()
-		d.dumpPtr(v)
-		return
-	}
-
-	// Print type information unless already handled elsewhere.
-	if !d.ignoreNextType {
-		d.indent()
-		d.w.Write(openParenBytes)
-		d.w.Write([]byte(v.Type().String()))
-		d.w.Write(closeParenBytes)
-		d.w.Write(spaceBytes)
-	}
-	d.ignoreNextType = false
-
-	// Display length and capacity if the built-in len and cap functions
-	// work with the value's kind and the len/cap itself is non-zero.
-	valueLen, valueCap := 0, 0
-	switch v.Kind() {
-	case reflect.Array, reflect.Slice, reflect.Chan:
-		valueLen, valueCap = v.Len(), v.Cap()
-	case reflect.Map, reflect.String:
-		valueLen = v.Len()
-	}
-	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
-		d.w.Write(openParenBytes)
-		if valueLen != 0 {
-			d.w.Write(lenEqualsBytes)
-			printInt(d.w, int64(valueLen), 10)
-		}
-		if !d.cs.DisableCapacities && valueCap != 0 {
-			if valueLen != 0 {
-				d.w.Write(spaceBytes)
-			}
-			d.w.Write(capEqualsBytes)
-			printInt(d.w, int64(valueCap), 10)
-		}
-		d.w.Write(closeParenBytes)
-		d.w.Write(spaceBytes)
-	}
-
-	// Call Stringer/error interfaces if they exist and the handle methods flag
-	// is enabled
-	if !d.cs.DisableMethods {
-		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
-			if handled := handleMethods(d.cs, d.w, v); handled {
-				return
-			}
-		}
-	}
-
-	switch kind {
-	case reflect.Invalid:
-		// Do nothing.  We should never get here since invalid has already
-		// been handled above.
-
-	case reflect.Bool:
-		printBool(d.w, v.Bool())
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		printInt(d.w, v.Int(), 10)
-
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		printUint(d.w, v.Uint(), 10)
-
-	case reflect.Float32:
-		printFloat(d.w, v.Float(), 32)
-
-	case reflect.Float64:
-		printFloat(d.w, v.Float(), 64)
-
-	case reflect.Complex64:
-		printComplex(d.w, v.Complex(), 32)
-
-	case reflect.Complex128:
-		printComplex(d.w, v.Complex(), 64)
-
-	case reflect.Slice:
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-			break
-		}
-		fallthrough
-
-	case reflect.Array:
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			d.dumpSlice(v)
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.String:
-		d.w.Write([]byte(strconv.Quote(v.String())))
-
-	case reflect.Interface:
-		// The only time we should get here is for nil interfaces due to
-		// unpackValue calls.
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-		}
-
-	case reflect.Ptr:
-		// Do nothing.  We should never get here since pointers have already
-		// been handled above.
-
-	case reflect.Map:
-		// nil maps should be indicated as different than empty maps
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-			break
-		}
-
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			numEntries := v.Len()
-			keys := v.MapKeys()
-			if d.cs.SortKeys {
-				sortValues(keys, d.cs)
-			}
-			for i, key := range keys {
-				d.dump(d.unpackValue(key))
-				d.w.Write(colonSpaceBytes)
-				d.ignoreNextIndent = true
-				d.dump(d.unpackValue(v.MapIndex(key)))
-				if i < (numEntries - 1) {
-					d.w.Write(commaNewlineBytes)
-				} else {
-					d.w.Write(newlineBytes)
-				}
-			}
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.Struct:
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			vt := v.Type()
-			numFields := v.NumField()
-			for i := 0; i < numFields; i++ {
-				d.indent()
-				vtf := vt.Field(i)
-				d.w.Write([]byte(vtf.Name))
-				d.w.Write(colonSpaceBytes)
-				d.ignoreNextIndent = true
-				d.dump(d.unpackValue(v.Field(i)))
-				if i < (numFields - 1) {
-					d.w.Write(commaNewlineBytes)
-				} else {
-					d.w.Write(newlineBytes)
-				}
-			}
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.Uintptr:
-		printHexPtr(d.w, uintptr(v.Uint()))
-
-	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		printHexPtr(d.w, v.Pointer())
-
-	// There were not any other types at the time this code was written, but
-	// fall back to letting the default fmt package handle it in case any new
-	// types are added.
-	default:
-		if v.CanInterface() {
-			fmt.Fprintf(d.w, "%v", v.Interface())
-		} else {
-			fmt.Fprintf(d.w, "%v", v.String())
-		}
-	}
-}
-
-// fdump is a helper function to consolidate the logic from the various public
-// methods which take varying writers and config states.
-func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
-	for _, arg := range a {
-		if arg == nil {
-			w.Write(interfaceBytes)
-			w.Write(spaceBytes)
-			w.Write(nilAngleBytes)
-			w.Write(newlineBytes)
-			continue
-		}
-
-		d := dumpState{w: w, cs: cs}
-		d.pointers = make(map[uintptr]int)
-		d.dump(reflect.ValueOf(arg))
-		d.w.Write(newlineBytes)
-	}
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w.  It formats
-// exactly the same as Dump.
-func Fdump(w io.Writer, a ...interface{}) {
-	fdump(&Config, w, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func Sdump(a ...interface{}) string {
-	var buf bytes.Buffer
-	fdump(&Config, &buf, a...)
-	return buf.String()
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value.  It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by an exported package global,
-spew.Config.  See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func Dump(a ...interface{}) {
-	fdump(&Config, os.Stdout, a...)
-}

+ 0 - 419
vendor/github.com/davecgh/go-spew/spew/format.go

@@ -1,419 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// supportedFlags is a list of all the character flags supported by fmt package.
-const supportedFlags = "0-+# "
-
-// formatState implements the fmt.Formatter interface and contains information
-// about the state of a formatting operation.  The NewFormatter function can
-// be used to get a new Formatter which can be used directly as arguments
-// in standard fmt package printing calls.
-type formatState struct {
-	value          interface{}
-	fs             fmt.State
-	depth          int
-	pointers       map[uintptr]int
-	ignoreNextType bool
-	cs             *ConfigState
-}
-
-// buildDefaultFormat recreates the original format string without precision
-// and width information to pass in to fmt.Sprintf in the case of an
-// unrecognized type.  Unless new types are added to the language, this
-// function won't ever be called.
-func (f *formatState) buildDefaultFormat() (format string) {
-	buf := bytes.NewBuffer(percentBytes)
-
-	for _, flag := range supportedFlags {
-		if f.fs.Flag(int(flag)) {
-			buf.WriteRune(flag)
-		}
-	}
-
-	buf.WriteRune('v')
-
-	format = buf.String()
-	return format
-}
-
-// constructOrigFormat recreates the original format string including precision
-// and width information to pass along to the standard fmt package.  This allows
-// automatic deferral of all format strings this package doesn't support.
-func (f *formatState) constructOrigFormat(verb rune) (format string) {
-	buf := bytes.NewBuffer(percentBytes)
-
-	for _, flag := range supportedFlags {
-		if f.fs.Flag(int(flag)) {
-			buf.WriteRune(flag)
-		}
-	}
-
-	if width, ok := f.fs.Width(); ok {
-		buf.WriteString(strconv.Itoa(width))
-	}
-
-	if precision, ok := f.fs.Precision(); ok {
-		buf.Write(precisionBytes)
-		buf.WriteString(strconv.Itoa(precision))
-	}
-
-	buf.WriteRune(verb)
-
-	format = buf.String()
-	return format
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible and
-// ensures that types for values which have been unpacked from an interface
-// are displayed when the show types flag is also set.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface {
-		f.ignoreNextType = false
-		if !v.IsNil() {
-			v = v.Elem()
-		}
-	}
-	return v
-}
-
-// formatPtr handles formatting of pointers by indirecting them as necessary.
-func (f *formatState) formatPtr(v reflect.Value) {
-	// Display nil if top level pointer is nil.
-	showTypes := f.fs.Flag('#')
-	if v.IsNil() && (!showTypes || f.ignoreNextType) {
-		f.fs.Write(nilAngleBytes)
-		return
-	}
-
-	// Remove pointers at or below the current depth from map used to detect
-	// circular refs.
-	for k, depth := range f.pointers {
-		if depth >= f.depth {
-			delete(f.pointers, k)
-		}
-	}
-
-	// Keep list of all dereferenced pointers to possibly show later.
-	pointerChain := make([]uintptr, 0)
-
-	// Figure out how many levels of indirection there are by derferencing
-	// pointers and unpacking interfaces down the chain while detecting circular
-	// references.
-	nilFound := false
-	cycleFound := false
-	indirects := 0
-	ve := v
-	for ve.Kind() == reflect.Ptr {
-		if ve.IsNil() {
-			nilFound = true
-			break
-		}
-		indirects++
-		addr := ve.Pointer()
-		pointerChain = append(pointerChain, addr)
-		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
-			cycleFound = true
-			indirects--
-			break
-		}
-		f.pointers[addr] = f.depth
-
-		ve = ve.Elem()
-		if ve.Kind() == reflect.Interface {
-			if ve.IsNil() {
-				nilFound = true
-				break
-			}
-			ve = ve.Elem()
-		}
-	}
-
-	// Display type or indirection level depending on flags.
-	if showTypes && !f.ignoreNextType {
-		f.fs.Write(openParenBytes)
-		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
-		f.fs.Write([]byte(ve.Type().String()))
-		f.fs.Write(closeParenBytes)
-	} else {
-		if nilFound || cycleFound {
-			indirects += strings.Count(ve.Type().String(), "*")
-		}
-		f.fs.Write(openAngleBytes)
-		f.fs.Write([]byte(strings.Repeat("*", indirects)))
-		f.fs.Write(closeAngleBytes)
-	}
-
-	// Display pointer information depending on flags.
-	if f.fs.Flag('+') && (len(pointerChain) > 0) {
-		f.fs.Write(openParenBytes)
-		for i, addr := range pointerChain {
-			if i > 0 {
-				f.fs.Write(pointerChainBytes)
-			}
-			printHexPtr(f.fs, addr)
-		}
-		f.fs.Write(closeParenBytes)
-	}
-
-	// Display dereferenced value.
-	switch {
-	case nilFound:
-		f.fs.Write(nilAngleBytes)
-
-	case cycleFound:
-		f.fs.Write(circularShortBytes)
-
-	default:
-		f.ignoreNextType = true
-		f.format(ve)
-	}
-}
-
-// format is the main workhorse for providing the Formatter interface.  It
-// uses the passed reflect value to figure out what kind of object we are
-// dealing with and formats it appropriately.  It is a recursive function,
-// however circular data structures are detected and handled properly.
-func (f *formatState) format(v reflect.Value) {
-	// Handle invalid reflect values immediately.
-	kind := v.Kind()
-	if kind == reflect.Invalid {
-		f.fs.Write(invalidAngleBytes)
-		return
-	}
-
-	// Handle pointers specially.
-	if kind == reflect.Ptr {
-		f.formatPtr(v)
-		return
-	}
-
-	// Print type information unless already handled elsewhere.
-	if !f.ignoreNextType && f.fs.Flag('#') {
-		f.fs.Write(openParenBytes)
-		f.fs.Write([]byte(v.Type().String()))
-		f.fs.Write(closeParenBytes)
-	}
-	f.ignoreNextType = false
-
-	// Call Stringer/error interfaces if they exist and the handle methods
-	// flag is enabled.
-	if !f.cs.DisableMethods {
-		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
-			if handled := handleMethods(f.cs, f.fs, v); handled {
-				return
-			}
-		}
-	}
-
-	switch kind {
-	case reflect.Invalid:
-		// Do nothing.  We should never get here since invalid has already
-		// been handled above.
-
-	case reflect.Bool:
-		printBool(f.fs, v.Bool())
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		printInt(f.fs, v.Int(), 10)
-
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		printUint(f.fs, v.Uint(), 10)
-
-	case reflect.Float32:
-		printFloat(f.fs, v.Float(), 32)
-
-	case reflect.Float64:
-		printFloat(f.fs, v.Float(), 64)
-
-	case reflect.Complex64:
-		printComplex(f.fs, v.Complex(), 32)
-
-	case reflect.Complex128:
-		printComplex(f.fs, v.Complex(), 64)
-
-	case reflect.Slice:
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-			break
-		}
-		fallthrough
-
-	case reflect.Array:
-		f.fs.Write(openBracketBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			numEntries := v.Len()
-			for i := 0; i < numEntries; i++ {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				f.ignoreNextType = true
-				f.format(f.unpackValue(v.Index(i)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeBracketBytes)
-
-	case reflect.String:
-		f.fs.Write([]byte(v.String()))
-
-	case reflect.Interface:
-		// The only time we should get here is for nil interfaces due to
-		// unpackValue calls.
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-		}
-
-	case reflect.Ptr:
-		// Do nothing.  We should never get here since pointers have already
-		// been handled above.
-
-	case reflect.Map:
-		// nil maps should be indicated as different than empty maps
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-			break
-		}
-
-		f.fs.Write(openMapBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			keys := v.MapKeys()
-			if f.cs.SortKeys {
-				sortValues(keys, f.cs)
-			}
-			for i, key := range keys {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				f.ignoreNextType = true
-				f.format(f.unpackValue(key))
-				f.fs.Write(colonBytes)
-				f.ignoreNextType = true
-				f.format(f.unpackValue(v.MapIndex(key)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeMapBytes)
-
-	case reflect.Struct:
-		numFields := v.NumField()
-		f.fs.Write(openBraceBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			vt := v.Type()
-			for i := 0; i < numFields; i++ {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				vtf := vt.Field(i)
-				if f.fs.Flag('+') || f.fs.Flag('#') {
-					f.fs.Write([]byte(vtf.Name))
-					f.fs.Write(colonBytes)
-				}
-				f.format(f.unpackValue(v.Field(i)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeBraceBytes)
-
-	case reflect.Uintptr:
-		printHexPtr(f.fs, uintptr(v.Uint()))
-
-	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		printHexPtr(f.fs, v.Pointer())
-
-	// There were not any other types at the time this code was written, but
-	// fall back to letting the default fmt package handle it if any get added.
-	default:
-		format := f.buildDefaultFormat()
-		if v.CanInterface() {
-			fmt.Fprintf(f.fs, format, v.Interface())
-		} else {
-			fmt.Fprintf(f.fs, format, v.String())
-		}
-	}
-}
-
-// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
-// details.
-func (f *formatState) Format(fs fmt.State, verb rune) {
-	f.fs = fs
-
-	// Use standard formatting for verbs that are not v.
-	if verb != 'v' {
-		format := f.constructOrigFormat(verb)
-		fmt.Fprintf(fs, format, f.value)
-		return
-	}
-
-	if f.value == nil {
-		if fs.Flag('#') {
-			fs.Write(interfaceBytes)
-		}
-		fs.Write(nilAngleBytes)
-		return
-	}
-
-	f.format(reflect.ValueOf(f.value))
-}
-
-// newFormatter is a helper function to consolidate the logic from the various
-// public methods which take varying config states.
-func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
-	fs := &formatState{value: v, cs: cs}
-	fs.pointers = make(map[uintptr]int)
-	return fs
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface.  As a result, it integrates cleanly with standard fmt package
-printing functions.  The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly.  It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-Printf, Println, or Fprintf.
-*/
-func NewFormatter(v interface{}) fmt.Formatter {
-	return newFormatter(&Config, v)
-}

+ 0 - 148
vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <[email protected]>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"fmt"
-	"io"
-)
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the formatted string as a value that satisfies error.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Errorf(format string, a ...interface{}) (err error) {
-	return fmt.Errorf(format, convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprint(w, convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
-	return fmt.Fprintf(w, format, convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a default Formatter interface returned by NewFormatter.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprintln(w, convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
-func Print(a ...interface{}) (n int, err error) {
-	return fmt.Print(convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Printf(format string, a ...interface{}) (n int, err error) {
-	return fmt.Printf(format, convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
-func Println(a ...interface{}) (n int, err error) {
-	return fmt.Println(convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprint(a ...interface{}) string {
-	return fmt.Sprint(convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintln(a ...interface{}) string {
-	return fmt.Sprintln(convertArgs(a)...)
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a default spew Formatter interface.
-func convertArgs(args []interface{}) (formatters []interface{}) {
-	formatters = make([]interface{}, len(args))
-	for index, arg := range args {
-		formatters[index] = NewFormatter(arg)
-	}
-	return formatters
-}

+ 0 - 12
vendor/github.com/fsnotify/fsnotify/.editorconfig

@@ -1,12 +0,0 @@
-root = true
-
-[*.go]
-indent_style = tab
-indent_size = 4
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-insert_final_newline = true
-trim_trailing_whitespace = true

+ 0 - 1
vendor/github.com/fsnotify/fsnotify/.gitattributes

@@ -1 +0,0 @@
-go.sum linguist-generated

+ 0 - 6
vendor/github.com/fsnotify/fsnotify/.gitignore

@@ -1,6 +0,0 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-
-.vagrant
-*.sublime-project

+ 0 - 36
vendor/github.com/fsnotify/fsnotify/.travis.yml

@@ -1,36 +0,0 @@
-sudo: false
-language: go
-
-go:
-  - "stable"
-  - "1.11.x"
-  - "1.10.x"
-  - "1.9.x"
-
-matrix:
-  include:
-    - go: "stable"
-      env: GOLINT=true
-  allow_failures:
-    - go: tip
-  fast_finish: true
-
-
-before_install:
-  - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
-
-script:
-  - go test --race ./...
-
-after_script:
-  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
-  - if [ ! -z  "${GOLINT}" ]; then echo running golint; golint --set_exit_status  ./...; else echo skipping golint; fi
-  - go vet ./...
-
-os:
-  - linux
-  - osx
-  - windows
-
-notifications:
-  email: false

+ 0 - 52
vendor/github.com/fsnotify/fsnotify/AUTHORS

@@ -1,52 +0,0 @@
-# Names should be added to this file as
-#	Name or Organization <email address>
-# The email address is not required for organizations.
-
-# You can update this list using the following command:
-#
-#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
-
-# Please keep the list sorted.
-
-Aaron L <[email protected]>
-Adrien Bustany <[email protected]>
-Amit Krishnan <[email protected]>
-Anmol Sethi <[email protected]>
-Bjørn Erik Pedersen <[email protected]>
-Bruno Bigras <[email protected]>
-Caleb Spare <[email protected]>
-Case Nelson <[email protected]>
-Chris Howey <[email protected]> <[email protected]>
-Christoffer Buchholz <[email protected]>
-Daniel Wagner-Hall <[email protected]>
-Dave Cheney <[email protected]>
-Evan Phoenix <[email protected]>
-Francisco Souza <[email protected]>
-Hari haran <[email protected]>
-John C Barstow
-Kelvin Fo <[email protected]>
-Ken-ichirou MATSUZAWA <[email protected]>
-Matt Layher <[email protected]>
-Nathan Youngman <[email protected]>
-Nickolai Zeldovich <[email protected]>
-Patrick <[email protected]>
-Paul Hammond <[email protected]>
-Pawel Knap <[email protected]>
-Pieter Droogendijk <[email protected]>
-Pursuit92 <[email protected]>
-Riku Voipio <[email protected]>
-Rob Figueiredo <[email protected]>
-Rodrigo Chiossi <[email protected]>
-Slawek Ligus <[email protected]>
-Soge Zhang <[email protected]>
-Tiffany Jernigan <[email protected]>
-Tilak Sharma <[email protected]>
-Tom Payne <[email protected]>
-Travis Cline <[email protected]>
-Tudor Golubenco <[email protected]>
-Vahe Khachikyan <[email protected]>
-Yukang <[email protected]>
-bronze1man <[email protected]>
-debrando <[email protected]>
-henrikedwards <[email protected]>
-铁哥 <[email protected]>

+ 0 - 317
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md

@@ -1,317 +0,0 @@
-# Changelog
-
-## v1.4.7 / 2018-01-09
-
-* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
-* Tests: Fix missing verb on format string (thanks @rchiossi)
-* Linux: Fix deadlock in Remove (thanks @aarondl)
-* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
-* Docs: Moved FAQ into the README (thanks @vahe)
-* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
-* Docs: replace references to OS X with macOS
-
-## v1.4.2 / 2016-10-10
-
-* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
-
-## v1.4.1 / 2016-10-04
-
-* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
-
-## v1.4.0 / 2016-10-01
-
-* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
-
-## v1.3.1 / 2016-06-28
-
-* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
-
-## v1.3.0 / 2016-04-19
-
-* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
-
-## v1.2.10 / 2016-03-02
-
-* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
-
-## v1.2.9 / 2016-01-13
-
-kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
-
-## v1.2.8 / 2015-12-17
-
-* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
-* inotify: fix race in test
-* enable race detection for continuous integration (Linux, Mac, Windows)
-
-## v1.2.5 / 2015-10-17
-
-* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
-* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
-* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
-* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
-
-## v1.2.1 / 2015-10-14
-
-* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
-
-## v1.2.0 / 2015-02-08
-
-* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
-* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
-* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
-
-## v1.1.1 / 2015-02-05
-
-* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
-
-## v1.1.0 / 2014-12-12
-
-* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
-    * add low-level functions
-    * only need to store flags on directories
-    * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
-    * done can be an unbuffered channel
-    * remove calls to os.NewSyscallError
-* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
-* kqueue: fix regression in  rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## v1.0.4 / 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-
-## v1.0.3 / 2014-08-19
-
-* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
-
-## v1.0.2 / 2014-08-17
-
-* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-
-## v1.0.0 / 2014-08-15
-
-* [API] Remove AddWatch on Windows, use Add.
-* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
-* Minor updates based on feedback from golint.
-
-## dev / 2014-07-09
-
-* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
-* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
-
-## dev / 2014-07-04
-
-* kqueue: fix incorrect mutex used in Close()
-* Update example to demonstrate usage of Op.
-
-## dev / 2014-06-28
-
-* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
-* Fix for String() method on Event (thanks Alex Brainman)
-* Don't build on Plan 9 or Solaris (thanks @4ad)
-
-## dev / 2014-06-21
-
-* Events channel of type Event rather than *Event.
-* [internal] use syscall constants directly for inotify and kqueue.
-* [internal] kqueue: rename events to kevents and fileEvent to event.
-
-## dev / 2014-06-19
-
-* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
-* [internal] remove cookie from Event struct (unused).
-* [internal] Event struct has the same definition across every OS.
-* [internal] remove internal watch and removeWatch methods.
-
-## dev / 2014-06-12
-
-* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
-* [API] Pluralized channel names: Events and Errors.
-* [API] Renamed FileEvent struct to Event.
-* [API] Op constants replace methods like IsCreate().
-
-## dev / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## dev / 2014-05-23
-
-* [API] Remove current implementation of WatchFlags.
-    * current implementation doesn't take advantage of OS for efficiency
-    * provides little benefit over filtering events as they are received, but has  extra bookkeeping and mutexes
-    * no tests for the current implementation
-    * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-
-## v0.9.3 / 2014-12-31
-
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## v0.9.2 / 2014-08-17
-
-* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-
-## v0.9.1 / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## v0.9.0 / 2014-01-17
-
-* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
-* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
-* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-
-## v0.8.12 / 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## v0.8.11 / 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-
-## v0.8.10 / 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## v0.8.9 / 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant  [#59][] (thanks @nathany)
-
-## v0.8.8 / 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## v0.8.7 / 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## v0.8.6 / 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## v0.8.5 / 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## v0.8.4 / 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## v0.8.3 / 2013-03-13
-
-* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## v0.8.2 / 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## v0.8.1 / 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## v0.8.0 / 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## v0.7.4 / 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## v0.7.3 / 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## v0.7.2 / 2012-09-01
-
-* kqueue: events for created directories
-
-## v0.7.1 / 2012-07-14
-
-* [Fix] for renaming files
-
-## v0.7.0 / 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## v0.6.0 / 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## v0.5.1 / 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## v0.5.0 / 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## v0.4.0 / 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## v0.3.0 / 2012-02-19
-
-* kqueue: add files when watch directory
-
-## v0.2.0 / 2011-12-30
-
-* update to latest Go weekly code
-
-## v0.1.0 / 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21

+ 0 - 77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md

@@ -1,77 +0,0 @@
-# Contributing
-
-## Issues
-
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
-* Please indicate the platform you are using fsnotify on.
-* A code example to reproduce the problem is appreciated.
-
-## Pull Requests
-
-### Contributor License Agreement
-
-fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please indicate that you have signed the CLA in your pull request.
-
-### How fsnotify is Developed
-
-* Development is done on feature branches.
-* Tests are run on BSD, Linux, macOS and Windows.
-* Pull requests are reviewed and [applied to master][am] using [hub][].
-  * Maintainers may modify or squash commits rather than asking contributors to.
-* To issue a new release, the maintainers will:
-  * Update the CHANGELOG
-  * Tag a version, which will become available through gopkg.in.
- 
-### How to Fork
-
-For smooth sailing, always use the original import path. Installing with `go get` makes this easy. 
-
-1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Ensure everything works and the tests pass (see below)
-4. Commit your changes (`git commit -am 'Add some feature'`)
-
-Contribute upstream:
-
-1. Fork fsnotify on GitHub
-2. Add your remote (`git remote add fork [email protected]:mycompany/repo.git`)
-3. Push to the branch (`git push fork my-new-feature`)
-4. Create a new Pull Request on GitHub
-
-This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
-
-### Testing
-
-fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
-
-Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-
-To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-
-Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
-
-### Maintainers
-
-Help maintaining fsnotify is welcome. To be a maintainer:
-
-* Submit a pull request and sign the CLA as above.
-* You must be able to run the test suite on Mac, Windows, Linux and BSD.
-
-To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
-
-All code changes should be internal pull requests.
-
-Releases are tagged using [Semantic Versioning](http://semver.org/).
-
-[hub]: https://github.com/github/hub
-[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

+ 0 - 28
vendor/github.com/fsnotify/fsnotify/LICENSE

@@ -1,28 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 130
vendor/github.com/fsnotify/fsnotify/README.md

@@ -1,130 +0,0 @@
-# File system notifications for Go
-
-[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
-
-fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
-
-```console
-go get -u golang.org/x/sys/...
-```
-
-Cross platform: Windows, Linux, BSD and macOS.
-
-| Adapter               | OS                               | Status                                                                                                                          |
-| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| inotify               | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
-| kqueue                | BSD, macOS, iOS\*                | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
-| ReadDirectoryChangesW | Windows                          | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
-| FSEvents              | macOS                            | [Planned](https://github.com/fsnotify/fsnotify/issues/11)                                                                       |
-| FEN                   | Solaris 11                       | [In Progress](https://github.com/fsnotify/fsnotify/issues/12)                                                                   |
-| fanotify              | Linux 2.6.37+                    | [Planned](https://github.com/fsnotify/fsnotify/issues/114)                                                                      |
-| USN Journals          | Windows                          | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)                                                                         |
-| Polling               | *All*                            | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)                                                                          |
-
-\* Android and iOS are untested.
-
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
-
-## API stability
-
-fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). 
-
-All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
-
-Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
-
-## Usage
-
-```go
-package main
-
-import (
-	"log"
-
-	"github.com/fsnotify/fsnotify"
-)
-
-func main() {
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer watcher.Close()
-
-	done := make(chan bool)
-	go func() {
-		for {
-			select {
-			case event, ok := <-watcher.Events:
-				if !ok {
-					return
-				}
-				log.Println("event:", event)
-				if event.Op&fsnotify.Write == fsnotify.Write {
-					log.Println("modified file:", event.Name)
-				}
-			case err, ok := <-watcher.Errors:
-				if !ok {
-					return
-				}
-				log.Println("error:", err)
-			}
-		}
-	}()
-
-	err = watcher.Add("/tmp/foo")
-	if err != nil {
-		log.Fatal(err)
-	}
-	<-done
-}
-```
-
-## Contributing
-
-Please refer to [CONTRIBUTING][] before opening an issue or pull request.
-
-## Example
-
-See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
-
-## FAQ
-
-**When a file is moved to another directory is it still being watched?**
-
-No (it shouldn't be, unless you are watching where it was moved to).
-
-**When I watch a directory, are all subdirectories watched as well?**
-
-No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
-
-**Do I have to watch the Error and Event channels in a separate goroutine?**
-
-As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
-
-**Why am I receiving multiple events for the same file on OS X?**
-
-Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
-
-**How many files can be watched at once?**
-
-There are OS-specific limits as to how many watches can be created:
-* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
-* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
-
-**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
-
-fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
-
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#7]: https://github.com/howeyc/fsnotify/issues/7
-
-[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
-
-## Related Projects
-
-* [notify](https://github.com/rjeczalik/notify)
-* [fsevents](https://github.com/fsnotify/fsevents)
-

+ 0 - 37
vendor/github.com/fsnotify/fsnotify/fen.go

@@ -1,37 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-package fsnotify
-
-import (
-	"errors"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events chan Event
-	Errors chan error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	return nil
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	return nil
-}

+ 0 - 68
vendor/github.com/fsnotify/fsnotify/fsnotify.go

@@ -1,68 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-// Package fsnotify provides a platform-independent interface for file system notifications.
-package fsnotify
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-)
-
-// Event represents a single file system notification.
-type Event struct {
-	Name string // Relative path to the file or directory.
-	Op   Op     // File operation that triggered the event.
-}
-
-// Op describes a set of file operations.
-type Op uint32
-
-// These are the generalized file operations that can trigger a notification.
-const (
-	Create Op = 1 << iota
-	Write
-	Remove
-	Rename
-	Chmod
-)
-
-func (op Op) String() string {
-	// Use a buffer for efficient string concatenation
-	var buffer bytes.Buffer
-
-	if op&Create == Create {
-		buffer.WriteString("|CREATE")
-	}
-	if op&Remove == Remove {
-		buffer.WriteString("|REMOVE")
-	}
-	if op&Write == Write {
-		buffer.WriteString("|WRITE")
-	}
-	if op&Rename == Rename {
-		buffer.WriteString("|RENAME")
-	}
-	if op&Chmod == Chmod {
-		buffer.WriteString("|CHMOD")
-	}
-	if buffer.Len() == 0 {
-		return ""
-	}
-	return buffer.String()[1:] // Strip leading pipe
-}
-
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
-	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
-}
-
-// Common errors that can be reported by a watcher
-var (
-	ErrEventOverflow = errors.New("fsnotify queue overflow")
-)

+ 0 - 5
vendor/github.com/fsnotify/fsnotify/go.mod

@@ -1,5 +0,0 @@
-module github.com/fsnotify/fsnotify
-
-go 1.13
-
-require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9

+ 0 - 2
vendor/github.com/fsnotify/fsnotify/go.sum

@@ -1,2 +0,0 @@
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

+ 0 - 337
vendor/github.com/fsnotify/fsnotify/inotify.go

@@ -1,337 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events   chan Event
-	Errors   chan error
-	mu       sync.Mutex // Map access
-	fd       int
-	poller   *fdPoller
-	watches  map[string]*watch // Map of inotify watches (key: path)
-	paths    map[int]string    // Map of watched paths (key: watch descriptor)
-	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
-	doneResp chan struct{}     // Channel to respond to Close
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	// Create inotify fd
-	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
-	if fd == -1 {
-		return nil, errno
-	}
-	// Create epoll
-	poller, err := newFdPoller(fd)
-	if err != nil {
-		unix.Close(fd)
-		return nil, err
-	}
-	w := &Watcher{
-		fd:       fd,
-		poller:   poller,
-		watches:  make(map[string]*watch),
-		paths:    make(map[int]string),
-		Events:   make(chan Event),
-		Errors:   make(chan error),
-		done:     make(chan struct{}),
-		doneResp: make(chan struct{}),
-	}
-
-	go w.readEvents()
-	return w, nil
-}
-
-func (w *Watcher) isClosed() bool {
-	select {
-	case <-w.done:
-		return true
-	default:
-		return false
-	}
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	if w.isClosed() {
-		return nil
-	}
-
-	// Send 'close' signal to goroutine, and set the Watcher to closed.
-	close(w.done)
-
-	// Wake up goroutine
-	w.poller.wake()
-
-	// Wait for goroutine to close
-	<-w.doneResp
-
-	return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	name = filepath.Clean(name)
-	if w.isClosed() {
-		return errors.New("inotify instance already closed")
-	}
-
-	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
-		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
-		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
-
-	var flags uint32 = agnosticEvents
-
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	watchEntry := w.watches[name]
-	if watchEntry != nil {
-		flags |= watchEntry.flags | unix.IN_MASK_ADD
-	}
-	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
-	if wd == -1 {
-		return errno
-	}
-
-	if watchEntry == nil {
-		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
-		w.paths[wd] = name
-	} else {
-		watchEntry.wd = uint32(wd)
-		watchEntry.flags = flags
-	}
-
-	return nil
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-
-	// Fetch the watch.
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	watch, ok := w.watches[name]
-
-	// Remove it from inotify.
-	if !ok {
-		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
-	}
-
-	// We successfully removed the watch if InotifyRmWatch doesn't return an
-	// error, we need to clean up our internal state to ensure it matches
-	// inotify's kernel state.
-	delete(w.paths, int(watch.wd))
-	delete(w.watches, name)
-
-	// inotify_rm_watch will return EINVAL if the file has been deleted;
-	// the inotify will already have been removed.
-	// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
-	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
-	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
-	// by another thread and we have not received IN_IGNORE event.
-	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
-	if success == -1 {
-		// TODO: Perhaps it's not helpful to return an error here in every case.
-		// the only two possible errors are:
-		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
-		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
-		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
-		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
-		return errno
-	}
-
-	return nil
-}
-
-type watch struct {
-	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
-	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
-// readEvents reads from the inotify file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
-	var (
-		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
-		n     int                                  // Number of bytes read with read()
-		errno error                                // Syscall errno
-		ok    bool                                 // For poller.wait
-	)
-
-	defer close(w.doneResp)
-	defer close(w.Errors)
-	defer close(w.Events)
-	defer unix.Close(w.fd)
-	defer w.poller.close()
-
-	for {
-		// See if we have been closed.
-		if w.isClosed() {
-			return
-		}
-
-		ok, errno = w.poller.wait()
-		if errno != nil {
-			select {
-			case w.Errors <- errno:
-			case <-w.done:
-				return
-			}
-			continue
-		}
-
-		if !ok {
-			continue
-		}
-
-		n, errno = unix.Read(w.fd, buf[:])
-		// If a signal interrupted execution, see if we've been asked to close, and try again.
-		// http://man7.org/linux/man-pages/man7/signal.7.html :
-		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
-		if errno == unix.EINTR {
-			continue
-		}
-
-		// unix.Read might have been woken up by Close. If so, we're done.
-		if w.isClosed() {
-			return
-		}
-
-		if n < unix.SizeofInotifyEvent {
-			var err error
-			if n == 0 {
-				// If EOF is received. This should really never happen.
-				err = io.EOF
-			} else if n < 0 {
-				// If an error occurred while reading.
-				err = errno
-			} else {
-				// Read was too short.
-				err = errors.New("notify: short read in readEvents()")
-			}
-			select {
-			case w.Errors <- err:
-			case <-w.done:
-				return
-			}
-			continue
-		}
-
-		var offset uint32
-		// We don't know how many events we just read into the buffer
-		// While the offset points to at least one whole event...
-		for offset <= uint32(n-unix.SizeofInotifyEvent) {
-			// Point "raw" to the event in the buffer
-			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-
-			mask := uint32(raw.Mask)
-			nameLen := uint32(raw.Len)
-
-			if mask&unix.IN_Q_OVERFLOW != 0 {
-				select {
-				case w.Errors <- ErrEventOverflow:
-				case <-w.done:
-					return
-				}
-			}
-
-			// If the event happened to the watched directory or the watched file, the kernel
-			// doesn't append the filename to the event, but we would like to always fill the
-			// the "Name" field with a valid filename. We retrieve the path of the watch from
-			// the "paths" map.
-			w.mu.Lock()
-			name, ok := w.paths[int(raw.Wd)]
-			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
-			// This is a sign to clean up the maps, otherwise we are no longer in sync
-			// with the inotify kernel state which has already deleted the watch
-			// automatically.
-			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
-				delete(w.paths, int(raw.Wd))
-				delete(w.watches, name)
-			}
-			w.mu.Unlock()
-
-			if nameLen > 0 {
-				// Point "bytes" at the first byte of the filename
-				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
-				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
-				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
-			}
-
-			event := newEvent(name, mask)
-
-			// Send the events that are not ignored on the events channel
-			if !event.ignoreLinux(mask) {
-				select {
-				case w.Events <- event:
-				case <-w.done:
-					return
-				}
-			}
-
-			// Move to the next event in the buffer
-			offset += unix.SizeofInotifyEvent + nameLen
-		}
-	}
-}
-
-// Certain types of events can be "ignored" and not sent over the Events
-// channel. Such as events marked ignore by the kernel, or MODIFY events
-// against files that do not exist.
-func (e *Event) ignoreLinux(mask uint32) bool {
-	// Ignore anything the inotify API says to ignore
-	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
-		return true
-	}
-
-	// If the event is not a DELETE or RENAME, the file must exist.
-	// Otherwise the event is ignored.
-	// *Note*: this was put in place because it was seen that a MODIFY
-	// event was sent after the DELETE. This ignores that MODIFY and
-	// assumes a DELETE will come or has come if the file doesn't exist.
-	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
-		_, statErr := os.Lstat(e.Name)
-		return os.IsNotExist(statErr)
-	}
-	return false
-}
-
-// newEvent returns an platform-independent Event based on an inotify mask.
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
-		e.Op |= Create
-	}
-	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
-		e.Op |= Remove
-	}
-	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
-		e.Op |= Write
-	}
-	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
-		e.Op |= Rename
-	}
-	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}

+ 0 - 187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go

@@ -1,187 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
-	"errors"
-
-	"golang.org/x/sys/unix"
-)
-
-type fdPoller struct {
-	fd   int    // File descriptor (as returned by the inotify_init() syscall)
-	epfd int    // Epoll file descriptor
-	pipe [2]int // Pipe for waking up
-}
-
-func emptyPoller(fd int) *fdPoller {
-	poller := new(fdPoller)
-	poller.fd = fd
-	poller.epfd = -1
-	poller.pipe[0] = -1
-	poller.pipe[1] = -1
-	return poller
-}
-
-// Create a new inotify poller.
-// This creates an inotify handler, and an epoll handler.
-func newFdPoller(fd int) (*fdPoller, error) {
-	var errno error
-	poller := emptyPoller(fd)
-	defer func() {
-		if errno != nil {
-			poller.close()
-		}
-	}()
-	poller.fd = fd
-
-	// Create epoll fd
-	poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
-	if poller.epfd == -1 {
-		return nil, errno
-	}
-	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
-	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
-	if errno != nil {
-		return nil, errno
-	}
-
-	// Register inotify fd with epoll
-	event := unix.EpollEvent{
-		Fd:     int32(poller.fd),
-		Events: unix.EPOLLIN,
-	}
-	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
-	if errno != nil {
-		return nil, errno
-	}
-
-	// Register pipe fd with epoll
-	event = unix.EpollEvent{
-		Fd:     int32(poller.pipe[0]),
-		Events: unix.EPOLLIN,
-	}
-	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
-	if errno != nil {
-		return nil, errno
-	}
-
-	return poller, nil
-}
-
-// Wait using epoll.
-// Returns true if something is ready to be read,
-// false if there is not.
-func (poller *fdPoller) wait() (bool, error) {
-	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
-	// I don't know whether epoll_wait returns the number of events returned,
-	// or the total number of events ready.
-	// I decided to catch both by making the buffer one larger than the maximum.
-	events := make([]unix.EpollEvent, 7)
-	for {
-		n, errno := unix.EpollWait(poller.epfd, events, -1)
-		if n == -1 {
-			if errno == unix.EINTR {
-				continue
-			}
-			return false, errno
-		}
-		if n == 0 {
-			// If there are no events, try again.
-			continue
-		}
-		if n > 6 {
-			// This should never happen. More events were returned than should be possible.
-			return false, errors.New("epoll_wait returned more events than I know what to do with")
-		}
-		ready := events[:n]
-		epollhup := false
-		epollerr := false
-		epollin := false
-		for _, event := range ready {
-			if event.Fd == int32(poller.fd) {
-				if event.Events&unix.EPOLLHUP != 0 {
-					// This should not happen, but if it does, treat it as a wakeup.
-					epollhup = true
-				}
-				if event.Events&unix.EPOLLERR != 0 {
-					// If an error is waiting on the file descriptor, we should pretend
-					// something is ready to read, and let unix.Read pick up the error.
-					epollerr = true
-				}
-				if event.Events&unix.EPOLLIN != 0 {
-					// There is data to read.
-					epollin = true
-				}
-			}
-			if event.Fd == int32(poller.pipe[0]) {
-				if event.Events&unix.EPOLLHUP != 0 {
-					// Write pipe descriptor was closed, by us. This means we're closing down the
-					// watcher, and we should wake up.
-				}
-				if event.Events&unix.EPOLLERR != 0 {
-					// If an error is waiting on the pipe file descriptor.
-					// This is an absolute mystery, and should never ever happen.
-					return false, errors.New("Error on the pipe descriptor.")
-				}
-				if event.Events&unix.EPOLLIN != 0 {
-					// This is a regular wakeup, so we have to clear the buffer.
-					err := poller.clearWake()
-					if err != nil {
-						return false, err
-					}
-				}
-			}
-		}
-
-		if epollhup || epollerr || epollin {
-			return true, nil
-		}
-		return false, nil
-	}
-}
-
-// Close the write end of the poller.
-func (poller *fdPoller) wake() error {
-	buf := make([]byte, 1)
-	n, errno := unix.Write(poller.pipe[1], buf)
-	if n == -1 {
-		if errno == unix.EAGAIN {
-			// Buffer is full, poller will wake.
-			return nil
-		}
-		return errno
-	}
-	return nil
-}
-
-func (poller *fdPoller) clearWake() error {
-	// You have to be woken up a LOT in order to get to 100!
-	buf := make([]byte, 100)
-	n, errno := unix.Read(poller.pipe[0], buf)
-	if n == -1 {
-		if errno == unix.EAGAIN {
-			// Buffer is empty, someone else cleared our wake.
-			return nil
-		}
-		return errno
-	}
-	return nil
-}
-
-// Close all poller file descriptors, but not the one passed to it.
-func (poller *fdPoller) close() {
-	if poller.pipe[1] != -1 {
-		unix.Close(poller.pipe[1])
-	}
-	if poller.pipe[0] != -1 {
-		unix.Close(poller.pipe[0])
-	}
-	if poller.epfd != -1 {
-		unix.Close(poller.epfd)
-	}
-}

+ 0 - 521
vendor/github.com/fsnotify/fsnotify/kqueue.go

@@ -1,521 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly darwin
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"time"
-
-	"golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events chan Event
-	Errors chan error
-	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
-
-	kq int // File descriptor (as returned by the kqueue() syscall).
-
-	mu              sync.Mutex        // Protects access to watcher data
-	watches         map[string]int    // Map of watched file descriptors (key: path).
-	externalWatches map[string]bool   // Map of watches added by user of the library.
-	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
-	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
-	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
-	isClosed        bool              // Set to true when Close() is first called
-}
-
-type pathInfo struct {
-	name  string
-	isDir bool
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	kq, err := kqueue()
-	if err != nil {
-		return nil, err
-	}
-
-	w := &Watcher{
-		kq:              kq,
-		watches:         make(map[string]int),
-		dirFlags:        make(map[string]uint32),
-		paths:           make(map[int]pathInfo),
-		fileExists:      make(map[string]bool),
-		externalWatches: make(map[string]bool),
-		Events:          make(chan Event),
-		Errors:          make(chan error),
-		done:            make(chan struct{}),
-	}
-
-	go w.readEvents()
-	return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	w.mu.Lock()
-	if w.isClosed {
-		w.mu.Unlock()
-		return nil
-	}
-	w.isClosed = true
-
-	// copy paths to remove while locked
-	var pathsToRemove = make([]string, 0, len(w.watches))
-	for name := range w.watches {
-		pathsToRemove = append(pathsToRemove, name)
-	}
-	w.mu.Unlock()
-	// unlock before calling Remove, which also locks
-
-	for _, name := range pathsToRemove {
-		w.Remove(name)
-	}
-
-	// send a "quit" message to the reader goroutine
-	close(w.done)
-
-	return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	w.mu.Lock()
-	w.externalWatches[name] = true
-	w.mu.Unlock()
-	_, err := w.addWatch(name, noteAllEvents)
-	return err
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-	w.mu.Lock()
-	watchfd, ok := w.watches[name]
-	w.mu.Unlock()
-	if !ok {
-		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
-	}
-
-	const registerRemove = unix.EV_DELETE
-	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
-		return err
-	}
-
-	unix.Close(watchfd)
-
-	w.mu.Lock()
-	isDir := w.paths[watchfd].isDir
-	delete(w.watches, name)
-	delete(w.paths, watchfd)
-	delete(w.dirFlags, name)
-	w.mu.Unlock()
-
-	// Find all watched paths that are in this directory that are not external.
-	if isDir {
-		var pathsToRemove []string
-		w.mu.Lock()
-		for _, path := range w.paths {
-			wdir, _ := filepath.Split(path.name)
-			if filepath.Clean(wdir) == name {
-				if !w.externalWatches[path.name] {
-					pathsToRemove = append(pathsToRemove, path.name)
-				}
-			}
-		}
-		w.mu.Unlock()
-		for _, name := range pathsToRemove {
-			// Since these are internal, not much sense in propagating error
-			// to the user, as that will just confuse them with an error about
-			// a path they did not explicitly watch themselves.
-			w.Remove(name)
-		}
-	}
-
-	return nil
-}
-
-// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
-const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
-
-// keventWaitTime to block on each read from kevent
-var keventWaitTime = durationToTimespec(100 * time.Millisecond)
-
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
-	var isDir bool
-	// Make ./name and name equivalent
-	name = filepath.Clean(name)
-
-	w.mu.Lock()
-	if w.isClosed {
-		w.mu.Unlock()
-		return "", errors.New("kevent instance already closed")
-	}
-	watchfd, alreadyWatching := w.watches[name]
-	// We already have a watch, but we can still override flags.
-	if alreadyWatching {
-		isDir = w.paths[watchfd].isDir
-	}
-	w.mu.Unlock()
-
-	if !alreadyWatching {
-		fi, err := os.Lstat(name)
-		if err != nil {
-			return "", err
-		}
-
-		// Don't watch sockets.
-		if fi.Mode()&os.ModeSocket == os.ModeSocket {
-			return "", nil
-		}
-
-		// Don't watch named pipes.
-		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
-			return "", nil
-		}
-
-		// Follow Symlinks
-		// Unfortunately, Linux can add bogus symlinks to watch list without
-		// issue, and Windows can't do symlinks period (AFAIK). To  maintain
-		// consistency, we will act like everything is fine. There will simply
-		// be no file events for broken symlinks.
-		// Hence the returns of nil on errors.
-		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
-			name, err = filepath.EvalSymlinks(name)
-			if err != nil {
-				return "", nil
-			}
-
-			w.mu.Lock()
-			_, alreadyWatching = w.watches[name]
-			w.mu.Unlock()
-
-			if alreadyWatching {
-				return name, nil
-			}
-
-			fi, err = os.Lstat(name)
-			if err != nil {
-				return "", nil
-			}
-		}
-
-		watchfd, err = unix.Open(name, openMode, 0700)
-		if watchfd == -1 {
-			return "", err
-		}
-
-		isDir = fi.IsDir()
-	}
-
-	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
-	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
-		unix.Close(watchfd)
-		return "", err
-	}
-
-	if !alreadyWatching {
-		w.mu.Lock()
-		w.watches[name] = watchfd
-		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
-		w.mu.Unlock()
-	}
-
-	if isDir {
-		// Watch the directory if it has not been watched before,
-		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
-		w.mu.Lock()
-
-		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
-			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
-		// Store flags so this watch can be updated later
-		w.dirFlags[name] = flags
-		w.mu.Unlock()
-
-		if watchDir {
-			if err := w.watchDirectoryFiles(name); err != nil {
-				return "", err
-			}
-		}
-	}
-	return name, nil
-}
-
-// readEvents reads from kqueue and converts the received kevents into
-// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
-	eventBuffer := make([]unix.Kevent_t, 10)
-
-loop:
-	for {
-		// See if there is a message on the "done" channel
-		select {
-		case <-w.done:
-			break loop
-		default:
-		}
-
-		// Get new events
-		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
-		// EINTR is okay, the syscall was interrupted before timeout expired.
-		if err != nil && err != unix.EINTR {
-			select {
-			case w.Errors <- err:
-			case <-w.done:
-				break loop
-			}
-			continue
-		}
-
-		// Flush the events we received to the Events channel
-		for len(kevents) > 0 {
-			kevent := &kevents[0]
-			watchfd := int(kevent.Ident)
-			mask := uint32(kevent.Fflags)
-			w.mu.Lock()
-			path := w.paths[watchfd]
-			w.mu.Unlock()
-			event := newEvent(path.name, mask)
-
-			if path.isDir && !(event.Op&Remove == Remove) {
-				// Double check to make sure the directory exists. This can happen when
-				// we do a rm -fr on a recursively watched folders and we receive a
-				// modification event first but the folder has been deleted and later
-				// receive the delete event
-				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
-					// mark is as delete event
-					event.Op |= Remove
-				}
-			}
-
-			if event.Op&Rename == Rename || event.Op&Remove == Remove {
-				w.Remove(event.Name)
-				w.mu.Lock()
-				delete(w.fileExists, event.Name)
-				w.mu.Unlock()
-			}
-
-			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
-				w.sendDirectoryChangeEvents(event.Name)
-			} else {
-				// Send the event on the Events channel.
-				select {
-				case w.Events <- event:
-				case <-w.done:
-					break loop
-				}
-			}
-
-			if event.Op&Remove == Remove {
-				// Look for a file that may have overwritten this.
-				// For example, mv f1 f2 will delete f2, then create f2.
-				if path.isDir {
-					fileDir := filepath.Clean(event.Name)
-					w.mu.Lock()
-					_, found := w.watches[fileDir]
-					w.mu.Unlock()
-					if found {
-						// make sure the directory exists before we watch for changes. When we
-						// do a recursive watch and perform rm -fr, the parent directory might
-						// have gone missing, ignore the missing directory and let the
-						// upcoming delete event remove the watch from the parent directory.
-						if _, err := os.Lstat(fileDir); err == nil {
-							w.sendDirectoryChangeEvents(fileDir)
-						}
-					}
-				} else {
-					filePath := filepath.Clean(event.Name)
-					if fileInfo, err := os.Lstat(filePath); err == nil {
-						w.sendFileCreatedEventIfNew(filePath, fileInfo)
-					}
-				}
-			}
-
-			// Move to next event
-			kevents = kevents[1:]
-		}
-	}
-
-	// cleanup
-	err := unix.Close(w.kq)
-	if err != nil {
-		// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
-		select {
-		case w.Errors <- err:
-		default:
-		}
-	}
-	close(w.Events)
-	close(w.Errors)
-}
-
-// newEvent returns an platform-independent Event based on kqueue Fflags.
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
-		e.Op |= Remove
-	}
-	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
-		e.Op |= Write
-	}
-	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
-		e.Op |= Rename
-	}
-	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
-
-func newCreateEvent(name string) Event {
-	return Event{Name: name, Op: Create}
-}
-
-// watchDirectoryFiles to mimic inotify when adding a watch on a directory
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
-	// Get all files
-	files, err := ioutil.ReadDir(dirPath)
-	if err != nil {
-		return err
-	}
-
-	for _, fileInfo := range files {
-		filePath := filepath.Join(dirPath, fileInfo.Name())
-		filePath, err = w.internalWatch(filePath, fileInfo)
-		if err != nil {
-			return err
-		}
-
-		w.mu.Lock()
-		w.fileExists[filePath] = true
-		w.mu.Unlock()
-	}
-
-	return nil
-}
-
-// sendDirectoryEvents searches the directory for newly created files
-// and sends them over the event channel. This functionality is to have
-// the BSD version of fsnotify match Linux inotify which provides a
-// create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
-	// Get all files
-	files, err := ioutil.ReadDir(dirPath)
-	if err != nil {
-		select {
-		case w.Errors <- err:
-		case <-w.done:
-			return
-		}
-	}
-
-	// Search for new files
-	for _, fileInfo := range files {
-		filePath := filepath.Join(dirPath, fileInfo.Name())
-		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
-
-		if err != nil {
-			return
-		}
-	}
-}
-
-// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
-	w.mu.Lock()
-	_, doesExist := w.fileExists[filePath]
-	w.mu.Unlock()
-	if !doesExist {
-		// Send create event
-		select {
-		case w.Events <- newCreateEvent(filePath):
-		case <-w.done:
-			return
-		}
-	}
-
-	// like watchDirectoryFiles (but without doing another ReadDir)
-	filePath, err = w.internalWatch(filePath, fileInfo)
-	if err != nil {
-		return err
-	}
-
-	w.mu.Lock()
-	w.fileExists[filePath] = true
-	w.mu.Unlock()
-
-	return nil
-}
-
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
-	if fileInfo.IsDir() {
-		// mimic Linux providing delete events for subdirectories
-		// but preserve the flags used if currently watching subdirectory
-		w.mu.Lock()
-		flags := w.dirFlags[name]
-		w.mu.Unlock()
-
-		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
-		return w.addWatch(name, flags)
-	}
-
-	// watch file to mimic Linux inotify
-	return w.addWatch(name, noteAllEvents)
-}
-
-// kqueue creates a new kernel event queue and returns a descriptor.
-func kqueue() (kq int, err error) {
-	kq, err = unix.Kqueue()
-	if kq == -1 {
-		return kq, err
-	}
-	return kq, nil
-}
-
-// register events with the queue
-func register(kq int, fds []int, flags int, fflags uint32) error {
-	changes := make([]unix.Kevent_t, len(fds))
-
-	for i, fd := range fds {
-		// SetKevent converts int to the platform-specific types:
-		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
-		changes[i].Fflags = fflags
-	}
-
-	// register the events
-	success, err := unix.Kevent(kq, changes, nil, nil)
-	if success == -1 {
-		return err
-	}
-	return nil
-}
-
-// read retrieves pending events, or waits until an event occurs.
-// A timeout of nil blocks indefinitely, while 0 polls the queue.
-func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
-	n, err := unix.Kevent(kq, nil, events, timeout)
-	if err != nil {
-		return nil, err
-	}
-	return events[0:n], nil
-}
-
-// durationToTimespec prepares a timeout value
-func durationToTimespec(d time.Duration) unix.Timespec {
-	return unix.NsecToTimespec(d.Nanoseconds())
-}

+ 0 - 11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go

@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC

+ 0 - 12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go

@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-// note: this constant is not defined on BSD
-const openMode = unix.O_EVTONLY | unix.O_CLOEXEC

+ 0 - 561
vendor/github.com/fsnotify/fsnotify/windows.go

@@ -1,561 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sync"
-	"syscall"
-	"unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events   chan Event
-	Errors   chan error
-	isClosed bool           // Set to true when Close() is first called
-	mu       sync.Mutex     // Map access
-	port     syscall.Handle // Handle to completion port
-	watches  watchMap       // Map of watches (key: i-number)
-	input    chan *input    // Inputs to the reader are sent on this channel
-	quit     chan chan<- error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
-	if e != nil {
-		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
-	}
-	w := &Watcher{
-		port:    port,
-		watches: make(watchMap),
-		input:   make(chan *input, 1),
-		Events:  make(chan Event, 50),
-		Errors:  make(chan error),
-		quit:    make(chan chan<- error, 1),
-	}
-	go w.readEvents()
-	return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	if w.isClosed {
-		return nil
-	}
-	w.isClosed = true
-
-	// Send "quit" message to the reader goroutine
-	ch := make(chan error)
-	w.quit <- ch
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-ch
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	if w.isClosed {
-		return errors.New("watcher already closed")
-	}
-	in := &input{
-		op:    opAddWatch,
-		path:  filepath.Clean(name),
-		flags: sysFSALLEVENTS,
-		reply: make(chan error),
-	}
-	w.input <- in
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-in.reply
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	in := &input{
-		op:    opRemoveWatch,
-		path:  filepath.Clean(name),
-		reply: make(chan error),
-	}
-	w.input <- in
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-in.reply
-}
-
-const (
-	// Options for AddWatch
-	sysFSONESHOT = 0x80000000
-	sysFSONLYDIR = 0x1000000
-
-	// Events
-	sysFSACCESS     = 0x1
-	sysFSALLEVENTS  = 0xfff
-	sysFSATTRIB     = 0x4
-	sysFSCLOSE      = 0x18
-	sysFSCREATE     = 0x100
-	sysFSDELETE     = 0x200
-	sysFSDELETESELF = 0x400
-	sysFSMODIFY     = 0x2
-	sysFSMOVE       = 0xc0
-	sysFSMOVEDFROM  = 0x40
-	sysFSMOVEDTO    = 0x80
-	sysFSMOVESELF   = 0x800
-
-	// Special events
-	sysFSIGNORED   = 0x8000
-	sysFSQOVERFLOW = 0x4000
-)
-
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
-		e.Op |= Create
-	}
-	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
-		e.Op |= Remove
-	}
-	if mask&sysFSMODIFY == sysFSMODIFY {
-		e.Op |= Write
-	}
-	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
-		e.Op |= Rename
-	}
-	if mask&sysFSATTRIB == sysFSATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
-
-const (
-	opAddWatch = iota
-	opRemoveWatch
-)
-
-const (
-	provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
-	op    int
-	path  string
-	flags uint32
-	reply chan error
-}
-
-type inode struct {
-	handle syscall.Handle
-	volume uint32
-	index  uint64
-}
-
-type watch struct {
-	ov     syscall.Overlapped
-	ino    *inode            // i-number
-	path   string            // Directory path
-	mask   uint64            // Directory itself is being watched with these notify flags
-	names  map[string]uint64 // Map of names being watched and their notify flags
-	rename string            // Remembers the old name while renaming a file
-	buf    [4096]byte
-}
-
-type indexMap map[uint64]*watch
-type watchMap map[uint32]indexMap
-
-func (w *Watcher) wakeupReader() error {
-	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
-	if e != nil {
-		return os.NewSyscallError("PostQueuedCompletionStatus", e)
-	}
-	return nil
-}
-
-func getDir(pathname string) (dir string, err error) {
-	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
-	if e != nil {
-		return "", os.NewSyscallError("GetFileAttributes", e)
-	}
-	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
-		dir = pathname
-	} else {
-		dir, _ = filepath.Split(pathname)
-		dir = filepath.Clean(dir)
-	}
-	return
-}
-
-func getIno(path string) (ino *inode, err error) {
-	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
-		syscall.FILE_LIST_DIRECTORY,
-		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		nil, syscall.OPEN_EXISTING,
-		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
-	if e != nil {
-		return nil, os.NewSyscallError("CreateFile", e)
-	}
-	var fi syscall.ByHandleFileInformation
-	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
-		syscall.CloseHandle(h)
-		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
-	}
-	ino = &inode{
-		handle: h,
-		volume: fi.VolumeSerialNumber,
-		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
-	}
-	return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
-	if i := m[ino.volume]; i != nil {
-		return i[ino.index]
-	}
-	return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
-	i := m[ino.volume]
-	if i == nil {
-		i = make(indexMap)
-		m[ino.volume] = i
-	}
-	i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
-	dir, err := getDir(pathname)
-	if err != nil {
-		return err
-	}
-	if flags&sysFSONLYDIR != 0 && pathname != dir {
-		return nil
-	}
-	ino, err := getIno(dir)
-	if err != nil {
-		return err
-	}
-	w.mu.Lock()
-	watchEntry := w.watches.get(ino)
-	w.mu.Unlock()
-	if watchEntry == nil {
-		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
-			syscall.CloseHandle(ino.handle)
-			return os.NewSyscallError("CreateIoCompletionPort", e)
-		}
-		watchEntry = &watch{
-			ino:   ino,
-			path:  dir,
-			names: make(map[string]uint64),
-		}
-		w.mu.Lock()
-		w.watches.set(ino, watchEntry)
-		w.mu.Unlock()
-		flags |= provisional
-	} else {
-		syscall.CloseHandle(ino.handle)
-	}
-	if pathname == dir {
-		watchEntry.mask |= flags
-	} else {
-		watchEntry.names[filepath.Base(pathname)] |= flags
-	}
-	if err = w.startRead(watchEntry); err != nil {
-		return err
-	}
-	if pathname == dir {
-		watchEntry.mask &= ^provisional
-	} else {
-		watchEntry.names[filepath.Base(pathname)] &= ^provisional
-	}
-	return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
-	dir, err := getDir(pathname)
-	if err != nil {
-		return err
-	}
-	ino, err := getIno(dir)
-	if err != nil {
-		return err
-	}
-	w.mu.Lock()
-	watch := w.watches.get(ino)
-	w.mu.Unlock()
-	if watch == nil {
-		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
-	}
-	if pathname == dir {
-		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
-		watch.mask = 0
-	} else {
-		name := filepath.Base(pathname)
-		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
-		delete(watch.names, name)
-	}
-	return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
-	for name, mask := range watch.names {
-		if mask&provisional == 0 {
-			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
-		}
-		delete(watch.names, name)
-	}
-	if watch.mask != 0 {
-		if watch.mask&provisional == 0 {
-			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
-		}
-		watch.mask = 0
-	}
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
-	if e := syscall.CancelIo(watch.ino.handle); e != nil {
-		w.Errors <- os.NewSyscallError("CancelIo", e)
-		w.deleteWatch(watch)
-	}
-	mask := toWindowsFlags(watch.mask)
-	for _, m := range watch.names {
-		mask |= toWindowsFlags(m)
-	}
-	if mask == 0 {
-		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
-			w.Errors <- os.NewSyscallError("CloseHandle", e)
-		}
-		w.mu.Lock()
-		delete(w.watches[watch.ino.volume], watch.ino.index)
-		w.mu.Unlock()
-		return nil
-	}
-	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
-		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
-	if e != nil {
-		err := os.NewSyscallError("ReadDirectoryChanges", e)
-		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
-			// Watched directory was probably removed
-			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
-				if watch.mask&sysFSONESHOT != 0 {
-					watch.mask = 0
-				}
-			}
-			err = nil
-		}
-		w.deleteWatch(watch)
-		w.startRead(watch)
-		return err
-	}
-	return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
-	var (
-		n, key uint32
-		ov     *syscall.Overlapped
-	)
-	runtime.LockOSThread()
-
-	for {
-		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
-		watch := (*watch)(unsafe.Pointer(ov))
-
-		if watch == nil {
-			select {
-			case ch := <-w.quit:
-				w.mu.Lock()
-				var indexes []indexMap
-				for _, index := range w.watches {
-					indexes = append(indexes, index)
-				}
-				w.mu.Unlock()
-				for _, index := range indexes {
-					for _, watch := range index {
-						w.deleteWatch(watch)
-						w.startRead(watch)
-					}
-				}
-				var err error
-				if e := syscall.CloseHandle(w.port); e != nil {
-					err = os.NewSyscallError("CloseHandle", e)
-				}
-				close(w.Events)
-				close(w.Errors)
-				ch <- err
-				return
-			case in := <-w.input:
-				switch in.op {
-				case opAddWatch:
-					in.reply <- w.addWatch(in.path, uint64(in.flags))
-				case opRemoveWatch:
-					in.reply <- w.remWatch(in.path)
-				}
-			default:
-			}
-			continue
-		}
-
-		switch e {
-		case syscall.ERROR_MORE_DATA:
-			if watch == nil {
-				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
-			} else {
-				// The i/o succeeded but the buffer is full.
-				// In theory we should be building up a full packet.
-				// In practice we can get away with just carrying on.
-				n = uint32(unsafe.Sizeof(watch.buf))
-			}
-		case syscall.ERROR_ACCESS_DENIED:
-			// Watched directory was probably removed
-			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
-			w.deleteWatch(watch)
-			w.startRead(watch)
-			continue
-		case syscall.ERROR_OPERATION_ABORTED:
-			// CancelIo was called on this handle
-			continue
-		default:
-			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
-			continue
-		case nil:
-		}
-
-		var offset uint32
-		for {
-			if n == 0 {
-				w.Events <- newEvent("", sysFSQOVERFLOW)
-				w.Errors <- errors.New("short read in readEvents()")
-				break
-			}
-
-			// Point "raw" to the event in the buffer
-			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
-			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
-			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
-			fullname := filepath.Join(watch.path, name)
-
-			var mask uint64
-			switch raw.Action {
-			case syscall.FILE_ACTION_REMOVED:
-				mask = sysFSDELETESELF
-			case syscall.FILE_ACTION_MODIFIED:
-				mask = sysFSMODIFY
-			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
-				watch.rename = name
-			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
-				if watch.names[watch.rename] != 0 {
-					watch.names[name] |= watch.names[watch.rename]
-					delete(watch.names, watch.rename)
-					mask = sysFSMOVESELF
-				}
-			}
-
-			sendNameEvent := func() {
-				if w.sendEvent(fullname, watch.names[name]&mask) {
-					if watch.names[name]&sysFSONESHOT != 0 {
-						delete(watch.names, name)
-					}
-				}
-			}
-			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
-				sendNameEvent()
-			}
-			if raw.Action == syscall.FILE_ACTION_REMOVED {
-				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
-				delete(watch.names, name)
-			}
-			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
-				if watch.mask&sysFSONESHOT != 0 {
-					watch.mask = 0
-				}
-			}
-			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
-				fullname = filepath.Join(watch.path, watch.rename)
-				sendNameEvent()
-			}
-
-			// Move to the next event in the buffer
-			if raw.NextEntryOffset == 0 {
-				break
-			}
-			offset += raw.NextEntryOffset
-
-			// Error!
-			if offset >= n {
-				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
-				break
-			}
-		}
-
-		if err := w.startRead(watch); err != nil {
-			w.Errors <- err
-		}
-	}
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
-	if mask == 0 {
-		return false
-	}
-	event := newEvent(name, uint32(mask))
-	select {
-	case ch := <-w.quit:
-		w.quit <- ch
-	case w.Events <- event:
-	}
-	return true
-}
-
-func toWindowsFlags(mask uint64) uint32 {
-	var m uint32
-	if mask&sysFSACCESS != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
-	}
-	if mask&sysFSMODIFY != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
-	}
-	if mask&sysFSATTRIB != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
-	}
-	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
-	}
-	return m
-}
-
-func toFSnotifyFlags(action uint32) uint64 {
-	switch action {
-	case syscall.FILE_ACTION_ADDED:
-		return sysFSCREATE
-	case syscall.FILE_ACTION_REMOVED:
-		return sysFSDELETE
-	case syscall.FILE_ACTION_MODIFIED:
-		return sysFSMODIFY
-	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
-		return sysFSMOVEDFROM
-	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
-		return sysFSMOVEDTO
-	}
-	return 0
-}

+ 0 - 202
vendor/github.com/golang/geo/LICENSE

@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

+ 0 - 20
vendor/github.com/golang/geo/r1/doc.go

@@ -1,20 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package r1 implements types and functions for working with geometry in ℝ¹.
-
-See ../s2 for a more detailed overview.
-*/
-package r1

+ 0 - 177
vendor/github.com/golang/geo/r1/interval.go

@@ -1,177 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package r1
-
-import (
-	"fmt"
-	"math"
-)
-
-// Interval represents a closed interval on ℝ.
-// Zero-length intervals (where Lo == Hi) represent single points.
-// If Lo > Hi then the interval is empty.
-type Interval struct {
-	Lo, Hi float64
-}
-
-// EmptyInterval returns an empty interval.
-func EmptyInterval() Interval { return Interval{1, 0} }
-
-// IntervalFromPoint returns an interval representing a single point.
-func IntervalFromPoint(p float64) Interval { return Interval{p, p} }
-
-// IsEmpty reports whether the interval is empty.
-func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }
-
-// Equal returns true iff the interval contains the same points as oi.
-func (i Interval) Equal(oi Interval) bool {
-	return i == oi || i.IsEmpty() && oi.IsEmpty()
-}
-
-// Center returns the midpoint of the interval.
-// It is undefined for empty intervals.
-func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }
-
-// Length returns the length of the interval.
-// The length of an empty interval is negative.
-func (i Interval) Length() float64 { return i.Hi - i.Lo }
-
-// Contains returns true iff the interval contains p.
-func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }
-
-// ContainsInterval returns true iff the interval contains oi.
-func (i Interval) ContainsInterval(oi Interval) bool {
-	if oi.IsEmpty() {
-		return true
-	}
-	return i.Lo <= oi.Lo && oi.Hi <= i.Hi
-}
-
-// InteriorContains returns true iff the interval strictly contains p.
-func (i Interval) InteriorContains(p float64) bool {
-	return i.Lo < p && p < i.Hi
-}
-
-// InteriorContainsInterval returns true iff the interval strictly contains oi.
-func (i Interval) InteriorContainsInterval(oi Interval) bool {
-	if oi.IsEmpty() {
-		return true
-	}
-	return i.Lo < oi.Lo && oi.Hi < i.Hi
-}
-
-// Intersects returns true iff the interval contains any points in common with oi.
-func (i Interval) Intersects(oi Interval) bool {
-	if i.Lo <= oi.Lo {
-		return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty
-	}
-	return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty
-}
-
-// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
-func (i Interval) InteriorIntersects(oi Interval) bool {
-	return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi
-}
-
-// Intersection returns the interval containing all points common to i and j.
-func (i Interval) Intersection(j Interval) Interval {
-	// Empty intervals do not need to be special-cased.
-	return Interval{
-		Lo: math.Max(i.Lo, j.Lo),
-		Hi: math.Min(i.Hi, j.Hi),
-	}
-}
-
-// AddPoint returns the interval expanded so that it contains the given point.
-func (i Interval) AddPoint(p float64) Interval {
-	if i.IsEmpty() {
-		return Interval{p, p}
-	}
-	if p < i.Lo {
-		return Interval{p, i.Hi}
-	}
-	if p > i.Hi {
-		return Interval{i.Lo, p}
-	}
-	return i
-}
-
-// ClampPoint returns the closest point in the interval to the given point "p".
-// The interval must be non-empty.
-func (i Interval) ClampPoint(p float64) float64 {
-	return math.Max(i.Lo, math.Min(i.Hi, p))
-}
-
-// Expanded returns an interval that has been expanded on each side by margin.
-// If margin is negative, then the function shrinks the interval on
-// each side by margin instead. The resulting interval may be empty. Any
-// expansion of an empty interval remains empty.
-func (i Interval) Expanded(margin float64) Interval {
-	if i.IsEmpty() {
-		return i
-	}
-	return Interval{i.Lo - margin, i.Hi + margin}
-}
-
-// Union returns the smallest interval that contains this interval and the given interval.
-func (i Interval) Union(other Interval) Interval {
-	if i.IsEmpty() {
-		return other
-	}
-	if other.IsEmpty() {
-		return i
-	}
-	return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)}
-}
-
-func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
-
-const (
-	// epsilon is a small number that represents a reasonable level of noise between two
-	// values that can be considered to be equal.
-	epsilon = 1e-15
-	// dblEpsilon is a smaller number for values that require more precision.
-	// This is the C++ DBL_EPSILON equivalent.
-	dblEpsilon = 2.220446049250313e-16
-)
-
-// ApproxEqual reports whether the interval can be transformed into the
-// given interval by moving each endpoint a small distance.
-// The empty interval is considered to be positioned arbitrarily on the
-// real line, so any interval with a small enough length will match
-// the empty interval.
-func (i Interval) ApproxEqual(other Interval) bool {
-	if i.IsEmpty() {
-		return other.Length() <= 2*epsilon
-	}
-	if other.IsEmpty() {
-		return i.Length() <= 2*epsilon
-	}
-	return math.Abs(other.Lo-i.Lo) <= epsilon &&
-		math.Abs(other.Hi-i.Hi) <= epsilon
-}
-
-// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two
-// intervals x and y, this distance is defined as
-//     h(x, y) = max_{p in x} min_{q in y} d(p, q).
-func (i Interval) DirectedHausdorffDistance(other Interval) float64 {
-	if i.IsEmpty() {
-		return 0
-	}
-	if other.IsEmpty() {
-		return math.Inf(1)
-	}
-	return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo))
-}

+ 0 - 20
vendor/github.com/golang/geo/r2/doc.go

@@ -1,20 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package r2 implements types and functions for working with geometry in ℝ².
-
-See package s2 for a more detailed overview.
-*/
-package r2

+ 0 - 255
vendor/github.com/golang/geo/r2/rect.go

@@ -1,255 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package r2
-
-import (
-	"fmt"
-	"math"
-
-	"github.com/golang/geo/r1"
-)
-
-// Point represents a point in ℝ².
-type Point struct {
-	X, Y float64
-}
-
-// Add returns the sum of p and op.
-func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} }
-
-// Sub returns the difference of p and op.
-func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} }
-
-// Mul returns the scalar product of p and m.
-func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} }
-
-// Ortho returns a counterclockwise orthogonal point with the same norm.
-func (p Point) Ortho() Point { return Point{-p.Y, p.X} }
-
-// Dot returns the dot product between p and op.
-func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y }
-
-// Cross returns the cross product of p and op.
-func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X }
-
-// Norm returns the vector's norm.
-func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) }
-
-// Normalize returns a unit point in the same direction as p.
-func (p Point) Normalize() Point {
-	if p.X == 0 && p.Y == 0 {
-		return p
-	}
-	return p.Mul(1 / p.Norm())
-}
-
-func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) }
-
-// Rect represents a closed axis-aligned rectangle in the (x,y) plane.
-type Rect struct {
-	X, Y r1.Interval
-}
-
-// RectFromPoints constructs a rect that contains the given points.
-func RectFromPoints(pts ...Point) Rect {
-	// Because the default value on interval is 0,0, we need to manually
-	// define the interval from the first point passed in as our starting
-	// interval, otherwise we end up with the case of passing in
-	// Point{0.2, 0.3} and getting the starting Rect of {0, 0.2}, {0, 0.3}
-	// instead of the Rect {0.2, 0.2}, {0.3, 0.3} which is not correct.
-	if len(pts) == 0 {
-		return Rect{}
-	}
-
-	r := Rect{
-		X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X},
-		Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y},
-	}
-
-	for _, p := range pts[1:] {
-		r = r.AddPoint(p)
-	}
-	return r
-}
-
-// RectFromCenterSize constructs a rectangle with the given center and size.
-// Both dimensions of size must be non-negative.
-func RectFromCenterSize(center, size Point) Rect {
-	return Rect{
-		r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2},
-		r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2},
-	}
-}
-
-// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test
-// for empty rectangles, since they have more than one representation. A Rect{}
-// is not the same as the EmptyRect.
-func EmptyRect() Rect {
-	return Rect{r1.EmptyInterval(), r1.EmptyInterval()}
-}
-
-// IsValid reports whether the rectangle is valid.
-// This requires the width to be empty iff the height is empty.
-func (r Rect) IsValid() bool {
-	return r.X.IsEmpty() == r.Y.IsEmpty()
-}
-
-// IsEmpty reports whether the rectangle is empty.
-func (r Rect) IsEmpty() bool {
-	return r.X.IsEmpty()
-}
-
-// Vertices returns all four vertices of the rectangle. Vertices are returned in
-// CCW direction starting with the lower left corner.
-func (r Rect) Vertices() [4]Point {
-	return [4]Point{
-		{r.X.Lo, r.Y.Lo},
-		{r.X.Hi, r.Y.Lo},
-		{r.X.Hi, r.Y.Hi},
-		{r.X.Lo, r.Y.Hi},
-	}
-}
-
-// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and
-// direction j along the Y-axis (0=down, 1=up).
-func (r Rect) VertexIJ(i, j int) Point {
-	x := r.X.Lo
-	if i == 1 {
-		x = r.X.Hi
-	}
-	y := r.Y.Lo
-	if j == 1 {
-		y = r.Y.Hi
-	}
-	return Point{x, y}
-}
-
-// Lo returns the low corner of the rect.
-func (r Rect) Lo() Point {
-	return Point{r.X.Lo, r.Y.Lo}
-}
-
-// Hi returns the high corner of the rect.
-func (r Rect) Hi() Point {
-	return Point{r.X.Hi, r.Y.Hi}
-}
-
-// Center returns the center of the rectangle in (x,y)-space
-func (r Rect) Center() Point {
-	return Point{r.X.Center(), r.Y.Center()}
-}
-
-// Size returns the width and height of this rectangle in (x,y)-space. Empty
-// rectangles have a negative width and height.
-func (r Rect) Size() Point {
-	return Point{r.X.Length(), r.Y.Length()}
-}
-
-// ContainsPoint reports whether the rectangle contains the given point.
-// Rectangles are closed regions, i.e. they contain their boundary.
-func (r Rect) ContainsPoint(p Point) bool {
-	return r.X.Contains(p.X) && r.Y.Contains(p.Y)
-}
-
-// InteriorContainsPoint returns true iff the given point is contained in the interior
-// of the region (i.e. the region excluding its boundary).
-func (r Rect) InteriorContainsPoint(p Point) bool {
-	return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y)
-}
-
-// Contains reports whether the rectangle contains the given rectangle.
-func (r Rect) Contains(other Rect) bool {
-	return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y)
-}
-
-// InteriorContains reports whether the interior of this rectangle contains all of the
-// points of the given other rectangle (including its boundary).
-func (r Rect) InteriorContains(other Rect) bool {
-	return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y)
-}
-
-// Intersects reports whether this rectangle and the other rectangle have any points in common.
-func (r Rect) Intersects(other Rect) bool {
-	return r.X.Intersects(other.X) && r.Y.Intersects(other.Y)
-}
-
-// InteriorIntersects reports whether the interior of this rectangle intersects
-// any point (including the boundary) of the given other rectangle.
-func (r Rect) InteriorIntersects(other Rect) bool {
-	return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y)
-}
-
-// AddPoint expands the rectangle to include the given point. The rectangle is
-// expanded by the minimum amount possible.
-func (r Rect) AddPoint(p Point) Rect {
-	return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)}
-}
-
-// AddRect expands the rectangle to include the given rectangle. This is the
-// same as replacing the rectangle by the union of the two rectangles, but
-// is more efficient.
-func (r Rect) AddRect(other Rect) Rect {
-	return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
-}
-
-// ClampPoint returns the closest point in the rectangle to the given point.
-// The rectangle must be non-empty.
-func (r Rect) ClampPoint(p Point) Point {
-	return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)}
-}
-
-// Expanded returns a rectangle that has been expanded in the x-direction
-// by margin.X, and in y-direction by margin.Y. If either margin is empty,
-// then shrink the interval on the corresponding sides instead. The resulting
-// rectangle may be empty. Any expansion of an empty rectangle remains empty.
-func (r Rect) Expanded(margin Point) Rect {
-	xx := r.X.Expanded(margin.X)
-	yy := r.Y.Expanded(margin.Y)
-	if xx.IsEmpty() || yy.IsEmpty() {
-		return EmptyRect()
-	}
-	return Rect{xx, yy}
-}
-
-// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides.
-func (r Rect) ExpandedByMargin(margin float64) Rect {
-	return r.Expanded(Point{margin, margin})
-}
-
-// Union returns the smallest rectangle containing the union of this rectangle and
-// the given rectangle.
-func (r Rect) Union(other Rect) Rect {
-	return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
-}
-
-// Intersection returns the smallest rectangle containing the intersection of this
-// rectangle and the given rectangle.
-func (r Rect) Intersection(other Rect) Rect {
-	xx := r.X.Intersection(other.X)
-	yy := r.Y.Intersection(other.Y)
-	if xx.IsEmpty() || yy.IsEmpty() {
-		return EmptyRect()
-	}
-
-	return Rect{xx, yy}
-}
-
-// ApproxEqual returns true if the x- and y-intervals of the two rectangles are
-// the same up to the given tolerance.
-func (r Rect) ApproxEqual(r2 Rect) bool {
-	return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
-}
-
-func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) }

+ 0 - 20
vendor/github.com/golang/geo/r3/doc.go

@@ -1,20 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package r3 implements types and functions for working with geometry in ℝ³.
-
-See ../s2 for a more detailed overview.
-*/
-package r3

+ 0 - 198
vendor/github.com/golang/geo/r3/precisevector.go

@@ -1,198 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package r3
-
-import (
-	"fmt"
-	"math/big"
-)
-
-const (
-	// prec is the number of bits of precision to use for the Float values.
-	// To keep things simple, we use the maximum allowable precision on big
-	// values. This allows us to handle all values we expect in the s2 library.
-	prec = big.MaxPrec
-)
-
-// define some commonly referenced values.
-var (
-	precise0 = precInt(0)
-	precise1 = precInt(1)
-)
-
-// precStr wraps the conversion from a string into a big.Float. For results that
-// actually can be represented exactly, this should only be used on values that
-// are integer multiples of integer powers of 2.
-func precStr(s string) *big.Float {
-	// Explicitly ignoring the bool return for this usage.
-	f, _ := new(big.Float).SetPrec(prec).SetString(s)
-	return f
-}
-
-func precInt(i int64) *big.Float {
-	return new(big.Float).SetPrec(prec).SetInt64(i)
-}
-
-func precFloat(f float64) *big.Float {
-	return new(big.Float).SetPrec(prec).SetFloat64(f)
-}
-
-func precAdd(a, b *big.Float) *big.Float {
-	return new(big.Float).SetPrec(prec).Add(a, b)
-}
-
-func precSub(a, b *big.Float) *big.Float {
-	return new(big.Float).SetPrec(prec).Sub(a, b)
-}
-
-func precMul(a, b *big.Float) *big.Float {
-	return new(big.Float).SetPrec(prec).Mul(a, b)
-}
-
-// PreciseVector represents a point in ℝ³ using high-precision values.
-// Note that this is NOT a complete implementation because there are some
-// operations that Vector supports that are not feasible with arbitrary precision
-// math. (e.g., methods that need division like Normalize, or methods needing a
-// square root operation such as Norm)
-type PreciseVector struct {
-	X, Y, Z *big.Float
-}
-
-// PreciseVectorFromVector creates a high precision vector from the given Vector.
-func PreciseVectorFromVector(v Vector) PreciseVector {
-	return NewPreciseVector(v.X, v.Y, v.Z)
-}
-
-// NewPreciseVector creates a high precision vector from the given floating point values.
-func NewPreciseVector(x, y, z float64) PreciseVector {
-	return PreciseVector{
-		X: precFloat(x),
-		Y: precFloat(y),
-		Z: precFloat(z),
-	}
-}
-
-// Vector returns this precise vector converted to a Vector.
-func (v PreciseVector) Vector() Vector {
-	// The accuracy flag is ignored on these conversions back to float64.
-	x, _ := v.X.Float64()
-	y, _ := v.Y.Float64()
-	z, _ := v.Z.Float64()
-	return Vector{x, y, z}.Normalize()
-}
-
-// Equal reports whether v and ov are equal.
-func (v PreciseVector) Equal(ov PreciseVector) bool {
-	return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
-}
-
-func (v PreciseVector) String() string {
-	return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z)
-}
-
-// Norm2 returns the square of the norm.
-func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) }
-
-// IsUnit reports whether this vector is of unit length.
-func (v PreciseVector) IsUnit() bool {
-	return v.Norm2().Cmp(precise1) == 0
-}
-
-// Abs returns the vector with nonnegative components.
-func (v PreciseVector) Abs() PreciseVector {
-	return PreciseVector{
-		X: new(big.Float).Abs(v.X),
-		Y: new(big.Float).Abs(v.Y),
-		Z: new(big.Float).Abs(v.Z),
-	}
-}
-
-// Add returns the standard vector sum of v and ov.
-func (v PreciseVector) Add(ov PreciseVector) PreciseVector {
-	return PreciseVector{
-		X: precAdd(v.X, ov.X),
-		Y: precAdd(v.Y, ov.Y),
-		Z: precAdd(v.Z, ov.Z),
-	}
-}
-
-// Sub returns the standard vector difference of v and ov.
-func (v PreciseVector) Sub(ov PreciseVector) PreciseVector {
-	return PreciseVector{
-		X: precSub(v.X, ov.X),
-		Y: precSub(v.Y, ov.Y),
-		Z: precSub(v.Z, ov.Z),
-	}
-}
-
-// Mul returns the standard scalar product of v and f.
-func (v PreciseVector) Mul(f *big.Float) PreciseVector {
-	return PreciseVector{
-		X: precMul(v.X, f),
-		Y: precMul(v.Y, f),
-		Z: precMul(v.Z, f),
-	}
-}
-
-// MulByFloat64 returns the standard scalar product of v and f.
-func (v PreciseVector) MulByFloat64(f float64) PreciseVector {
-	return v.Mul(precFloat(f))
-}
-
-// Dot returns the standard dot product of v and ov.
-func (v PreciseVector) Dot(ov PreciseVector) *big.Float {
-	return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z)))
-}
-
-// Cross returns the standard cross product of v and ov.
-func (v PreciseVector) Cross(ov PreciseVector) PreciseVector {
-	return PreciseVector{
-		X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)),
-		Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)),
-		Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)),
-	}
-}
-
-// LargestComponent returns the axis that represents the largest component in this vector.
-func (v PreciseVector) LargestComponent() Axis {
-	t := v.Abs()
-
-	if t.X.Cmp(t.Y) > 0 {
-		if t.X.Cmp(t.Z) > 0 {
-			return XAxis
-		}
-		return ZAxis
-	}
-	if t.Y.Cmp(t.Z) > 0 {
-		return YAxis
-	}
-	return ZAxis
-}
-
-// SmallestComponent returns the axis that represents the smallest component in this vector.
-func (v PreciseVector) SmallestComponent() Axis {
-	t := v.Abs()
-
-	if t.X.Cmp(t.Y) < 0 {
-		if t.X.Cmp(t.Z) < 0 {
-			return XAxis
-		}
-		return ZAxis
-	}
-	if t.Y.Cmp(t.Z) < 0 {
-		return YAxis
-	}
-	return ZAxis
-}

+ 0 - 183
vendor/github.com/golang/geo/r3/vector.go

@@ -1,183 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package r3
-
-import (
-	"fmt"
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// Vector represents a point in ℝ³.
-type Vector struct {
-	X, Y, Z float64
-}
-
-// ApproxEqual reports whether v and ov are equal within a small epsilon.
-func (v Vector) ApproxEqual(ov Vector) bool {
-	const epsilon = 1e-16
-	return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon
-}
-
-func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) }
-
-// Norm returns the vector's norm.
-func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) }
-
-// Norm2 returns the square of the norm.
-func (v Vector) Norm2() float64 { return v.Dot(v) }
-
-// Normalize returns a unit vector in the same direction as v.
-func (v Vector) Normalize() Vector {
-	n2 := v.Norm2()
-	if n2 == 0 {
-		return Vector{0, 0, 0}
-	}
-	return v.Mul(1 / math.Sqrt(n2))
-}
-
-// IsUnit returns whether this vector is of approximately unit length.
-func (v Vector) IsUnit() bool {
-	const epsilon = 5e-14
-	return math.Abs(v.Norm2()-1) <= epsilon
-}
-
-// Abs returns the vector with nonnegative components.
-func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} }
-
-// Add returns the standard vector sum of v and ov.
-func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} }
-
-// Sub returns the standard vector difference of v and ov.
-func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} }
-
-// Mul returns the standard scalar product of v and m.
-func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} }
-
-// Dot returns the standard dot product of v and ov.
-func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
-
-// Cross returns the standard cross product of v and ov.
-func (v Vector) Cross(ov Vector) Vector {
-	return Vector{
-		v.Y*ov.Z - v.Z*ov.Y,
-		v.Z*ov.X - v.X*ov.Z,
-		v.X*ov.Y - v.Y*ov.X,
-	}
-}
-
-// Distance returns the Euclidean distance between v and ov.
-func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() }
-
-// Angle returns the angle between v and ov.
-func (v Vector) Angle(ov Vector) s1.Angle {
-	return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian
-}
-
-// Axis enumerates the 3 axes of ℝ³.
-type Axis int
-
-// The three axes of ℝ³.
-const (
-	XAxis Axis = iota
-	YAxis
-	ZAxis
-)
-
-// Ortho returns a unit vector that is orthogonal to v.
-// Ortho(-v) = -Ortho(v) for all v.
-func (v Vector) Ortho() Vector {
-	ov := Vector{0.012, 0.0053, 0.00457}
-	switch v.LargestComponent() {
-	case XAxis:
-		ov.Z = 1
-	case YAxis:
-		ov.X = 1
-	default:
-		ov.Y = 1
-	}
-	return v.Cross(ov).Normalize()
-}
-
-// LargestComponent returns the axis that represents the largest component in this vector.
-func (v Vector) LargestComponent() Axis {
-	t := v.Abs()
-
-	if t.X > t.Y {
-		if t.X > t.Z {
-			return XAxis
-		}
-		return ZAxis
-	}
-	if t.Y > t.Z {
-		return YAxis
-	}
-	return ZAxis
-}
-
-// SmallestComponent returns the axis that represents the smallest component in this vector.
-func (v Vector) SmallestComponent() Axis {
-	t := v.Abs()
-
-	if t.X < t.Y {
-		if t.X < t.Z {
-			return XAxis
-		}
-		return ZAxis
-	}
-	if t.Y < t.Z {
-		return YAxis
-	}
-	return ZAxis
-}
-
-// Cmp compares v and ov lexicographically and returns:
-//
-//   -1 if v <  ov
-//    0 if v == ov
-//   +1 if v >  ov
-//
-// This method is based on C++'s std::lexicographical_compare. Two entities
-// are compared element by element with the given operator. The first mismatch
-// defines which is less (or greater) than the other. If both have equivalent
-// values they are lexicographically equal.
-func (v Vector) Cmp(ov Vector) int {
-	if v.X < ov.X {
-		return -1
-	}
-	if v.X > ov.X {
-		return 1
-	}
-
-	// First elements were the same, try the next.
-	if v.Y < ov.Y {
-		return -1
-	}
-	if v.Y > ov.Y {
-		return 1
-	}
-
-	// Second elements were the same return the final compare.
-	if v.Z < ov.Z {
-		return -1
-	}
-	if v.Z > ov.Z {
-		return 1
-	}
-
-	// Both are equal
-	return 0
-}

+ 0 - 120
vendor/github.com/golang/geo/s1/angle.go

@@ -1,120 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s1
-
-import (
-	"math"
-	"strconv"
-)
-
-// Angle represents a 1D angle. The internal representation is a double precision
-// value in radians, so conversion to and from radians is exact.
-// Conversions between E5, E6, E7, and Degrees are not always
-// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000).
-//
-// The following conversions between degrees and radians are exact:
-//
-//       Degree*180 == Radian*math.Pi
-//   Degree*(180/n) == Radian*(math.Pi/n)     for n == 0..8
-//
-// These identities hold when the arguments are scaled up or down by any power
-// of 2. Some similar identities are also true, for example,
-//
-//   Degree*60 == Radian*(math.Pi/3)
-//
-// But be aware that this type of identity does not hold in general. For example,
-//
-//   Degree*3 != Radian*(math.Pi/60)
-//
-// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees()
-// does not always equal x. For example,
-//
-//   (Angle(45*n)*Degree).Degrees() == 45*n     for n == 0..8
-//
-// but
-//
-//   (60*Degree).Degrees() != 60
-//
-// When testing for equality, you should allow for numerical errors (ApproxEqual)
-// or convert to discrete E5/E6/E7 values first.
-type Angle float64
-
-// Angle units.
-const (
-	Radian Angle = 1
-	Degree       = (math.Pi / 180) * Radian
-
-	E5 = 1e-5 * Degree
-	E6 = 1e-6 * Degree
-	E7 = 1e-7 * Degree
-)
-
-// Radians returns the angle in radians.
-func (a Angle) Radians() float64 { return float64(a) }
-
-// Degrees returns the angle in degrees.
-func (a Angle) Degrees() float64 { return float64(a / Degree) }
-
-// round returns the value rounded to nearest as an int32.
-// This does not match C++ exactly for the case of x.5.
-func round(val float64) int32 {
-	if val < 0 {
-		return int32(val - 0.5)
-	}
-	return int32(val + 0.5)
-}
-
-// InfAngle returns an angle larger than any finite angle.
-func InfAngle() Angle {
-	return Angle(math.Inf(1))
-}
-
-// isInf reports whether this Angle is infinite.
-func (a Angle) isInf() bool {
-	return math.IsInf(float64(a), 0)
-}
-
-// E5 returns the angle in hundred thousandths of degrees.
-func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) }
-
-// E6 returns the angle in millionths of degrees.
-func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) }
-
-// E7 returns the angle in ten millionths of degrees.
-func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
-
-// Abs returns the absolute value of the angle.
-func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
-
-// Normalized returns an equivalent angle in (-π, π].
-func (a Angle) Normalized() Angle {
-	rad := math.Remainder(float64(a), 2*math.Pi)
-	if rad <= -math.Pi {
-		rad = math.Pi
-	}
-	return Angle(rad)
-}
-
-func (a Angle) String() string {
-	return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f"
-}
-
-// ApproxEqual reports whether the two angles are the same up to a small tolerance.
-func (a Angle) ApproxEqual(other Angle) bool {
-	return math.Abs(float64(a)-float64(other)) <= epsilon
-}
-
-// BUG(dsymonds): The major differences from the C++ version are:
-//   - no unsigned E5/E6/E7 methods

+ 0 - 250
vendor/github.com/golang/geo/s1/chordangle.go

@@ -1,250 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s1
-
-import (
-	"math"
-)
-
-// ChordAngle represents the angle subtended by a chord (i.e., the straight
-// line segment connecting two points on the sphere). Its representation
-// makes it very efficient for computing and comparing distances, but unlike
-// Angle it is only capable of representing angles between 0 and π radians.
-// Generally, ChordAngle should only be used in loops where many angles need
-// to be calculated and compared. Otherwise it is simpler to use Angle.
-//
-// ChordAngle loses some accuracy as the angle approaches π radians.
-// Specifically, the representation of (π - x) radians has an error of about
-// (1e-15 / x), with a maximum error of about 2e-8 radians (about 13cm on the
-// Earth's surface). For comparison, for angles up to π/2 radians (10000km)
-// the worst-case representation error is about 2e-16 radians (1 nanonmeter),
-// which is about the same as Angle.
-//
-// ChordAngles are represented by the squared chord length, which can
-// range from 0 to 4. Positive infinity represents an infinite squared length.
-type ChordAngle float64
-
-const (
-	// NegativeChordAngle represents a chord angle smaller than the zero angle.
-	// The only valid operations on a NegativeChordAngle are comparisons,
-	// Angle conversions, and Successor/Predecessor.
-	NegativeChordAngle = ChordAngle(-1)
-
-	// RightChordAngle represents a chord angle of 90 degrees (a "right angle").
-	RightChordAngle = ChordAngle(2)
-
-	// StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
-	// This is the maximum finite chord angle.
-	StraightChordAngle = ChordAngle(4)
-
-	// maxLength2 is the square of the maximum length allowed in a ChordAngle.
-	maxLength2 = 4.0
-)
-
-// ChordAngleFromAngle returns a ChordAngle from the given Angle.
-func ChordAngleFromAngle(a Angle) ChordAngle {
-	if a < 0 {
-		return NegativeChordAngle
-	}
-	if a.isInf() {
-		return InfChordAngle()
-	}
-	l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians()))
-	return ChordAngle(l * l)
-}
-
-// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
-// Note that the argument is automatically clamped to a maximum of 4 to
-// handle possible roundoff errors. The argument must be non-negative.
-func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
-	if length2 > maxLength2 {
-		return StraightChordAngle
-	}
-	return ChordAngle(length2)
-}
-
-// Expanded returns a new ChordAngle that has been adjusted by the given error
-// bound (which can be positive or negative). Error should be the value
-// returned by either MaxPointError or MaxAngleError. For example:
-//    a := ChordAngleFromPoints(x, y)
-//    a1 := a.Expanded(a.MaxPointError())
-func (c ChordAngle) Expanded(e float64) ChordAngle {
-	// If the angle is special, don't change it. Otherwise clamp it to the valid range.
-	if c.isSpecial() {
-		return c
-	}
-	return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e)))
-}
-
-// Angle converts this ChordAngle to an Angle.
-func (c ChordAngle) Angle() Angle {
-	if c < 0 {
-		return -1 * Radian
-	}
-	if c.isInf() {
-		return InfAngle()
-	}
-	return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c))))
-}
-
-// InfChordAngle returns a chord angle larger than any finite chord angle.
-// The only valid operations on an InfChordAngle are comparisons, Angle
-// conversions, and Successor/Predecessor.
-func InfChordAngle() ChordAngle {
-	return ChordAngle(math.Inf(1))
-}
-
-// isInf reports whether this ChordAngle is infinite.
-func (c ChordAngle) isInf() bool {
-	return math.IsInf(float64(c), 1)
-}
-
-// isSpecial reports whether this ChordAngle is one of the special cases.
-func (c ChordAngle) isSpecial() bool {
-	return c < 0 || c.isInf()
-}
-
-// isValid reports whether this ChordAngle is valid or not.
-func (c ChordAngle) isValid() bool {
-	return (c >= 0 && c <= maxLength2) || c.isSpecial()
-}
-
-// Successor returns the smallest representable ChordAngle larger than this one.
-// This can be used to convert a "<" comparison to a "<=" comparison.
-//
-// Note the following special cases:
-//   NegativeChordAngle.Successor == 0
-//   StraightChordAngle.Successor == InfChordAngle
-//   InfChordAngle.Successor == InfChordAngle
-func (c ChordAngle) Successor() ChordAngle {
-	if c >= maxLength2 {
-		return InfChordAngle()
-	}
-	if c < 0 {
-		return 0
-	}
-	return ChordAngle(math.Nextafter(float64(c), 10.0))
-}
-
-// Predecessor returns the largest representable ChordAngle less than this one.
-//
-// Note the following special cases:
-//   InfChordAngle.Predecessor == StraightChordAngle
-//   ChordAngle(0).Predecessor == NegativeChordAngle
-//   NegativeChordAngle.Predecessor == NegativeChordAngle
-func (c ChordAngle) Predecessor() ChordAngle {
-	if c <= 0 {
-		return NegativeChordAngle
-	}
-	if c > maxLength2 {
-		return StraightChordAngle
-	}
-
-	return ChordAngle(math.Nextafter(float64(c), -10.0))
-}
-
-// MaxPointError returns the maximum error size for a ChordAngle constructed
-// from 2 Points x and y, assuming that x and y are normalized to within the
-// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to
-// the true distance after the points are projected to lie exactly on the sphere.
-func (c ChordAngle) MaxPointError() float64 {
-	// There is a relative error of (2.5*dblEpsilon) when computing the squared
-	// distance, plus a relative error of 2 * dblEpsilon, plus an absolute error
-	// of (16 * dblEpsilon**2) because the lengths of the input points may differ
-	// from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize).
-	return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
-}
-
-// MaxAngleError returns the maximum error for a ChordAngle constructed
-// as an Angle distance.
-func (c ChordAngle) MaxAngleError() float64 {
-	return dblEpsilon * float64(c)
-}
-
-// Add adds the other ChordAngle to this one and returns the resulting value.
-// This method assumes the ChordAngles are not special.
-func (c ChordAngle) Add(other ChordAngle) ChordAngle {
-	// Note that this method (and Sub) is much more efficient than converting
-	// the ChordAngle to an Angle and adding those and converting back. It
-	// requires only one square root plus a few additions and multiplications.
-
-	// Optimization for the common case where b is an error tolerance
-	// parameter that happens to be set to zero.
-	if other == 0 {
-		return c
-	}
-
-	// Clamp the angle sum to at most 180 degrees.
-	if c+other >= maxLength2 {
-		return StraightChordAngle
-	}
-
-	// Let a and b be the (non-squared) chord lengths, and let c = a+b.
-	// Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc).
-	// Then the formula below can be derived from c = 2 * sin(A+B) and the
-	// relationships   sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A)
-	//                 cos(X) = sqrt(1 - sin^2(X))
-	x := float64(c * (1 - 0.25*other))
-	y := float64(other * (1 - 0.25*c))
-	return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y)))
-}
-
-// Sub subtracts the other ChordAngle from this one and returns the resulting
-// value. This method assumes the ChordAngles are not special.
-func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
-	if other == 0 {
-		return c
-	}
-	if c <= other {
-		return 0
-	}
-	x := float64(c * (1 - 0.25*other))
-	y := float64(other * (1 - 0.25*c))
-	return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y)))
-}
-
-// Sin returns the sine of this chord angle. This method is more efficient
-// than converting to Angle and performing the computation.
-func (c ChordAngle) Sin() float64 {
-	return math.Sqrt(c.Sin2())
-}
-
-// Sin2 returns the square of the sine of this chord angle.
-// It is more efficient than Sin.
-func (c ChordAngle) Sin2() float64 {
-	// Let a be the (non-squared) chord length, and let A be the corresponding
-	// half-angle (a = 2*sin(A)).  The formula below can be derived from:
-	//   sin(2*A) = 2 * sin(A) * cos(A)
-	//   cos^2(A) = 1 - sin^2(A)
-	// This is much faster than converting to an angle and computing its sine.
-	return float64(c * (1 - 0.25*c))
-}
-
-// Cos returns the cosine of this chord angle. This method is more efficient
-// than converting to Angle and performing the computation.
-func (c ChordAngle) Cos() float64 {
-	// cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A)
-	return float64(1 - 0.5*c)
-}
-
-// Tan returns the tangent of this chord angle.
-func (c ChordAngle) Tan() float64 {
-	return c.Sin() / c.Cos()
-}
-
-// TODO(roberts): Differences from C++:
-//   Helpers to/from E5/E6/E7
-//   Helpers to/from degrees and radians directly.
-//   FastUpperBoundFrom(angle Angle)

+ 0 - 20
vendor/github.com/golang/geo/s1/doc.go

@@ -1,20 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package s1 implements types and functions for working with geometry in S¹ (circular geometry).
-
-See ../s2 for a more detailed overview.
-*/
-package s1

+ 0 - 462
vendor/github.com/golang/geo/s1/interval.go

@@ -1,462 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s1
-
-import (
-	"math"
-	"strconv"
-)
-
-// An Interval represents a closed interval on a unit circle (also known
-// as a 1-dimensional sphere). It is capable of representing the empty
-// interval (containing no points), the full interval (containing all
-// points), and zero-length intervals (containing a single point).
-//
-// Points are represented by the angle they make with the positive x-axis in
-// the range [-π, π]. An interval is represented by its lower and upper
-// bounds (both inclusive, since the interval is closed). The lower bound may
-// be greater than the upper bound, in which case the interval is "inverted"
-// (i.e. it passes through the point (-1, 0)).
-//
-// The point (-1, 0) has two valid representations, π and -π. The
-// normalized representation of this point is π, so that endpoints
-// of normal intervals are in the range (-π, π]. We normalize the latter to
-// the former in IntervalFromEndpoints. However, we take advantage of the point
-// -π to construct two special intervals:
-//   The full interval is [-π, π]
-//   The empty interval is [π, -π].
-//
-// Treat the exported fields as read-only.
-type Interval struct {
-	Lo, Hi float64
-}
-
-// IntervalFromEndpoints constructs a new interval from endpoints.
-// Both arguments must be in the range [-π,π]. This function allows inverted intervals
-// to be created.
-func IntervalFromEndpoints(lo, hi float64) Interval {
-	i := Interval{lo, hi}
-	if lo == -math.Pi && hi != math.Pi {
-		i.Lo = math.Pi
-	}
-	if hi == -math.Pi && lo != math.Pi {
-		i.Hi = math.Pi
-	}
-	return i
-}
-
-// IntervalFromPointPair returns the minimal interval containing the two given points.
-// Both arguments must be in [-π,π].
-func IntervalFromPointPair(a, b float64) Interval {
-	if a == -math.Pi {
-		a = math.Pi
-	}
-	if b == -math.Pi {
-		b = math.Pi
-	}
-	if positiveDistance(a, b) <= math.Pi {
-		return Interval{a, b}
-	}
-	return Interval{b, a}
-}
-
-// EmptyInterval returns an empty interval.
-func EmptyInterval() Interval { return Interval{math.Pi, -math.Pi} }
-
-// FullInterval returns a full interval.
-func FullInterval() Interval { return Interval{-math.Pi, math.Pi} }
-
-// IsValid reports whether the interval is valid.
-func (i Interval) IsValid() bool {
-	return (math.Abs(i.Lo) <= math.Pi && math.Abs(i.Hi) <= math.Pi &&
-		!(i.Lo == -math.Pi && i.Hi != math.Pi) &&
-		!(i.Hi == -math.Pi && i.Lo != math.Pi))
-}
-
-// IsFull reports whether the interval is full.
-func (i Interval) IsFull() bool { return i.Lo == -math.Pi && i.Hi == math.Pi }
-
-// IsEmpty reports whether the interval is empty.
-func (i Interval) IsEmpty() bool { return i.Lo == math.Pi && i.Hi == -math.Pi }
-
-// IsInverted reports whether the interval is inverted; that is, whether Lo > Hi.
-func (i Interval) IsInverted() bool { return i.Lo > i.Hi }
-
-// Invert returns the interval with endpoints swapped.
-func (i Interval) Invert() Interval {
-	return Interval{i.Hi, i.Lo}
-}
-
-// Center returns the midpoint of the interval.
-// It is undefined for full and empty intervals.
-func (i Interval) Center() float64 {
-	c := 0.5 * (i.Lo + i.Hi)
-	if !i.IsInverted() {
-		return c
-	}
-	if c <= 0 {
-		return c + math.Pi
-	}
-	return c - math.Pi
-}
-
-// Length returns the length of the interval.
-// The length of an empty interval is negative.
-func (i Interval) Length() float64 {
-	l := i.Hi - i.Lo
-	if l >= 0 {
-		return l
-	}
-	l += 2 * math.Pi
-	if l > 0 {
-		return l
-	}
-	return -1
-}
-
-// Assumes p ∈ (-π,π].
-func (i Interval) fastContains(p float64) bool {
-	if i.IsInverted() {
-		return (p >= i.Lo || p <= i.Hi) && !i.IsEmpty()
-	}
-	return p >= i.Lo && p <= i.Hi
-}
-
-// Contains returns true iff the interval contains p.
-// Assumes p ∈ [-π,π].
-func (i Interval) Contains(p float64) bool {
-	if p == -math.Pi {
-		p = math.Pi
-	}
-	return i.fastContains(p)
-}
-
-// ContainsInterval returns true iff the interval contains oi.
-func (i Interval) ContainsInterval(oi Interval) bool {
-	if i.IsInverted() {
-		if oi.IsInverted() {
-			return oi.Lo >= i.Lo && oi.Hi <= i.Hi
-		}
-		return (oi.Lo >= i.Lo || oi.Hi <= i.Hi) && !i.IsEmpty()
-	}
-	if oi.IsInverted() {
-		return i.IsFull() || oi.IsEmpty()
-	}
-	return oi.Lo >= i.Lo && oi.Hi <= i.Hi
-}
-
-// InteriorContains returns true iff the interior of the interval contains p.
-// Assumes p ∈ [-π,π].
-func (i Interval) InteriorContains(p float64) bool {
-	if p == -math.Pi {
-		p = math.Pi
-	}
-	if i.IsInverted() {
-		return p > i.Lo || p < i.Hi
-	}
-	return (p > i.Lo && p < i.Hi) || i.IsFull()
-}
-
-// InteriorContainsInterval returns true iff the interior of the interval contains oi.
-func (i Interval) InteriorContainsInterval(oi Interval) bool {
-	if i.IsInverted() {
-		if oi.IsInverted() {
-			return (oi.Lo > i.Lo && oi.Hi < i.Hi) || oi.IsEmpty()
-		}
-		return oi.Lo > i.Lo || oi.Hi < i.Hi
-	}
-	if oi.IsInverted() {
-		return i.IsFull() || oi.IsEmpty()
-	}
-	return (oi.Lo > i.Lo && oi.Hi < i.Hi) || i.IsFull()
-}
-
-// Intersects returns true iff the interval contains any points in common with oi.
-func (i Interval) Intersects(oi Interval) bool {
-	if i.IsEmpty() || oi.IsEmpty() {
-		return false
-	}
-	if i.IsInverted() {
-		return oi.IsInverted() || oi.Lo <= i.Hi || oi.Hi >= i.Lo
-	}
-	if oi.IsInverted() {
-		return oi.Lo <= i.Hi || oi.Hi >= i.Lo
-	}
-	return oi.Lo <= i.Hi && oi.Hi >= i.Lo
-}
-
-// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
-func (i Interval) InteriorIntersects(oi Interval) bool {
-	if i.IsEmpty() || oi.IsEmpty() || i.Lo == i.Hi {
-		return false
-	}
-	if i.IsInverted() {
-		return oi.IsInverted() || oi.Lo < i.Hi || oi.Hi > i.Lo
-	}
-	if oi.IsInverted() {
-		return oi.Lo < i.Hi || oi.Hi > i.Lo
-	}
-	return (oi.Lo < i.Hi && oi.Hi > i.Lo) || i.IsFull()
-}
-
-// Compute distance from a to b in [0,2π], in a numerically stable way.
-func positiveDistance(a, b float64) float64 {
-	d := b - a
-	if d >= 0 {
-		return d
-	}
-	return (b + math.Pi) - (a - math.Pi)
-}
-
-// Union returns the smallest interval that contains both the interval and oi.
-func (i Interval) Union(oi Interval) Interval {
-	if oi.IsEmpty() {
-		return i
-	}
-	if i.fastContains(oi.Lo) {
-		if i.fastContains(oi.Hi) {
-			// Either oi ⊂ i, or i ∪ oi is the full interval.
-			if i.ContainsInterval(oi) {
-				return i
-			}
-			return FullInterval()
-		}
-		return Interval{i.Lo, oi.Hi}
-	}
-	if i.fastContains(oi.Hi) {
-		return Interval{oi.Lo, i.Hi}
-	}
-
-	// Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
-	if i.IsEmpty() || oi.fastContains(i.Lo) {
-		return oi
-	}
-
-	// This is the only hard case where we need to find the closest pair of endpoints.
-	if positiveDistance(oi.Hi, i.Lo) < positiveDistance(i.Hi, oi.Lo) {
-		return Interval{oi.Lo, i.Hi}
-	}
-	return Interval{i.Lo, oi.Hi}
-}
-
-// Intersection returns the smallest interval that contains the intersection of the interval and oi.
-func (i Interval) Intersection(oi Interval) Interval {
-	if oi.IsEmpty() {
-		return EmptyInterval()
-	}
-	if i.fastContains(oi.Lo) {
-		if i.fastContains(oi.Hi) {
-			// Either oi ⊂ i, or i and oi intersect twice. Neither are empty.
-			// In the first case we want to return i (which is shorter than oi).
-			// In the second case one of them is inverted, and the smallest interval
-			// that covers the two disjoint pieces is the shorter of i and oi.
-			// We thus want to pick the shorter of i and oi in both cases.
-			if oi.Length() < i.Length() {
-				return oi
-			}
-			return i
-		}
-		return Interval{oi.Lo, i.Hi}
-	}
-	if i.fastContains(oi.Hi) {
-		return Interval{i.Lo, oi.Hi}
-	}
-
-	// Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
-	if oi.fastContains(i.Lo) {
-		return i
-	}
-	return EmptyInterval()
-}
-
-// AddPoint returns the interval expanded by the minimum amount necessary such
-// that it contains the given point "p" (an angle in the range [-π, π]).
-func (i Interval) AddPoint(p float64) Interval {
-	if math.Abs(p) > math.Pi {
-		return i
-	}
-	if p == -math.Pi {
-		p = math.Pi
-	}
-	if i.fastContains(p) {
-		return i
-	}
-	if i.IsEmpty() {
-		return Interval{p, p}
-	}
-	if positiveDistance(p, i.Lo) < positiveDistance(i.Hi, p) {
-		return Interval{p, i.Hi}
-	}
-	return Interval{i.Lo, p}
-}
-
-// Define the maximum rounding error for arithmetic operations. Depending on the
-// platform the mantissa precision may be different than others, so we choose to
-// use specific values to be consistent across all.
-// The values come from the C++ implementation.
-var (
-	// epsilon is a small number that represents a reasonable level of noise between two
-	// values that can be considered to be equal.
-	epsilon = 1e-15
-	// dblEpsilon is a smaller number for values that require more precision.
-	dblEpsilon = 2.220446049e-16
-)
-
-// Expanded returns an interval that has been expanded on each side by margin.
-// If margin is negative, then the function shrinks the interval on
-// each side by margin instead. The resulting interval may be empty or
-// full. Any expansion (positive or negative) of a full interval remains
-// full, and any expansion of an empty interval remains empty.
-func (i Interval) Expanded(margin float64) Interval {
-	if margin >= 0 {
-		if i.IsEmpty() {
-			return i
-		}
-		// Check whether this interval will be full after expansion, allowing
-		// for a rounding error when computing each endpoint.
-		if i.Length()+2*margin+2*dblEpsilon >= 2*math.Pi {
-			return FullInterval()
-		}
-	} else {
-		if i.IsFull() {
-			return i
-		}
-		// Check whether this interval will be empty after expansion, allowing
-		// for a rounding error when computing each endpoint.
-		if i.Length()+2*margin-2*dblEpsilon <= 0 {
-			return EmptyInterval()
-		}
-	}
-	result := IntervalFromEndpoints(
-		math.Remainder(i.Lo-margin, 2*math.Pi),
-		math.Remainder(i.Hi+margin, 2*math.Pi),
-	)
-	if result.Lo <= -math.Pi {
-		result.Lo = math.Pi
-	}
-	return result
-}
-
-// ApproxEqual reports whether this interval can be transformed into the given
-// interval by moving each endpoint by at most ε, without the
-// endpoints crossing (which would invert the interval). Empty and full
-// intervals are considered to start at an arbitrary point on the unit circle,
-// so any interval with (length <= 2*ε) matches the empty interval, and
-// any interval with (length >= 2*π - 2*ε) matches the full interval.
-func (i Interval) ApproxEqual(other Interval) bool {
-	// Full and empty intervals require special cases because the endpoints
-	// are considered to be positioned arbitrarily.
-	if i.IsEmpty() {
-		return other.Length() <= 2*epsilon
-	}
-	if other.IsEmpty() {
-		return i.Length() <= 2*epsilon
-	}
-	if i.IsFull() {
-		return other.Length() >= 2*(math.Pi-epsilon)
-	}
-	if other.IsFull() {
-		return i.Length() >= 2*(math.Pi-epsilon)
-	}
-
-	// The purpose of the last test below is to verify that moving the endpoints
-	// does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20].
-	return (math.Abs(math.Remainder(other.Lo-i.Lo, 2*math.Pi)) <= epsilon &&
-		math.Abs(math.Remainder(other.Hi-i.Hi, 2*math.Pi)) <= epsilon &&
-		math.Abs(i.Length()-other.Length()) <= 2*epsilon)
-
-}
-
-func (i Interval) String() string {
-	// like "[%.7f, %.7f]"
-	return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]"
-}
-
-// Complement returns the complement of the interior of the interval. An interval and
-// its complement have the same boundary but do not share any interior
-// values. The complement operator is not a bijection, since the complement
-// of a singleton interval (containing a single value) is the same as the
-// complement of an empty interval.
-func (i Interval) Complement() Interval {
-	if i.Lo == i.Hi {
-		// Singleton. The interval just contains a single point.
-		return FullInterval()
-	}
-	// Handles empty and full.
-	return Interval{i.Hi, i.Lo}
-}
-
-// ComplementCenter returns the midpoint of the complement of the interval. For full and empty
-// intervals, the result is arbitrary. For a singleton interval (containing a
-// single point), the result is its antipodal point on S1.
-func (i Interval) ComplementCenter() float64 {
-	if i.Lo != i.Hi {
-		return i.Complement().Center()
-	}
-	// Singleton. The interval just contains a single point.
-	if i.Hi <= 0 {
-		return i.Hi + math.Pi
-	}
-	return i.Hi - math.Pi
-}
-
-// DirectedHausdorffDistance returns the Hausdorff distance to the given interval.
-// For two intervals i and y, this distance is defined by
-//     h(i, y) = max_{p in i} min_{q in y} d(p, q),
-// where d(.,.) is measured along S1.
-func (i Interval) DirectedHausdorffDistance(y Interval) Angle {
-	if y.ContainsInterval(i) {
-		return 0 // This includes the case i is empty.
-	}
-	if y.IsEmpty() {
-		return Angle(math.Pi) // maximum possible distance on s1.
-	}
-	yComplementCenter := y.ComplementCenter()
-	if i.Contains(yComplementCenter) {
-		return Angle(positiveDistance(y.Hi, yComplementCenter))
-	}
-
-	// The Hausdorff distance is realized by either two i.Hi endpoints or two
-	// i.Lo endpoints, whichever is farther apart.
-	hiHi := 0.0
-	if IntervalFromEndpoints(y.Hi, yComplementCenter).Contains(i.Hi) {
-		hiHi = positiveDistance(y.Hi, i.Hi)
-	}
-
-	loLo := 0.0
-	if IntervalFromEndpoints(yComplementCenter, y.Lo).Contains(i.Lo) {
-		loLo = positiveDistance(i.Lo, y.Lo)
-	}
-
-	return Angle(math.Max(hiHi, loLo))
-}
-
-// Project returns the closest point in the interval to the given point p.
-// The interval must be non-empty.
-func (i Interval) Project(p float64) float64 {
-	if p == -math.Pi {
-		p = math.Pi
-	}
-	if i.fastContains(p) {
-		return p
-	}
-	// Compute distance from p to each endpoint.
-	dlo := positiveDistance(p, i.Lo)
-	dhi := positiveDistance(i.Hi, p)
-	if dlo < dhi {
-		return i.Lo
-	}
-	return i.Hi
-}

+ 0 - 53
vendor/github.com/golang/geo/s2/bits_go18.go

@@ -1,53 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.9
-
-package s2
-
-// This file is for the bit manipulation code pre-Go 1.9.
-
-// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
-// significant set bit. Passing zero to this function returns zero.
-func findMSBSetNonZero64(x uint64) int {
-	val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
-	shift := []uint64{1, 2, 4, 8, 16, 32}
-	var msbPos uint64
-	for i := 5; i >= 0; i-- {
-		if x&val[i] != 0 {
-			x >>= shift[i]
-			msbPos |= shift[i]
-		}
-	}
-	return int(msbPos)
-}
-
-const deBruijn64 = 0x03f79d71b4ca8b09
-const digitMask = uint64(1<<64 - 1)
-
-var deBruijn64Lookup = []byte{
-	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
-	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
-	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
-	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
-}
-
-// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
-// significant set bit. Passing zero to this function returns zero.
-//
-// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
-// which references (Knuth, volume 4, section 7.3.1).
-func findLSBSetNonZero64(x uint64) int {
-	return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
-}

+ 0 - 39
vendor/github.com/golang/geo/s2/bits_go19.go

@@ -1,39 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.9
-
-package s2
-
-// This file is for the bit manipulation code post-Go 1.9.
-
-import "math/bits"
-
-// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
-// significant set bit. Passing zero to this function return zero.
-func findMSBSetNonZero64(x uint64) int {
-	if x == 0 {
-		return 0
-	}
-	return 63 - bits.LeadingZeros64(x)
-}
-
-// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
-// significant set bit. Passing zero to this function return zero.
-func findLSBSetNonZero64(x uint64) int {
-	if x == 0 {
-		return 0
-	}
-	return bits.TrailingZeros64(x)
-}

+ 0 - 519
vendor/github.com/golang/geo/s2/cap.go

@@ -1,519 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/s1"
-)
-
-var (
-	// centerPoint is the default center for Caps
-	centerPoint = PointFromCoords(1.0, 0, 0)
-)
-
-// Cap represents a disc-shaped region defined by a center and radius.
-// Technically this shape is called a "spherical cap" (rather than disc)
-// because it is not planar; the cap represents a portion of the sphere that
-// has been cut off by a plane. The boundary of the cap is the circle defined
-// by the intersection of the sphere and the plane. For containment purposes,
-// the cap is a closed set, i.e. it contains its boundary.
-//
-// For the most part, you can use a spherical cap wherever you would use a
-// disc in planar geometry. The radius of the cap is measured along the
-// surface of the sphere (rather than the straight-line distance through the
-// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius
-// π covers the entire sphere.
-//
-// The center is a point on the surface of the unit sphere. (Hence the need for
-// it to be of unit length.)
-//
-// A cap can also be defined by its center point and height. The height is the
-// distance from the center point to the cutoff plane. There is also support for
-// "empty" and "full" caps, which contain no points and all points respectively.
-//
-// Here are some useful relationships between the cap height (h), the cap
-// radius (r), the maximum chord length from the cap's center (d), and the
-// radius of cap's base (a).
-//
-//     h = 1 - cos(r)
-//       = 2 * sin^2(r/2)
-//   d^2 = 2 * h
-//       = a^2 + h^2
-//
-// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap.
-type Cap struct {
-	center Point
-	radius s1.ChordAngle
-}
-
-// CapFromPoint constructs a cap containing a single point.
-func CapFromPoint(p Point) Cap {
-	return CapFromCenterChordAngle(p, 0)
-}
-
-// CapFromCenterAngle constructs a cap with the given center and angle.
-func CapFromCenterAngle(center Point, angle s1.Angle) Cap {
-	return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle))
-}
-
-// CapFromCenterChordAngle constructs a cap where the angle is expressed as an
-// s1.ChordAngle. This constructor is more efficient than using an s1.Angle.
-func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap {
-	return Cap{
-		center: center,
-		radius: radius,
-	}
-}
-
-// CapFromCenterHeight constructs a cap with the given center and height. A
-// negative height yields an empty cap; a height of 2 or more yields a full cap.
-// The center should be unit length.
-func CapFromCenterHeight(center Point, height float64) Cap {
-	return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height))
-}
-
-// CapFromCenterArea constructs a cap with the given center and surface area.
-// Note that the area can also be interpreted as the solid angle subtended by the
-// cap (because the sphere has unit radius). A negative area yields an empty cap;
-// an area of 4*π or more yields a full cap.
-func CapFromCenterArea(center Point, area float64) Cap {
-	return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi))
-}
-
-// EmptyCap returns a cap that contains no points.
-func EmptyCap() Cap {
-	return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle)
-}
-
-// FullCap returns a cap that contains all points.
-func FullCap() Cap {
-	return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle)
-}
-
-// IsValid reports whether the Cap is considered valid.
-func (c Cap) IsValid() bool {
-	return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle
-}
-
-// IsEmpty reports whether the cap is empty, i.e. it contains no points.
-func (c Cap) IsEmpty() bool {
-	return c.radius < 0
-}
-
-// IsFull reports whether the cap is full, i.e. it contains all points.
-func (c Cap) IsFull() bool {
-	return c.radius == s1.StraightChordAngle
-}
-
-// Center returns the cap's center point.
-func (c Cap) Center() Point {
-	return c.center
-}
-
-// Height returns the height of the cap. This is the distance from the center
-// point to the cutoff plane.
-func (c Cap) Height() float64 {
-	return float64(0.5 * c.radius)
-}
-
-// Radius returns the cap radius as an s1.Angle. (Note that the cap angle
-// is stored internally as a ChordAngle, so this method requires a trigonometric
-// operation and may yield a slightly different result than the value passed
-// to CapFromCenterAngle).
-func (c Cap) Radius() s1.Angle {
-	return c.radius.Angle()
-}
-
-// Area returns the surface area of the Cap on the unit sphere.
-func (c Cap) Area() float64 {
-	return 2.0 * math.Pi * math.Max(0, c.Height())
-}
-
-// Contains reports whether this cap contains the other.
-func (c Cap) Contains(other Cap) bool {
-	// In a set containment sense, every cap contains the empty cap.
-	if c.IsFull() || other.IsEmpty() {
-		return true
-	}
-	return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
-}
-
-// Intersects reports whether this cap intersects the other cap.
-// i.e. whether they have any points in common.
-func (c Cap) Intersects(other Cap) bool {
-	if c.IsEmpty() || other.IsEmpty() {
-		return false
-	}
-
-	return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center)
-}
-
-// InteriorIntersects reports whether this caps interior intersects the other cap.
-func (c Cap) InteriorIntersects(other Cap) bool {
-	// Make sure this cap has an interior and the other cap is non-empty.
-	if c.radius <= 0 || other.IsEmpty() {
-		return false
-	}
-
-	return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center)
-}
-
-// ContainsPoint reports whether this cap contains the point.
-func (c Cap) ContainsPoint(p Point) bool {
-	return ChordAngleBetweenPoints(c.center, p) <= c.radius
-}
-
-// InteriorContainsPoint reports whether the point is within the interior of this cap.
-func (c Cap) InteriorContainsPoint(p Point) bool {
-	return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius
-}
-
-// Complement returns the complement of the interior of the cap. A cap and its
-// complement have the same boundary but do not share any interior points.
-// The complement operator is not a bijection because the complement of a
-// singleton cap (containing a single point) is the same as the complement
-// of an empty cap.
-func (c Cap) Complement() Cap {
-	if c.IsFull() {
-		return EmptyCap()
-	}
-	if c.IsEmpty() {
-		return FullCap()
-	}
-
-	return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius))
-}
-
-// CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
-func (c Cap) CapBound() Cap {
-	return c
-}
-
-// RectBound returns a bounding latitude-longitude rectangle.
-// The bounds are not guaranteed to be tight.
-func (c Cap) RectBound() Rect {
-	if c.IsEmpty() {
-		return EmptyRect()
-	}
-
-	capAngle := c.Radius().Radians()
-	allLongitudes := false
-	lat := r1.Interval{
-		Lo: latitude(c.center).Radians() - capAngle,
-		Hi: latitude(c.center).Radians() + capAngle,
-	}
-	lng := s1.FullInterval()
-
-	// Check whether cap includes the south pole.
-	if lat.Lo <= -math.Pi/2 {
-		lat.Lo = -math.Pi / 2
-		allLongitudes = true
-	}
-
-	// Check whether cap includes the north pole.
-	if lat.Hi >= math.Pi/2 {
-		lat.Hi = math.Pi / 2
-		allLongitudes = true
-	}
-
-	if !allLongitudes {
-		// Compute the range of longitudes covered by the cap. We use the law
-		// of sines for spherical triangles. Consider the triangle ABC where
-		// A is the north pole, B is the center of the cap, and C is the point
-		// of tangency between the cap boundary and a line of longitude. Then
-		// C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
-		// we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
-		// Here "a" is the cap angle, and "c" is the colatitude (90 degrees
-		// minus the latitude). This formula also works for negative latitudes.
-		//
-		// The formula for sin(a) follows from the relationship h = 1 - cos(a).
-		sinA := c.radius.Sin()
-		sinC := math.Cos(latitude(c.center).Radians())
-		if sinA <= sinC {
-			angleA := math.Asin(sinA / sinC)
-			lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2)
-			lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2)
-		}
-	}
-	return Rect{lat, lng}
-}
-
-// Equal reports whether this cap is equal to the other cap.
-func (c Cap) Equal(other Cap) bool {
-	return (c.radius == other.radius && c.center == other.center) ||
-		(c.IsEmpty() && other.IsEmpty()) ||
-		(c.IsFull() && other.IsFull())
-}
-
-// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance.
-func (c Cap) ApproxEqual(other Cap) bool {
-	const epsilon = 1e-14
-	r2 := float64(c.radius)
-	otherR2 := float64(other.radius)
-	return c.center.ApproxEqual(other.center) &&
-		math.Abs(r2-otherR2) <= epsilon ||
-		c.IsEmpty() && otherR2 <= epsilon ||
-		other.IsEmpty() && r2 <= epsilon ||
-		c.IsFull() && otherR2 >= 2-epsilon ||
-		other.IsFull() && r2 >= 2-epsilon
-}
-
-// AddPoint increases the cap if necessary to include the given point. If this cap is empty,
-// then the center is set to the point with a zero height. p must be unit-length.
-func (c Cap) AddPoint(p Point) Cap {
-	if c.IsEmpty() {
-		c.center = p
-		c.radius = 0
-		return c
-	}
-
-	// After calling cap.AddPoint(p), cap.Contains(p) must be true. However
-	// we don't need to do anything special to achieve this because Contains()
-	// does exactly the same distance calculation that we do here.
-	if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius {
-		c.radius = newRad
-	}
-	return c
-}
-
-// AddCap increases the cap height if necessary to include the other cap. If this cap is empty,
-// it is set to the other cap.
-func (c Cap) AddCap(other Cap) Cap {
-	if c.IsEmpty() {
-		return other
-	}
-	if other.IsEmpty() {
-		return c
-	}
-
-	// We round up the distance to ensure that the cap is actually contained.
-	// TODO(roberts): Do some error analysis in order to guarantee this.
-	dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
-	if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius {
-		c.radius = newRad
-	}
-	return c
-}
-
-// Expanded returns a new cap expanded by the given angle. If the cap is empty,
-// it returns an empty cap.
-func (c Cap) Expanded(distance s1.Angle) Cap {
-	if c.IsEmpty() {
-		return EmptyCap()
-	}
-	return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance)))
-}
-
-func (c Cap) String() string {
-	return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees())
-}
-
-// radiusToHeight converts an s1.Angle into the height of the cap.
-func radiusToHeight(r s1.Angle) float64 {
-	if r.Radians() < 0 {
-		return float64(s1.NegativeChordAngle)
-	}
-	if r.Radians() >= math.Pi {
-		return float64(s1.RightChordAngle)
-	}
-	return float64(0.5 * s1.ChordAngleFromAngle(r))
-
-}
-
-// ContainsCell reports whether the cap contains the given cell.
-func (c Cap) ContainsCell(cell Cell) bool {
-	// If the cap does not contain all cell vertices, return false.
-	var vertices [4]Point
-	for k := 0; k < 4; k++ {
-		vertices[k] = cell.Vertex(k)
-		if !c.ContainsPoint(vertices[k]) {
-			return false
-		}
-	}
-	// Otherwise, return true if the complement of the cap does not intersect the cell.
-	return !c.Complement().intersects(cell, vertices)
-}
-
-// IntersectsCell reports whether the cap intersects the cell.
-func (c Cap) IntersectsCell(cell Cell) bool {
-	// If the cap contains any cell vertex, return true.
-	var vertices [4]Point
-	for k := 0; k < 4; k++ {
-		vertices[k] = cell.Vertex(k)
-		if c.ContainsPoint(vertices[k]) {
-			return true
-		}
-	}
-	return c.intersects(cell, vertices)
-}
-
-// intersects reports whether the cap intersects any point of the cell excluding
-// its vertices (which are assumed to already have been checked).
-func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
-	// If the cap is a hemisphere or larger, the cell and the complement of the cap
-	// are both convex. Therefore since no vertex of the cell is contained, no other
-	// interior point of the cell is contained either.
-	if c.radius >= s1.RightChordAngle {
-		return false
-	}
-
-	// We need to check for empty caps due to the center check just below.
-	if c.IsEmpty() {
-		return false
-	}
-
-	// Optimization: return true if the cell contains the cap center. This allows half
-	// of the edge checks below to be skipped.
-	if cell.ContainsPoint(c.center) {
-		return true
-	}
-
-	// At this point we know that the cell does not contain the cap center, and the cap
-	// does not contain any cell vertex. The only way that they can intersect is if the
-	// cap intersects the interior of some edge.
-	sin2Angle := c.radius.Sin2()
-	for k := 0; k < 4; k++ {
-		edge := cell.Edge(k).Vector
-		dot := c.center.Vector.Dot(edge)
-		if dot > 0 {
-			// The center is in the interior half-space defined by the edge. We do not need
-			// to consider these edges, since if the cap intersects this edge then it also
-			// intersects the edge on the opposite side of the cell, because the center is
-			// not contained with the cell.
-			continue
-		}
-
-		// The Norm2() factor is necessary because "edge" is not normalized.
-		if dot*dot > sin2Angle*edge.Norm2() {
-			return false
-		}
-
-		// Otherwise, the great circle containing this edge intersects the interior of the cap. We just
-		// need to check whether the point of closest approach occurs between the two edge endpoints.
-		dir := edge.Cross(c.center.Vector)
-		if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 {
-			return true
-		}
-	}
-	return false
-}
-
-// CellUnionBound computes a covering of the Cap. In general the covering
-// consists of at most 4 cells except for very large caps, which may need
-// up to 6 cells. The output is not sorted.
-func (c Cap) CellUnionBound() []CellID {
-	// TODO(roberts): The covering could be made quite a bit tighter by mapping
-	// the cap to a rectangle in (i,j)-space and finding a covering for that.
-
-	// Find the maximum level such that the cap contains at most one cell vertex
-	// and such that CellID.AppendVertexNeighbors() can be called.
-	level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1
-
-	// If level < 0, more than three face cells are required.
-	if level < 0 {
-		cellIDs := make([]CellID, 6)
-		for face := 0; face < 6; face++ {
-			cellIDs[face] = CellIDFromFace(face)
-		}
-		return cellIDs
-	}
-	// The covering consists of the 4 cells at the given level that share the
-	// cell vertex that is closest to the cap center.
-	return cellIDFromPoint(c.center).VertexNeighbors(level)
-}
-
-// Centroid returns the true centroid of the cap multiplied by its surface area
-// The result lies on the ray from the origin through the cap's center, but it
-// is not unit length. Note that if you just want the "surface centroid", i.e.
-// the normalized result, then it is simpler to call Center.
-//
-// The reason for multiplying the result by the cap area is to make it
-// easier to compute the centroid of more complicated shapes. The centroid
-// of a union of disjoint regions can be computed simply by adding their
-// Centroid() results. Caveat: for caps that contain a single point
-// (i.e., zero radius), this method always returns the origin (0, 0, 0).
-// This is because shapes with no area don't affect the centroid of a
-// union whose total area is positive.
-func (c Cap) Centroid() Point {
-	// From symmetry, the centroid of the cap must be somewhere on the line
-	// from the origin to the center of the cap on the surface of the sphere.
-	// When a sphere is divided into slices of constant thickness by a set of
-	// parallel planes, all slices have the same surface area. This implies
-	// that the radial component of the centroid is simply the midpoint of the
-	// range of radial distances spanned by the cap. That is easily computed
-	// from the cap height.
-	if c.IsEmpty() {
-		return Point{}
-	}
-	r := 1 - 0.5*c.Height()
-	return Point{c.center.Mul(r * c.Area())}
-}
-
-// Union returns the smallest cap which encloses this cap and other.
-func (c Cap) Union(other Cap) Cap {
-	// If the other cap is larger, swap c and other for the rest of the computations.
-	if c.radius < other.radius {
-		c, other = other, c
-	}
-
-	if c.IsFull() || other.IsEmpty() {
-		return c
-	}
-
-	// TODO: This calculation would be more efficient using s1.ChordAngles.
-	cRadius := c.Radius()
-	otherRadius := other.Radius()
-	distance := c.center.Distance(other.center)
-	if cRadius >= distance+otherRadius {
-		return c
-	}
-
-	resRadius := 0.5 * (distance + cRadius + otherRadius)
-	resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
-	return CapFromCenterAngle(resCenter, resRadius)
-}
-
-// Encode encodes the Cap.
-func (c Cap) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	c.encode(e)
-	return e.err
-}
-
-func (c Cap) encode(e *encoder) {
-	e.writeFloat64(c.center.X)
-	e.writeFloat64(c.center.Y)
-	e.writeFloat64(c.center.Z)
-	e.writeFloat64(float64(c.radius))
-}
-
-// Decode decodes the Cap.
-func (c *Cap) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	c.decode(d)
-	return d.err
-}
-
-func (c *Cap) decode(d *decoder) {
-	c.center.X = d.readFloat64()
-	c.center.Y = d.readFloat64()
-	c.center.Z = d.readFloat64()
-	c.radius = s1.ChordAngle(d.readFloat64())
-}

+ 0 - 698
vendor/github.com/golang/geo/s2/cell.go

@@ -1,698 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"io"
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// Cell is an S2 region object that represents a cell. Unlike CellIDs,
-// it supports efficient containment and intersection tests. However, it is
-// also a more expensive representation.
-type Cell struct {
-	face        int8
-	level       int8
-	orientation int8
-	id          CellID
-	uv          r2.Rect
-}
-
-// CellFromCellID constructs a Cell corresponding to the given CellID.
-func CellFromCellID(id CellID) Cell {
-	c := Cell{}
-	c.id = id
-	f, i, j, o := c.id.faceIJOrientation()
-	c.face = int8(f)
-	c.level = int8(c.id.Level())
-	c.orientation = int8(o)
-	c.uv = ijLevelToBoundUV(i, j, int(c.level))
-	return c
-}
-
-// CellFromPoint constructs a cell for the given Point.
-func CellFromPoint(p Point) Cell {
-	return CellFromCellID(cellIDFromPoint(p))
-}
-
-// CellFromLatLng constructs a cell for the given LatLng.
-func CellFromLatLng(ll LatLng) Cell {
-	return CellFromCellID(CellIDFromLatLng(ll))
-}
-
-// Face returns the face this cell is on.
-func (c Cell) Face() int {
-	return int(c.face)
-}
-
-// oppositeFace returns the face opposite the given face.
-func oppositeFace(face int) int {
-	return (face + 3) % 6
-}
-
-// Level returns the level of this cell.
-func (c Cell) Level() int {
-	return int(c.level)
-}
-
-// ID returns the CellID this cell represents.
-func (c Cell) ID() CellID {
-	return c.id
-}
-
-// IsLeaf returns whether this Cell is a leaf or not.
-func (c Cell) IsLeaf() bool {
-	return c.level == maxLevel
-}
-
-// SizeIJ returns the edge length of this cell in (i,j)-space.
-func (c Cell) SizeIJ() int {
-	return sizeIJ(int(c.level))
-}
-
-// SizeST returns the edge length of this cell in (s,t)-space.
-func (c Cell) SizeST() float64 {
-	return c.id.sizeST(int(c.level))
-}
-
-// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
-// (lower left, lower right, upper right, upper left in the UV plane).
-func (c Cell) Vertex(k int) Point {
-	return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()}
-}
-
-// Edge returns the inward-facing normal of the great circle passing through
-// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
-func (c Cell) Edge(k int) Point {
-	switch k {
-	case 0:
-		return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
-	case 1:
-		return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
-	case 2:
-		return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
-	default:
-		return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
-	}
-}
-
-// BoundUV returns the bounds of this cell in (u,v)-space.
-func (c Cell) BoundUV() r2.Rect {
-	return c.uv
-}
-
-// Center returns the direction vector corresponding to the center in
-// (s,t)-space of the given cell. This is the point at which the cell is
-// divided into four subcells; it is not necessarily the centroid of the
-// cell in (u,v)-space or (x,y,z)-space
-func (c Cell) Center() Point {
-	return Point{c.id.rawPoint().Normalize()}
-}
-
-// Children returns the four direct children of this cell in traversal order
-// and returns true. If this is a leaf cell, or the children could not be created,
-// false is returned.
-// The C++ method is called Subdivide.
-func (c Cell) Children() ([4]Cell, bool) {
-	var children [4]Cell
-
-	if c.id.IsLeaf() {
-		return children, false
-	}
-
-	// Compute the cell midpoint in uv-space.
-	uvMid := c.id.centerUV()
-
-	// Create four children with the appropriate bounds.
-	cid := c.id.ChildBegin()
-	for pos := 0; pos < 4; pos++ {
-		children[pos] = Cell{
-			face:        c.face,
-			level:       c.level + 1,
-			orientation: c.orientation ^ int8(posToOrientation[pos]),
-			id:          cid,
-		}
-
-		// We want to split the cell in half in u and v. To decide which
-		// side to set equal to the midpoint value, we look at cell's (i,j)
-		// position within its parent. The index for i is in bit 1 of ij.
-		ij := posToIJ[c.orientation][pos]
-		i := ij >> 1
-		j := ij & 1
-		if i == 1 {
-			children[pos].uv.X.Hi = c.uv.X.Hi
-			children[pos].uv.X.Lo = uvMid.X
-		} else {
-			children[pos].uv.X.Lo = c.uv.X.Lo
-			children[pos].uv.X.Hi = uvMid.X
-		}
-		if j == 1 {
-			children[pos].uv.Y.Hi = c.uv.Y.Hi
-			children[pos].uv.Y.Lo = uvMid.Y
-		} else {
-			children[pos].uv.Y.Lo = c.uv.Y.Lo
-			children[pos].uv.Y.Hi = uvMid.Y
-		}
-		cid = cid.Next()
-	}
-	return children, true
-}
-
-// ExactArea returns the area of this cell as accurately as possible.
-func (c Cell) ExactArea() float64 {
-	v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3)
-	return PointArea(v0, v1, v2) + PointArea(v0, v2, v3)
-}
-
-// ApproxArea returns the approximate area of this cell. This method is accurate
-// to within 3% percent for all cell sizes and accurate to within 0.1% for cells
-// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's
-// surface). It is moderately cheap to compute.
-func (c Cell) ApproxArea() float64 {
-	// All cells at the first two levels have the same area.
-	if c.level < 2 {
-		return c.AverageArea()
-	}
-
-	// First, compute the approximate area of the cell when projected
-	// perpendicular to its normal. The cross product of its diagonals gives
-	// the normal, and the length of the normal is twice the projected area.
-	flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector).
-		Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm())
-
-	// Now, compensate for the curvature of the cell surface by pretending
-	// that the cell is shaped like a spherical cap. The ratio of the
-	// area of a spherical cap to the area of its projected disc turns out
-	// to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc.
-	// For example, when r=0 the ratio is 1, and when r=1 the ratio is 2.
-	// Here we set Pi*r*r == flatArea to find the equivalent disc.
-	return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1)))
-}
-
-// AverageArea returns the average area of cells at the level of this cell.
-// This is accurate to within a factor of 1.7.
-func (c Cell) AverageArea() float64 {
-	return AvgAreaMetric.Value(int(c.level))
-}
-
-// IntersectsCell reports whether the intersection of this cell and the other cell is not nil.
-func (c Cell) IntersectsCell(oc Cell) bool {
-	return c.id.Intersects(oc.id)
-}
-
-// ContainsCell reports whether this cell contains the other cell.
-func (c Cell) ContainsCell(oc Cell) bool {
-	return c.id.Contains(oc.id)
-}
-
-// CellUnionBound computes a covering of the Cell.
-func (c Cell) CellUnionBound() []CellID {
-	return c.CapBound().CellUnionBound()
-}
-
-// latitude returns the latitude of the cell vertex in radians given by (i,j),
-// where i and j indicate the Hi (1) or Lo (0) corner.
-func (c Cell) latitude(i, j int) float64 {
-	var u, v float64
-	switch {
-	case i == 0 && j == 0:
-		u = c.uv.X.Lo
-		v = c.uv.Y.Lo
-	case i == 0 && j == 1:
-		u = c.uv.X.Lo
-		v = c.uv.Y.Hi
-	case i == 1 && j == 0:
-		u = c.uv.X.Hi
-		v = c.uv.Y.Lo
-	case i == 1 && j == 1:
-		u = c.uv.X.Hi
-		v = c.uv.Y.Hi
-	default:
-		panic("i and/or j is out of bounds")
-	}
-	return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
-}
-
-// longitude returns the longitude of the cell vertex in radians given by (i,j),
-// where i and j indicate the Hi (1) or Lo (0) corner.
-func (c Cell) longitude(i, j int) float64 {
-	var u, v float64
-	switch {
-	case i == 0 && j == 0:
-		u = c.uv.X.Lo
-		v = c.uv.Y.Lo
-	case i == 0 && j == 1:
-		u = c.uv.X.Lo
-		v = c.uv.Y.Hi
-	case i == 1 && j == 0:
-		u = c.uv.X.Hi
-		v = c.uv.Y.Lo
-	case i == 1 && j == 1:
-		u = c.uv.X.Hi
-		v = c.uv.Y.Hi
-	default:
-		panic("i and/or j is out of bounds")
-	}
-	return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
-}
-
-var (
-	poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon
-)
-
-// RectBound returns the bounding rectangle of this cell.
-func (c Cell) RectBound() Rect {
-	if c.level > 0 {
-		// Except for cells at level 0, the latitude and longitude extremes are
-		// attained at the vertices.  Furthermore, the latitude range is
-		// determined by one pair of diagonally opposite vertices and the
-		// longitude range is determined by the other pair.
-		//
-		// We first determine which corner (i,j) of the cell has the largest
-		// absolute latitude.  To maximize latitude, we want to find the point in
-		// the cell that has the largest absolute z-coordinate and the smallest
-		// absolute x- and y-coordinates.  To do this we look at each coordinate
-		// (u and v), and determine whether we want to minimize or maximize that
-		// coordinate based on the axis direction and the cell's (u,v) quadrant.
-		u := c.uv.X.Lo + c.uv.X.Hi
-		v := c.uv.Y.Lo + c.uv.Y.Hi
-		var i, j int
-		if uAxis(int(c.face)).Z == 0 {
-			if u < 0 {
-				i = 1
-			}
-		} else if u > 0 {
-			i = 1
-		}
-		if vAxis(int(c.face)).Z == 0 {
-			if v < 0 {
-				j = 1
-			}
-		} else if v > 0 {
-			j = 1
-		}
-		lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j))
-		lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j))
-
-		// We grow the bounds slightly to make sure that the bounding rectangle
-		// contains LatLngFromPoint(P) for any point P inside the loop L defined by the
-		// four *normalized* vertices.  Note that normalization of a vector can
-		// change its direction by up to 0.5 * dblEpsilon radians, and it is not
-		// enough just to add Normalize calls to the code above because the
-		// latitude/longitude ranges are not necessarily determined by diagonally
-		// opposite vertex pairs after normalization.
-		//
-		// We would like to bound the amount by which the latitude/longitude of a
-		// contained point P can exceed the bounds computed above.  In the case of
-		// longitude, the normalization error can change the direction of rounding
-		// leading to a maximum difference in longitude of 2 * dblEpsilon.  In
-		// the case of latitude, the normalization error can shift the latitude by
-		// up to 0.5 * dblEpsilon and the other sources of error can cause the
-		// two latitudes to differ by up to another 1.5 * dblEpsilon, which also
-		// leads to a maximum difference of 2 * dblEpsilon.
-		return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure()
-	}
-
-	// The 4 cells around the equator extend to +/-45 degrees latitude at the
-	// midpoints of their top and bottom edges.  The two cells covering the
-	// poles extend down to +/-35.26 degrees at their vertices.  The maximum
-	// error in this calculation is 0.5 * dblEpsilon.
-	var bound Rect
-	switch c.face {
-	case 0:
-		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}}
-	case 1:
-		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}}
-	case 2:
-		bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()}
-	case 3:
-		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}}
-	case 4:
-		bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}}
-	default:
-		bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()}
-	}
-
-	// Finally, we expand the bound to account for the error when a point P is
-	// converted to an LatLng to test for containment. (The bound should be
-	// large enough so that it contains the computed LatLng of any contained
-	// point, not just the infinite-precision version.) We don't need to expand
-	// longitude because longitude is calculated via a single call to math.Atan2,
-	// which is guaranteed to be semi-monotonic.
-	return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)})
-}
-
-// CapBound returns the bounding cap of this cell.
-func (c Cell) CapBound() Cap {
-	// We use the cell center in (u,v)-space as the cap axis.  This vector is very close
-	// to GetCenter() and faster to compute.  Neither one of these vectors yields the
-	// bounding cap with minimal surface area, but they are both pretty close.
-	cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()})
-	for k := 0; k < 4; k++ {
-		cap = cap.AddPoint(c.Vertex(k))
-	}
-	return cap
-}
-
-// ContainsPoint reports whether this cell contains the given point. Note that
-// unlike Loop/Polygon, a Cell is considered to be a closed set. This means
-// that a point on a Cell's edge or vertex belong to the Cell and the relevant
-// adjacent Cells too.
-//
-// If you want every point to be contained by exactly one Cell,
-// you will need to convert the Cell to a Loop.
-func (c Cell) ContainsPoint(p Point) bool {
-	var uv r2.Point
-	var ok bool
-	if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
-		return false
-	}
-
-	// Expand the (u,v) bound to ensure that
-	//
-	//   CellFromPoint(p).ContainsPoint(p)
-	//
-	// is always true. To do this, we need to account for the error when
-	// converting from (u,v) coordinates to (s,t) coordinates. In the
-	// normal case the total error is at most dblEpsilon.
-	return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
-}
-
-// Encode encodes the Cell.
-func (c Cell) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	c.encode(e)
-	return e.err
-}
-
-func (c Cell) encode(e *encoder) {
-	c.id.encode(e)
-}
-
-// Decode decodes the Cell.
-func (c *Cell) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	c.decode(d)
-	return d.err
-}
-
-func (c *Cell) decode(d *decoder) {
-	c.id.decode(d)
-	*c = CellFromCellID(c.id)
-}
-
-// vertexChordDist2 returns the squared chord distance from point P to the
-// given corner vertex specified by the Hi or Lo values of each.
-func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle {
-	x := c.uv.X.Lo
-	y := c.uv.Y.Lo
-	if xHi {
-		x = c.uv.X.Hi
-	}
-	if yHi {
-		y = c.uv.Y.Hi
-	}
-
-	return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1))
-}
-
-// uEdgeIsClosest reports whether a point P is closer to the interior of the specified
-// Cell edge (either the lower or upper edge of the Cell) or to the endpoints.
-func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
-	u0 := c.uv.X.Lo
-	u1 := c.uv.X.Hi
-	v := c.uv.Y.Lo
-	if vHi {
-		v = c.uv.Y.Hi
-	}
-	// These are the normals to the planes that are perpendicular to the edge
-	// and pass through one of its two endpoints.
-	dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
-	dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
-	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
-}
-
-// vEdgeIsClosest reports whether a point P is closer to the interior of the specified
-// Cell edge (either the right or left edge of the Cell) or to the endpoints.
-func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
-	v0 := c.uv.Y.Lo
-	v1 := c.uv.Y.Hi
-	u := c.uv.X.Lo
-	if uHi {
-		u = c.uv.X.Hi
-	}
-	dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
-	dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
-	return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
-}
-
-// edgeDistance reports the distance from a Point P to a given Cell edge. The point
-// P is given by its dot product, and the uv edge by its normal in the
-// given coordinate value.
-func edgeDistance(ij, uv float64) s1.ChordAngle {
-	// Let P by the target point and let R be the closest point on the given
-	// edge AB.  The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
-	// where Q is the point P projected onto the plane through the great circle
-	// through AB.  We can compute the distance PQ^2 perpendicular to the plane
-	// from "dirIJ" (the dot product of the target point P with the edge
-	// normal) and the squared length the edge normal (1 + uv**2).
-	pq2 := (ij * ij) / (1 + uv*uv)
-
-	// We can compute the distance QR as (1 - OQ) where O is the sphere origin,
-	// and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
-	// (This calculation loses accuracy as angle POQ approaches Pi/2.)
-	qr := 1 - math.Sqrt(1-pq2)
-	return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
-}
-
-// distanceInternal reports the distance from the given point to the interior of
-// the cell if toInterior is true or to the boundary of the cell otherwise.
-func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
-	// All calculations are done in the (u,v,w) coordinates of this cell's face.
-	target := faceXYZtoUVW(int(c.face), targetXYZ)
-
-	// Compute dot products with all four upward or rightward-facing edge
-	// normals. dirIJ is the dot product for the edge corresponding to axis
-	// I, endpoint J. For example, dir01 is the right edge of the Cell
-	// (corresponding to the upper endpoint of the u-axis).
-	dir00 := target.X - target.Z*c.uv.X.Lo
-	dir01 := target.X - target.Z*c.uv.X.Hi
-	dir10 := target.Y - target.Z*c.uv.Y.Lo
-	dir11 := target.Y - target.Z*c.uv.Y.Hi
-	inside := true
-	if dir00 < 0 {
-		inside = false // Target is to the left of the cell
-		if c.vEdgeIsClosest(target, false) {
-			return edgeDistance(-dir00, c.uv.X.Lo)
-		}
-	}
-	if dir01 > 0 {
-		inside = false // Target is to the right of the cell
-		if c.vEdgeIsClosest(target, true) {
-			return edgeDistance(dir01, c.uv.X.Hi)
-		}
-	}
-	if dir10 < 0 {
-		inside = false // Target is below the cell
-		if c.uEdgeIsClosest(target, false) {
-			return edgeDistance(-dir10, c.uv.Y.Lo)
-		}
-	}
-	if dir11 > 0 {
-		inside = false // Target is above the cell
-		if c.uEdgeIsClosest(target, true) {
-			return edgeDistance(dir11, c.uv.Y.Hi)
-		}
-	}
-	if inside {
-		if toInterior {
-			return s1.ChordAngle(0)
-		}
-		// Although you might think of Cells as rectangles, they are actually
-		// arbitrary quadrilaterals after they are projected onto the sphere.
-		// Therefore the simplest approach is just to find the minimum distance to
-		// any of the four edges.
-		return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo),
-			edgeDistance(dir01, c.uv.X.Hi),
-			edgeDistance(-dir10, c.uv.Y.Lo),
-			edgeDistance(dir11, c.uv.Y.Hi))
-	}
-
-	// Otherwise, the closest point is one of the four cell vertices. Note that
-	// it is *not* trivial to narrow down the candidates based on the edge sign
-	// tests above, because (1) the edges don't meet at right angles and (2)
-	// there are points on the far side of the sphere that are both above *and*
-	// below the cell, etc.
-	return minChordAngle(c.vertexChordDist2(target, false, false),
-		c.vertexChordDist2(target, true, false),
-		c.vertexChordDist2(target, false, true),
-		c.vertexChordDist2(target, true, true))
-}
-
-// Distance reports the distance from the cell to the given point. Returns zero if
-// the point is inside the cell.
-func (c Cell) Distance(target Point) s1.ChordAngle {
-	return c.distanceInternal(target, true)
-}
-
-// MaxDistance reports the maximum distance from the cell (including its interior) to the
-// given point.
-func (c Cell) MaxDistance(target Point) s1.ChordAngle {
-	// First check the 4 cell vertices.  If all are within the hemisphere
-	// centered around target, the max distance will be to one of these vertices.
-	targetUVW := faceXYZtoUVW(int(c.face), target)
-	maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false),
-		c.vertexChordDist2(targetUVW, true, false),
-		c.vertexChordDist2(targetUVW, false, true),
-		c.vertexChordDist2(targetUVW, true, true))
-
-	if maxDist <= s1.RightChordAngle {
-		return maxDist
-	}
-
-	// Otherwise, find the minimum distance dMin to the antipodal point and the
-	// maximum distance will be pi - dMin.
-	return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)})
-}
-
-// BoundaryDistance reports the distance from the cell boundary to the given point.
-func (c Cell) BoundaryDistance(target Point) s1.ChordAngle {
-	return c.distanceInternal(target, false)
-}
-
-// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns
-// zero if the edge intersects the cell interior.
-func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle {
-	// Possible optimizations:
-	//  - Currently the (cell vertex, edge endpoint) distances are computed
-	//    twice each, and the length of AB is computed 4 times.
-	//  - To fix this, refactor GetDistance(target) so that it skips calculating
-	//    the distance to each cell vertex. Instead, compute the cell vertices
-	//    and distances in this function, and add a low-level UpdateMinDistance
-	//    that allows the XA, XB, and AB distances to be passed in.
-	//  - It might also be more efficient to do all calculations in UVW-space,
-	//    since this would involve transforming 2 points rather than 4.
-
-	// First, check the minimum distance to the edge endpoints A and B.
-	// (This also detects whether either endpoint is inside the cell.)
-	minDist := minChordAngle(c.Distance(a), c.Distance(b))
-	if minDist == 0 {
-		return minDist
-	}
-
-	// Otherwise, check whether the edge crosses the cell boundary.
-	crosser := NewChainEdgeCrosser(a, b, c.Vertex(3))
-	for i := 0; i < 4; i++ {
-		if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross {
-			return 0
-		}
-	}
-
-	// Finally, check whether the minimum distance occurs between a cell vertex
-	// and the interior of the edge AB. (Some of this work is redundant, since
-	// it also checks the distance to the endpoints A and B again.)
-	//
-	// Note that we don't need to check the distance from the interior of AB to
-	// the interior of a cell edge, because the only way that this distance can
-	// be minimal is if the two edges cross (already checked above).
-	for i := 0; i < 4; i++ {
-		minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist)
-	}
-	return minDist
-}
-
-// MaxDistanceToEdge returns the maximum distance from the cell (including its interior)
-// to the given edge AB.
-func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle {
-	// If the maximum distance from both endpoints to the cell is less than π/2
-	// then the maximum distance from the edge to the cell is the maximum of the
-	// two endpoint distances.
-	maxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b))
-	if maxDist <= s1.RightChordAngle {
-		return maxDist
-	}
-
-	return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)})
-}
-
-// DistanceToCell returns the minimum distance from this cell to the given cell.
-// It returns zero if one cell contains the other.
-func (c Cell) DistanceToCell(target Cell) s1.ChordAngle {
-	// If the cells intersect, the distance is zero.  We use the (u,v) ranges
-	// rather than CellID intersects so that cells that share a partial edge or
-	// corner are considered to intersect.
-	if c.face == target.face && c.uv.Intersects(target.uv) {
-		return 0
-	}
-
-	// Otherwise, the minimum distance always occurs between a vertex of one
-	// cell and an edge of the other cell (including the edge endpoints).  This
-	// represents a total of 32 possible (vertex, edge) pairs.
-	//
-	// TODO(roberts): This could be optimized to be at least 5x faster by pruning
-	// the set of possible closest vertex/edge pairs using the faces and (u,v)
-	// ranges of both cells.
-	var va, vb [4]Point
-	for i := 0; i < 4; i++ {
-		va[i] = c.Vertex(i)
-		vb[i] = target.Vertex(i)
-	}
-	minDist := s1.InfChordAngle()
-	for i := 0; i < 4; i++ {
-		for j := 0; j < 4; j++ {
-			minDist, _ = UpdateMinDistance(va[i], vb[j], vb[(j+1)&3], minDist)
-			minDist, _ = UpdateMinDistance(vb[i], va[j], va[(j+1)&3], minDist)
-		}
-	}
-	return minDist
-}
-
-// MaxDistanceToCell returns the maximum distance from the cell (including its
-// interior) to the given target cell.
-func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle {
-	// Need to check the antipodal target for intersection with the cell. If it
-	// intersects, the distance is the straight ChordAngle.
-	// antipodalUV is the transpose of the original UV, interpreted within the opposite face.
-	antipodalUV := r2.Rect{target.uv.Y, target.uv.X}
-	if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) {
-		return s1.StraightChordAngle
-	}
-
-	// Otherwise, the maximum distance always occurs between a vertex of one
-	// cell and an edge of the other cell (including the edge endpoints).  This
-	// represents a total of 32 possible (vertex, edge) pairs.
-	//
-	// TODO(roberts): When the maximum distance is at most π/2, the maximum is
-	// always attained between a pair of vertices, and this could be made much
-	// faster by testing each vertex pair once rather than the current 4 times.
-	var va, vb [4]Point
-	for i := 0; i < 4; i++ {
-		va[i] = c.Vertex(i)
-		vb[i] = target.Vertex(i)
-	}
-	maxDist := s1.NegativeChordAngle
-	for i := 0; i < 4; i++ {
-		for j := 0; j < 4; j++ {
-			maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist)
-			maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist)
-		}
-	}
-	return maxDist
-}

+ 0 - 942
vendor/github.com/golang/geo/s2/cellid.go

@@ -1,942 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"math"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// CellID uniquely identifies a cell in the S2 cell decomposition.
-// The most significant 3 bits encode the face number (0-5). The
-// remaining 61 bits encode the position of the center of this cell
-// along the Hilbert curve on that face. The zero value and the value
-// (1<<64)-1 are invalid cell IDs. The first compares less than any
-// valid cell ID, the second as greater than any valid cell ID.
-//
-// Sequentially increasing cell IDs follow a continuous space-filling curve
-// over the entire sphere. They have the following properties:
-//
-//  - The ID of a cell at level k consists of a 3-bit face number followed
-//    by k bit pairs that recursively select one of the four children of
-//    each cell. The next bit is always 1, and all other bits are 0.
-//    Therefore, the level of a cell is determined by the position of its
-//    lowest-numbered bit that is turned on (for a cell at level k, this
-//    position is 2 * (maxLevel - k)).
-//
-//  - The ID of a parent cell is at the midpoint of the range of IDs spanned
-//    by its children (or by its descendants at any level).
-//
-// Leaf cells are often used to represent points on the unit sphere, and
-// this type provides methods for converting directly between these two
-// representations. For cells that represent 2D regions rather than
-// discrete point, it is better to use Cells.
-type CellID uint64
-
-// SentinelCellID is an invalid cell ID guaranteed to be larger than any
-// valid cell ID. It is used primarily by ShapeIndex. The value is also used
-// by some S2 types when encoding data.
-// Note that the sentinel's RangeMin == RangeMax == itself.
-const SentinelCellID = CellID(^uint64(0))
-
-// sortCellIDs sorts the slice of CellIDs in place.
-func sortCellIDs(ci []CellID) {
-	sort.Sort(cellIDs(ci))
-}
-
-// cellIDs implements the Sort interface for slices of CellIDs.
-type cellIDs []CellID
-
-func (c cellIDs) Len() int           { return len(c) }
-func (c cellIDs) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
-func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
-
-// TODO(dsymonds): Some of these constants should probably be exported.
-const (
-	faceBits = 3
-	numFaces = 6
-
-	// This is the number of levels needed to specify a leaf cell.
-	maxLevel = 30
-
-	// The extra position bit (61 rather than 60) lets us encode each cell as its
-	// Hilbert curve position at the cell center (which is halfway along the
-	// portion of the Hilbert curve that fills that cell).
-	posBits = 2*maxLevel + 1
-
-	// The maximum index of a valid leaf cell plus one. The range of valid leaf
-	// cell indices is [0..maxSize-1].
-	maxSize = 1 << maxLevel
-
-	wrapOffset = uint64(numFaces) << posBits
-)
-
-// CellIDFromFacePosLevel returns a cell given its face in the range
-// [0,5], the 61-bit Hilbert curve position pos within that face, and
-// the level in the range [0,maxLevel]. The position in the cell ID
-// will be truncated to correspond to the Hilbert curve position at
-// the center of the returned cell.
-func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
-	return CellID(uint64(face)<<posBits + pos | 1).Parent(level)
-}
-
-// CellIDFromFace returns the cell corresponding to a given S2 cube face.
-func CellIDFromFace(face int) CellID {
-	return CellID((uint64(face) << posBits) + lsbForLevel(0))
-}
-
-// CellIDFromLatLng returns the leaf cell containing ll.
-func CellIDFromLatLng(ll LatLng) CellID {
-	return cellIDFromPoint(PointFromLatLng(ll))
-}
-
-// CellIDFromToken returns a cell given a hex-encoded string of its uint64 ID.
-func CellIDFromToken(s string) CellID {
-	if len(s) > 16 {
-		return CellID(0)
-	}
-	n, err := strconv.ParseUint(s, 16, 64)
-	if err != nil {
-		return CellID(0)
-	}
-	// Equivalent to right-padding string with zeros to 16 characters.
-	if len(s) < 16 {
-		n = n << (4 * uint(16-len(s)))
-	}
-	return CellID(n)
-}
-
-// ToToken returns a hex-encoded string of the uint64 cell id, with leading
-// zeros included but trailing zeros stripped.
-func (ci CellID) ToToken() string {
-	s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0")
-	if len(s) == 0 {
-		return "X"
-	}
-	return s
-}
-
-// IsValid reports whether ci represents a valid cell.
-func (ci CellID) IsValid() bool {
-	return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
-}
-
-// Face returns the cube face for this cell ID, in the range [0,5].
-func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
-
-// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
-func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
-
-// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
-func (ci CellID) Level() int {
-	return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
-}
-
-// IsLeaf returns whether this cell ID is at the deepest level;
-// that is, the level at which the cells are smallest.
-func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
-
-// ChildPosition returns the child position (0..3) of this cell's
-// ancestor at the given level, relative to its parent.  The argument
-// should be in the range 1..kMaxLevel.  For example,
-// ChildPosition(1) returns the position of this cell's level-1
-// ancestor within its top-level face cell.
-func (ci CellID) ChildPosition(level int) int {
-	return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
-}
-
-// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
-func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
-
-// Parent returns the cell at the given level, which must be no greater than the current level.
-func (ci CellID) Parent(level int) CellID {
-	lsb := lsbForLevel(level)
-	return CellID((uint64(ci) & -lsb) | lsb)
-}
-
-// immediateParent is cheaper than Parent, but assumes !ci.isFace().
-func (ci CellID) immediateParent() CellID {
-	nlsb := CellID(ci.lsb() << 2)
-	return (ci & -nlsb) | nlsb
-}
-
-// isFace returns whether this is a top-level (face) cell.
-func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 }
-
-// lsb returns the least significant bit that is set.
-func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) }
-
-// Children returns the four immediate children of this cell.
-// If ci is a leaf cell, it returns four identical cells that are not the children.
-func (ci CellID) Children() [4]CellID {
-	var ch [4]CellID
-	lsb := CellID(ci.lsb())
-	ch[0] = ci - lsb + lsb>>2
-	lsb >>= 1
-	ch[1] = ch[0] + lsb
-	ch[2] = ch[1] + lsb
-	ch[3] = ch[2] + lsb
-	return ch
-}
-
-func sizeIJ(level int) int {
-	return 1 << uint(maxLevel-level)
-}
-
-// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges.
-// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space.
-// All neighbors are guaranteed to be distinct.
-func (ci CellID) EdgeNeighbors() [4]CellID {
-	level := ci.Level()
-	size := sizeIJ(level)
-	f, i, j, _ := ci.faceIJOrientation()
-	return [4]CellID{
-		cellIDFromFaceIJWrap(f, i, j-size).Parent(level),
-		cellIDFromFaceIJWrap(f, i+size, j).Parent(level),
-		cellIDFromFaceIJWrap(f, i, j+size).Parent(level),
-		cellIDFromFaceIJWrap(f, i-size, j).Parent(level),
-	}
-}
-
-// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level.
-// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of
-// the 8 cube vertices.)
-func (ci CellID) VertexNeighbors(level int) []CellID {
-	halfSize := sizeIJ(level + 1)
-	size := halfSize << 1
-	f, i, j, _ := ci.faceIJOrientation()
-
-	var isame, jsame bool
-	var ioffset, joffset int
-	if i&halfSize != 0 {
-		ioffset = size
-		isame = (i + size) < maxSize
-	} else {
-		ioffset = -size
-		isame = (i - size) >= 0
-	}
-	if j&halfSize != 0 {
-		joffset = size
-		jsame = (j + size) < maxSize
-	} else {
-		joffset = -size
-		jsame = (j - size) >= 0
-	}
-
-	results := []CellID{
-		ci.Parent(level),
-		cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level),
-		cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level),
-	}
-
-	if isame || jsame {
-		results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level))
-	}
-
-	return results
-}
-
-// AllNeighbors returns all neighbors of this cell at the given level. Two
-// cells X and Y are neighbors if their boundaries intersect but their
-// interiors do not. In particular, two cells that intersect at a single
-// point are neighbors. Note that for cells adjacent to a face vertex, the
-// same neighbor may be returned more than once. There could be up to eight
-// neighbors including the diagonal ones that share the vertex.
-//
-// This requires level >= ci.Level().
-func (ci CellID) AllNeighbors(level int) []CellID {
-	var neighbors []CellID
-
-	face, i, j, _ := ci.faceIJOrientation()
-
-	// Find the coordinates of the lower left-hand leaf cell. We need to
-	// normalize (i,j) to a known position within the cell because level
-	// may be larger than this cell's level.
-	size := sizeIJ(ci.Level())
-	i &= -size
-	j &= -size
-
-	nbrSize := sizeIJ(level)
-
-	// We compute the top-bottom, left-right, and diagonal neighbors in one
-	// pass. The loop test is at the end of the loop to avoid 32-bit overflow.
-	for k := -nbrSize; ; k += nbrSize {
-		var sameFace bool
-		if k < 0 {
-			sameFace = (j+k >= 0)
-		} else if k >= size {
-			sameFace = (j+k < maxSize)
-		} else {
-			sameFace = true
-			// Top and bottom neighbors.
-			neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize,
-				j-size >= 0).Parent(level))
-			neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size,
-				j+size < maxSize).Parent(level))
-		}
-
-		// Left, right, and diagonal neighbors.
-		neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k,
-			sameFace && i-size >= 0).Parent(level))
-		neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k,
-			sameFace && i+size < maxSize).Parent(level))
-
-		if k >= size {
-			break
-		}
-	}
-
-	return neighbors
-}
-
-// RangeMin returns the minimum CellID that is contained within this cell.
-func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) }
-
-// RangeMax returns the maximum CellID that is contained within this cell.
-func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) }
-
-// Contains returns true iff the CellID contains oci.
-func (ci CellID) Contains(oci CellID) bool {
-	return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax())
-}
-
-// Intersects returns true iff the CellID intersects oci.
-func (ci CellID) Intersects(oci CellID) bool {
-	return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin())
-}
-
-// String returns the string representation of the cell ID in the form "1/3210".
-func (ci CellID) String() string {
-	if !ci.IsValid() {
-		return "Invalid: " + strconv.FormatInt(int64(ci), 16)
-	}
-	var b bytes.Buffer
-	b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above
-	b.WriteByte('/')
-	for level := 1; level <= ci.Level(); level++ {
-		b.WriteByte("0123"[ci.ChildPosition(level)])
-	}
-	return b.String()
-}
-
-// cellIDFromString returns a CellID from a string in the form "1/3210".
-func cellIDFromString(s string) CellID {
-	level := len(s) - 2
-	if level < 0 || level > maxLevel {
-		return CellID(0)
-	}
-	face := int(s[0] - '0')
-	if face < 0 || face > 5 || s[1] != '/' {
-		return CellID(0)
-	}
-	id := CellIDFromFace(face)
-	for i := 2; i < len(s); i++ {
-		childPos := s[i] - '0'
-		if childPos < 0 || childPos > 3 {
-			return CellID(0)
-		}
-		id = id.Children()[childPos]
-	}
-	return id
-}
-
-// Point returns the center of the s2 cell on the sphere as a Point.
-// The maximum directional error in Point (compared to the exact
-// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length
-// error is 2 * dblEpsilon (the same as Normalize).
-func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} }
-
-// LatLng returns the center of the s2 cell on the sphere as a LatLng.
-func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) }
-
-// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order.
-//
-//    for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() {
-//        ...
-//    }
-func (ci CellID) ChildBegin() CellID {
-	ol := ci.lsb()
-	return CellID(uint64(ci) - ol + ol>>2)
-}
-
-// ChildBeginAtLevel returns the first cell in a traversal of children a given level deeper than this cell, in
-// Hilbert curve order. The given level must be no smaller than the cell's level.
-// See ChildBegin for example use.
-func (ci CellID) ChildBeginAtLevel(level int) CellID {
-	return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level))
-}
-
-// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order.
-// The returned cell may be invalid.
-func (ci CellID) ChildEnd() CellID {
-	ol := ci.lsb()
-	return CellID(uint64(ci) + ol + ol>>2)
-}
-
-// ChildEndAtLevel returns the first cell after the last child in a traversal of children a given level deeper
-// than this cell, in Hilbert curve order.
-// The given level must be no smaller than the cell's level.
-// The returned cell may be invalid.
-func (ci CellID) ChildEndAtLevel(level int) CellID {
-	return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level))
-}
-
-// Next returns the next cell along the Hilbert curve.
-// This is expected to be used with ChildBegin and ChildEnd,
-// or ChildBeginAtLevel and ChildEndAtLevel.
-func (ci CellID) Next() CellID {
-	return CellID(uint64(ci) + ci.lsb()<<1)
-}
-
-// Prev returns the previous cell along the Hilbert curve.
-func (ci CellID) Prev() CellID {
-	return CellID(uint64(ci) - ci.lsb()<<1)
-}
-
-// NextWrap returns the next cell along the Hilbert curve, wrapping from last to
-// first as necessary. This should not be used with ChildBegin and ChildEnd.
-func (ci CellID) NextWrap() CellID {
-	n := ci.Next()
-	if uint64(n) < wrapOffset {
-		return n
-	}
-	return CellID(uint64(n) - wrapOffset)
-}
-
-// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from
-// first to last as necessary. This should not be used with ChildBegin and ChildEnd.
-func (ci CellID) PrevWrap() CellID {
-	p := ci.Prev()
-	if uint64(p) < wrapOffset {
-		return p
-	}
-	return CellID(uint64(p) + wrapOffset)
-}
-
-// AdvanceWrap advances or retreats the indicated number of steps along the
-// Hilbert curve at the current level and returns the new position. The
-// position wraps between the first and last faces as necessary.
-func (ci CellID) AdvanceWrap(steps int64) CellID {
-	if steps == 0 {
-		return ci
-	}
-
-	// We clamp the number of steps if necessary to ensure that we do not
-	// advance past the End() or before the Begin() of this level.
-	shift := uint(2*(maxLevel-ci.Level()) + 1)
-	if steps < 0 {
-		if min := -int64(uint64(ci) >> shift); steps < min {
-			wrap := int64(wrapOffset >> shift)
-			steps %= wrap
-			if steps < min {
-				steps += wrap
-			}
-		}
-	} else {
-		// Unlike Advance(), we don't want to return End(level).
-		if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max {
-			wrap := int64(wrapOffset >> shift)
-			steps %= wrap
-			if steps > max {
-				steps -= wrap
-			}
-		}
-	}
-
-	// If steps is negative, then shifting it left has undefined behavior.
-	// Cast to uint64 for a 2's complement answer.
-	return CellID(uint64(ci) + (uint64(steps) << shift))
-}
-
-// Encode encodes the CellID.
-func (ci CellID) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	ci.encode(e)
-	return e.err
-}
-
-func (ci CellID) encode(e *encoder) {
-	e.writeUint64(uint64(ci))
-}
-
-// Decode decodes the CellID.
-func (ci *CellID) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	ci.decode(d)
-	return d.err
-}
-
-func (ci *CellID) decode(d *decoder) {
-	*ci = CellID(d.readUint64())
-}
-
-// TODO: the methods below are not exported yet.  Settle on the entire API design
-// before doing this.  Do we want to mirror the C++ one as closely as possible?
-
-// distanceFromBegin returns the number of steps that this cell is from the first
-// node in the S2 hierarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())).
-// The return value is always non-negative.
-func (ci CellID) distanceFromBegin() int64 {
-	return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
-}
-
-// rawPoint returns an unnormalized r3 vector from the origin through the center
-// of the s2 cell on the sphere.
-func (ci CellID) rawPoint() r3.Vector {
-	face, si, ti := ci.faceSiTi()
-	return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti)))
-}
-
-// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
-func (ci CellID) faceSiTi() (face int, si, ti uint32) {
-	face, i, j, _ := ci.faceIJOrientation()
-	delta := 0
-	if ci.IsLeaf() {
-		delta = 1
-	} else {
-		if (i^(int(ci)>>2))&1 != 0 {
-			delta = 2
-		}
-	}
-	return face, uint32(2*i + delta), uint32(2*j + delta)
-}
-
-// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
-func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
-	f = ci.Face()
-	orientation = f & swapMask
-	nbits := maxLevel - 7*lookupBits // first iteration
-
-	// Each iteration maps 8 bits of the Hilbert curve position into
-	// 4 bits of "i" and "j". The lookup table transforms a key of the
-	// form "ppppppppoo" to a value of the form "iiiijjjjoo", where the
-	// letters [ijpo] represents bits of "i", "j", the Hilbert curve
-	// position, and the Hilbert curve orientation respectively.
-	//
-	// On the first iteration we need to be careful to clear out the bits
-	// representing the cube face.
-	for k := 7; k >= 0; k-- {
-		orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2
-		orientation = lookupIJ[orientation]
-		i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
-		j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
-		orientation &= (swapMask | invertMask)
-		nbits = lookupBits // following iterations
-	}
-
-	// The position of a non-leaf cell at level "n" consists of a prefix of
-	// 2*n bits that identifies the cell, followed by a suffix of
-	// 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is
-	// just "1" and has no effect. Otherwise, it consists of "10", followed
-	// by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has
-	// no effect, while each occurrence of "00" has the effect of reversing
-	// the swapMask bit.
-	if ci.lsb()&0x1111111111111110 != 0 {
-		orientation ^= swapMask
-	}
-
-	return
-}
-
-// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates.
-func cellIDFromFaceIJ(f, i, j int) CellID {
-	// Note that this value gets shifted one bit to the left at the end
-	// of the function.
-	n := uint64(f) << (posBits - 1)
-	// Alternating faces have opposite Hilbert curve orientations; this
-	// is necessary in order for all faces to have a right-handed
-	// coordinate system.
-	bits := f & swapMask
-	// Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert
-	// curve position.  The lookup table transforms a 10-bit key of the form
-	// "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the
-	// letters [ijpo] denote bits of "i", "j", Hilbert curve position, and
-	// Hilbert curve orientation respectively.
-	for k := 7; k >= 0; k-- {
-		mask := (1 << lookupBits) - 1
-		bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2)
-		bits += ((j >> uint(k*lookupBits)) & mask) << 2
-		bits = lookupPos[bits]
-		n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits)
-		bits &= (swapMask | invertMask)
-	}
-	return CellID(n*2 + 1)
-}
-
-func cellIDFromFaceIJWrap(f, i, j int) CellID {
-	// Convert i and j to the coordinates of a leaf cell just beyond the
-	// boundary of this face.  This prevents 32-bit overflow in the case
-	// of finding the neighbors of a face cell.
-	i = clampInt(i, -1, maxSize)
-	j = clampInt(j, -1, maxSize)
-
-	// We want to wrap these coordinates onto the appropriate adjacent face.
-	// The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
-	// (which yields a point outside the normal face boundary), and then call
-	// xyzToFaceUV to project back onto the correct face.
-	//
-	// The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using
-	// the linear projection (u=2*s-1 and v=2*t-1).  (The code further below
-	// converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1).
-	// Any projection would work here, so we use the simplest.)  We also clamp
-	// the (u,v) coordinates so that the point is barely outside the
-	// [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step
-	// (which divides by the new z coordinate) might change the other
-	// coordinates enough so that we end up in the wrong leaf cell.
-	const scale = 1.0 / maxSize
-	limit := math.Nextafter(1, 2)
-	u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize)))
-	v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize)))
-
-	// Find the leaf cell coordinates on the adjacent face, and convert
-	// them to a cell id at the appropriate level.
-	f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v))
-	return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1)))
-}
-
-func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
-	if sameFace {
-		return cellIDFromFaceIJ(f, i, j)
-	}
-	return cellIDFromFaceIJWrap(f, i, j)
-}
-
-// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
-// s- or t-value contained by that cell. The argument must be in the range
-// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
-// cell indices.
-func ijToSTMin(i int) float64 {
-	return float64(i) / float64(maxSize)
-}
-
-// stToIJ converts value in ST coordinates to a value in IJ coordinates.
-func stToIJ(s float64) int {
-	return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1)
-}
-
-// cellIDFromPoint returns a leaf cell containing point p. Usually there is
-// exactly one such cell, but for points along the edge of a cell, any
-// adjacent cell may be (deterministically) chosen. This is because
-// s2.CellIDs are considered to be closed sets. The returned cell will
-// always contain the given point, i.e.
-//
-//   CellFromPoint(p).ContainsPoint(p)
-//
-// is always true.
-func cellIDFromPoint(p Point) CellID {
-	f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z})
-	i := stToIJ(uvToST(u))
-	j := stToIJ(uvToST(v))
-	return cellIDFromFaceIJ(f, i, j)
-}
-
-// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given
-// level containing the leaf cell with the given (i,j)-coordinates.
-func ijLevelToBoundUV(i, j, level int) r2.Rect {
-	cellSize := sizeIJ(level)
-	xLo := i & -cellSize
-	yLo := j & -cellSize
-
-	return r2.Rect{
-		X: r1.Interval{
-			Lo: stToUV(ijToSTMin(xLo)),
-			Hi: stToUV(ijToSTMin(xLo + cellSize)),
-		},
-		Y: r1.Interval{
-			Lo: stToUV(ijToSTMin(yLo)),
-			Hi: stToUV(ijToSTMin(yLo + cellSize)),
-		},
-	}
-}
-
-// Constants related to the bit mangling in the Cell ID.
-const (
-	lookupBits = 4
-	swapMask   = 0x01
-	invertMask = 0x02
-)
-
-// The following lookup tables are used to convert efficiently between an
-// (i,j) cell index and the corresponding position along the Hilbert curve.
-//
-// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the
-// orientation of the current cell into 8 bits representing the order in which
-// that subcell is visited by the Hilbert curve, plus 2 bits indicating the
-// new orientation of the Hilbert curve within that subcell. (Cell
-// orientations are represented as combination of swapMask and invertMask.)
-//
-// lookupIJ is an inverted table used for mapping in the opposite
-// direction.
-//
-// We also experimented with looking up 16 bits at a time (14 bits of position
-// plus 2 of orientation) but found that smaller lookup tables gave better
-// performance. (2KB fits easily in the primary cache.)
-var (
-	ijToPos = [4][4]int{
-		{0, 1, 3, 2}, // canonical order
-		{0, 3, 1, 2}, // axes swapped
-		{2, 3, 1, 0}, // bits inverted
-		{2, 1, 3, 0}, // swapped & inverted
-	}
-	posToIJ = [4][4]int{
-		{0, 1, 3, 2}, // canonical order:    (0,0), (0,1), (1,1), (1,0)
-		{0, 2, 3, 1}, // axes swapped:       (0,0), (1,0), (1,1), (0,1)
-		{3, 2, 0, 1}, // bits inverted:      (1,1), (1,0), (0,0), (0,1)
-		{3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0)
-	}
-	posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask}
-	lookupIJ         [1 << (2*lookupBits + 2)]int
-	lookupPos        [1 << (2*lookupBits + 2)]int
-)
-
-func init() {
-	initLookupCell(0, 0, 0, 0, 0, 0)
-	initLookupCell(0, 0, 0, swapMask, 0, swapMask)
-	initLookupCell(0, 0, 0, invertMask, 0, invertMask)
-	initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask)
-}
-
-// initLookupCell initializes the lookupIJ table at init time.
-func initLookupCell(level, i, j, origOrientation, pos, orientation int) {
-	if level == lookupBits {
-		ij := (i << lookupBits) + j
-		lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation
-		lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation
-		return
-	}
-
-	level++
-	i <<= 1
-	j <<= 1
-	pos <<= 2
-	r := posToIJ[orientation]
-	initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0])
-	initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1])
-	initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2])
-	initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3])
-}
-
-// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs.
-func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
-	bits := uint64(ci ^ other)
-	if bits < ci.lsb() {
-		bits = ci.lsb()
-	}
-	if bits < other.lsb() {
-		bits = other.lsb()
-	}
-
-	msbPos := findMSBSetNonZero64(bits)
-	if msbPos > 60 {
-		return 0, false
-	}
-	return (60 - msbPos) >> 1, true
-}
-
-// Advance advances or retreats the indicated number of steps along the
-// Hilbert curve at the current level, and returns the new position. The
-// position is never advanced past End() or before Begin().
-func (ci CellID) Advance(steps int64) CellID {
-	if steps == 0 {
-		return ci
-	}
-
-	// We clamp the number of steps if necessary to ensure that we do not
-	// advance past the End() or before the Begin() of this level. Note that
-	// minSteps and maxSteps always fit in a signed 64-bit integer.
-	stepShift := uint(2*(maxLevel-ci.Level()) + 1)
-	if steps < 0 {
-		minSteps := -int64(uint64(ci) >> stepShift)
-		if steps < minSteps {
-			steps = minSteps
-		}
-	} else {
-		maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift)
-		if steps > maxSteps {
-			steps = maxSteps
-		}
-	}
-	return ci + CellID(steps)<<stepShift
-}
-
-// centerST return the center of the CellID in (s,t)-space.
-func (ci CellID) centerST() r2.Point {
-	_, si, ti := ci.faceSiTi()
-	return r2.Point{siTiToST(si), siTiToST(ti)}
-}
-
-// sizeST returns the edge length of this CellID in (s,t)-space at the given level.
-func (ci CellID) sizeST(level int) float64 {
-	return ijToSTMin(sizeIJ(level))
-}
-
-// boundST returns the bound of this CellID in (s,t)-space.
-func (ci CellID) boundST() r2.Rect {
-	s := ci.sizeST(ci.Level())
-	return r2.RectFromCenterSize(ci.centerST(), r2.Point{s, s})
-}
-
-// centerUV returns the center of this CellID in (u,v)-space. Note that
-// the center of the cell is defined as the point at which it is recursively
-// subdivided into four children; in general, it is not at the midpoint of
-// the (u,v) rectangle covered by the cell.
-func (ci CellID) centerUV() r2.Point {
-	_, si, ti := ci.faceSiTi()
-	return r2.Point{stToUV(siTiToST(si)), stToUV(siTiToST(ti))}
-}
-
-// boundUV returns the bound of this CellID in (u,v)-space.
-func (ci CellID) boundUV() r2.Rect {
-	_, i, j, _ := ci.faceIJOrientation()
-	return ijLevelToBoundUV(i, j, ci.Level())
-}
-
-// expandEndpoint returns a new u-coordinate u' such that the distance from the
-// line u=u' to the given edge (u,v0)-(u,v1) is exactly the given distance
-// (which is specified as the sine of the angle corresponding to the distance).
-func expandEndpoint(u, maxV, sinDist float64) float64 {
-	// This is based on solving a spherical right triangle, similar to the
-	// calculation in Cap.RectBound.
-	// Given an edge of the form (u,v0)-(u,v1), let maxV = max(abs(v0), abs(v1)).
-	sinUShift := sinDist * math.Sqrt((1+u*u+maxV*maxV)/(1+u*u))
-	cosUShift := math.Sqrt(1 - sinUShift*sinUShift)
-	// The following is an expansion of tan(atan(u) + asin(sinUShift)).
-	return (cosUShift*u + sinUShift) / (cosUShift - sinUShift*u)
-}
-
-// expandedByDistanceUV returns a rectangle expanded in (u,v)-space so that it
-// contains all points within the given distance of the boundary, and return the
-// smallest such rectangle. If the distance is negative, then instead shrink this
-// rectangle so that it excludes all points within the given absolute distance
-// of the boundary.
-//
-// Distances are measured *on the sphere*, not in (u,v)-space. For example,
-// you can use this method to expand the (u,v)-bound of an CellID so that
-// it contains all points within 5km of the original cell. You can then
-// test whether a point lies within the expanded bounds like this:
-//
-//   if u, v, ok := faceXYZtoUV(face, point); ok && bound.ContainsPoint(r2.Point{u,v}) { ... }
-//
-// Limitations:
-//
-//  - Because the rectangle is drawn on one of the six cube-face planes
-//    (i.e., {x,y,z} = +/-1), it can cover at most one hemisphere. This
-//    limits the maximum amount that a rectangle can be expanded. For
-//    example, CellID bounds can be expanded safely by at most 45 degrees
-//    (about 5000 km on the Earth's surface).
-//
-//  - The implementation is not exact for negative distances. The resulting
-//    rectangle will exclude all points within the given distance of the
-//    boundary but may be slightly smaller than necessary.
-func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
-	// Expand each of the four sides of the rectangle just enough to include all
-	// points within the given distance of that side. (The rectangle may be
-	// expanded by a different amount in (u,v)-space on each side.)
-	maxU := math.Max(math.Abs(uv.X.Lo), math.Abs(uv.X.Hi))
-	maxV := math.Max(math.Abs(uv.Y.Lo), math.Abs(uv.Y.Hi))
-	sinDist := math.Sin(float64(distance))
-	return r2.Rect{
-		X: r1.Interval{expandEndpoint(uv.X.Lo, maxV, -sinDist),
-			expandEndpoint(uv.X.Hi, maxV, sinDist)},
-		Y: r1.Interval{expandEndpoint(uv.Y.Lo, maxU, -sinDist),
-			expandEndpoint(uv.Y.Hi, maxU, sinDist)}}
-}
-
-// MaxTile returns the largest cell with the same RangeMin such that
-// RangeMax < limit.RangeMin. It returns limit if no such cell exists.
-// This method can be used to generate a small set of CellIDs that covers
-// a given range (a tiling). This example shows how to generate a tiling
-// for a semi-open range of leaf cells [start, limit):
-//
-//   for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit)) { ... }
-//
-// Note that in general the cells in the tiling will be of different sizes;
-// they gradually get larger (near the middle of the range) and then
-// gradually get smaller as limit is approached.
-func (ci CellID) MaxTile(limit CellID) CellID {
-	start := ci.RangeMin()
-	if start >= limit.RangeMin() {
-		return limit
-	}
-
-	if ci.RangeMax() >= limit {
-		// The cell is too large, shrink it. Note that when generating coverings
-		// of CellID ranges, this loop usually executes only once. Also because
-		// ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the
-		// time we reach a leaf cell.
-		for {
-			ci = ci.Children()[0]
-			if ci.RangeMax() < limit {
-				break
-			}
-		}
-		return ci
-	}
-
-	// The cell may be too small. Grow it if necessary. Note that generally
-	// this loop only iterates once.
-	for !ci.isFace() {
-		parent := ci.immediateParent()
-		if parent.RangeMin() != start || parent.RangeMax() >= limit {
-			break
-		}
-		ci = parent
-	}
-	return ci
-}
-
-// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell.
-// Note that although (si,ti) coordinates span the range [0,2**31] in general,
-// the cell center coordinates are always in the range [1,2**31-1] and
-// therefore can be represented using a signed 32-bit integer.
-func (ci CellID) centerFaceSiTi() (face, si, ti int) {
-	// First we compute the discrete (i,j) coordinates of a leaf cell contained
-	// within the given cell. Given that cells are represented by the Hilbert
-	// curve position corresponding at their center, it turns out that the cell
-	// returned by faceIJOrientation is always one of two leaf cells closest
-	// to the center of the cell (unless the given cell is a leaf cell itself,
-	// in which case there is only one possibility).
-	//
-	// Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin,
-	// jmin) be the coordinates of its lower left-hand corner, the leaf cell
-	// returned by faceIJOrientation is either (imin + s/2, jmin + s/2)
-	// (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want.
-	// We can distinguish these two cases by looking at the low bit of i or
-	// j. In the second case the low bit is one, unless s == 2 (i.e. the
-	// level just above leaf cells) in which case the low bit is zero.
-	//
-	// In the code below, the expression ((i ^ (int(id) >> 2)) & 1) is true
-	// if we are in the second case described above.
-	face, i, j, _ := ci.faceIJOrientation()
-	delta := 0
-	if ci.IsLeaf() {
-		delta = 1
-	} else if (int64(i)^(int64(ci)>>2))&1 == 1 {
-		delta = 2
-	}
-
-	// Note that (2 * {i,j} + delta) will never overflow a 32-bit integer.
-	return face, 2*i + delta, 2*j + delta
-}

+ 0 - 590
vendor/github.com/golang/geo/s2/cellunion.go

@@ -1,590 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"sort"
-
-	"github.com/golang/geo/s1"
-)
-
-// A CellUnion is a collection of CellIDs.
-//
-// It is normalized if it is sorted, and does not contain redundancy.
-// Specifically, it may not contain the same CellID twice, nor a CellID that
-// is contained by another, nor the four sibling CellIDs that are children of
-// a single higher level CellID.
-//
-// CellUnions are not required to be normalized, but certain operations will
-// return different results if they are not (e.g. Contains).
-type CellUnion []CellID
-
-// CellUnionFromRange creates a CellUnion that covers the half-open range
-// of leaf cells [begin, end). If begin == end the resulting union is empty.
-// This requires that begin and end are both leaves, and begin <= end.
-// To create a closed-ended range, pass in end.Next().
-func CellUnionFromRange(begin, end CellID) CellUnion {
-	// We repeatedly add the largest cell we can.
-	var cu CellUnion
-	for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
-		cu = append(cu, id)
-	}
-	// The output is normalized because the cells are added in order by the iteration.
-	return cu
-}
-
-// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions.
-func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion {
-	var cu CellUnion
-	for _, cellUnion := range cellUnions {
-		cu = append(cu, cellUnion...)
-	}
-	cu.Normalize()
-	return cu
-}
-
-// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions.
-func CellUnionFromIntersection(x, y CellUnion) CellUnion {
-	var cu CellUnion
-
-	// This is a fairly efficient calculation that uses binary search to skip
-	// over sections of both input vectors. It takes constant time if all the
-	// cells of x come before or after all the cells of y in CellID order.
-	var i, j int
-	for i < len(x) && j < len(y) {
-		iMin := x[i].RangeMin()
-		jMin := y[j].RangeMin()
-		if iMin > jMin {
-			// Either j.Contains(i) or the two cells are disjoint.
-			if x[i] <= y[j].RangeMax() {
-				cu = append(cu, x[i])
-				i++
-			} else {
-				// Advance j to the first cell possibly contained by x[i].
-				j = y.lowerBound(j+1, len(y), iMin)
-				// The previous cell y[j-1] may now contain x[i].
-				if x[i] <= y[j-1].RangeMax() {
-					j--
-				}
-			}
-		} else if jMin > iMin {
-			// Identical to the code above with i and j reversed.
-			if y[j] <= x[i].RangeMax() {
-				cu = append(cu, y[j])
-				j++
-			} else {
-				i = x.lowerBound(i+1, len(x), jMin)
-				if y[j] <= x[i-1].RangeMax() {
-					i--
-				}
-			}
-		} else {
-			// i and j have the same RangeMin(), so one contains the other.
-			if x[i] < y[j] {
-				cu = append(cu, x[i])
-				i++
-			} else {
-				cu = append(cu, y[j])
-				j++
-			}
-		}
-	}
-
-	// The output is generated in sorted order.
-	cu.Normalize()
-	return cu
-}
-
-// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection
-// of a CellUnion with the given CellID. This can be useful for splitting a
-// CellUnion into chunks.
-func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion {
-	var cu CellUnion
-	if x.ContainsCellID(id) {
-		cu = append(cu, id)
-		cu.Normalize()
-		return cu
-	}
-
-	idmax := id.RangeMax()
-	for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ {
-		cu = append(cu, x[i])
-	}
-
-	cu.Normalize()
-	return cu
-}
-
-// CellUnionFromDifference creates a CellUnion from the difference (x - y)
-// of the given CellUnions.
-func CellUnionFromDifference(x, y CellUnion) CellUnion {
-	// TODO(roberts): This is approximately O(N*log(N)), but could probably
-	// use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient.
-
-	var cu CellUnion
-	for _, xid := range x {
-		cu.cellUnionDifferenceInternal(xid, &y)
-	}
-
-	// The output is generated in sorted order, and there should not be any
-	// cells that can be merged (provided that both inputs were normalized).
-	return cu
-}
-
-// The C++ constructor methods FromNormalized and FromVerbatim are not necessary
-// since they don't call Normalize, and just set the CellIDs directly on the object,
-// so straight casting is sufficient in Go to replicate this behavior.
-
-// IsValid reports whether the cell union is valid, meaning that the CellIDs are
-// valid, non-overlapping, and sorted in increasing order.
-func (cu *CellUnion) IsValid() bool {
-	for i, cid := range *cu {
-		if !cid.IsValid() {
-			return false
-		}
-		if i == 0 {
-			continue
-		}
-		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
-			return false
-		}
-	}
-	return true
-}
-
-// IsNormalized reports whether the cell union is normalized, meaning that it is
-// satisfies IsValid and that no four cells have a common parent.
-// Certain operations such as Contains will return a different
-// result if the cell union is not normalized.
-func (cu *CellUnion) IsNormalized() bool {
-	for i, cid := range *cu {
-		if !cid.IsValid() {
-			return false
-		}
-		if i == 0 {
-			continue
-		}
-		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
-			return false
-		}
-		if i < 3 {
-			continue
-		}
-		if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
-			return false
-		}
-	}
-	return true
-}
-
-// Normalize normalizes the CellUnion.
-func (cu *CellUnion) Normalize() {
-	sortCellIDs(*cu)
-
-	output := make([]CellID, 0, len(*cu)) // the list of accepted cells
-	// Loop invariant: output is a sorted list of cells with no redundancy.
-	for _, ci := range *cu {
-		// The first two passes here either ignore this new candidate,
-		// or remove previously accepted cells that are covered by this candidate.
-
-		// Ignore this cell if it is contained by the previous one.
-		// We only need to check the last accepted cell. The ordering of the
-		// cells implies containment (but not the converse), and output has no redundancy,
-		// so if this candidate is not contained by the last accepted cell
-		// then it cannot be contained by any previously accepted cell.
-		if len(output) > 0 && output[len(output)-1].Contains(ci) {
-			continue
-		}
-
-		// Discard any previously accepted cells contained by this one.
-		// This could be any contiguous trailing subsequence, but it can't be
-		// a discontiguous subsequence because of the containment property of
-		// sorted S2 cells mentioned above.
-		j := len(output) - 1 // last index to keep
-		for j >= 0 {
-			if !ci.Contains(output[j]) {
-				break
-			}
-			j--
-		}
-		output = output[:j+1]
-
-		// See if the last three cells plus this one can be collapsed.
-		// We loop because collapsing three accepted cells and adding a higher level cell
-		// could cascade into previously accepted cells.
-		for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
-			// Replace four children by their parent cell.
-			output = output[:len(output)-3]
-			ci = ci.immediateParent() // checked !ci.isFace above
-		}
-		output = append(output, ci)
-	}
-	*cu = output
-}
-
-// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
-func (cu *CellUnion) IntersectsCellID(id CellID) bool {
-	// Find index of array item that occurs directly after our probe cell:
-	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
-
-	if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() {
-		return true
-	}
-	return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
-}
-
-// ContainsCellID reports whether the CellUnion contains the given cell ID.
-// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
-//
-// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
-// of 4 child cells are *not* considered to contain their parent cell. To get
-// this behavior you must use one of the call Normalize() explicitly.
-func (cu *CellUnion) ContainsCellID(id CellID) bool {
-	// Find index of array item that occurs directly after our probe cell:
-	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
-
-	if i != len(*cu) && (*cu)[i].RangeMin() <= id {
-		return true
-	}
-	return i != 0 && (*cu)[i-1].RangeMax() >= id
-}
-
-// Denormalize replaces this CellUnion with an expanded version of the
-// CellUnion where any cell whose level is less than minLevel or where
-// (level - minLevel) is not a multiple of levelMod is replaced by its
-// children, until either both of these conditions are satisfied or the
-// maximum level is reached.
-func (cu *CellUnion) Denormalize(minLevel, levelMod int) {
-	var denorm CellUnion
-	for _, id := range *cu {
-		level := id.Level()
-		newLevel := level
-		if newLevel < minLevel {
-			newLevel = minLevel
-		}
-		if levelMod > 1 {
-			newLevel += (maxLevel - (newLevel - minLevel)) % levelMod
-			if newLevel > maxLevel {
-				newLevel = maxLevel
-			}
-		}
-		if newLevel == level {
-			denorm = append(denorm, id)
-		} else {
-			end := id.ChildEndAtLevel(newLevel)
-			for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() {
-				denorm = append(denorm, ci)
-			}
-		}
-	}
-	*cu = denorm
-}
-
-// RectBound returns a Rect that bounds this entity.
-func (cu *CellUnion) RectBound() Rect {
-	bound := EmptyRect()
-	for _, c := range *cu {
-		bound = bound.Union(CellFromCellID(c).RectBound())
-	}
-	return bound
-}
-
-// CapBound returns a Cap that bounds this entity.
-func (cu *CellUnion) CapBound() Cap {
-	if len(*cu) == 0 {
-		return EmptyCap()
-	}
-
-	// Compute the approximate centroid of the region. This won't produce the
-	// bounding cap of minimal area, but it should be close enough.
-	var centroid Point
-
-	for _, ci := range *cu {
-		area := AvgAreaMetric.Value(ci.Level())
-		centroid = Point{centroid.Add(ci.Point().Mul(area))}
-	}
-
-	if zero := (Point{}); centroid == zero {
-		centroid = PointFromCoords(1, 0, 0)
-	} else {
-		centroid = Point{centroid.Normalize()}
-	}
-
-	// Use the centroid as the cap axis, and expand the cap angle so that it
-	// contains the bounding caps of all the individual cells.  Note that it is
-	// *not* sufficient to just bound all the cell vertices because the bounding
-	// cap may be concave (i.e. cover more than one hemisphere).
-	c := CapFromPoint(centroid)
-	for _, ci := range *cu {
-		c = c.AddCap(CellFromCellID(ci).CapBound())
-	}
-
-	return c
-}
-
-// ContainsCell reports whether this cell union contains the given cell.
-func (cu *CellUnion) ContainsCell(c Cell) bool {
-	return cu.ContainsCellID(c.id)
-}
-
-// IntersectsCell reports whether this cell union intersects the given cell.
-func (cu *CellUnion) IntersectsCell(c Cell) bool {
-	return cu.IntersectsCellID(c.id)
-}
-
-// ContainsPoint reports whether this cell union contains the given point.
-func (cu *CellUnion) ContainsPoint(p Point) bool {
-	return cu.ContainsCell(CellFromPoint(p))
-}
-
-// CellUnionBound computes a covering of the CellUnion.
-func (cu *CellUnion) CellUnionBound() []CellID {
-	return cu.CapBound().CellUnionBound()
-}
-
-// LeafCellsCovered reports the number of leaf cells covered by this cell union.
-// This will be no more than 6*2^60 for the whole sphere.
-func (cu *CellUnion) LeafCellsCovered() int64 {
-	var numLeaves int64
-	for _, c := range *cu {
-		numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1)
-	}
-	return numLeaves
-}
-
-// Returns true if the given four cells have a common parent.
-// This requires that the four CellIDs are distinct.
-func areSiblings(a, b, c, d CellID) bool {
-	// A necessary (but not sufficient) condition is that the XOR of the
-	// four cell IDs must be zero. This is also very fast to test.
-	if (a ^ b ^ c) != d {
-		return false
-	}
-
-	// Now we do a slightly more expensive but exact test. First, compute a
-	// mask that blocks out the two bits that encode the child position of
-	// "id" with respect to its parent, then check that the other three
-	// children all agree with "mask".
-	mask := d.lsb() << 1
-	mask = ^(mask + (mask << 1))
-	idMasked := (uint64(d) & mask)
-	return ((uint64(a)&mask) == idMasked &&
-		(uint64(b)&mask) == idMasked &&
-		(uint64(c)&mask) == idMasked &&
-		!d.isFace())
-}
-
-// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
-func (cu *CellUnion) Contains(o CellUnion) bool {
-	// TODO(roberts): Investigate alternatives such as divide-and-conquer
-	// or alternating-skip-search that may be significantly faster in both
-	// the average and worst case. This applies to Intersects as well.
-	for _, id := range o {
-		if !cu.ContainsCellID(id) {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
-func (cu *CellUnion) Intersects(o CellUnion) bool {
-	for _, c := range *cu {
-		if o.IntersectsCellID(c) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// lowerBound returns the index in this CellUnion to the first element whose value
-// is not considered to go before the given cell id. (i.e., either it is equivalent
-// or comes after the given id.) If there is no match, then end is returned.
-func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
-	for i := begin; i < end; i++ {
-		if (*cu)[i] >= id {
-			return i
-		}
-	}
-
-	return end
-}
-
-// cellUnionDifferenceInternal adds the difference between the CellID and the union to
-// the result CellUnion. If they intersect but the difference is non-empty, it divides
-// and conquers.
-func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
-	if !other.IntersectsCellID(id) {
-		(*cu) = append((*cu), id)
-		return
-	}
-
-	if !other.ContainsCellID(id) {
-		for _, child := range id.Children() {
-			cu.cellUnionDifferenceInternal(child, other)
-		}
-	}
-}
-
-// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
-// around the unions boundary.
-//
-// For each cell c in the union, we add all cells at level
-// expandLevel that abut c. There are typically eight of those
-// (four edge-abutting and four sharing a vertex). However, if c is
-// finer than expandLevel, we add all cells abutting
-// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
-// as an expandLevel cell rarely abuts a smaller cell.
-//
-// Note that the size of the output is exponential in
-// expandLevel. For example, if expandLevel == 20 and the input
-// has a cell at level 10, there will be on the order of 4000
-// adjacent cells in the output. For most applications the
-// ExpandByRadius method below is easier to use.
-func (cu *CellUnion) ExpandAtLevel(level int) {
-	var output CellUnion
-	levelLsb := lsbForLevel(level)
-	for i := len(*cu) - 1; i >= 0; i-- {
-		id := (*cu)[i]
-		if id.lsb() < levelLsb {
-			id = id.Parent(level)
-			// Optimization: skip over any cells contained by this one. This is
-			// especially important when very small regions are being expanded.
-			for i > 0 && id.Contains((*cu)[i-1]) {
-				i--
-			}
-		}
-		output = append(output, id)
-		output = append(output, id.AllNeighbors(level)...)
-	}
-	sortCellIDs(output)
-
-	*cu = output
-	cu.Normalize()
-}
-
-// ExpandByRadius expands this CellUnion such that it contains all points whose
-// distance to the CellUnion is at most minRadius, but do not use cells that
-// are more than maxLevelDiff levels higher than the largest cell in the input.
-// The second parameter controls the tradeoff between accuracy and output size
-// when a large region is being expanded by a small amount (e.g. expanding Canada
-// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
-// by approximately 1/16 the width of its largest cell. Note that in the worst case,
-// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
-// larger than the number of cells in the input.
-func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
-	minLevel := maxLevel
-	for _, cid := range *cu {
-		minLevel = minInt(minLevel, cid.Level())
-	}
-
-	// Find the maximum level such that all cells are at least "minRadius" wide.
-	radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
-	if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
-		// The requested expansion is greater than the width of a face cell.
-		// The easiest way to handle this is to expand twice.
-		cu.ExpandAtLevel(0)
-	}
-	cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
-}
-
-// Equal reports whether the two CellUnions are equal.
-func (cu CellUnion) Equal(o CellUnion) bool {
-	if len(cu) != len(o) {
-		return false
-	}
-	for i := 0; i < len(cu); i++ {
-		if cu[i] != o[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// AverageArea returns the average area of this CellUnion.
-// This is accurate to within a factor of 1.7.
-func (cu *CellUnion) AverageArea() float64 {
-	return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
-}
-
-// ApproxArea returns the approximate area of this CellUnion. This method is accurate
-// to within 3% percent for all cell sizes and accurate to within 0.1% for cells
-// at level 5 or higher within the union.
-func (cu *CellUnion) ApproxArea() float64 {
-	var area float64
-	for _, id := range *cu {
-		area += CellFromCellID(id).ApproxArea()
-	}
-	return area
-}
-
-// ExactArea returns the area of this CellUnion as accurately as possible.
-func (cu *CellUnion) ExactArea() float64 {
-	var area float64
-	for _, id := range *cu {
-		area += CellFromCellID(id).ExactArea()
-	}
-	return area
-}
-
-// Encode encodes the CellUnion.
-func (cu *CellUnion) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	cu.encode(e)
-	return e.err
-}
-
-func (cu *CellUnion) encode(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeInt64(int64(len(*cu)))
-	for _, ci := range *cu {
-		ci.encode(e)
-	}
-}
-
-// Decode decodes the CellUnion.
-func (cu *CellUnion) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	cu.decode(d)
-	return d.err
-}
-
-func (cu *CellUnion) decode(d *decoder) {
-	version := d.readInt8()
-	if d.err != nil {
-		return
-	}
-	if version != encodingVersion {
-		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
-		return
-	}
-	n := d.readInt64()
-	if d.err != nil {
-		return
-	}
-	const maxCells = 1000000
-	if n > maxCells {
-		d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells)
-		return
-	}
-	*cu = make([]CellID, n)
-	for i := range *cu {
-		(*cu)[i].decode(d)
-	}
-}

+ 0 - 133
vendor/github.com/golang/geo/s2/centroids.go

@@ -1,133 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/r3"
-)
-
-// There are several notions of the "centroid" of a triangle. First, there
-// is the planar centroid, which is simply the centroid of the ordinary
-// (non-spherical) triangle defined by the three vertices. Second, there is
-// the surface centroid, which is defined as the intersection of the three
-// medians of the spherical triangle. It is possible to show that this
-// point is simply the planar centroid projected to the surface of the
-// sphere. Finally, there is the true centroid (mass centroid), which is
-// defined as the surface integral over the spherical triangle of (x,y,z)
-// divided by the triangle area. This is the point that the triangle would
-// rotate around if it was spinning in empty space.
-//
-// The best centroid for most purposes is the true centroid. Unlike the
-// planar and surface centroids, the true centroid behaves linearly as
-// regions are added or subtracted. That is, if you split a triangle into
-// pieces and compute the average of their centroids (weighted by triangle
-// area), the result equals the centroid of the original triangle. This is
-// not true of the other centroids.
-//
-// Also note that the surface centroid may be nowhere near the intuitive
-// "center" of a spherical triangle. For example, consider the triangle
-// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere).
-// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is
-// within a distance of 2*eps of the vertex B. Note that the median from A
-// (the segment connecting A to the midpoint of BC) passes through S, since
-// this is the shortest path connecting the two endpoints. On the other
-// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto
-// the surface is a much more reasonable interpretation of the "center" of
-// this triangle.
-//
-
-// TrueCentroid returns the true centroid of the spherical triangle ABC
-// multiplied by the signed area of spherical triangle ABC. The reasons for
-// multiplying by the signed area are (1) this is the quantity that needs to be
-// summed to compute the centroid of a union or difference of triangles, and
-// (2) it's actually easier to calculate this way. All points must have unit length.
-//
-// Note that the result of this function is defined to be Point(0, 0, 0) if
-// the triangle is degenerate.
-func TrueCentroid(a, b, c Point) Point {
-	// Use Distance to get accurate results for small triangles.
-	ra := float64(1)
-	if sa := float64(b.Distance(c)); sa != 0 {
-		ra = sa / math.Sin(sa)
-	}
-	rb := float64(1)
-	if sb := float64(c.Distance(a)); sb != 0 {
-		rb = sb / math.Sin(sb)
-	}
-	rc := float64(1)
-	if sc := float64(a.Distance(b)); sc != 0 {
-		rc = sc / math.Sin(sc)
-	}
-
-	// Now compute a point M such that:
-	//
-	//  [Ax Ay Az] [Mx]                       [ra]
-	//  [Bx By Bz] [My]  = 0.5 * det(A,B,C) * [rb]
-	//  [Cx Cy Cz] [Mz]                       [rc]
-	//
-	// To improve the numerical stability we subtract the first row (A) from the
-	// other two rows; this reduces the cancellation error when A, B, and C are
-	// very close together. Then we solve it using Cramer's rule.
-	//
-	// The result is the true centroid of the triangle multiplied by the
-	// triangle's area.
-	//
-	// This code still isn't as numerically stable as it could be.
-	// The biggest potential improvement is to compute B-A and C-A more
-	// accurately so that (B-A)x(C-A) is always inside triangle ABC.
-	x := r3.Vector{a.X, b.X - a.X, c.X - a.X}
-	y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y}
-	z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z}
-	r := r3.Vector{ra, rb - ra, rc - ra}
-
-	return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)}
-}
-
-// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB
-// multiplied by the length of the edge AB. As with triangles, the true centroid
-// of a collection of line segments may be computed simply by summing the result
-// of this method for each segment.
-//
-// Note that the planar centroid of a line segment is simply 0.5 * (a + b),
-// while the surface centroid is (a + b).Normalize(). However neither of
-// these values is appropriate for computing the centroid of a collection of
-// edges (such as a polyline).
-//
-// Also note that the result of this function is defined to be Point(0, 0, 0)
-// if the edge is degenerate.
-func EdgeTrueCentroid(a, b Point) Point {
-	// The centroid (multiplied by length) is a vector toward the midpoint
-	// of the edge, whose length is twice the sine of half the angle between
-	// the two vertices. Defining theta to be this angle, we have:
-	vDiff := a.Sub(b.Vector) // Length == 2*sin(theta)
-	vSum := a.Add(b.Vector)  // Length == 2*cos(theta)
-	sin2 := vDiff.Norm2()
-	cos2 := vSum.Norm2()
-	if cos2 == 0 {
-		return Point{} // Ignore antipodal edges.
-	}
-	return Point{vSum.Mul(math.Sqrt(sin2 / cos2))} // Length == 2*sin(theta)
-}
-
-// PlanarCentroid returns the centroid of the planar triangle ABC. This can be
-// normalized to unit length to obtain the "surface centroid" of the corresponding
-// spherical triangle, i.e. the intersection of the three medians. However, note
-// that for large spherical triangles the surface centroid may be nowhere near
-// the intuitive "center".
-func PlanarCentroid(a, b, c Point) Point {
-	return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. / 3)}
-}

+ 0 - 190
vendor/github.com/golang/geo/s2/contains_point_query.go

@@ -1,190 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// VertexModel defines whether shapes are considered to contain their vertices.
-// Note that these definitions differ from the ones used by BooleanOperation.
-//
-// Note that points other than vertices are never contained by polylines.
-// If you want need this behavior, use ClosestEdgeQuery's IsDistanceLess
-// with a suitable distance threshold instead.
-type VertexModel int
-
-const (
-	// VertexModelOpen means no shapes contain their vertices (not even
-	// points). Therefore Contains(Point) returns true if and only if the
-	// point is in the interior of some polygon.
-	VertexModelOpen VertexModel = iota
-
-	// VertexModelSemiOpen means that polygon point containment is defined
-	// such that if several polygons tile the region around a vertex, then
-	// exactly one of those polygons contains that vertex. Points and
-	// polylines still do not contain any vertices.
-	VertexModelSemiOpen
-
-	// VertexModelClosed means all shapes contain their vertices (including
-	// points and polylines).
-	VertexModelClosed
-)
-
-// ContainsPointQuery determines whether one or more shapes in a ShapeIndex
-// contain a given Point. The ShapeIndex may contain any number of points,
-// polylines, and/or polygons (possibly overlapping). Shape boundaries may be
-// modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
-// considered to contain their vertices).
-//
-// This type is not safe for concurrent use.
-//
-// However, note that if you need to do a large number of point containment
-// tests, it is more efficient to re-use the query rather than creating a new
-// one each time.
-type ContainsPointQuery struct {
-	model VertexModel
-	index *ShapeIndex
-	iter  *ShapeIndexIterator
-}
-
-// NewContainsPointQuery creates a new instance of the ContainsPointQuery for the index
-// and given vertex model choice.
-func NewContainsPointQuery(index *ShapeIndex, model VertexModel) *ContainsPointQuery {
-	return &ContainsPointQuery{
-		index: index,
-		model: model,
-		iter:  index.Iterator(),
-	}
-}
-
-// Contains reports whether any shape in the queries index contains the point p
-// under the queries vertex model (Open, SemiOpen, or Closed).
-func (q *ContainsPointQuery) Contains(p Point) bool {
-	if !q.iter.LocatePoint(p) {
-		return false
-	}
-
-	cell := q.iter.IndexCell()
-	for _, clipped := range cell.shapes {
-		if q.shapeContains(clipped, q.iter.Center(), p) {
-			return true
-		}
-	}
-	return false
-}
-
-// shapeContains reports whether the clippedShape from the iterator's center position contains
-// the given point.
-func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Point) bool {
-	inside := clipped.containsCenter
-	numEdges := clipped.numEdges()
-	if numEdges <= 0 {
-		return inside
-	}
-
-	shape := q.index.Shape(clipped.shapeID)
-	if shape.Dimension() != 2 {
-		// Points and polylines can be ignored unless the vertex model is Closed.
-		if q.model != VertexModelClosed {
-			return false
-		}
-
-		// Otherwise, the point is contained if and only if it matches a vertex.
-		for _, edgeID := range clipped.edges {
-			edge := shape.Edge(edgeID)
-			if edge.V0 == p || edge.V1 == p {
-				return true
-			}
-		}
-		return false
-	}
-
-	// Test containment by drawing a line segment from the cell center to the
-	// given point and counting edge crossings.
-	crosser := NewEdgeCrosser(center, p)
-	for _, edgeID := range clipped.edges {
-		edge := shape.Edge(edgeID)
-		sign := crosser.CrossingSign(edge.V0, edge.V1)
-		if sign == DoNotCross {
-			continue
-		}
-		if sign == MaybeCross {
-			// For the Open and Closed models, check whether p is a vertex.
-			if q.model != VertexModelSemiOpen && (edge.V0 == p || edge.V1 == p) {
-				return (q.model == VertexModelClosed)
-			}
-			// C++ plays fast and loose with the int <-> bool conversions here.
-			if VertexCrossing(crosser.a, crosser.b, edge.V0, edge.V1) {
-				sign = Cross
-			} else {
-				sign = DoNotCross
-			}
-		}
-		inside = inside != (sign == Cross)
-	}
-
-	return inside
-}
-
-// ShapeContains reports whether the given shape contains the point under this
-// queries vertex model (Open, SemiOpen, or Closed).
-//
-// This requires the shape belongs to this queries index.
-func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool {
-	if !q.iter.LocatePoint(p) {
-		return false
-	}
-
-	clipped := q.iter.IndexCell().findByShapeID(q.index.idForShape(shape))
-	if clipped == nil {
-		return false
-	}
-	return q.shapeContains(clipped, q.iter.Center(), p)
-}
-
-// shapeVisitorFunc is a type of function that can be called against shaped in an index.
-type shapeVisitorFunc func(shape Shape) bool
-
-// visitContainingShapes visits all shapes in the given index that contain the
-// given point p, terminating early if the given visitor function returns false,
-// in which case visitContainingShapes returns false. Each shape is
-// visited at most once.
-func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool {
-	// This function returns false only if the algorithm terminates early
-	// because the visitor function returned false.
-	if !q.iter.LocatePoint(p) {
-		return true
-	}
-
-	cell := q.iter.IndexCell()
-	for _, clipped := range cell.shapes {
-		if q.shapeContains(clipped, q.iter.Center(), p) &&
-			!f(q.index.Shape(clipped.shapeID)) {
-			return false
-		}
-	}
-	return true
-}
-
-// ContainingShapes returns a slice of all shapes that contain the given point.
-func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape {
-	var shapes []Shape
-	q.visitContainingShapes(p, func(shape Shape) bool {
-		shapes = append(shapes, shape)
-		return true
-	})
-	return shapes
-}
-
-// TODO(roberts): Remaining methods from C++
-// type edgeVisitorFunc func(shape ShapeEdge) bool
-// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool

+ 0 - 63
vendor/github.com/golang/geo/s2/contains_vertex_query.go

@@ -1,63 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// ContainsVertexQuery is used to track the edges entering and leaving the
-// given vertex of a Polygon in order to be able to determine if the point is
-// contained by the Polygon.
-//
-// Point containment is defined according to the semi-open boundary model
-// which means that if several polygons tile the region around a vertex,
-// then exactly one of those polygons contains that vertex.
-type ContainsVertexQuery struct {
-	target  Point
-	edgeMap map[Point]int
-}
-
-// NewContainsVertexQuery returns a new query for the given vertex whose
-// containment will be determined.
-func NewContainsVertexQuery(target Point) *ContainsVertexQuery {
-	return &ContainsVertexQuery{
-		target:  target,
-		edgeMap: make(map[Point]int),
-	}
-}
-
-// AddEdge adds the edge between target and v with the given direction.
-// (+1 = outgoing, -1 = incoming, 0 = degenerate).
-func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
-	q.edgeMap[v] += direction
-}
-
-// ContainsVertex reports a +1 if the target vertex is contained, -1 if it is
-// not contained, and 0 if the incident edges consisted of matched sibling pairs.
-func (q *ContainsVertexQuery) ContainsVertex() int {
-	// Find the unmatched edge that is immediately clockwise from Ortho(P).
-	referenceDir := Point{q.target.Ortho()}
-
-	bestPoint := referenceDir
-	bestDir := 0
-
-	for k, v := range q.edgeMap {
-		if v == 0 {
-			continue // This is a "matched" edge.
-		}
-		if OrderedCCW(referenceDir, bestPoint, k, q.target) {
-			bestPoint = k
-			bestDir = v
-		}
-	}
-	return bestDir
-}

+ 0 - 239
vendor/github.com/golang/geo/s2/convex_hull_query.go

@@ -1,239 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"sort"
-)
-
-// ConvexHullQuery builds the convex hull of any collection of points,
-// polylines, loops, and polygons. It returns a single convex loop.
-//
-// The convex hull is defined as the smallest convex region on the sphere that
-// contains all of your input geometry. Recall that a region is "convex" if
-// for every pair of points inside the region, the straight edge between them
-// is also inside the region. In our case, a "straight" edge is a geodesic,
-// i.e. the shortest path on the sphere between two points.
-//
-// Containment of input geometry is defined as follows:
-//
-//  - Each input loop and polygon is contained by the convex hull exactly
-//    (i.e., according to Polygon's Contains(Polygon)).
-//
-//  - Each input point is either contained by the convex hull or is a vertex
-//    of the convex hull. (Recall that S2Loops do not necessarily contain their
-//    vertices.)
-//
-//  - For each input polyline, the convex hull contains all of its vertices
-//    according to the rule for points above. (The definition of convexity
-//    then ensures that the convex hull also contains the polyline edges.)
-//
-// To use this type, call the various Add... methods to add your input geometry, and
-// then call ConvexHull. Note that ConvexHull does *not* reset the
-// state; you can continue adding geometry if desired and compute the convex
-// hull again. If you want to start from scratch, simply create a new
-// ConvexHullQuery value.
-//
-// This implement Andrew's monotone chain algorithm, which is a variant of the
-// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time
-// complexity is O(n log n), and the space required is O(n). In fact only the
-// call to "sort" takes O(n log n) time; the rest of the algorithm is linear.
-//
-// Demonstration of the algorithm and code:
-// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
-//
-// This type is not safe for concurrent use.
-type ConvexHullQuery struct {
-	bound  Rect
-	points []Point
-}
-
-// NewConvexHullQuery creates a new ConvexHullQuery.
-func NewConvexHullQuery() *ConvexHullQuery {
-	return &ConvexHullQuery{
-		bound: EmptyRect(),
-	}
-}
-
-// AddPoint adds the given point to the input geometry.
-func (q *ConvexHullQuery) AddPoint(p Point) {
-	q.bound = q.bound.AddPoint(LatLngFromPoint(p))
-	q.points = append(q.points, p)
-}
-
-// AddPolyline adds the given polyline to the input geometry.
-func (q *ConvexHullQuery) AddPolyline(p *Polyline) {
-	q.bound = q.bound.Union(p.RectBound())
-	q.points = append(q.points, (*p)...)
-}
-
-// AddLoop adds the given loop to the input geometry.
-func (q *ConvexHullQuery) AddLoop(l *Loop) {
-	q.bound = q.bound.Union(l.RectBound())
-	if l.isEmptyOrFull() {
-		return
-	}
-	q.points = append(q.points, l.vertices...)
-}
-
-// AddPolygon adds the given polygon to the input geometry.
-func (q *ConvexHullQuery) AddPolygon(p *Polygon) {
-	q.bound = q.bound.Union(p.RectBound())
-	for _, l := range p.loops {
-		// Only loops at depth 0 can contribute to the convex hull.
-		if l.depth == 0 {
-			q.AddLoop(l)
-		}
-	}
-}
-
-// CapBound returns a bounding cap for the input geometry provided.
-//
-// Note that this method does not clear the geometry; you can continue
-// adding to it and call this method again if desired.
-func (q *ConvexHullQuery) CapBound() Cap {
-	// We keep track of a rectangular bound rather than a spherical cap because
-	// it is easy to compute a tight bound for a union of rectangles, whereas it
-	// is quite difficult to compute a tight bound around a union of caps.
-	// Also, polygons and polylines implement CapBound() in terms of
-	// RectBound() for this same reason, so it is much better to keep track
-	// of a rectangular bound as we go along and convert it at the end.
-	//
-	// TODO(roberts): We could compute an optimal bound by implementing Welzl's
-	// algorithm. However we would still need to have special handling of loops
-	// and polygons, since if a loop spans more than 180 degrees in any
-	// direction (i.e., if it contains two antipodal points), then it is not
-	// enough just to bound its vertices. In this case the only convex bounding
-	// cap is FullCap(), and the only convex bounding loop is the full loop.
-	return q.bound.CapBound()
-}
-
-// ConvexHull returns a Loop representing the convex hull of the input geometry provided.
-//
-// If there is no geometry, this method returns an empty loop containing no
-// points.
-//
-// If the geometry spans more than half of the sphere, this method returns a
-// full loop containing the entire sphere.
-//
-// If the geometry contains 1 or 2 points, or a single edge, this method
-// returns a very small loop consisting of three vertices (which are a
-// superset of the input vertices).
-//
-// Note that this method does not clear the geometry; you can continue
-// adding to the query and call this method again.
-func (q *ConvexHullQuery) ConvexHull() *Loop {
-	c := q.CapBound()
-	if c.Height() >= 1 {
-		// The bounding cap is not convex. The current bounding cap
-		// implementation is not optimal, but nevertheless it is likely that the
-		// input geometry itself is not contained by any convex polygon. In any
-		// case, we need a convex bounding cap to proceed with the algorithm below
-		// (in order to construct a point "origin" that is definitely outside the
-		// convex hull).
-		return FullLoop()
-	}
-
-	// Remove duplicates. We need to do this before checking whether there are
-	// fewer than 3 points.
-	x := make(map[Point]bool)
-	r, w := 0, 0 // read/write indexes
-	for ; r < len(q.points); r++ {
-		if x[q.points[r]] {
-			continue
-		}
-		q.points[w] = q.points[r]
-		x[q.points[r]] = true
-		w++
-	}
-	q.points = q.points[:w]
-
-	// This code implements Andrew's monotone chain algorithm, which is a simple
-	// variant of the Graham scan. Rather than sorting by x-coordinate, instead
-	// we sort the points in CCW order around an origin O such that all points
-	// are guaranteed to be on one side of some geodesic through O. This
-	// ensures that as we scan through the points, each new point can only
-	// belong at the end of the chain (i.e., the chain is monotone in terms of
-	// the angle around O from the starting point).
-	origin := Point{c.Center().Ortho()}
-	sort.Slice(q.points, func(i, j int) bool {
-		return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise
-	})
-
-	// Special cases for fewer than 3 points.
-	switch len(q.points) {
-	case 0:
-		return EmptyLoop()
-	case 1:
-		return singlePointLoop(q.points[0])
-	case 2:
-		return singleEdgeLoop(q.points[0], q.points[1])
-	}
-
-	// Generate the lower and upper halves of the convex hull. Each half
-	// consists of the maximal subset of vertices such that the edge chain
-	// makes only left (CCW) turns.
-	lower := q.monotoneChain()
-
-	// reverse the points
-	for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 {
-		q.points[left], q.points[right] = q.points[right], q.points[left]
-	}
-	upper := q.monotoneChain()
-
-	// Remove the duplicate vertices and combine the chains.
-	lower = lower[:len(lower)-1]
-	upper = upper[:len(upper)-1]
-	lower = append(lower, upper...)
-
-	return LoopFromPoints(lower)
-}
-
-// monotoneChain iterates through the points, selecting the maximal subset of points
-// such that the edge chain makes only left (CCW) turns.
-func (q *ConvexHullQuery) monotoneChain() []Point {
-	var output []Point
-	for _, p := range q.points {
-		// Remove any points that would cause the chain to make a clockwise turn.
-		for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise {
-			output = output[:len(output)-1]
-		}
-		output = append(output, p)
-	}
-	return output
-}
-
-// singlePointLoop constructs a 3-vertex polygon consisting of "p" and two nearby
-// vertices. Note that ContainsPoint(p) may be false for the resulting loop.
-func singlePointLoop(p Point) *Loop {
-	const offset = 1e-15
-	d0 := p.Ortho()
-	d1 := p.Cross(d0)
-	vertices := []Point{
-		p,
-		{p.Add(d0.Mul(offset)).Normalize()},
-		{p.Add(d1.Mul(offset)).Normalize()},
-	}
-	return LoopFromPoints(vertices)
-}
-
-// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
-func singleEdgeLoop(a, b Point) *Loop {
-	vertices := []Point{a, b, {a.Add(b.Vector).Normalize()}}
-	loop := LoopFromPoints(vertices)
-	// The resulting loop may be clockwise, so invert it if necessary.
-	loop.Normalize()
-	return loop
-}

+ 0 - 409
vendor/github.com/golang/geo/s2/crossing_edge_query.go

@@ -1,409 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"sort"
-
-	"github.com/golang/geo/r2"
-)
-
-// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
-// a given edge(s).
-//
-// Note that if you need to query many edges, it is more efficient to declare
-// a single CrossingEdgeQuery instance and reuse it.
-//
-// If you want to find *all* the pairs of crossing edges, it is more efficient to
-// use the not yet implemented VisitCrossings in shapeutil.
-type CrossingEdgeQuery struct {
-	index *ShapeIndex
-
-	// temporary values used while processing a query.
-	a, b r2.Point
-	iter *ShapeIndexIterator
-
-	// candidate cells generated when finding crossings.
-	cells []*ShapeIndexCell
-}
-
-// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
-func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
-	c := &CrossingEdgeQuery{
-		index: index,
-		iter:  index.Iterator(),
-	}
-	return c
-}
-
-// Crossings returns the set of edge of the shape S that intersect the given edge AB.
-// If the CrossingType is Interior, then only intersections at a point interior to both
-// edges are reported, while if it is CrossingTypeAll then edges that share a vertex
-// are also reported.
-func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
-	edges := c.candidates(a, b, shape)
-	if len(edges) == 0 {
-		return nil
-	}
-
-	crosser := NewEdgeCrosser(a, b)
-	out := 0
-	n := len(edges)
-
-	for in := 0; in < n; in++ {
-		b := shape.Edge(edges[in])
-		sign := crosser.CrossingSign(b.V0, b.V1)
-		if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
-			edges[out] = edges[in]
-			out++
-		}
-	}
-
-	if out < n {
-		edges = edges[0:out]
-	}
-	return edges
-}
-
-// EdgeMap stores a sorted set of edge ids for each shape.
-type EdgeMap map[Shape][]int
-
-// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
-// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
-// point interior to both edges are reported, while if it is CrossingTypeAll
-// then edges that share a vertex are also reported.
-//
-// The edges are returned as a mapping from shape to the edges of that shape
-// that intersect AB. Every returned shape has at least one crossing edge.
-func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
-	edgeMap := c.candidatesEdgeMap(a, b)
-	if len(edgeMap) == 0 {
-		return nil
-	}
-
-	crosser := NewEdgeCrosser(a, b)
-	for shape, edges := range edgeMap {
-		out := 0
-		n := len(edges)
-		for in := 0; in < n; in++ {
-			edge := shape.Edge(edges[in])
-			sign := crosser.CrossingSign(edge.V0, edge.V1)
-			if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
-				edgeMap[shape][out] = edges[in]
-				out++
-			}
-		}
-
-		if out == 0 {
-			delete(edgeMap, shape)
-		} else {
-			if out < n {
-				edgeMap[shape] = edgeMap[shape][0:out]
-			}
-		}
-	}
-	return edgeMap
-}
-
-// candidates returns a superset of the edges of the given shape that intersect
-// the edge AB.
-func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
-	var edges []int
-
-	// For small loops it is faster to use brute force. The threshold below was
-	// determined using benchmarks.
-	const maxBruteForceEdges = 27
-	maxEdges := shape.NumEdges()
-	if maxEdges <= maxBruteForceEdges {
-		edges = make([]int, maxEdges)
-		for i := 0; i < maxEdges; i++ {
-			edges[i] = i
-		}
-		return edges
-	}
-
-	// Compute the set of index cells intersected by the query edge.
-	c.getCellsForEdge(a, b)
-	if len(c.cells) == 0 {
-		return nil
-	}
-
-	// Gather all the edges that intersect those cells and sort them.
-	// TODO(roberts): Shapes don't track their ID, so we need to range over
-	// the index to find the ID manually.
-	var shapeID int32
-	for k, v := range c.index.shapes {
-		if v == shape {
-			shapeID = k
-		}
-	}
-
-	for _, cell := range c.cells {
-		if cell == nil {
-			continue
-		}
-		clipped := cell.findByShapeID(shapeID)
-		if clipped == nil {
-			continue
-		}
-		edges = append(edges, clipped.edges...)
-	}
-
-	if len(c.cells) > 1 {
-		edges = uniqueInts(edges)
-	}
-
-	return edges
-}
-
-// uniqueInts returns the sorted uniqued values from the given input.
-func uniqueInts(in []int) []int {
-	var edges []int
-	m := make(map[int]bool)
-	for _, i := range in {
-		if m[i] {
-			continue
-		}
-		m[i] = true
-		edges = append(edges, i)
-	}
-	sort.Ints(edges)
-	return edges
-}
-
-// candidatesEdgeMap returns a map from shapes to the superse of edges for that
-// shape that intersect the edge AB.
-//
-// CAVEAT: This method may return shapes that have an empty set of candidate edges.
-// However the return value is non-empty only if at least one shape has a candidate edge.
-func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
-	edgeMap := make(EdgeMap)
-
-	// If there are only a few edges then it's faster to use brute force. We
-	// only bother with this optimization when there is a single shape.
-	if len(c.index.shapes) == 1 {
-		// Typically this method is called many times, so it is worth checking
-		// whether the edge map is empty or already consists of a single entry for
-		// this shape, and skip clearing edge map in that case.
-		shape := c.index.Shape(0)
-
-		// Note that we leave the edge map non-empty even if there are no candidates
-		// (i.e., there is a single entry with an empty set of edges).
-		edgeMap[shape] = c.candidates(a, b, shape)
-		return edgeMap
-	}
-
-	// Compute the set of index cells intersected by the query edge.
-	c.getCellsForEdge(a, b)
-	if len(c.cells) == 0 {
-		return edgeMap
-	}
-
-	// Gather all the edges that intersect those cells and sort them.
-	for _, cell := range c.cells {
-		for _, clipped := range cell.shapes {
-			s := c.index.Shape(clipped.shapeID)
-			for j := 0; j < clipped.numEdges(); j++ {
-				edgeMap[s] = append(edgeMap[s], clipped.edges[j])
-			}
-		}
-	}
-
-	if len(c.cells) > 1 {
-		for s, edges := range edgeMap {
-			edgeMap[s] = uniqueInts(edges)
-		}
-	}
-
-	return edgeMap
-}
-
-// getCells returns the set of ShapeIndexCells that might contain edges intersecting
-// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
-func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
-	aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
-	if ok {
-		c.a = aUV
-		c.b = bUV
-		edgeBound := r2.RectFromPoints(c.a, c.b)
-		if root.Bound().Intersects(edgeBound) {
-			c.computeCellsIntersected(root, edgeBound)
-		}
-	}
-
-	if len(c.cells) == 0 {
-		return nil
-	}
-
-	return c.cells
-}
-
-// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB.
-func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
-	c.cells = nil
-
-	segments := FaceSegments(a, b)
-	for _, segment := range segments {
-		c.a = segment.a
-		c.b = segment.b
-
-		// Optimization: rather than always starting the recursive subdivision at
-		// the top level face cell, instead we start at the smallest S2CellId that
-		// contains the edge (the edge root cell). This typically lets us skip
-		// quite a few levels of recursion since most edges are short.
-		edgeBound := r2.RectFromPoints(c.a, c.b)
-		pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0)
-		edgeRoot := pcell.ShrinkToFit(edgeBound)
-
-		// Now we need to determine how the edge root cell is related to the cells
-		// in the spatial index (cellMap). There are three cases:
-		//
-		//  1. edgeRoot is an index cell or is contained within an index cell.
-		//     In this case we only need to look at the contents of that cell.
-		//  2. edgeRoot is subdivided into one or more index cells. In this case
-		//     we recursively subdivide to find the cells intersected by AB.
-		//  3. edgeRoot does not intersect any index cells. In this case there
-		//     is nothing to do.
-		relation := c.iter.LocateCellID(edgeRoot)
-		if relation == Indexed {
-			// edgeRoot is an index cell or is contained by an index cell (case 1).
-			c.cells = append(c.cells, c.iter.IndexCell())
-		} else if relation == Subdivided {
-			// edgeRoot is subdivided into one or more index cells (case 2). We
-			// find the cells intersected by AB using recursive subdivision.
-			if !edgeRoot.isFace() {
-				pcell = PaddedCellFromCellID(edgeRoot, 0)
-			}
-			c.computeCellsIntersected(pcell, edgeBound)
-		}
-	}
-}
-
-// computeCellsIntersected computes the index cells intersected by the current
-// edge that are descendants of pcell and adds them to this queries set of cells.
-func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {
-
-	c.iter.seek(pcell.id.RangeMin())
-	if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {
-		// The index does not contain pcell or any of its descendants.
-		return
-	}
-	if c.iter.CellID() == pcell.id {
-		// The index contains this cell exactly.
-		c.cells = append(c.cells, c.iter.IndexCell())
-		return
-	}
-
-	// Otherwise, split the edge among the four children of pcell.
-	center := pcell.Middle().Lo()
-
-	if edgeBound.X.Hi < center.X {
-		// Edge is entirely contained in the two left children.
-		c.clipVAxis(edgeBound, center.Y, 0, pcell)
-		return
-	} else if edgeBound.X.Lo >= center.X {
-		// Edge is entirely contained in the two right children.
-		c.clipVAxis(edgeBound, center.Y, 1, pcell)
-		return
-	}
-
-	childBounds := c.splitUBound(edgeBound, center.X)
-	if edgeBound.Y.Hi < center.Y {
-		// Edge is entirely contained in the two lower children.
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
-	} else if edgeBound.Y.Lo >= center.Y {
-		// Edge is entirely contained in the two upper children.
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
-	} else {
-		// The edge bound spans all four children. The edge itself intersects
-		// at most three children (since no padding is being used).
-		c.clipVAxis(childBounds[0], center.Y, 0, pcell)
-		c.clipVAxis(childBounds[1], center.Y, 1, pcell)
-	}
-}
-
-// clipVAxis computes the intersected cells recursively for a given padded cell.
-// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
-// determine whether the current edge intersects the lower child, upper child,
-// or both children, and call c.computeCellsIntersected recursively on those children.
-// The center is the v-coordinate at the center of pcell.
-func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
-	if edgeBound.Y.Hi < center {
-		// Edge is entirely contained in the lower child.
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
-	} else if edgeBound.Y.Lo >= center {
-		// Edge is entirely contained in the upper child.
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
-	} else {
-		// The edge intersects both children.
-		childBounds := c.splitVBound(edgeBound, center)
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
-		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
-	}
-}
-
-// splitUBound returns the bound for two children as a result of spliting the
-// current edge at the given value U.
-func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
-	v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
-	// diag indicates which diagonal of the bounding box is spanned by AB:
-	// it is 0 if AB has positive slope, and 1 if AB has negative slope.
-	var diag int
-	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
-		diag = 1
-	}
-	return splitBound(edgeBound, 0, diag, u, v)
-}
-
-// splitVBound returns the bound for two children as a result of spliting the
-// current edge into two child edges at the given value V.
-func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
-	u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
-	var diag int
-	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
-		diag = 1
-	}
-	return splitBound(edgeBound, diag, 0, u, v)
-}
-
-// splitBound returns the bounds for the two childrenn as a result of spliting
-// the current edge into two child edges at the given point (u,v). uEnd and vEnd
-// indicate which bound endpoints of the first child will be updated.
-func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {
-	var childBounds = [2]r2.Rect{
-		edgeBound,
-		edgeBound,
-	}
-
-	if uEnd == 1 {
-		childBounds[0].X.Lo = u
-		childBounds[1].X.Hi = u
-	} else {
-		childBounds[0].X.Hi = u
-		childBounds[1].X.Lo = u
-	}
-
-	if vEnd == 1 {
-		childBounds[0].Y.Lo = v
-		childBounds[1].Y.Hi = v
-	} else {
-		childBounds[0].Y.Hi = v
-		childBounds[1].Y.Lo = v
-	}
-
-	return childBounds
-}

+ 0 - 149
vendor/github.com/golang/geo/s2/distance_target.go

@@ -1,149 +0,0 @@
-// Copyright 2019 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"github.com/golang/geo/s1"
-)
-
-// The distance interface represents a set of common methods used by algorithms
-// that compute distances between various S2 types.
-type distance interface {
-	// chordAngle returns this type as a ChordAngle.
-	chordAngle() s1.ChordAngle
-
-	// fromChordAngle is used to type convert a ChordAngle to this type.
-	// This is to work around needing to be clever in parts of the code
-	// where a distanceTarget interface method expects distances, but the
-	// user only supplies a ChordAngle, and we need to dynamically cast it
-	// to an appropriate distance interface types.
-	fromChordAngle(o s1.ChordAngle) distance
-
-	// zero returns a zero distance.
-	zero() distance
-	// negative returns a value smaller than any valid value.
-	negative() distance
-	// infinity returns a value larger than any valid value.
-	infinity() distance
-
-	// less is similar to the Less method in Sort. To get minimum values,
-	// this would be a less than type operation. For maximum, this would
-	// be a greater than type operation.
-	less(other distance) bool
-
-	// sub subtracts the other value from this one and returns the new value.
-	// This is done as a method and not simple mathematical operation to
-	// allow closest and furthest to implement this in opposite ways.
-	sub(other distance) distance
-
-	// chordAngleBound reports the upper bound on a ChordAngle corresponding
-	// to this distance. For example, if distance measures WGS84 ellipsoid
-	// distance then the corresponding angle needs to be 0.56% larger.
-	chordAngleBound() s1.ChordAngle
-
-	// updateDistance may update the value this distance represents
-	// based on the given input. The updated value and a boolean reporting
-	// if the value was changed are returned.
-	updateDistance(other distance) (distance, bool)
-}
-
-// distanceTarget is an interface that represents a geometric type to which distances
-// are measured.
-//
-// For example, there are implementations that measure distances to a Point,
-// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry
-// stored in ShapeIndex.
-//
-// The distanceTarget types are provided for the benefit of types that measure
-// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery,
-// ClosestPointQuery, and ClosestCellQuery, etc.
-type distanceTarget interface {
-	// capBound returns a Cap that bounds the set of points whose distance to the
-	// target is distance.zero().
-	capBound() Cap
-
-	// updateDistanceToPoint updates the distance if the distance to
-	// the point P is within than the given dist.
-	// The boolean reports if the value was updated.
-	updateDistanceToPoint(p Point, dist distance) (distance, bool)
-
-	// updateDistanceToEdge updates the distance if the distance to
-	// the edge E is within than the given dist.
-	// The boolean reports if the value was updated.
-	updateDistanceToEdge(e Edge, dist distance) (distance, bool)
-
-	// updateDistanceToCell updates the distance if the distance to the cell C
-	// (including its interior) is within than the given dist.
-	// The boolean reports if the value was updated.
-	updateDistanceToCell(c Cell, dist distance) (distance, bool)
-
-	// setMaxError potentially updates the value of MaxError, and reports if
-	// the specific type supports altering it. Whenever one of the
-	// updateDistanceTo... methods above returns true, the returned distance
-	// is allowed to be up to maxError larger than the true minimum distance.
-	// In other words, it gives this target object permission to terminate its
-	// distance calculation as soon as it has determined that (1) the minimum
-	// distance is less than minDist and (2) the best possible further
-	// improvement is less than maxError.
-	//
-	// If the target takes advantage of maxError to optimize its distance
-	// calculation, this method must return true. (Most target types will
-	// default to return false.)
-	setMaxError(maxErr s1.ChordAngle) bool
-
-	// maxBruteForceIndexSize reports the maximum number of indexed objects for
-	// which it is faster to compute the distance by brute force (e.g., by testing
-	// every edge) rather than by using an index.
-	//
-	// The following method is provided as a convenience for types that compute
-	// distances to a collection of indexed geometry, such as ClosestEdgeQuery
-	// and ClosestPointQuery.
-	//
-	// Types that do not support this should return a -1.
-	maxBruteForceIndexSize() int
-
-	// distance returns an instance of the underlying distance type this
-	// target uses. This is to work around the use of Templates in the C++.
-	distance() distance
-
-	// visitContainingShapes finds all polygons in the given index that
-	// completely contain a connected component of the target geometry. (For
-	// example, if the target consists of 10 points, this method finds
-	// polygons that contain any of those 10 points.) For each such polygon,
-	// the visit function is called with the Shape of the polygon along with
-	// a point of the target geometry that is contained by that polygon.
-	//
-	// Optionally, any polygon that intersects the target geometry may also be
-	// returned.  In other words, this method returns all polygons that
-	// contain any connected component of the target, along with an arbitrary
-	// subset of the polygons that intersect the target.
-	//
-	// For example, suppose that the index contains two abutting polygons
-	// A and B. If the target consists of two points "a" contained by A and
-	// "b" contained by B, then both A and B are returned. But if the target
-	// consists of the edge "ab", then any subset of {A, B} could be returned
-	// (because both polygons intersect the target but neither one contains
-	// the edge "ab").
-	//
-	// If the visit function returns false, this method terminates early and
-	// returns false as well. Otherwise returns true.
-	//
-	// NOTE(roberts): This method exists only for the purpose of implementing
-	// edgeQuery IncludeInteriors efficiently.
-	visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool
-}
-
-// shapePointVisitorFunc defines a type of function the visitContainingShapes can call.
-type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool

+ 0 - 29
vendor/github.com/golang/geo/s2/doc.go

@@ -1,29 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package s2 is a library for working with geometry in S² (spherical geometry).
-
-Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹),
-r2 (operates on ℝ²) and r3 (operates on ℝ³).
-
-This package provides types and functions for the S2 cell hierarchy and coordinate systems.
-The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²)
-into ``cells''; it is highly efficient, scales from continental size to under 1 cm²
-and preserves spatial locality (nearby cells have close IDs).
-
-More information including an in-depth introduction to S2 can be found on the
-S2 website https://s2geometry.io/
-*/
-package s2

+ 0 - 672
vendor/github.com/golang/geo/s2/edge_clipping.go

@@ -1,672 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// This file contains a collection of methods for:
-//
-//   (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube
-//       (see s2stuv), and
-//
-//   (2) Robustly clipping 2D edges against 2D rectangles.
-//
-// These functions can be used to efficiently find the set of CellIDs that
-// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery).
-
-import (
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/r3"
-)
-
-const (
-	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
-	// compared to the exact result, assuming that the points A and B are in
-	// the rectangle [-1,1]x[1,1] or slightly outside it (by 1e-10 or less).
-	edgeClipErrorUVCoord = 2.25 * dblEpsilon
-
-	// edgeClipErrorUVDist is the maximum distance from a clipped point to
-	// the corresponding exact result. It is equal to the error in a single
-	// coordinate because at most one coordinate is subject to error.
-	edgeClipErrorUVDist = 2.25 * dblEpsilon
-
-	// faceClipErrorRadians is the maximum angle between a returned vertex
-	// and the nearest point on the exact edge AB. It is equal to the
-	// maximum directional error in PointCross, plus the error when
-	// projecting points onto a cube face.
-	faceClipErrorRadians = 3 * dblEpsilon
-
-	// faceClipErrorDist is the same angle expressed as a maximum distance
-	// in (u,v)-space. In other words, a returned vertex is at most this far
-	// from the exact edge AB projected into (u,v)-space.
-	faceClipErrorUVDist = 9 * dblEpsilon
-
-	// faceClipErrorUVCoord is the maximum angle between a returned vertex
-	// and the nearest point on the exact edge AB expressed as the maximum error
-	// in an individual u- or v-coordinate. In other words, for each
-	// returned vertex there is a point on the exact edge AB whose u- and
-	// v-coordinates differ from the vertex by at most this amount.
-	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
-
-	// intersectsRectErrorUVDist is the maximum error when computing if a point
-	// intersects with a given Rect. If some point of AB is inside the
-	// rectangle by at least this distance, the result is guaranteed to be true;
-	// if all points of AB are outside the rectangle by at least this distance,
-	// the result is guaranteed to be false. This bound assumes that rect is
-	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
-	// (e.g., by 1e-10 or less).
-	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
-)
-
-// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
-// intersects the given face, or false if the edge AB does not intersect.
-// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
-// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
-// the results may differ from those produced by FaceSegments.
-func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
-	return ClipToPaddedFace(a, b, face, 0.0)
-}
-
-// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
-// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
-// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
-// Padding must be non-negative.
-func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
-	// Fast path: both endpoints are on the given face.
-	if face(a.Vector) == f && face(b.Vector) == f {
-		au, av := validFaceXYZToUV(f, a.Vector)
-		bu, bv := validFaceXYZToUV(f, b.Vector)
-		return r2.Point{au, av}, r2.Point{bu, bv}, true
-	}
-
-	// Convert everything into the (u,v,w) coordinates of the given face. Note
-	// that the cross product *must* be computed in the original (x,y,z)
-	// coordinate system because PointCross (unlike the mathematical cross
-	// product) can produce different results in different coordinate systems
-	// when one argument is a linear multiple of the other, due to the use of
-	// symbolic perturbations.
-	normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
-	aUVW := pointUVW(faceXYZtoUVW(f, a))
-	bUVW := pointUVW(faceXYZtoUVW(f, b))
-
-	// Padding is handled by scaling the u- and v-components of the normal.
-	// Letting R=1+padding, this means that when we compute the dot product of
-	// the normal with a cube face vertex (such as (-1,-1,1)), we will actually
-	// compute the dot product with the scaled vertex (-R,-R,1). This allows
-	// methods such as intersectsFace, exitAxis, etc, to handle padding
-	// with no further modifications.
-	scaleUV := 1 + padding
-	scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
-	if !scaledN.intersectsFace() {
-		return aUV, bUV, false
-	}
-
-	// TODO(roberts): This is a workaround for extremely small vectors where some
-	// loss of precision can occur in Normalize causing underflow. When PointCross
-	// is updated to work around this, this can be removed.
-	if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
-		normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
-	}
-
-	normUVW = pointUVW{normUVW.Normalize()}
-
-	aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
-	bTan := pointUVW{bUVW.Cross(normUVW.Vector)}
-
-	// As described in clipDestination, if the sum of the scores from clipping the two
-	// endpoints is 3 or more, then the segment does not intersect this face.
-	aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
-	bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)
-
-	return aUV, bUV, aScore+bScore < 3
-}
-
-// ClipEdge returns the portion of the edge defined by AB that is contained by the
-// given rectangle. If there is no intersection, false is returned and aClip and bClip
-// are undefined.
-func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
-	// Compute the bounding rectangle of AB, clip it, and then extract the new
-	// endpoints from the clipped bound.
-	bound := r2.RectFromPoints(a, b)
-	if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
-		return aClip, bClip, false
-	}
-	ai := 0
-	if a.X > b.X {
-		ai = 1
-	}
-	aj := 0
-	if a.Y > b.Y {
-		aj = 1
-	}
-
-	return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
-}
-
-// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges)
-// all compare a sum (u + v) to a third value w. They are implemented in such a
-// way that they produce an exact result even though all calculations are done
-// with ordinary floating-point operations. Here are the principles on which these
-// functions are based:
-//
-// A. If u + v < w in floating-point, then u + v < w in exact arithmetic.
-//
-// B. If u + v < w in exact arithmetic, then at least one of the following
-//    expressions is true in floating-point:
-//       u + v < w
-//       u < w - v
-//       v < w - u
-//
-// Proof: By rearranging terms and substituting ">" for "<", we can assume
-// that all values are non-negative.  Now clearly "w" is not the smallest
-// value, so assume WLOG that "u" is the smallest.  We want to show that
-// u < w - v in floating-point.  If v >= w/2, the calculation of w - v is
-// exact since the result is smaller in magnitude than either input value,
-// so the result holds.  Otherwise we have u <= v < w/2 and w - v >= w/2
-// (even in floating point), so the result also holds.
-
-// sumEqual reports whether u + v == w exactly.
-func sumEqual(u, v, w float64) bool {
-	return (u+v == w) && (u == w-v) && (v == w-u)
-}
-
-// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
-type pointUVW Point
-
-// intersectsFace reports whether a given directed line L intersects the cube face F.
-// The line L is defined by its normal N in the (u,v,w) coordinates of F.
-func (p pointUVW) intersectsFace() bool {
-	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
-	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
-	// and (-1,1,1) do not all have the same sign. This is true exactly when
-	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
-	u := math.Abs(p.X)
-	v := math.Abs(p.Y)
-	w := math.Abs(p.Z)
-
-	// We only need to consider the cases where u or v is the smallest value,
-	// since if w is the smallest then both expressions below will have a
-	// positive LHS and a negative RHS.
-	return (v >= w-u) && (u >= w-v)
-}
-
-// intersectsOppositeEdges reports whether a directed line L intersects two
-// opposite edges of a cube face F. This includs the case where L passes
-// exactly through a corner vertex of F. The directed line L is defined
-// by its normal N in the (u,v,w) coordinates of F.
-func (p pointUVW) intersectsOppositeEdges() bool {
-	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
-	// and only exactly two of the corner vertices lie on each side of L. This
-	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
-	// expression exactly.
-	u := math.Abs(p.X)
-	v := math.Abs(p.Y)
-	w := math.Abs(p.Z)
-
-	// If w is the smallest, the following line returns an exact result.
-	if math.Abs(u-v) != w {
-		return math.Abs(u-v) >= w
-	}
-
-	// Otherwise u - v = w exactly, or w is not the smallest value. In either
-	// case the following returns the correct result.
-	if u >= v {
-		return u-w >= v
-	}
-	return v-w >= u
-}
-
-// axis represents the possible results of exitAxis.
-type axis int
-
-const (
-	axisU axis = iota
-	axisV
-)
-
-// exitAxis reports which axis the directed line L exits the cube face F on.
-// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
-// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
-// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
-// through a corner vertex of the cube face.
-func (p pointUVW) exitAxis() axis {
-	if p.intersectsOppositeEdges() {
-		// The line passes through through opposite edges of the face.
-		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
-		// larger absolute magnitude than the v-component.
-		if math.Abs(p.X) >= math.Abs(p.Y) {
-			return axisV
-		}
-		return axisU
-	}
-
-	// The line passes through through two adjacent edges of the face.
-	// It exits the v=+1 or v=-1 edge if an even number of the components of N
-	// are negative. We test this using signbit() rather than multiplication
-	// to avoid the possibility of underflow.
-	var x, y, z int
-	if math.Signbit(p.X) {
-		x = 1
-	}
-	if math.Signbit(p.Y) {
-		y = 1
-	}
-	if math.Signbit(p.Z) {
-		z = 1
-	}
-
-	if x^y^z == 0 {
-		return axisV
-	}
-	return axisU
-}
-
-// exitPoint returns the UV coordinates of the point where a directed line L (represented
-// by the CCW normal of this point), exits the cube face this point is derived from along
-// the given axis.
-func (p pointUVW) exitPoint(a axis) r2.Point {
-	if a == axisU {
-		u := -1.0
-		if p.Y > 0 {
-			u = 1.0
-		}
-		return r2.Point{u, (-u*p.X - p.Z) / p.Y}
-	}
-
-	v := -1.0
-	if p.X < 0 {
-		v = 1.0
-	}
-	return r2.Point{(-v*p.Y - p.Z) / p.X, v}
-}
-
-// clipDestination returns a score which is used to indicate if the clipped edge AB
-// on the given face intersects the face at all. This function returns the score for
-// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
-// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
-//
-// First, it clips the line segment AB to find the clipped destination B' on a given
-// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
-// coordinates of that face.) Second, it partially computes whether the segment AB
-// intersects this face at all. The actual condition is fairly complicated, but it
-// turns out that it can be expressed as a "score" that can be computed independently
-// when clipping the two endpoints A and B.
-func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
-	var uv r2.Point
-
-	// Optimization: if B is within the safe region of the face, use it.
-	maxSafeUVCoord := 1 - faceClipErrorUVCoord
-	if b.Z > 0 {
-		uv = r2.Point{b.X / b.Z, b.Y / b.Z}
-		if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
-			return uv, 0
-		}
-	}
-
-	// Otherwise find the point B' where the line AB exits the face.
-	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
-
-	p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
-
-	// Determine if the exit point B' is contained within the segment. We do this
-	// by computing the dot products with two inward-facing tangent vectors at A
-	// and B. If either dot product is negative, we say that B' is on the "wrong
-	// side" of that point. As the point B' moves around the great circle AB past
-	// the segment endpoint B, it is initially on the wrong side of B only; as it
-	// moves further it is on the wrong side of both endpoints; and then it is on
-	// the wrong side of A only. If the exit point B' is on the wrong side of
-	// either endpoint, we can't use it; instead the segment is clipped at the
-	// original endpoint B.
-	//
-	// We reject the segment if the sum of the scores of the two endpoints is 3
-	// or more. Here is what that rule encodes:
-	//  - If B' is on the wrong side of A, then the other clipped endpoint A'
-	//    must be in the interior of AB (otherwise AB' would go the wrong way
-	//    around the circle). There is a similar rule for A'.
-	//  - If B' is on the wrong side of either endpoint (and therefore we must
-	//    use the original endpoint B instead), then it must be possible to
-	//    project B onto this face (i.e., its w-coordinate must be positive).
-	//    This rule is only necessary to handle certain zero-length edges (A=B).
-	score := 0
-	if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
-		score = 2 // B' is on wrong side of A.
-	} else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
-		score = 1 // B' is on wrong side of B.
-	}
-
-	if score > 0 { // B' is not in the interior of AB.
-		if b.Z <= 0 {
-			score = 3 // B cannot be projected onto this face.
-		} else {
-			uv = r2.Point{b.X / b.Z, b.Y / b.Z}
-		}
-	}
-
-	return uv, score
-}
-
-// updateEndpoint returns the interval with the specified endpoint updated to
-// the given value. If the value lies beyond the opposite endpoint, nothing is
-// changed and false is returned.
-func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
-	if !highEndpoint {
-		if bound.Hi < value {
-			return bound, false
-		}
-		if bound.Lo < value {
-			bound.Lo = value
-		}
-		return bound, true
-	}
-
-	if bound.Lo > value {
-		return bound, false
-	}
-	if bound.Hi > value {
-		bound.Hi = value
-	}
-	return bound, true
-}
-
-// clipBoundAxis returns the clipped versions of the bounding intervals for the given
-// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
-// given clip interval. negSlope is a precomputed helper variable that indicates which
-// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
-// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
-// false is returned.
-func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
-	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
-
-	if bound0.Lo < clip.Lo {
-		// If the upper bound is below the clips lower bound, there is nothing to do.
-		if bound0.Hi < clip.Lo {
-			return bound0, bound1, false
-		}
-		// narrow the intervals lower bound to the clip bound.
-		bound0.Lo = clip.Lo
-		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
-			return bound0, bound1, false
-		}
-	}
-
-	if bound0.Hi > clip.Hi {
-		// If the lower bound is above the clips upper bound, there is nothing to do.
-		if bound0.Lo > clip.Hi {
-			return bound0, bound1, false
-		}
-		// narrow the intervals upper bound to the clip bound.
-		bound0.Hi = clip.Hi
-		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
-			return bound0, bound1, false
-		}
-	}
-	return bound0, bound1, true
-}
-
-// edgeIntersectsRect reports whether the edge defined by AB intersects the
-// given closed rectangle to within the error bound.
-func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
-	// First check whether the bounds of a Rect around AB intersects the given rect.
-	if !r.Intersects(r2.RectFromPoints(a, b)) {
-		return false
-	}
-
-	// Otherwise AB intersects the rect if and only if all four vertices of rect
-	// do not lie on the same side of the extended line AB. We test this by finding
-	// the two vertices of rect with minimum and maximum projections onto the normal
-	// of AB, and computing their dot products with the edge normal.
-	n := b.Sub(a).Ortho()
-
-	i := 0
-	if n.X >= 0 {
-		i = 1
-	}
-	j := 0
-	if n.Y >= 0 {
-		j = 1
-	}
-
-	max := n.Dot(r.VertexIJ(i, j).Sub(a))
-	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
-
-	return (max >= 0) && (min <= 0)
-}
-
-// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
-// by AB intersected by clip. The resulting bound may be empty. This is a convenience
-// function built on top of clipEdgeBound.
-func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
-	bound := r2.RectFromPoints(a, b)
-	if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
-		return b1
-	}
-	return r2.EmptyRect()
-}
-
-// clipEdgeBound clips an edge AB to sequence of rectangles efficiently.
-// It represents the clipped edges by their bounding boxes rather than as a pair of
-// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
-// a tight bound of A'B'. This function returns the bound that is a tight bound
-// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
-// it returns false and the original bound.
-func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
-	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
-	// is false if AB has positive slope, and true if AB has negative slope. This is
-	// used to determine which interval endpoints need to be updated each time
-	// the edge is clipped.
-	negSlope := (a.X > b.X) != (a.Y > b.Y)
-
-	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
-	if !up1 {
-		return bound, false
-	}
-	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
-	if !up2 {
-		return r2.Rect{b0x, b0y}, false
-	}
-	return r2.Rect{X: b1x, Y: b1y}, true
-}
-
-// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
-// given value x is of a and b. This function makes the following guarantees:
-//  - If x == a, then x1 = a1 (exactly).
-//  - If x == b, then x1 = b1 (exactly).
-//  - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
-// This requires a != b.
-func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
-	// To get results that are accurate near both A and B, we interpolate
-	// starting from the closer of the two points.
-	if math.Abs(a-x) <= math.Abs(b-x) {
-		return a1 + (b1-a1)*(x-a)/(b-a)
-	}
-	return b1 + (a1-b1)*(x-b)/(a-b)
-}
-
-// FaceSegment represents an edge AB clipped to an S2 cube face. It is
-// represented by a face index and a pair of (u,v) coordinates.
-type FaceSegment struct {
-	face int
-	a, b r2.Point
-}
-
-// FaceSegments subdivides the given edge AB at every point where it crosses the
-// boundary between two S2 cube faces and returns the corresponding FaceSegments.
-// The segments are returned in order from A toward B. The input points must be
-// unit length.
-//
-// This function guarantees that the returned segments form a continuous path
-// from A to B, and that all vertices are within faceClipErrorUVDist of the
-// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
-// The results are consistent with Sign, i.e. the edge is well-defined even its
-// endpoints are antipodal.
-// TODO(roberts): Extend the implementation of PointCross so that this is true.
-func FaceSegments(a, b Point) []FaceSegment {
-	var segment FaceSegment
-
-	// Fast path: both endpoints are on the same face.
-	var aFace, bFace int
-	aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
-	bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
-	if aFace == bFace {
-		segment.face = aFace
-		return []FaceSegment{segment}
-	}
-
-	// Starting at A, we follow AB from face to face until we reach the face
-	// containing B. The following code is designed to ensure that we always
-	// reach B, even in the presence of numerical errors.
-	//
-	// First we compute the normal to the plane containing A and B. This normal
-	// becomes the ultimate definition of the line AB; it is used to resolve all
-	// questions regarding where exactly the line goes. Unfortunately due to
-	// numerical errors, the line may not quite intersect the faces containing
-	// the original endpoints. We handle this by moving A and/or B slightly if
-	// necessary so that they are on faces intersected by the line AB.
-	ab := a.PointCross(b)
-
-	aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
-	bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)
-
-	// Now we simply follow AB from face to face until we reach B.
-	var segments []FaceSegment
-	segment.face = aFace
-	bSaved := segment.b
-
-	for face := aFace; face != bFace; {
-		// Complete the current segment by finding the point where AB
-		// exits the current face.
-		z := faceXYZtoUVW(face, ab)
-		n := pointUVW{z.Vector}
-
-		exitAxis := n.exitAxis()
-		segment.b = n.exitPoint(exitAxis)
-		segments = append(segments, segment)
-
-		// Compute the next face intersected by AB, and translate the exit
-		// point of the current segment into the (u,v) coordinates of the
-		// next face. This becomes the first point of the next segment.
-		exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
-		face = nextFace(face, segment.b, exitAxis, n, bFace)
-		exitUvw := faceXYZtoUVW(face, Point{exitXyz})
-		segment.face = face
-		segment.a = r2.Point{exitUvw.X, exitUvw.Y}
-	}
-	// Finish the last segment.
-	segment.b = bSaved
-	return append(segments, segment)
-}
-
-// moveOriginToValidFace updates the origin point to a valid face if necessary.
-// Given a line segment AB whose origin A has been projected onto a given cube
-// face, determine whether it is necessary to project A onto a different face
-// instead. This can happen because the normal of the line AB is not computed
-// exactly, so that the line AB (defined as the set of points perpendicular to
-// the normal) may not intersect the cube face containing A. Even if it does
-// intersect the face, the exit point of the line from that face may be on
-// the wrong side of A (i.e., in the direction away from B). If this happens,
-// we reproject A onto the adjacent face where the line AB approaches A most
-// closely. This moves the origin by a small amount, but never more than the
-// error tolerances.
-func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) {
-	// Fast path: if the origin is sufficiently far inside the face, it is
-	// always safe to use it.
-	const maxSafeUVCoord = 1 - faceClipErrorUVCoord
-	if math.Max(math.Abs((aUV).X), math.Abs((aUV).Y)) <= maxSafeUVCoord {
-		return face, aUV
-	}
-
-	// Otherwise check whether the normal AB even intersects this face.
-	z := faceXYZtoUVW(face, ab)
-	n := pointUVW{z.Vector}
-	if n.intersectsFace() {
-		// Check whether the point where the line AB exits this face is on the
-		// wrong side of A (by more than the acceptable error tolerance).
-		uv := n.exitPoint(n.exitAxis())
-		exit := faceUVToXYZ(face, uv.X, uv.Y)
-		aTangent := ab.Normalize().Cross(a.Vector)
-
-		// We can use the given face.
-		if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians {
-			return face, aUV
-		}
-	}
-
-	// Otherwise we reproject A to the nearest adjacent face. (If line AB does
-	// not pass through a given face, it must pass through all adjacent faces.)
-	var dir int
-	if math.Abs((aUV).X) >= math.Abs((aUV).Y) {
-		// U-axis
-		if aUV.X > 0 {
-			dir = 1
-		}
-		face = uvwFace(face, 0, dir)
-	} else {
-		// V-axis
-		if aUV.Y > 0 {
-			dir = 1
-		}
-		face = uvwFace(face, 1, dir)
-	}
-
-	aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector)
-	aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X))
-	aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y))
-
-	return face, aUV
-}
-
-// nextFace returns the next face that should be visited by FaceSegments, given that
-// we have just visited face and we are following the line AB (represented
-// by its normal N in the (u,v,w) coordinates of that face). The other
-// arguments include the point where AB exits face, the corresponding
-// exit axis, and the target face containing the destination point B.
-func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int {
-	// this bit is to work around C++ cleverly casting bools to ints for you.
-	exitA := exit.X
-	exit1MinusA := exit.Y
-
-	if axis == axisV {
-		exitA = exit.Y
-		exit1MinusA = exit.X
-	}
-	exitAPos := 0
-	if exitA > 0 {
-		exitAPos = 1
-	}
-	exit1MinusAPos := 0
-	if exit1MinusA > 0 {
-		exit1MinusAPos = 1
-	}
-
-	// We return the face that is adjacent to the exit point along the given
-	// axis. If line AB exits *exactly* through a corner of the face, there are
-	// two possible next faces. If one is the target face containing B, then
-	// we guarantee that we advance to that face directly.
-	//
-	// The three conditions below check that (1) AB exits approximately through
-	// a corner, (2) the adjacent face along the non-exit axis is the target
-	// face, and (3) AB exits *exactly* through the corner. (The sumEqual
-	// code checks whether the dot product of (u,v,1) and n is exactly zero.)
-	if math.Abs(exit1MinusA) == 1 &&
-		uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace &&
-		sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) {
-		return targetFace
-	}
-
-	// Otherwise return the face that is adjacent to the exit point in the
-	// direction of the exit axis.
-	return uvwFace(face, int(axis), exitAPos)
-}

+ 0 - 227
vendor/github.com/golang/geo/s2/edge_crosser.go

@@ -1,227 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-)
-
-// EdgeCrosser allows edges to be efficiently tested for intersection with a
-// given fixed edge AB. It is especially efficient when testing for
-// intersection with an edge chain connecting vertices v0, v1, v2, ...
-//
-// Example usage:
-//
-//	func CountIntersections(a, b Point, edges []Edge) int {
-//		count := 0
-//		crosser := NewEdgeCrosser(a, b)
-//		for _, edge := range edges {
-//			if crosser.CrossingSign(&edge.First, &edge.Second) != DoNotCross {
-//				count++
-//			}
-//		}
-//		return count
-//	}
-//
-type EdgeCrosser struct {
-	a   Point
-	b   Point
-	aXb Point
-
-	// To reduce the number of calls to expensiveSign, we compute an
-	// outward-facing tangent at A and B if necessary. If the plane
-	// perpendicular to one of these tangents separates AB from CD (i.e., one
-	// edge on each side) then there is no intersection.
-	aTangent Point // Outward-facing tangent at A.
-	bTangent Point // Outward-facing tangent at B.
-
-	// The fields below are updated for each vertex in the chain.
-	c   Point     // Previous vertex in the vertex chain.
-	acb Direction // The orientation of triangle ACB.
-}
-
-// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
-func NewEdgeCrosser(a, b Point) *EdgeCrosser {
-	norm := a.PointCross(b)
-	return &EdgeCrosser{
-		a:        a,
-		b:        b,
-		aXb:      Point{a.Cross(b.Vector)},
-		aTangent: Point{a.Cross(norm.Vector)},
-		bTangent: Point{norm.Cross(b.Vector)},
-	}
-}
-
-// CrossingSign reports whether the edge AB intersects the edge CD. If any two
-// vertices from different edges are the same, returns MaybeCross. If either edge
-// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
-//
-// Properties of CrossingSign:
-//
-//  (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
-//  (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
-//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
-//  (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
-//
-// Note that if you want to check an edge against a chain of other edges,
-// it is slightly more efficient to use the single-argument version
-// ChainCrossingSign below.
-func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
-	if c != e.c {
-		e.RestartAt(c)
-	}
-	return e.ChainCrossingSign(d)
-}
-
-// EdgeOrVertexCrossing reports whether if CrossingSign(c, d) > 0, or AB and
-// CD share a vertex and VertexCrossing(a, b, c, d) is true.
-//
-// This method extends the concept of a "crossing" to the case where AB
-// and CD have a vertex in common. The two edges may or may not cross,
-// according to the rules defined in VertexCrossing above. The rules
-// are designed so that point containment tests can be implemented simply
-// by counting edge crossings. Similarly, determining whether one edge
-// chain crosses another edge chain can be implemented by counting.
-func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
-	if c != e.c {
-		e.RestartAt(c)
-	}
-	return e.EdgeOrVertexChainCrossing(d)
-}
-
-// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
-// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
-//
-// You don't need to use this or any of the chain functions unless you're trying to
-// squeeze out every last drop of performance. Essentially all you are saving is a test
-// whether the first vertex of the current edge is the same as the second vertex of the
-// previous edge.
-func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
-	e := NewEdgeCrosser(a, b)
-	e.RestartAt(c)
-	return e
-}
-
-// RestartAt sets the current point of the edge crosser to be c.
-// Call this method when your chain 'jumps' to a new place.
-// The argument must point to a value that persists until the next call.
-func (e *EdgeCrosser) RestartAt(c Point) {
-	e.c = c
-	e.acb = -triageSign(e.a, e.b, e.c)
-}
-
-// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
-// the crossing methods (or RestartAt) as the first vertex of the current edge.
-func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
-	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
-	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
-	// as part of our state. When each new point D arrives, we compute the
-	// orientation of BDA and check whether it matches ACB. This checks whether
-	// the points C and D are on opposite sides of the great circle through AB.
-
-	// Recall that triageSign is invariant with respect to rotating its
-	// arguments, i.e. ABD has the same orientation as BDA.
-	bda := triageSign(e.a, e.b, d)
-	if e.acb == -bda && bda != Indeterminate {
-		// The most common case -- triangles have opposite orientations. Save the
-		// current vertex D as the next vertex C, and also save the orientation of
-		// the new triangle ACB (which is opposite to the current triangle BDA).
-		e.c = d
-		e.acb = -bda
-		return DoNotCross
-	}
-	return e.crossingSign(d, bda)
-}
-
-// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
-// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
-func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
-	// We need to copy e.c since it is clobbered by ChainCrossingSign.
-	c := e.c
-	switch e.ChainCrossingSign(d) {
-	case DoNotCross:
-		return false
-	case Cross:
-		return true
-	}
-	return VertexCrossing(e.a, e.b, c, d)
-}
-
-// crossingSign handle the slow path of CrossingSign.
-func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
-	// Compute the actual result, and then save the current vertex D as the next
-	// vertex C, and save the orientation of the next triangle ACB (which is
-	// opposite to the current triangle BDA).
-	defer func() {
-		e.c = d
-		e.acb = -bda
-	}()
-
-	// At this point, a very common situation is that A,B,C,D are four points on
-	// a line such that AB does not overlap CD. (For example, this happens when
-	// a line or curve is sampled finely, or when geometry is constructed by
-	// computing the union of S2CellIds.) Most of the time, we can determine
-	// that AB and CD do not intersect using the two outward-facing
-	// tangents at A and B (parallel to AB) and testing whether AB and CD are on
-	// opposite sides of the plane perpendicular to one of these tangents. This
-	// is moderately expensive but still much cheaper than expensiveSign.
-
-	// The error in RobustCrossProd is insignificant. The maximum error in
-	// the call to CrossProd (i.e., the maximum norm of the error vector) is
-	// (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
-	// DotProd below is dblEpsilon. (There is also a small relative error
-	// term that is insignificant because we are comparing the result against a
-	// constant that is very close to zero.)
-	maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
-	if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
-		return DoNotCross
-	}
-
-	// Otherwise, eliminate the cases where two vertices from different edges are
-	// equal. (These cases could be handled in the code below, but we would rather
-	// avoid calling ExpensiveSign if possible.)
-	if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
-		return MaybeCross
-	}
-
-	// Eliminate the cases where an input edge is degenerate. (Note that in
-	// most cases, if CD is degenerate then this method is not even called
-	// because acb and bda have different signs.)
-	if e.a == e.b || e.c == d {
-		return DoNotCross
-	}
-
-	// Otherwise it's time to break out the big guns.
-	if e.acb == Indeterminate {
-		e.acb = -expensiveSign(e.a, e.b, e.c)
-	}
-	if bda == Indeterminate {
-		bda = expensiveSign(e.a, e.b, d)
-	}
-
-	if bda != e.acb {
-		return DoNotCross
-	}
-
-	cbd := -RobustSign(e.c, d, e.b)
-	if cbd != e.acb {
-		return DoNotCross
-	}
-	dac := RobustSign(e.c, d, e.a)
-	if dac != e.acb {
-		return DoNotCross
-	}
-	return Cross
-}

+ 0 - 396
vendor/github.com/golang/geo/s2/edge_crossings.go

@@ -1,396 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"math"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	// intersectionError can be set somewhat arbitrarily, because the algorithm
-	// uses more precision if necessary in order to achieve the specified error.
-	// The only strict requirement is that intersectionError >= dblEpsilon
-	// radians. However, using a larger error tolerance makes the algorithm more
-	// efficient because it reduces the number of cases where exact arithmetic is
-	// needed.
-	intersectionError = s1.Angle(8 * dblError)
-
-	// intersectionMergeRadius is used to ensure that intersection points that
-	// are supposed to be coincident are merged back together into a single
-	// vertex. This is required in order for various polygon operations (union,
-	// intersection, etc) to work correctly. It is twice the intersection error
-	// because two coincident intersection points might have errors in
-	// opposite directions.
-	intersectionMergeRadius = 2 * intersectionError
-)
-
-// A Crossing indicates how edges cross.
-type Crossing int
-
-const (
-	// Cross means the edges cross.
-	Cross Crossing = iota
-	// MaybeCross means two vertices from different edges are the same.
-	MaybeCross
-	// DoNotCross means the edges do not cross.
-	DoNotCross
-)
-
-func (c Crossing) String() string {
-	switch c {
-	case Cross:
-		return "Cross"
-	case MaybeCross:
-		return "MaybeCross"
-	case DoNotCross:
-		return "DoNotCross"
-	default:
-		return fmt.Sprintf("(BAD CROSSING %d)", c)
-	}
-}
-
-// CrossingSign reports whether the edge AB intersects the edge CD.
-// If AB crosses CD at a point that is interior to both edges, Cross is returned.
-// If any two vertices from different edges are the same it returns MaybeCross.
-// Otherwise it returns DoNotCross.
-// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
-// if two vertices from different edges are the same and DoNotCross otherwise.
-//
-// Properties of CrossingSign:
-//
-//  (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
-//  (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
-//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
-//  (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
-//
-// This method implements an exact, consistent perturbation model such
-// that no three points are ever considered to be collinear. This means
-// that even if you have 4 points A, B, C, D that lie exactly in a line
-// (say, around the equator), C and D will be treated as being slightly to
-// one side or the other of AB. This is done in a way such that the
-// results are always consistent (see RobustSign).
-func CrossingSign(a, b, c, d Point) Crossing {
-	crosser := NewChainEdgeCrosser(a, b, c)
-	return crosser.ChainCrossingSign(d)
-}
-
-// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
-// containment tests can be implemented by counting the number of edge crossings.
-//
-// Given two edges AB and CD where at least two vertices are identical
-// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing"
-// occurs if AB is encountered after CD during a CCW sweep around the shared
-// vertex starting from a fixed reference point.
-//
-// Note that according to this rule, if AB crosses CD then in general CD
-// does not cross AB. However, this leads to the correct result when
-// counting polygon edge crossings. For example, suppose that A,B,C are
-// three consecutive vertices of a CCW polygon. If we now consider the edge
-// crossings of a segment BP as P sweeps around B, the crossing number
-// changes parity exactly when BP crosses BA or BC.
-//
-// Useful properties of VertexCrossing (VC):
-//
-//  (1) VC(a,a,c,d) == VC(a,b,c,c) == false
-//  (2) VC(a,b,a,b) == VC(a,b,b,a) == true
-//  (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
-//  (3) If exactly one of a,b equals one of c,d, then exactly one of
-//      VC(a,b,c,d) and VC(c,d,a,b) is true
-//
-// It is an error to call this method with 4 distinct vertices.
-func VertexCrossing(a, b, c, d Point) bool {
-	// If A == B or C == D there is no intersection. We need to check this
-	// case first in case 3 or more input points are identical.
-	if a == b || c == d {
-		return false
-	}
-
-	// If any other pair of vertices is equal, there is a crossing if and only
-	// if OrderedCCW indicates that the edge AB is further CCW around the
-	// shared vertex O (either A or B) than the edge CD, starting from an
-	// arbitrary fixed reference point.
-
-	// Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
-	switch {
-	case a == c:
-		return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
-	case b == d:
-		return OrderedCCW(Point{b.Ortho()}, c, a, b)
-	case a == d:
-		return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
-	case b == c:
-		return OrderedCCW(Point{b.Ortho()}, d, a, b)
-	}
-
-	return false
-}
-
-// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
-// handle cases where all four vertices are distinct, and VertexCrossing to
-// handle cases where two or more vertices are the same. This defines a crossing
-// function such that point-in-polygon containment tests can be implemented
-// by simply counting edge crossings.
-func EdgeOrVertexCrossing(a, b, c, d Point) bool {
-	switch CrossingSign(a, b, c, d) {
-	case DoNotCross:
-		return false
-	case Cross:
-		return true
-	default:
-		return VertexCrossing(a, b, c, d)
-	}
-}
-
-// Intersection returns the intersection point of two edges AB and CD that cross
-// (CrossingSign(a,b,c,d) == Crossing).
-//
-// Useful properties of Intersection:
-//
-//  (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
-//  (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
-//
-// The returned intersection point X is guaranteed to be very close to the
-// true intersection point of AB and CD, even if the edges intersect at a
-// very small angle.
-func Intersection(a0, a1, b0, b1 Point) Point {
-	// It is difficult to compute the intersection point of two edges accurately
-	// when the angle between the edges is very small. Previously we handled
-	// this by only guaranteeing that the returned intersection point is within
-	// intersectionError of each edge. However, this means that when the edges
-	// cross at a very small angle, the computed result may be very far from the
-	// true intersection point.
-	//
-	// Instead this function now guarantees that the result is always within
-	// intersectionError of the true intersection. This requires using more
-	// sophisticated techniques and in some cases extended precision.
-	//
-	//  - intersectionStable computes the intersection point using
-	//    projection and interpolation, taking care to minimize cancellation
-	//    error.
-	//
-	//  - intersectionExact computes the intersection point using precision
-	//    arithmetic and converts the final result back to an Point.
-	pt, ok := intersectionStable(a0, a1, b0, b1)
-	if !ok {
-		pt = intersectionExact(a0, a1, b0, b1)
-	}
-
-	// Make sure the intersection point is on the correct side of the sphere.
-	// Since all vertices are unit length, and edges are less than 180 degrees,
-	// (a0 + a1) and (b0 + b1) both have positive dot product with the
-	// intersection point.  We use the sum of all vertices to make sure that the
-	// result is unchanged when the edges are swapped or reversed.
-	if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 {
-		pt = Point{pt.Mul(-1)}
-	}
-
-	return pt
-}
-
-// Computes the cross product of two vectors, normalized to be unit length.
-// Also returns the length of the cross
-// product before normalization, which is useful for estimating the amount of
-// error in the result.  For numerical stability, the vectors should both be
-// approximately unit length.
-func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
-	var pt r3.Vector
-	// This computes 2 * (x.Cross(y)), but has much better numerical
-	// stability when x and y are unit length.
-	tmp := x.Sub(y).Cross(x.Add(y))
-	length := tmp.Norm()
-	if length != 0 {
-		pt = tmp.Mul(1 / length)
-	}
-	return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
-}
-
-/*
-// intersectionSimple is not used by the C++ so it is skipped here.
-*/
-
-// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
-// on the error in the result. aNorm is not necessarily unit length.
-//
-// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
-// a0 and a1) allow this dot product to be computed more accurately and efficiently.
-func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
-	// The error in the dot product is proportional to the lengths of the input
-	// vectors, so rather than using x itself (a unit-length vector) we use
-	// the vectors from x to the closer of the two edge endpoints. This
-	// typically reduces the error by a huge factor.
-	x0 := x.Sub(a0.Vector)
-	x1 := x.Sub(a1.Vector)
-	x0Dist2 := x0.Norm2()
-	x1Dist2 := x1.Norm2()
-
-	// If both distances are the same, we need to be careful to choose one
-	// endpoint deterministically so that the result does not change if the
-	// order of the endpoints is reversed.
-	var dist float64
-	if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) {
-		dist = math.Sqrt(x0Dist2)
-		proj = x0.Dot(aNorm)
-	} else {
-		dist = math.Sqrt(x1Dist2)
-		proj = x1.Dot(aNorm)
-	}
-
-	// This calculation bounds the error from all sources: the computation of
-	// the normal, the subtraction of one endpoint, and the dot product itself.
-	// dblError appears because the input points are assumed to be
-	// normalized in double precision.
-	//
-	// For reference, the bounds that went into this calculation are:
-	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
-	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
-	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
-	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
-	return proj, bound
-}
-
-// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
-// ordering on edges that is invariant under edge reversals.
-func compareEdges(a0, a1, b0, b1 Point) bool {
-	if a0.Cmp(a1.Vector) != -1 {
-		a0, a1 = a1, a0
-	}
-	if b0.Cmp(b1.Vector) != -1 {
-		b0, b1 = b1, b0
-	}
-	return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && b0.Cmp(b1.Vector) == -1)
-}
-
-// intersectionStable returns the intersection point of the edges (a0,a1) and
-// (b0,b1) if it can be computed to within an error of at most intersectionError
-// by this function.
-//
-// The intersection point is not guaranteed to have the correct sign because we
-// choose to use the longest of the two edges first. The sign is corrected by
-// Intersection.
-func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
-	// Sort the two edges so that (a0,a1) is longer, breaking ties in a
-	// deterministic way that does not depend on the ordering of the endpoints.
-	// This is desirable for two reasons:
-	//  - So that the result doesn't change when edges are swapped or reversed.
-	//  - It reduces error, since the first edge is used to compute the edge
-	//    normal (where a longer edge means less error), and the second edge
-	//    is used for interpolation (where a shorter edge means less error).
-	aLen2 := a1.Sub(a0.Vector).Norm2()
-	bLen2 := b1.Sub(b0.Vector).Norm2()
-	if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) {
-		return intersectionStableSorted(b0, b1, a0, a1)
-	}
-	return intersectionStableSorted(a0, a1, b0, b1)
-}
-
-// intersectionStableSorted is a helper function for intersectionStable.
-// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
-// the first edge passed in is longer.
-func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
-	var pt Point
-
-	// Compute the normal of the plane through (a0, a1) in a stable way.
-	aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
-	aNormLen := aNorm.Norm()
-	bLen := b1.Sub(b0.Vector).Norm()
-
-	// Compute the projection (i.e., signed distance) of b0 and b1 onto the
-	// plane through (a0, a1).  Distances are scaled by the length of aNorm.
-	b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
-	b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)
-
-	// The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
-	// |b0Dist - b1Dist|.  Note that b0Dist and b1Dist generally have
-	// opposite signs because b0 and b1 are on opposite sides of (a0, a1).  The
-	// code below finds the intersection point by interpolating along the edge
-	// (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
-	//
-	// It can be shown that the maximum error in the interpolation fraction is
-	//
-	//   (b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
-	//
-	// We save ourselves some work by scaling the result and the error bound by
-	// "distSum", since the result is normalized to be unit length anyway.
-	distSum := math.Abs(b0Dist - b1Dist)
-	errorSum := b0Error + b1Error
-	if distSum <= errorSum {
-		return pt, false // Error is unbounded in this case.
-	}
-
-	x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
-	err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
-		(distSum-errorSum) + 2*distSum*epsilon
-
-	// Finally we normalize the result, compute the corresponding error, and
-	// check whether the total error is acceptable.
-	xLen := x.Norm()
-	maxError := intersectionError
-	if err > (float64(maxError)-epsilon)*xLen {
-		return pt, false
-	}
-
-	return Point{x.Mul(1 / xLen)}, true
-}
-
-// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
-// using precise arithmetic. Note that the result is not exact because it is
-// rounded down to double precision at the end. Also, the intersection point
-// is not guaranteed to have the correct sign (i.e., the return value may need
-// to be negated).
-func intersectionExact(a0, a1, b0, b1 Point) Point {
-	// Since we are using presice arithmetic, we don't need to worry about
-	// numerical stability.
-	a0P := r3.PreciseVectorFromVector(a0.Vector)
-	a1P := r3.PreciseVectorFromVector(a1.Vector)
-	b0P := r3.PreciseVectorFromVector(b0.Vector)
-	b1P := r3.PreciseVectorFromVector(b1.Vector)
-	aNormP := a0P.Cross(a1P)
-	bNormP := b0P.Cross(b1P)
-	xP := aNormP.Cross(bNormP)
-
-	// The final Normalize() call is done in double precision, which creates a
-	// directional error of up to 2*dblError. (Precise conversion and Normalize()
-	// each contribute up to dblError of directional error.)
-	x := xP.Vector()
-
-	if x == (r3.Vector{}) {
-		// The two edges are exactly collinear, but we still consider them to be
-		// "crossing" because of simulation of simplicity. Out of the four
-		// endpoints, exactly two lie in the interior of the other edge. Of
-		// those two we return the one that is lexicographically smallest.
-		x = r3.Vector{10, 10, 10} // Greater than any valid S2Point
-
-		aNorm := Point{aNormP.Vector()}
-		bNorm := Point{bNormP.Vector()}
-		if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 {
-			return a0
-		}
-		if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 {
-			return a1
-		}
-		if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 {
-			return b0
-		}
-		if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 {
-			return b1
-		}
-	}
-
-	return Point{x}
-}

+ 0 - 408
vendor/github.com/golang/geo/s2/edge_distances.go

@@ -1,408 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// This file defines a collection of methods for computing the distance to an edge,
-// interpolating along an edge, projecting points onto edges, etc.
-
-import (
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// DistanceFromSegment returns the distance of point X from line segment AB.
-// The points are expected to be normalized. The result is very accurate for small
-// distances but may have some numerical error if the distance is large
-// (approximately pi/2 or greater). The case A == B is handled correctly.
-func DistanceFromSegment(x, a, b Point) s1.Angle {
-	var minDist s1.ChordAngle
-	minDist, _ = updateMinDistance(x, a, b, minDist, true)
-	return minDist.Angle()
-}
-
-// IsDistanceLess reports whether the distance from X to the edge AB is less
-// than limit. (For less than or equal to, specify limit.Successor()).
-// This method is faster than DistanceFromSegment(). If you want to
-// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
-// once and save the value, since this conversion is relatively expensive.
-func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
-	_, less := UpdateMinDistance(x, a, b, limit)
-	return less
-}
-
-// UpdateMinDistance checks if the distance from X to the edge AB is less
-// than minDist, and if so, returns the updated value and true.
-// The case A == B is handled correctly.
-//
-// Use this method when you want to compute many distances and keep track of
-// the minimum. It is significantly faster than using DistanceFromSegment
-// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it
-// can save a lot of work by not actually computing the distance when it is
-// obviously larger than the current minimum.
-func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
-	return updateMinDistance(x, a, b, minDist, false)
-}
-
-// UpdateMaxDistance checks if the distance from X to the edge AB is greater
-// than maxDist, and if so, returns the updated value and true.
-// Otherwise it returns false. The case A == B is handled correctly.
-func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
-	dist := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b))
-	if dist > s1.RightChordAngle {
-		dist, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, dist, true)
-		dist = s1.StraightChordAngle - dist
-	}
-	if maxDist < dist {
-		return dist, true
-	}
-
-	return maxDist, false
-}
-
-// IsInteriorDistanceLess reports whether the minimum distance from X to the edge
-// AB is attained at an interior point of AB (i.e., not an endpoint), and that
-// distance is less than limit. (Specify limit.Successor() for less than or equal to).
-func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
-	_, less := UpdateMinInteriorDistance(x, a, b, limit)
-	return less
-}
-
-// UpdateMinInteriorDistance reports whether the minimum distance from X to AB
-// is attained at an interior point of AB (i.e., not an endpoint), and that distance
-// is less than minDist. If so, the value of minDist is updated and true is returned.
-// Otherwise it is unchanged and returns false.
-func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
-	return interiorDist(x, a, b, minDist, false)
-}
-
-// Project returns the point along the edge AB that is closest to the point X.
-// The fractional distance of this point along the edge AB can be obtained
-// using DistanceFraction.
-//
-// This requires that all points are unit length.
-func Project(x, a, b Point) Point {
-	aXb := a.PointCross(b)
-	// Find the closest point to X along the great circle through AB.
-	p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
-
-	// If this point is on the edge AB, then it's the closest point.
-	if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
-		return Point{p.Normalize()}
-	}
-
-	// Otherwise, the closest point is either A or B.
-	if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
-		return a
-	}
-	return b
-}
-
-// DistanceFraction returns the distance ratio of the point X along an edge AB.
-// If X is on the line segment AB, this is the fraction T such
-// that X == Interpolate(T, A, B).
-//
-// This requires that A and B are distinct.
-func DistanceFraction(x, a, b Point) float64 {
-	d0 := x.Angle(a.Vector)
-	d1 := x.Angle(b.Vector)
-	return float64(d0 / (d0 + d1))
-}
-
-// Interpolate returns the point X along the line segment AB whose distance from A
-// is the given fraction "t" of the distance AB. Does NOT require that "t" be
-// between 0 and 1. Note that all distances are measured on the surface of
-// the sphere, so this is more complicated than just computing (1-t)*a + t*b
-// and normalizing the result.
-func Interpolate(t float64, a, b Point) Point {
-	if t == 0 {
-		return a
-	}
-	if t == 1 {
-		return b
-	}
-	ab := a.Angle(b.Vector)
-	return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
-}
-
-// InterpolateAtDistance returns the point X along the line segment AB whose
-// distance from A is the angle ax.
-func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
-	aRad := ax.Radians()
-
-	// Use PointCross to compute the tangent vector at A towards B. The
-	// result is always perpendicular to A, even if A=B or A=-B, but it is not
-	// necessarily unit length. (We effectively normalize it below.)
-	normal := a.PointCross(b)
-	tangent := normal.Vector.Cross(a.Vector)
-
-	// Now compute the appropriate linear combination of A and "tangent". With
-	// infinite precision the result would always be unit length, but we
-	// normalize it anyway to ensure that the error is within acceptable bounds.
-	// (Otherwise errors can build up when the result of one interpolation is
-	// fed into another interpolation.)
-	return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
-}
-
-// minUpdateDistanceMaxError returns the maximum error in the result of
-// UpdateMinDistance (and the associated functions such as
-// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
-// input points are normalized to within the bounds guaranteed by r3.Vector's
-// Normalize. The error can be added or subtracted from an s1.ChordAngle
-// using its Expanded method.
-func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
-	// There are two cases for the maximum error in UpdateMinDistance(),
-	// depending on whether the closest point is interior to the edge.
-	return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError())
-}
-
-// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
-// UpdateMinInteriorDistance, assuming that all input points are normalized
-// to within the bounds guaranteed by Point's Normalize. The error can be added
-// or subtracted from an s1.ChordAngle using its Expanded method.
-//
-// Note that accuracy goes down as the distance approaches 0 degrees or 180
-// degrees (for different reasons). Near 0 degrees the error is acceptable
-// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers).  For
-// exactly antipodal points the maximum error is quite high (0.5 meters),
-// but this error drops rapidly as the points move away from antipodality
-// (approximately 1 millimeter for points that are 50 meters from antipodal,
-// and 1 micrometer for points that are 50km from antipodal).
-//
-// TODO(roberts): Currently the error bound does not hold for edges whose endpoints
-// are antipodal to within about 1e-15 radians (less than 1 micron). This could
-// be fixed by extending PointCross to use higher precision when necessary.
-func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
-	// If a point is more than 90 degrees from an edge, then the minimum
-	// distance is always to one of the endpoints, not to the edge interior.
-	if dist >= s1.RightChordAngle {
-		return 0.0
-	}
-
-	// This bound includes all source of error, assuming that the input points
-	// are normalized. a and b are components of chord length that are
-	// perpendicular and parallel to a plane containing the edge respectively.
-	b := math.Min(1.0, 0.5*float64(dist))
-	a := math.Sqrt(b * (2 - b))
-	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
-		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
-		(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
-}
-
-// updateMinDistance computes the distance from a point X to a line segment AB,
-// and if either the distance was less than the given minDist, or alwaysUpdate is
-// true, the value and whether it was updated are returned.
-func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
-	if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
-		// Minimum distance is attained along the edge interior.
-		return d, true
-	}
-
-	// Otherwise the minimum distance is to one of the endpoints.
-	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
-	dist := s1.ChordAngle(math.Min(xa2, xb2))
-	if !alwaysUpdate && dist >= minDist {
-		return minDist, false
-	}
-	return dist, true
-}
-
-// interiorDist returns the shortest distance from point x to edge ab, assuming
-// that the closest point to X is interior to AB. If the closest point is not
-// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set to
-// false, the distance is only updated when the value exceeds certain the given minDist.
-func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
-	// Chord distance of x to both end points a and b.
-	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
-
-	// The closest point on AB could either be one of the two vertices (the
-	// vertex case) or in the interior (the interior case). Let C = A x B.
-	// If X is in the spherical wedge extending from A to B around the axis
-	// through C, then we are in the interior case. Otherwise we are in the
-	// vertex case.
-	//
-	// Check whether we might be in the interior case. For this to be true, XAB
-	// and XBA must both be acute angles. Checking this condition exactly is
-	// expensive, so instead we consider the planar triangle ABX (which passes
-	// through the sphere's interior). The planar angles XAB and XBA are always
-	// less than the corresponding spherical angles, so if we are in the
-	// interior case then both of these angles must be acute.
-	//
-	// We check this by computing the squared edge lengths of the planar
-	// triangle ABX, and testing whether angles XAB and XBA are both acute using
-	// the law of cosines:
-	//
-	//            | XA^2 - XB^2 | < AB^2      (*)
-	//
-	// This test must be done conservatively (taking numerical errors into
-	// account) since otherwise we might miss a situation where the true minimum
-	// distance is achieved by a point on the edge interior.
-	//
-	// There are two sources of error in the expression above (*).  The first is
-	// that points are not normalized exactly; they are only guaranteed to be
-	// within 2 * dblEpsilon of unit length.  Under the assumption that the two
-	// sides of (*) are nearly equal, the total error due to normalization errors
-	// can be shown to be at most
-	//
-	//        2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
-	//
-	// The other source of error is rounding of results in the calculation of (*).
-	// Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon,
-	// plus an additional relative error of 0.5 * dblEpsilon in the final
-	// subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 +
-	// AB^2) for convenience.  This yields a final error bound of
-	//
-	//        4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
-	ab2 := a.Sub(b.Vector).Norm2()
-	maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon)
-	if math.Abs(xa2-xb2) >= ab2+maxError {
-		return minDist, false
-	}
-
-	// The minimum distance might be to a point on the edge interior. Let R
-	// be closest point to X that lies on the great circle through AB. Rather
-	// than computing the geodesic distance along the surface of the sphere,
-	// instead we compute the "chord length" through the sphere's interior.
-	//
-	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
-	// is the point X projected onto the plane through the great circle AB.
-	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
-	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
-	// is faster and the corresponding distance on the Earth's surface is
-	// accurate to within 1% for distances up to about 1800km.
-	c := a.PointCross(b)
-	c2 := c.Norm2()
-	xDotC := x.Dot(c.Vector)
-	xDotC2 := xDotC * xDotC
-	if !alwaysUpdate && xDotC2 > c2*float64(minDist) {
-		// The closest point on the great circle AB is too far away.  We need to
-		// test this using ">" rather than ">=" because the actual minimum bound
-		// on the distance is (xDotC2 / c2), which can be rounded differently
-		// than the (more efficient) multiplicative test above.
-		return minDist, false
-	}
-
-	// Otherwise we do the exact, more expensive test for the interior case.
-	// This test is very likely to succeed because of the conservative planar
-	// test we did initially.
-	//
-	// TODO(roberts): Ensure that the errors in test are accurately reflected in the
-	// minUpdateInteriorDistanceMaxError.
-	cx := c.Cross(x.Vector)
-	if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 {
-		return minDist, false
-	}
-
-	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
-	// This calculation has good accuracy for all chord lengths since it
-	// is based on both the dot product and cross product (rather than
-	// deriving one from the other). However, note that the chord length
-	// representation itself loses accuracy as the angle approaches π.
-	qr := 1 - math.Sqrt(cx.Norm2()/c2)
-	dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr))
-
-	if !alwaysUpdate && dist >= minDist {
-		return minDist, false
-	}
-
-	return dist, true
-}
-
-// updateEdgePairMinDistance computes the minimum distance between the given
-// pair of edges. If the two edges cross, the distance is zero. The cases
-// a0 == a1 and b0 == b1 are handled correctly.
-func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
-	if minDist == 0 {
-		return 0, false
-	}
-	if CrossingSign(a0, a1, b0, b1) == Cross {
-		minDist = 0
-		return 0, true
-	}
-
-	// Otherwise, the minimum distance is achieved at an endpoint of at least
-	// one of the two edges. We ensure that all four possibilities are always checked.
-	//
-	// The calculation below computes each of the six vertex-vertex distances
-	// twice (this could be optimized).
-	var ok1, ok2, ok3, ok4 bool
-	minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
-	minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
-	minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
-	minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
-	return minDist, ok1 || ok2 || ok3 || ok4
-}
-
-// updateEdgePairMaxDistance reports the minimum distance between the given pair of edges.
-// If one edge crosses the antipodal reflection of the other, the distance is pi.
-func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
-	if maxDist == s1.StraightChordAngle {
-		return s1.StraightChordAngle, false
-	}
-	if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross {
-		return s1.StraightChordAngle, true
-	}
-
-	// Otherwise, the maximum distance is achieved at an endpoint of at least
-	// one of the two edges. We ensure that all four possibilities are always checked.
-	//
-	// The calculation below computes each of the six vertex-vertex distances
-	// twice (this could be optimized).
-	var ok1, ok2, ok3, ok4 bool
-	maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist)
-	maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist)
-	maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist)
-	maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist)
-	return maxDist, ok1 || ok2 || ok3 || ok4
-}
-
-// EdgePairClosestPoints returns the pair of points (a, b) that achieves the
-// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and
-// b is a point on b0b1. If the two edges intersect, a and b are both equal to
-// the intersection point. Handles a0 == a1 and b0 == b1 correctly.
-func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
-	if CrossingSign(a0, a1, b0, b1) == Cross {
-		x := Intersection(a0, a1, b0, b1)
-		return x, x
-	}
-	// We save some work by first determining which vertex/edge pair achieves
-	// the minimum distance, and then computing the closest point on that edge.
-	var minDist s1.ChordAngle
-	var ok bool
-
-	minDist, ok = updateMinDistance(a0, b0, b1, minDist, true)
-	closestVertex := 0
-	if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok {
-		closestVertex = 1
-	}
-	if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok {
-		closestVertex = 2
-	}
-	if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
-		closestVertex = 3
-	}
-	switch closestVertex {
-	case 0:
-		return a0, Project(a0, b0, b1)
-	case 1:
-		return a1, Project(a1, b0, b1)
-	case 2:
-		return Project(b0, a0, a1), b0
-	case 3:
-		return Project(b1, a0, a1), b1
-	default:
-		panic("illegal case reached")
-	}
-}

+ 0 - 512
vendor/github.com/golang/geo/s2/edge_query.go

@@ -1,512 +0,0 @@
-// Copyright 2019 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"sort"
-
-	"github.com/golang/geo/s1"
-)
-
-// EdgeQueryOptions holds the options for controlling how EdgeQuery operates.
-//
-// Options can be chained together builder-style:
-//
-//	opts = NewClosestEdgeQueryOptions().
-//		MaxResults(1).
-//		DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
-//		MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
-//	query = NewClosestEdgeQuery(index, opts)
-//
-//  or set individually:
-//
-//	opts = NewClosestEdgeQueryOptions()
-//	opts.IncludeInteriors(true)
-//
-// or just inline:
-//
-//	query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3))
-//
-// If you pass a nil as the options you get the default values for the options.
-type EdgeQueryOptions struct {
-	common *queryOptions
-}
-
-// DistanceLimit specifies that only edges whose distance to the target is
-// within, this distance should be returned.  Edges whose distance is equal
-// are not returned. To include values that are equal, specify the limit with
-// the next largest representable distance. i.e. limit.Successor().
-func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions {
-	e.common = e.common.DistanceLimit(limit)
-	return e
-}
-
-// IncludeInteriors specifies whether polygon interiors should be
-// included when measuring distances.
-func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions {
-	e.common = e.common.IncludeInteriors(x)
-	return e
-}
-
-// UseBruteForce sets or disables the use of brute force in a query.
-func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions {
-	e.common = e.common.UseBruteForce(x)
-	return e
-}
-
-// MaxError specifies that edges up to dist away than the true
-// matching edges may be substituted in the result set, as long as such
-// edges satisfy all the remaining search criteria (such as DistanceLimit).
-// This option only has an effect if MaxResults is also specified;
-// otherwise all edges closer than MaxDistance will always be returned.
-func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions {
-	e.common = e.common.MaxError(dist)
-	return e
-}
-
-// MaxResults specifies that at most MaxResults edges should be returned.
-// This must be at least 1.
-func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions {
-	e.common = e.common.MaxResults(n)
-	return e
-}
-
-// NewClosestEdgeQueryOptions returns a set of edge query options suitable
-// for performing closest edge queries.
-func NewClosestEdgeQueryOptions() *EdgeQueryOptions {
-	return &EdgeQueryOptions{
-		common: newQueryOptions(minDistance(0)),
-	}
-}
-
-// NewFurthestEdgeQueryOptions returns a set of edge query options suitable
-// for performing furthest edge queries.
-func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
-	return &EdgeQueryOptions{
-		common: newQueryOptions(maxDistance(0)),
-	}
-}
-
-// EdgeQueryResult represents an edge that meets the target criteria for the
-// query. Note the following special cases:
-//
-//  - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
-//    Such results may be returned when the option IncludeInteriors is true.
-//
-//  - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
-//    satisfies the requested query options.
-type EdgeQueryResult struct {
-	distance distance
-	shapeID  int32
-	edgeID   int32
-}
-
-// Distance reports the distance between the edge in this shape that satisfied
-// the query's parameters.
-func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() }
-
-// ShapeID reports the ID of the Shape this result is for.
-func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID }
-
-// EdgeID reports the ID of the edge in the results Shape.
-func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID }
-
-// newEdgeQueryResult returns a result instance with default values.
-func newEdgeQueryResult(target distanceTarget) EdgeQueryResult {
-	return EdgeQueryResult{
-		distance: target.distance().infinity(),
-		shapeID:  -1,
-		edgeID:   -1,
-	}
-}
-
-// IsInterior reports if this result represents the interior of a Shape.
-func (e EdgeQueryResult) IsInterior() bool {
-	return e.shapeID >= 0 && e.edgeID < 0
-}
-
-// IsEmpty reports if this has no edge that satisfies the given edge query options.
-// This result is only returned in one special case, namely when FindEdge() does
-// not find any suitable edges.
-func (e EdgeQueryResult) IsEmpty() bool {
-	return e.shapeID < 0
-}
-
-// Less reports if this results is less that the other first by distance,
-// then by (shapeID, edgeID). This is used for sorting.
-func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
-	if e.distance.less(other.distance) {
-		return true
-	}
-	if other.distance.less(e.distance) {
-		return false
-	}
-	if e.shapeID < other.shapeID {
-		return true
-	}
-	if other.shapeID < e.shapeID {
-		return false
-	}
-	return e.edgeID < other.edgeID
-}
-
-// EdgeQuery is used to find the edge(s) between two geometries that match a
-// given set of options. It is flexible enough so that it can be adapted to
-// compute maximum distances and even potentially Hausdorff distances.
-//
-// By using the appropriate options, this type can answer questions such as:
-//
-//  - Find the minimum distance between two geometries A and B.
-//  - Find all edges of geometry A that are within a distance D of geometry B.
-//  - Find the k edges of geometry A that are closest to a given point P.
-//
-// You can also specify whether polygons should include their interiors (i.e.,
-// if a point is contained by a polygon, should the distance be zero or should
-// it be measured to the polygon boundary?)
-//
-// The input geometries may consist of any number of points, polylines, and
-// polygons (collectively referred to as "shapes"). Shapes do not need to be
-// disjoint; they may overlap or intersect arbitrarily. The implementation is
-// designed to be fast for both simple and complex geometries.
-type EdgeQuery struct {
-	index  *ShapeIndex
-	opts   *queryOptions
-	target distanceTarget
-
-	// True if opts.maxError must be subtracted from ShapeIndex cell distances
-	// in order to ensure that such distances are measured conservatively. This
-	// is true only if the target takes advantage of maxError in order to
-	// return faster results, and 0 < maxError < distanceLimit.
-	useConservativeCellDistance bool
-
-	// The decision about whether to use the brute force algorithm is based on
-	// counting the total number of edges in the index. However if the index
-	// contains a large number of shapes, this in itself might take too long.
-	// So instead we only count edges up to (maxBruteForceIndexSize() + 1)
-	// for the current target type (stored as indexNumEdgesLimit).
-	indexNumEdges      int
-	indexNumEdgesLimit int
-
-	// The distance beyond which we can safely ignore further candidate edges.
-	// (Candidates that are exactly at the limit are ignored; this is more
-	// efficient for UpdateMinDistance and should not affect clients since
-	// distance measurements have a small amount of error anyway.)
-	//
-	// Initially this is the same as the maximum distance specified by the user,
-	// but it can also be updated by the algorithm (see maybeAddResult).
-	distanceLimit distance
-
-	// The current set of results of the query.
-	results []EdgeQueryResult
-
-	// This field is true when duplicates must be avoided explicitly. This
-	// is achieved by maintaining a separate set keyed by (shapeID, edgeID)
-	// only, and checking whether each edge is in that set before computing the
-	// distance to it.
-	avoidDuplicates bool
-
-	// testedEdges tracks the set of shape and edges that have already been tested.
-	testedEdges map[ShapeEdgeID]uint32
-}
-
-// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the
-// closest edge(s) to a given Point, Edge, Cell, or geometry collection.
-//
-// You can find either the k closest edges, or all edges within a given
-// radius, or both (i.e., the k closest edges up to a given maximum radius).
-// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in
-// the options.
-//
-// By default *all* edges are returned, so you should always specify either
-// MaxResults or DistanceLimit options or both.
-//
-// Note that by default, distances are measured to the boundary and interior
-// of polygons. For example, if a point is inside a polygon then its distance
-// is zero. To change this behavior, set the IncludeInteriors option to false.
-//
-// If you only need to test whether the distance is above or below a given
-// threshold (e.g., 10 km), you can use the IsDistanceLess() method.  This is
-// much faster than actually calculating the distance with FindEdge,
-// since the implementation can stop as soon as it can prove that the minimum
-// distance is either above or below the threshold.
-func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
-	if opts == nil {
-		opts = NewClosestEdgeQueryOptions()
-	}
-	return &EdgeQuery{
-		testedEdges: make(map[ShapeEdgeID]uint32),
-		index:       index,
-		opts:        opts.common,
-	}
-}
-
-// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the
-// furthest edge(s) to a given Point, Edge, Cell, or geometry collection.
-//
-// The furthest edge is defined as the one which maximizes the
-// distance from any point on that edge to any point on the target geometry.
-//
-// Similar to the example in NewClosestEdgeQuery, to find the 5 furthest edges
-// from a given Point:
-func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
-	if opts == nil {
-		opts = NewFurthestEdgeQueryOptions()
-	}
-	return &EdgeQuery{
-		testedEdges: make(map[ShapeEdgeID]uint32),
-		index:       index,
-		opts:        opts.common,
-	}
-}
-
-// FindEdges returns the edges for the given target that satisfy the current options.
-//
-// Note that if opts.IncludeInteriors is true, the results may include some
-// entries with edge_id == -1. This indicates that the target intersects
-// the indexed polygon with the given ShapeID.
-func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult {
-	return e.findEdges(target, e.opts)
-}
-
-// Distance reports the distance to the target. If the index or target is empty,
-// returns the EdgeQuery's maximal sentinel.
-//
-// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the
-// distance against a threshold value, since it is often much faster.
-func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
-	return e.findEdge(target, e.opts).Distance()
-}
-
-// IsDistanceLess reports if the distance to target is less than the given limit.
-//
-// This method is usually much faster than Distance(), since it is much
-// less work to determine whether the minimum distance is above or below a
-// threshold than it is to calculate the actual minimum distance.
-//
-// If you wish to check if the distance is less than or equal to the limit, use:
-//
-//	query.IsDistanceLess(target, limit.Successor())
-//
-func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
-	opts := e.opts
-	opts = opts.MaxResults(1).
-		DistanceLimit(limit).
-		MaxError(s1.StraightChordAngle)
-	return !e.findEdge(target, opts).IsEmpty()
-}
-
-// IsDistanceGreater reports if the distance to target is greater than limit.
-//
-// This method is usually much faster than Distance, since it is much
-// less work to determine whether the maximum distance is above or below a
-// threshold than it is to calculate the actual maximum distance.
-// If you wish to check if the distance is less than or equal to the limit, use:
-//
-//	query.IsDistanceGreater(target, limit.Predecessor())
-//
-func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
-	return e.IsDistanceLess(target, limit)
-}
-
-// IsConservativeDistanceLessOrEqual reports if the distance to target is less
-// or equal to the limit, where the limit has been expanded by the maximum error
-// for the distance calculation.
-//
-// For example, suppose that we want to test whether two geometries might
-// intersect each other after they are snapped together using Builder
-// (using the IdentitySnapFunction with a given "snap radius").  Since
-// Builder uses exact distance predicates (s2predicates), we need to
-// measure the distance between the two geometries conservatively.  If the
-// distance is definitely greater than "snap radius", then the geometries
-// are guaranteed to not intersect after snapping.
-func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
-	return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit)))
-}
-
-// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is greater
-// than or equal to the given limit with some small tolerance.
-func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
-	return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit)))
-}
-
-// findEdges returns the closest edges to the given target that satisfy the given options.
-//
-// Note that if opts.includeInteriors is true, the results may include some
-// entries with edgeID == -1. This indicates that the target intersects the
-// indexed polygon with the given shapeID.
-func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult {
-	e.findEdgesInternal(target, opts)
-	// TODO(roberts): Revisit this if there is a heap or other sorted and
-	// uniquing datastructure we can use instead of just a slice.
-	e.results = sortAndUniqueResults(e.results)
-	if len(e.results) > e.opts.maxResults {
-		e.results = e.results[:e.opts.maxResults]
-	}
-	return e.results
-}
-
-func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult {
-	if len(results) <= 1 {
-		return results
-	}
-	sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
-	j := 0
-	for i := 1; i < len(results); i++ {
-		if results[j] == results[i] {
-			continue
-		}
-		j++
-		results[j] = results[i]
-	}
-	return results[:j+1]
-}
-
-// findEdge is a convenience method that returns exactly one edge, and if no
-// edges satisfy the given search criteria, then a default Result is returned.
-//
-// This is primarily to ease the usage of a number of the methods in the DistanceTargets
-// and in EdgeQuery.
-func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult {
-	opts.MaxResults(1)
-	e.findEdges(target, opts)
-	if len(e.results) > 0 {
-		return e.results[0]
-	}
-
-	return newEdgeQueryResult(target)
-}
-
-// findEdgesInternal does the actual work for find edges that match the given options.
-func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) {
-	e.target = target
-	e.opts = opts
-
-	e.testedEdges = make(map[ShapeEdgeID]uint32)
-	e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit)
-	e.results = make([]EdgeQueryResult, 0)
-
-	if e.distanceLimit == target.distance().zero() {
-		return
-	}
-
-	if opts.includeInteriors {
-		shapeIDs := map[int32]struct{}{}
-		e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool {
-			shapeIDs[e.index.idForShape(containingShape)] = struct{}{}
-			return len(shapeIDs) < opts.maxResults
-		})
-		for shapeID := range shapeIDs {
-			e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1})
-		}
-
-		if e.distanceLimit == target.distance().zero() {
-			return
-		}
-	}
-
-	// If maxError > 0 and the target takes advantage of this, then we may
-	// need to adjust the distance estimates to ShapeIndex cells to ensure
-	// that they are always a lower bound on the true distance. For example,
-	// suppose max_distance == 100, maxError == 30, and we compute the distance
-	// to the target from some cell C0 as d(C0) == 80. Then because the target
-	// takes advantage of maxError, the true distance could be as low as 50.
-	// In order not to miss edges contained by such cells, we need to subtract
-	// maxError from the distance estimates. This behavior is controlled by
-	// the useConservativeCellDistance flag.
-	//
-	// However there is one important case where this adjustment is not
-	// necessary, namely when distanceLimit < maxError, This is because
-	// maxError only affects the algorithm once at least maxEdges edges
-	// have been found that satisfy the given distance limit. At that point,
-	// maxError is subtracted from distanceLimit in order to ensure that
-	// any further matches are closer by at least that amount. But when
-	// distanceLimit < maxError, this reduces the distance limit to 0,
-	// i.e. all remaining candidate cells and edges can safely be discarded.
-	// (This is how IsDistanceLess() and friends are implemented.)
-	targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() &&
-		e.target.setMaxError(opts.maxError)
-
-	// Note that we can't compare maxError and distanceLimit directly
-	// because one is a Delta and one is a Distance. Instead we subtract them.
-	e.useConservativeCellDistance = targetUsesMaxError &&
-		(e.distanceLimit == target.distance().infinity() ||
-			target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError))))
-
-	// Use the brute force algorithm if the index is small enough. To avoid
-	// spending too much time counting edges when there are many shapes, we stop
-	// counting once there are too many edges. We may need to recount the edges
-	// if we later see a target with a larger brute force edge threshold.
-	minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1
-	if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit {
-		e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges)
-		e.indexNumEdgesLimit = minOptimizedEdges
-	}
-
-	if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges {
-		// The brute force algorithm already considers each edge exactly once.
-		e.avoidDuplicates = false
-		e.findEdgesBruteForce()
-	} else {
-		// If the target takes advantage of maxError then we need to avoid
-		// duplicate edges explicitly. (Otherwise it happens automatically.)
-		e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1
-
-		// TODO(roberts): Uncomment when optimized is completed.
-		e.findEdgesBruteForce()
-		//e.findEdgesOptimized()
-	}
-}
-
-func (e *EdgeQuery) addResult(r EdgeQueryResult) {
-	e.results = append(e.results, r)
-	if e.opts.maxResults == 1 {
-		// Optimization for the common case where only the closest edge is wanted.
-		e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError))
-	}
-	// TODO(roberts): Add the other if/else cases when a different data structure
-	// is used for the results.
-}
-
-func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) {
-	if _, ok := e.testedEdges[ShapeEdgeID{e.index.idForShape(shape), edgeID}]; e.avoidDuplicates && !ok {
-		return
-	}
-	edge := shape.Edge(int(edgeID))
-	dist := e.distanceLimit
-
-	if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok {
-		e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID})
-	}
-}
-
-func (e *EdgeQuery) findEdgesBruteForce() {
-	// Range over all shapes in the index. Does order matter here? if so
-	// switch to for i = 0 .. n?
-	for _, shape := range e.index.shapes {
-		// TODO(roberts): can this happen if we are only ranging over current entries?
-		if shape == nil {
-			continue
-		}
-		for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ {
-			e.maybeAddResult(shape, edgeID)
-		}
-	}
-}
-
-// TODO(roberts): Remaining pieces
-// Add clear/reset/re-init method to empty out the state of the query.
-// findEdgesOptimized and related methods.
-// GetEdge
-// Project

+ 0 - 167
vendor/github.com/golang/geo/s2/edge_tessellator.go

@@ -1,167 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	// MinTessellationTolerance is the minimum supported tolerance (which
-	// corresponds to a distance less than 1 micrometer on the Earth's
-	// surface, but is still much larger than the expected projection and
-	// interpolation errors).
-	MinTessellationTolerance s1.Angle = 1e-13
-)
-
-// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into
-// a chain of spherical geodesic edges such that the maximum distance between
-// the original edge and the geodesic edge chain is at most the requested
-// tolerance. Similarly, it can convert a spherical geodesic edge into a chain
-// of edges in a given 2D projection such that the maximum distance between the
-// geodesic edge and the chain of projected edges is at most the requested tolerance.
-//
-//   Method      | Input                  | Output
-//   ------------|------------------------|-----------------------
-//   Projected   | S2 geodesics           | Planar projected edges
-//   Unprojected | Planar projected edges | S2 geodesics
-type EdgeTessellator struct {
-	projection   Projection
-	tolerance    s1.ChordAngle
-	wrapDistance r2.Point
-}
-
-// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance.
-func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator {
-	return &EdgeTessellator{
-		projection:   p,
-		tolerance:    s1.ChordAngleFromAngle(maxAngle(tolerance, MinTessellationTolerance)),
-		wrapDistance: p.WrapDistance(),
-	}
-}
-
-// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges
-// in the given projection and returns the corresponding vertices.
-//
-// If the given projection has one or more coordinate axes that wrap, then
-// every vertex's coordinates will be as close as possible to the previous
-// vertex's coordinates. Note that this may yield vertices whose
-// coordinates are outside the usual range. For example, tessellating the
-// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190).
-func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point {
-	pa := e.projection.Project(a)
-	if len(vertices) == 0 {
-		vertices = []r2.Point{pa}
-	} else {
-		pa = e.wrapDestination(vertices[len(vertices)-1], pa)
-	}
-
-	pb := e.wrapDestination(pa, e.projection.Project(b))
-	return e.appendProjected(pa, a, pb, b, vertices)
-}
-
-// appendProjected splits a geodesic edge AB as necessary and returns the
-// projected vertices appended to the given vertices.
-//
-// The maximum recursion depth is (math.Pi / MinTessellationTolerance) < 45
-func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []r2.Point) []r2.Point {
-	// It's impossible to robustly test whether a projected edge is close enough
-	// to a geodesic edge without knowing the details of the projection
-	// function, but the following heuristic works well for a wide range of map
-	// projections. The idea is simply to test whether the midpoint of the
-	// projected edge is close enough to the midpoint of the geodesic edge.
-	//
-	// This measures the distance between the two edges by treating them as
-	// parametric curves rather than geometric ones. The problem with
-	// measuring, say, the minimum distance from the projected midpoint to the
-	// geodesic edge is that this is a lower bound on the value we want, because
-	// the maximum separation between the two curves is generally not attained
-	// at the midpoint of the projected edge. The distance between the curve
-	// midpoints is at least an upper bound on the distance from either midpoint
-	// to opposite curve. It's not necessarily an upper bound on the maximum
-	// distance between the two curves, but it is a powerful requirement because
-	// it demands that the two curves stay parametrically close together. This
-	// turns out to be much more robust with respect for projections with
-	// singularities (e.g., the North and South poles in the rectangular and
-	// Mercator projections) because the curve parameterization speed changes
-	// rapidly near such singularities.
-	mid := Point{a.Add(b.Vector).Normalize()}
-	testMid := e.projection.Unproject(e.projection.Interpolate(0.5, pa, pb))
-
-	if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
-		return append(vertices, pb)
-	}
-
-	pmid := e.wrapDestination(pa, e.projection.Project(mid))
-	vertices = e.appendProjected(pa, a, pmid, mid, vertices)
-	return e.appendProjected(pmid, mid, pb, b, vertices)
-}
-
-// AppendUnprojected converts the planar edge AB in the given projection to a chain of
-// spherical geodesic edges and returns the vertices.
-//
-// Note that to construct a Loop, you must eliminate the duplicate first and last
-// vertex. Note also that if the given projection involves coordinate wrapping
-// (e.g. across the 180 degree meridian) then the first and last vertices may not
-// be exactly the same.
-func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point {
-	pb2 := e.wrapDestination(pa, pb)
-	a := e.projection.Unproject(pa)
-	b := e.projection.Unproject(pb)
-
-	if len(vertices) == 0 {
-		vertices = []Point{a}
-	}
-
-	// Note that coordinate wrapping can create a small amount of error. For
-	// example in the edge chain "0:-175, 0:179, 0:-177", the first edge is
-	// transformed into "0:-175, 0:-181" while the second is transformed into
-	// "0:179, 0:183". The two coordinate pairs for the middle vertex
-	// ("0:-181" and "0:179") may not yield exactly the same S2Point.
-	return e.appendUnprojected(pa, a, pb2, b, vertices)
-}
-
-// appendUnprojected interpolates a projected edge and appends the corresponding
-// points on the sphere.
-func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []Point) []Point {
-	pmid := e.projection.Interpolate(0.5, pa, pb)
-	mid := e.projection.Unproject(pmid)
-	testMid := Point{a.Add(b.Vector).Normalize()}
-
-	if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
-		return append(vertices, b)
-	}
-
-	vertices = e.appendUnprojected(pa, a, pmid, mid, vertices)
-	return e.appendUnprojected(pmid, mid, pb, b, vertices)
-}
-
-// wrapDestination returns the coordinates of the edge destination wrapped if
-// necessary to obtain the shortest edge.
-func (e *EdgeTessellator) wrapDestination(pa, pb r2.Point) r2.Point {
-	x := pb.X
-	y := pb.Y
-	// The code below ensures that pb is unmodified unless wrapping is required.
-	if e.wrapDistance.X > 0 && math.Abs(x-pa.X) > 0.5*e.wrapDistance.X {
-		x = pa.X + math.Remainder(x-pa.X, e.wrapDistance.X)
-	}
-	if e.wrapDistance.Y > 0 && math.Abs(y-pa.Y) > 0.5*e.wrapDistance.Y {
-		y = pa.Y + math.Remainder(y-pa.Y, e.wrapDistance.Y)
-	}
-	return r2.Point{x, y}
-}

+ 0 - 237
vendor/github.com/golang/geo/s2/encode.go

@@ -1,237 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"encoding/binary"
-	"io"
-)
-
-const (
-	// encodingVersion is the current version of the encoding
-	// format that is compatible with C++ and other S2 libraries.
-	encodingVersion = int8(1)
-
-	// encodingCompressedVersion is the current version of the
-	// compressed format.
-	encodingCompressedVersion = int8(4)
-)
-
-// encoder handles the specifics of encoding for S2 types.
-type encoder struct {
-	w   io.Writer // the real writer passed to Encode
-	err error
-}
-
-func (e *encoder) writeUvarint(x uint64) {
-	if e.err != nil {
-		return
-	}
-	var buf [binary.MaxVarintLen64]byte
-	n := binary.PutUvarint(buf[:], x)
-	_, e.err = e.w.Write(buf[:n])
-}
-
-func (e *encoder) writeBool(x bool) {
-	if e.err != nil {
-		return
-	}
-	var val int8
-	if x {
-		val = 1
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, val)
-}
-
-func (e *encoder) writeInt8(x int8) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeInt16(x int16) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeInt32(x int32) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeInt64(x int64) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeUint8(x uint8) {
-	if e.err != nil {
-		return
-	}
-	_, e.err = e.w.Write([]byte{x})
-}
-
-func (e *encoder) writeUint32(x uint32) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeUint64(x uint64) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeFloat32(x float32) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-func (e *encoder) writeFloat64(x float64) {
-	if e.err != nil {
-		return
-	}
-	e.err = binary.Write(e.w, binary.LittleEndian, x)
-}
-
-type byteReader interface {
-	io.Reader
-	io.ByteReader
-}
-
-// byteReaderAdapter embellishes an io.Reader with a ReadByte method,
-// so that it implements the io.ByteReader interface.
-type byteReaderAdapter struct {
-	io.Reader
-}
-
-func (b byteReaderAdapter) ReadByte() (byte, error) {
-	buf := []byte{0}
-	_, err := io.ReadFull(b, buf)
-	return buf[0], err
-}
-
-func asByteReader(r io.Reader) byteReader {
-	if br, ok := r.(byteReader); ok {
-		return br
-	}
-	return byteReaderAdapter{r}
-}
-
-type decoder struct {
-	r   byteReader // the real reader passed to Decode
-	err error
-}
-
-func (d *decoder) readBool() (x bool) {
-	if d.err != nil {
-		return
-	}
-	var val int8
-	d.err = binary.Read(d.r, binary.LittleEndian, &val)
-	return val == 1
-}
-
-func (d *decoder) readInt8() (x int8) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readInt16() (x int16) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readInt32() (x int32) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readInt64() (x int64) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readUint8() (x uint8) {
-	if d.err != nil {
-		return
-	}
-	x, d.err = d.r.ReadByte()
-	return
-}
-
-func (d *decoder) readUint32() (x uint32) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readUint64() (x uint64) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readFloat32() (x float32) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readFloat64() (x float64) {
-	if d.err != nil {
-		return
-	}
-	d.err = binary.Read(d.r, binary.LittleEndian, &x)
-	return
-}
-
-func (d *decoder) readUvarint() (x uint64) {
-	if d.err != nil {
-		return
-	}
-	x, d.err = binary.ReadUvarint(d.r)
-	return
-}

+ 0 - 143
vendor/github.com/golang/geo/s2/interleave.go

@@ -1,143 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-/*
-The lookup table below can convert a sequence of interleaved 8 bits into
-non-interleaved 4 bits. The table can convert both odd and even bits at the
-same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6),
-while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7).
-
-The lookup table below was generated using the following python code:
-
-	def deinterleave(bits):
-	  if bits == 0: return 0
-	  if bits < 4: return 1
-	  return deinterleave(bits / 4) * 2 + deinterleave(bits & 3)
-
-	for i in range(256): print "0x%x," % deinterleave(i),
-*/
-var deinterleaveLookup = [256]uint32{
-	0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3,
-	0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3,
-	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
-	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
-	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
-	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
-	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
-	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
-
-	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
-	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-
-	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
-	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-
-	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
-	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
-	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
-}
-
-// deinterleaveUint32 decodes the interleaved values.
-func deinterleaveUint32(code uint64) (uint32, uint32) {
-	x := (deinterleaveLookup[code&0x55]) |
-		(deinterleaveLookup[(code>>8)&0x55] << 4) |
-		(deinterleaveLookup[(code>>16)&0x55] << 8) |
-		(deinterleaveLookup[(code>>24)&0x55] << 12) |
-		(deinterleaveLookup[(code>>32)&0x55] << 16) |
-		(deinterleaveLookup[(code>>40)&0x55] << 20) |
-		(deinterleaveLookup[(code>>48)&0x55] << 24) |
-		(deinterleaveLookup[(code>>56)&0x55] << 28)
-	y := (deinterleaveLookup[code&0xaa]) |
-		(deinterleaveLookup[(code>>8)&0xaa] << 4) |
-		(deinterleaveLookup[(code>>16)&0xaa] << 8) |
-		(deinterleaveLookup[(code>>24)&0xaa] << 12) |
-		(deinterleaveLookup[(code>>32)&0xaa] << 16) |
-		(deinterleaveLookup[(code>>40)&0xaa] << 20) |
-		(deinterleaveLookup[(code>>48)&0xaa] << 24) |
-		(deinterleaveLookup[(code>>56)&0xaa] << 28)
-	return x, y
-}
-
-var interleaveLookup = [256]uint64{
-	0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
-	0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
-	0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
-	0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
-	0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
-	0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
-	0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
-	0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
-
-	0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
-	0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
-	0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
-	0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
-	0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
-	0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
-	0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
-	0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
-
-	0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
-	0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
-	0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
-	0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
-	0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
-	0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
-	0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
-	0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
-
-	0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
-	0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
-	0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
-	0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
-	0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
-	0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
-	0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
-	0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555,
-}
-
-// interleaveUint32 interleaves the given arguments into the return value.
-//
-// The 0-bit in val0 will be the 0-bit in the return value.
-// The 0-bit in val1 will be the 1-bit in the return value.
-// The 1-bit of val0 will be the 2-bit in the return value, and so on.
-func interleaveUint32(x, y uint32) uint64 {
-	return (interleaveLookup[x&0xff]) |
-		(interleaveLookup[(x>>8)&0xff] << 16) |
-		(interleaveLookup[(x>>16)&0xff] << 32) |
-		(interleaveLookup[x>>24] << 48) |
-		(interleaveLookup[y&0xff] << 1) |
-		(interleaveLookup[(y>>8)&0xff] << 17) |
-		(interleaveLookup[(y>>16)&0xff] << 33) |
-		(interleaveLookup[y>>24] << 49)
-}

+ 0 - 101
vendor/github.com/golang/geo/s2/latlng.go

@@ -1,101 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"math"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	northPoleLat = s1.Angle(math.Pi/2) * s1.Radian
-	southPoleLat = -northPoleLat
-)
-
-// LatLng represents a point on the unit sphere as a pair of angles.
-type LatLng struct {
-	Lat, Lng s1.Angle
-}
-
-// LatLngFromDegrees returns a LatLng for the coordinates given in degrees.
-func LatLngFromDegrees(lat, lng float64) LatLng {
-	return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree}
-}
-
-// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π].
-func (ll LatLng) IsValid() bool {
-	return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi
-}
-
-// Normalized returns the normalized version of the LatLng,
-// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π].
-func (ll LatLng) Normalized() LatLng {
-	lat := ll.Lat
-	if lat > northPoleLat {
-		lat = northPoleLat
-	} else if lat < southPoleLat {
-		lat = southPoleLat
-	}
-	lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian
-	return LatLng{lat, lng}
-}
-
-func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) }
-
-// Distance returns the angle between two LatLngs.
-func (ll LatLng) Distance(ll2 LatLng) s1.Angle {
-	// Haversine formula, as used in C++ S2LatLng::GetDistance.
-	lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians()
-	lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians()
-	dlat := math.Sin(0.5 * (lat2 - lat1))
-	dlng := math.Sin(0.5 * (lng2 - lng1))
-	x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2)
-	return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian
-}
-
-// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude
-// functions. Let's see if that's really necessary before exposing the same functionality.
-
-func latitude(p Point) s1.Angle {
-	return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian
-}
-
-func longitude(p Point) s1.Angle {
-	return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
-}
-
-// PointFromLatLng returns an Point for the given LatLng.
-// The maximum error in the result is 1.5 * dblEpsilon. (This does not
-// include the error of converting degrees, E5, E6, or E7 into radians.)
-func PointFromLatLng(ll LatLng) Point {
-	phi := ll.Lat.Radians()
-	theta := ll.Lng.Radians()
-	cosphi := math.Cos(phi)
-	return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
-}
-
-// LatLngFromPoint returns an LatLng for a given Point.
-func LatLngFromPoint(p Point) LatLng {
-	return LatLng{latitude(p), longitude(p)}
-}
-
-// ApproxEqual reports whether the latitude and longitude of the two LatLngs
-// are the same up to a small tolerance.
-func (ll LatLng) ApproxEqual(other LatLng) bool {
-	return ll.Lat.ApproxEqual(other.Lat) && ll.Lng.ApproxEqual(other.Lng)
-}

+ 0 - 175
vendor/github.com/golang/geo/s2/lexicon.go

@@ -1,175 +0,0 @@
-// Copyright 2020 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"encoding/binary"
-	"hash/adler32"
-	"math"
-	"sort"
-)
-
-// TODO(roberts): If any of these are worth making public, change the
-// method signatures and type names.
-
-// emptySetID represents the last ID that will ever be generated.
-// (Non-negative IDs are reserved for singleton sets.)
-var emptySetID = int32(math.MinInt32)
-
-// idSetLexicon compactly represents a set of non-negative
-// integers such as array indices ("ID sets"). It is especially suitable when
-// either (1) there are many duplicate sets, or (2) there are many singleton
-// or empty sets. See also sequenceLexicon.
-//
-// Each distinct ID set is mapped to a 32-bit integer. Empty and singleton
-// sets take up no additional space; the set itself is represented
-// by the unique ID assigned to the set. Duplicate sets are automatically
-// eliminated. Note also that ID sets are referred to using 32-bit integers
-// rather than pointers.
-type idSetLexicon struct {
-	idSets *sequenceLexicon
-}
-
-func newIDSetLexicon() *idSetLexicon {
-	return &idSetLexicon{
-		idSets: newSequenceLexicon(),
-	}
-}
-
-// add adds the given set of integers to the lexicon if it is not already
-// present, and return the unique ID for this set. The values are automatically
-// sorted and duplicates are removed.
-//
-// The primary difference between this and sequenceLexicon are:
-// 1. Empty and singleton sets are represented implicitly; they use no space.
-// 2. Sets are represented rather than sequences; the ordering of values is
-//    not important and duplicates are removed.
-// 3. The values must be 32-bit non-negative integers only.
-func (l *idSetLexicon) add(ids ...int32) int32 {
-	// Empty sets have a special ID chosen not to conflict with other IDs.
-	if len(ids) == 0 {
-		return emptySetID
-	}
-
-	// Singleton sets are represented by their element.
-	if len(ids) == 1 {
-		return ids[0]
-	}
-
-	// Canonicalize the set by sorting and removing duplicates.
-	//
-	// Creates a new slice in order to not alter the supplied values.
-	set := uniqueInt32s(ids)
-
-	// Non-singleton sets are represented by the bitwise complement of the ID
-	// returned by the sequenceLexicon
-	return ^l.idSets.add(set)
-}
-
-// idSet returns the set of integers corresponding to an ID returned by add.
-func (l *idSetLexicon) idSet(setID int32) []int32 {
-	if setID >= 0 {
-		return []int32{setID}
-	}
-	if setID == emptySetID {
-		return []int32{}
-	}
-
-	return l.idSets.sequence(^setID)
-}
-
-func (l *idSetLexicon) clear() {
-	l.idSets.clear()
-}
-
-// sequenceLexicon compactly represents a sequence of values (e.g., tuples).
-// It automatically eliminates duplicates slices, and maps the remaining
-// sequences to sequentially increasing integer IDs. See also idSetLexicon.
-//
-// Each distinct sequence is mapped to a 32-bit integer.
-type sequenceLexicon struct {
-	values []int32
-	begins []uint32
-
-	// idSet is a mapping of a sequence hash to sequence index in the lexicon.
-	idSet map[uint32]int32
-}
-
-func newSequenceLexicon() *sequenceLexicon {
-	return &sequenceLexicon{
-		begins: []uint32{0},
-		idSet:  make(map[uint32]int32),
-	}
-}
-
-// clears all data from the lexicon.
-func (l *sequenceLexicon) clear() {
-	l.values = nil
-	l.begins = []uint32{0}
-	l.idSet = make(map[uint32]int32)
-}
-
-// add adds the given value to the lexicon if it is not already present, and
-// returns its ID. IDs are assigned sequentially starting from zero.
-func (l *sequenceLexicon) add(ids []int32) int32 {
-	if id, ok := l.idSet[hashSet(ids)]; ok {
-		return id
-	}
-	for _, v := range ids {
-		l.values = append(l.values, v)
-	}
-	l.begins = append(l.begins, uint32(len(l.values)))
-
-	id := int32(len(l.begins)) - 2
-	l.idSet[hashSet(ids)] = id
-
-	return id
-}
-
-// sequence returns the original sequence of values for the given ID.
-func (l *sequenceLexicon) sequence(id int32) []int32 {
-	return l.values[l.begins[id]:l.begins[id+1]]
-}
-
-// size reports the number of value sequences in the lexicon.
-func (l *sequenceLexicon) size() int {
-	// Subtract one because the list of begins starts out with the first element set to 0.
-	return len(l.begins) - 1
-}
-
-// hash returns a hash of this sequence of int32s.
-func hashSet(s []int32) uint32 {
-	// TODO(roberts): We just need a way to nicely hash all the values down to
-	// a 32-bit value. To ensure no unnecessary dependencies we use the core
-	// library types available to do this. Is there a better option?
-	a := adler32.New()
-	binary.Write(a, binary.LittleEndian, s)
-	return a.Sum32()
-}
-
-// uniqueInt32s returns the sorted and uniqued set of int32s from the input.
-func uniqueInt32s(in []int32) []int32 {
-	var vals []int32
-	m := make(map[int32]bool)
-	for _, i := range in {
-		if m[i] {
-			continue
-		}
-		m[i] = true
-		vals = append(vals, i)
-	}
-	sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] })
-	return vals
-}

+ 0 - 1816
vendor/github.com/golang/geo/s2/loop.go

@@ -1,1816 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// Loop represents a simple spherical polygon. It consists of a sequence
-// of vertices where the first vertex is implicitly connected to the
-// last. All loops are defined to have a CCW orientation, i.e. the interior of
-// the loop is on the left side of the edges. This implies that a clockwise
-// loop enclosing a small area is interpreted to be a CCW loop enclosing a
-// very large area.
-//
-// Loops are not allowed to have any duplicate vertices (whether adjacent or
-// not).  Non-adjacent edges are not allowed to intersect, and furthermore edges
-// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be
-// antipodal). Loops must have at least 3 vertices (except for the "empty" and
-// "full" loops discussed below).
-//
-// There are two special loops: the "empty" loop contains no points and the
-// "full" loop contains all points. These loops do not have any edges, but to
-// preserve the invariant that every loop can be represented as a vertex
-// chain, they are defined as having exactly one vertex each (see EmptyLoop
-// and FullLoop).
-type Loop struct {
-	vertices []Point
-
-	// originInside keeps a precomputed value whether this loop contains the origin
-	// versus computing from the set of vertices every time.
-	originInside bool
-
-	// depth is the nesting depth of this Loop if it is contained by a Polygon
-	// or other shape and is used to determine if this loop represents a hole
-	// or a filled in portion.
-	depth int
-
-	// bound is a conservative bound on all points contained by this loop.
-	// If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
-	bound Rect
-
-	// Since bound is not exact, it is possible that a loop A contains
-	// another loop B whose bounds are slightly larger. subregionBound
-	// has been expanded sufficiently to account for this error, i.e.
-	// if A.Contains(B), then A.subregionBound.Contains(B.bound).
-	subregionBound Rect
-
-	// index is the spatial index for this Loop.
-	index *ShapeIndex
-}
-
-// LoopFromPoints constructs a loop from the given points.
-func LoopFromPoints(pts []Point) *Loop {
-	l := &Loop{
-		vertices: pts,
-	}
-
-	l.initOriginAndBound()
-	return l
-}
-
-// LoopFromCell constructs a loop corresponding to the given cell.
-//
-// Note that the loop and cell *do not* contain exactly the same set of
-// points, because Loop and Cell have slightly different definitions of
-// point containment. For example, a Cell vertex is contained by all
-// four neighboring Cells, but it is contained by exactly one of four
-// Loops constructed from those cells. As another example, the cell
-// coverings of cell and LoopFromCell(cell) will be different, because the
-// loop contains points on its boundary that actually belong to other cells
-// (i.e., the covering will include a layer of neighboring cells).
-func LoopFromCell(c Cell) *Loop {
-	l := &Loop{
-		vertices: []Point{
-			c.Vertex(0),
-			c.Vertex(1),
-			c.Vertex(2),
-			c.Vertex(3),
-		},
-	}
-
-	l.initOriginAndBound()
-	return l
-}
-
-// These two points are used for the special Empty and Full loops.
-var (
-	emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}}
-	fullLoopPoint  = Point{r3.Vector{X: 0, Y: 0, Z: -1}}
-)
-
-// EmptyLoop returns a special "empty" loop.
-func EmptyLoop() *Loop {
-	return LoopFromPoints([]Point{emptyLoopPoint})
-}
-
-// FullLoop returns a special "full" loop.
-func FullLoop() *Loop {
-	return LoopFromPoints([]Point{fullLoopPoint})
-}
-
-// initOriginAndBound sets the origin containment for the given point and then calls
-// the initialization for the bounds objects and the internal index.
-func (l *Loop) initOriginAndBound() {
-	if len(l.vertices) < 3 {
-		// Check for the special "empty" and "full" loops (which have one vertex).
-		if !l.isEmptyOrFull() {
-			l.originInside = false
-			return
-		}
-
-		// This is the special empty or full loop, so the origin depends on if
-		// the vertex is in the southern hemisphere or not.
-		l.originInside = l.vertices[0].Z < 0
-	} else {
-		// Point containment testing is done by counting edge crossings starting
-		// at a fixed point on the sphere (OriginPoint). We need to know whether
-		// the reference point (OriginPoint) is inside or outside the loop before
-		// we can construct the ShapeIndex. We do this by first guessing that
-		// it is outside, and then seeing whether we get the correct containment
-		// result for vertex 1. If the result is incorrect, the origin must be
-		// inside the loop.
-		//
-		// A loop with consecutive vertices A,B,C contains vertex B if and only if
-		// the fixed vector R = B.Ortho is contained by the wedge ABC. The
-		// wedge is closed at A and open at C, i.e. the point B is inside the loop
-		// if A = R but not if C = R. This convention is required for compatibility
-		// with VertexCrossing. (Note that we can't use OriginPoint
-		// as the fixed vector because of the possibility that B == OriginPoint.)
-		l.originInside = false
-		v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1])
-		if v1Inside != l.ContainsPoint(l.vertices[1]) {
-			l.originInside = true
-		}
-	}
-
-	// We *must* call initBound before initializing the index, because
-	// initBound calls ContainsPoint which does a bounds check before using
-	// the index.
-	l.initBound()
-
-	// Create a new index and add us to it.
-	l.index = NewShapeIndex()
-	l.index.Add(l)
-}
-
-// initBound sets up the approximate bounding Rects for this loop.
-func (l *Loop) initBound() {
-	// Check for the special "empty" and "full" loops.
-	if l.isEmptyOrFull() {
-		if l.IsEmpty() {
-			l.bound = EmptyRect()
-		} else {
-			l.bound = FullRect()
-		}
-		l.subregionBound = l.bound
-		return
-	}
-
-	// The bounding rectangle of a loop is not necessarily the same as the
-	// bounding rectangle of its vertices. First, the maximal latitude may be
-	// attained along the interior of an edge. Second, the loop may wrap
-	// entirely around the sphere (e.g. a loop that defines two revolutions of a
-	// candy-cane stripe). Third, the loop may include one or both poles.
-	// Note that a small clockwise loop near the equator contains both poles.
-	bounder := NewRectBounder()
-	for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice
-		bounder.AddPoint(l.Vertex(i))
-	}
-	b := bounder.RectBound()
-
-	if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) {
-		b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()}
-	}
-	// If a loop contains the south pole, then either it wraps entirely
-	// around the sphere (full longitude range), or it also contains the
-	// north pole in which case b.Lng.IsFull() due to the test above.
-	// Either way, we only need to do the south pole containment test if
-	// b.Lng.IsFull().
-	if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) {
-		b.Lat.Lo = -math.Pi / 2
-	}
-	l.bound = b
-	l.subregionBound = ExpandForSubregions(l.bound)
-}
-
-// Validate checks whether this is a valid loop.
-func (l *Loop) Validate() error {
-	if err := l.findValidationErrorNoIndex(); err != nil {
-		return err
-	}
-
-	// Check for intersections between non-adjacent edges (including at vertices)
-	// TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this.
-	// return findAnyCrossing(l.index)
-
-	return nil
-}
-
-// findValidationErrorNoIndex reports whether this is not a valid loop, but
-// skips checks that would require a ShapeIndex to be built for the loop. This
-// is primarily used by Polygon to do validation so it doesn't trigger the
-// creation of unneeded ShapeIndices.
-func (l *Loop) findValidationErrorNoIndex() error {
-	// All vertices must be unit length.
-	for i, v := range l.vertices {
-		if !v.IsUnit() {
-			return fmt.Errorf("vertex %d is not unit length", i)
-		}
-	}
-
-	// Loops must have at least 3 vertices (except for empty and full).
-	if len(l.vertices) < 3 {
-		if l.isEmptyOrFull() {
-			return nil // Skip remaining tests.
-		}
-		return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices")
-	}
-
-	// Loops are not allowed to have any duplicate vertices or edge crossings.
-	// We split this check into two parts. First we check that no edge is
-	// degenerate (identical endpoints). Then we check that there are no
-	// intersections between non-adjacent edges (including at vertices). The
-	// second check needs the ShapeIndex, so it does not fall within the scope
-	// of this method.
-	for i, v := range l.vertices {
-		if v == l.Vertex(i+1) {
-			return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i)
-		}
-
-		// Antipodal vertices are not allowed.
-		if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other {
-			return fmt.Errorf("vertices %d and %d are antipodal", i,
-				(i+1)%len(l.vertices))
-		}
-	}
-
-	return nil
-}
-
-// Contains reports whether the region contained by this loop is a superset of the
-// region contained by the given other loop.
-func (l *Loop) Contains(o *Loop) bool {
-	// For a loop A to contain the loop B, all of the following must
-	// be true:
-	//
-	//  (1) There are no edge crossings between A and B except at vertices.
-	//
-	//  (2) At every vertex that is shared between A and B, the local edge
-	//      ordering implies that A contains B.
-	//
-	//  (3) If there are no shared vertices, then A must contain a vertex of B
-	//      and B must not contain a vertex of A. (An arbitrary vertex may be
-	//      chosen in each case.)
-	//
-	// The second part of (3) is necessary to detect the case of two loops whose
-	// union is the entire sphere, i.e. two loops that contains each other's
-	// boundaries but not each other's interiors.
-	if !l.subregionBound.Contains(o.bound) {
-		return false
-	}
-
-	// Special cases to handle either loop being empty or full.
-	if l.isEmptyOrFull() || o.isEmptyOrFull() {
-		return l.IsFull() || o.IsEmpty()
-	}
-
-	// Check whether there are any edge crossings, and also check the loop
-	// relationship at any shared vertices.
-	relation := &containsRelation{}
-	if hasCrossingRelation(l, o, relation) {
-		return false
-	}
-
-	// There are no crossings, and if there are any shared vertices then A
-	// contains B locally at each shared vertex.
-	if relation.foundSharedVertex {
-		return true
-	}
-
-	// Since there are no edge intersections or shared vertices, we just need to
-	// test condition (3) above. We can skip this test if we discovered that A
-	// contains at least one point of B while checking for edge crossings.
-	if !l.ContainsPoint(o.Vertex(0)) {
-		return false
-	}
-
-	// We still need to check whether (A union B) is the entire sphere.
-	// Normally this check is very cheap due to the bounding box precondition.
-	if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) &&
-		o.ContainsPoint(l.Vertex(0)) {
-		return false
-	}
-	return true
-}
-
-// Intersects reports whether the region contained by this loop intersects the region
-// contained by the other loop.
-func (l *Loop) Intersects(o *Loop) bool {
-	// Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B).
-	//
-	// This code is similar to Contains, but is optimized for the case
-	// where both loops enclose less than half of the sphere.
-	if !l.bound.Intersects(o.bound) {
-		return false
-	}
-
-	// Check whether there are any edge crossings, and also check the loop
-	// relationship at any shared vertices.
-	relation := &intersectsRelation{}
-	if hasCrossingRelation(l, o, relation) {
-		return true
-	}
-	if relation.foundSharedVertex {
-		return false
-	}
-
-	// Since there are no edge intersections or shared vertices, the loops
-	// intersect only if A contains B, B contains A, or the two loops contain
-	// each other's boundaries.  These checks are usually cheap because of the
-	// bounding box preconditions.  Note that neither loop is empty (because of
-	// the bounding box check above), so it is safe to access vertex(0).
-
-	// Check whether A contains B, or A and B contain each other's boundaries.
-	// (Note that A contains all the vertices of B in either case.)
-	if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() {
-		if l.ContainsPoint(o.Vertex(0)) {
-			return true
-		}
-	}
-	// Check whether B contains A.
-	if o.subregionBound.Contains(l.bound) {
-		if o.ContainsPoint(l.Vertex(0)) {
-			return true
-		}
-	}
-	return false
-}
-
-// Equal reports whether two loops have the same vertices in the same linear order
-// (i.e., cyclic rotations are not allowed).
-func (l *Loop) Equal(other *Loop) bool {
-	if len(l.vertices) != len(other.vertices) {
-		return false
-	}
-
-	for i, v := range l.vertices {
-		if v != other.Vertex(i) {
-			return false
-		}
-	}
-	return true
-}
-
-// BoundaryEqual reports whether the two loops have the same boundary. This is
-// true if and only if the loops have the same vertices in the same cyclic order
-// (i.e., the vertices may be cyclically rotated). The empty and full loops are
-// considered to have different boundaries.
-func (l *Loop) BoundaryEqual(o *Loop) bool {
-	if len(l.vertices) != len(o.vertices) {
-		return false
-	}
-
-	// Special case to handle empty or full loops.  Since they have the same
-	// number of vertices, if one loop is empty/full then so is the other.
-	if l.isEmptyOrFull() {
-		return l.IsEmpty() == o.IsEmpty()
-	}
-
-	// Loop through the vertices to find the first of ours that matches the
-	// starting vertex of the other loop. Use that offset to then 'align' the
-	// vertices for comparison.
-	for offset, vertex := range l.vertices {
-		if vertex == o.Vertex(0) {
-			// There is at most one starting offset since loop vertices are unique.
-			for i := 0; i < len(l.vertices); i++ {
-				if l.Vertex(i+offset) != o.Vertex(i) {
-					return false
-				}
-			}
-			return true
-		}
-	}
-	return false
-}
-
-// compareBoundary returns +1 if this loop contains the boundary of the other loop,
-// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two
-// loops cross. Shared edges are handled as follows:
-//
-//   If XY is a shared edge, define Reversed(XY) to be true if XY
-//     appears in opposite directions in both loops.
-//   Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole.
-//   (Intuitively, this checks whether this loop contains a vanishingly small region
-//   extending from the boundary of the other toward the interior of the polygon to
-//   which the other belongs.)
-//
-// This function is used for testing containment and intersection of
-// multi-loop polygons. Note that this method is not symmetric, since the
-// result depends on the direction of this loop but not on the direction of
-// the other loop (in the absence of shared edges).
-//
-// This requires that neither loop is empty, and if other loop IsFull, then it must not
-// be a hole.
-func (l *Loop) compareBoundary(o *Loop) int {
-	// The bounds must intersect for containment or crossing.
-	if !l.bound.Intersects(o.bound) {
-		return -1
-	}
-
-	// Full loops are handled as though the loop surrounded the entire sphere.
-	if l.IsFull() {
-		return 1
-	}
-	if o.IsFull() {
-		return -1
-	}
-
-	// Check whether there are any edge crossings, and also check the loop
-	// relationship at any shared vertices.
-	relation := newCompareBoundaryRelation(o.IsHole())
-	if hasCrossingRelation(l, o, relation) {
-		return 0
-	}
-	if relation.foundSharedVertex {
-		if relation.containsEdge {
-			return 1
-		}
-		return -1
-	}
-
-	// There are no edge intersections or shared vertices, so we can check
-	// whether A contains an arbitrary vertex of B.
-	if l.ContainsPoint(o.Vertex(0)) {
-		return 1
-	}
-	return -1
-}
-
-// ContainsOrigin reports true if this loop contains s2.OriginPoint().
-func (l *Loop) ContainsOrigin() bool {
-	return l.originInside
-}
-
-// ReferencePoint returns the reference point for this loop.
-func (l *Loop) ReferencePoint() ReferencePoint {
-	return OriginReferencePoint(l.originInside)
-}
-
-// NumEdges returns the number of edges in this shape.
-func (l *Loop) NumEdges() int {
-	if l.isEmptyOrFull() {
-		return 0
-	}
-	return len(l.vertices)
-}
-
-// Edge returns the endpoints for the given edge index.
-func (l *Loop) Edge(i int) Edge {
-	return Edge{l.Vertex(i), l.Vertex(i + 1)}
-}
-
-// NumChains reports the number of contiguous edge chains in the Loop.
-func (l *Loop) NumChains() int {
-	if l.IsEmpty() {
-		return 0
-	}
-	return 1
-}
-
-// Chain returns the i-th edge chain in the Shape.
-func (l *Loop) Chain(chainID int) Chain {
-	return Chain{0, l.NumEdges()}
-}
-
-// ChainEdge returns the j-th edge of the i-th edge chain.
-func (l *Loop) ChainEdge(chainID, offset int) Edge {
-	return Edge{l.Vertex(offset), l.Vertex(offset + 1)}
-}
-
-// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the
-// j-th edge of the Loop.
-func (l *Loop) ChainPosition(edgeID int) ChainPosition {
-	return ChainPosition{0, edgeID}
-}
-
-// Dimension returns the dimension of the geometry represented by this Loop.
-func (l *Loop) Dimension() int { return 2 }
-
-func (l *Loop) typeTag() typeTag { return typeTagNone }
-
-func (l *Loop) privateInterface() {}
-
-// IsEmpty reports true if this is the special empty loop that contains no points.
-func (l *Loop) IsEmpty() bool {
-	return l.isEmptyOrFull() && !l.ContainsOrigin()
-}
-
-// IsFull reports true if this is the special full loop that contains all points.
-func (l *Loop) IsFull() bool {
-	return l.isEmptyOrFull() && l.ContainsOrigin()
-}
-
-// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops.
-func (l *Loop) isEmptyOrFull() bool {
-	return len(l.vertices) == 1
-}
-
-// Vertices returns the vertices in the loop.
-func (l *Loop) Vertices() []Point {
-	return l.vertices
-}
-
-// RectBound returns a tight bounding rectangle. If the loop contains the point,
-// the bound also contains it.
-func (l *Loop) RectBound() Rect {
-	return l.bound
-}
-
-// CapBound returns a bounding cap that may have more padding than the corresponding
-// RectBound. The bound is conservative such that if the loop contains a point P,
-// the bound also contains it.
-func (l *Loop) CapBound() Cap {
-	return l.bound.CapBound()
-}
-
-// Vertex returns the vertex for the given index. For convenience, the vertex indices
-// wrap automatically for methods that do index math such as Edge.
-// i.e., Vertex(NumEdges() + n) is the same as Vertex(n).
-func (l *Loop) Vertex(i int) Point {
-	return l.vertices[i%len(l.vertices)]
-}
-
-// OrientedVertex returns the vertex in reverse order if the loop represents a polygon
-// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where
-// n == len(vertices). This ensures that the interior of the polygon is always to
-// the left of the vertex chain.
-//
-// This requires: 0 <= i < 2 * len(vertices)
-func (l *Loop) OrientedVertex(i int) Point {
-	j := i - len(l.vertices)
-	if j < 0 {
-		j = i
-	}
-	if l.IsHole() {
-		j = len(l.vertices) - 1 - j
-	}
-	return l.Vertex(j)
-}
-
-// NumVertices returns the number of vertices in this loop.
-func (l *Loop) NumVertices() int {
-	return len(l.vertices)
-}
-
-// bruteForceContainsPoint reports if the given point is contained by this loop.
-// This method does not use the ShapeIndex, so it is only preferable below a certain
-// size of loop.
-func (l *Loop) bruteForceContainsPoint(p Point) bool {
-	origin := OriginPoint()
-	inside := l.originInside
-	crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0))
-	for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice
-		inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i))
-	}
-	return inside
-}
-
-// ContainsPoint returns true if the loop contains the point.
-func (l *Loop) ContainsPoint(p Point) bool {
-	// Empty and full loops don't need a special case, but invalid loops with
-	// zero vertices do, so we might as well handle them all at once.
-	if len(l.vertices) < 3 {
-		return l.originInside
-	}
-
-	// For small loops, and during initial construction, it is faster to just
-	// check all the crossing.
-	const maxBruteForceVertices = 32
-	if len(l.vertices) < maxBruteForceVertices || l.index == nil {
-		return l.bruteForceContainsPoint(p)
-	}
-
-	// Otherwise, look up the point in the index.
-	it := l.index.Iterator()
-	if !it.LocatePoint(p) {
-		return false
-	}
-	return l.iteratorContainsPoint(it, p)
-}
-
-// ContainsCell reports whether the given Cell is contained by this Loop.
-func (l *Loop) ContainsCell(target Cell) bool {
-	it := l.index.Iterator()
-	relation := it.LocateCellID(target.ID())
-
-	// If "target" is disjoint from all index cells, it is not contained.
-	// Similarly, if "target" is subdivided into one or more index cells then it
-	// is not contained, since index cells are subdivided only if they (nearly)
-	// intersect a sufficient number of edges.  (But note that if "target" itself
-	// is an index cell then it may be contained, since it could be a cell with
-	// no edges in the loop interior.)
-	if relation != Indexed {
-		return false
-	}
-
-	// Otherwise check if any edges intersect "target".
-	if l.boundaryApproxIntersects(it, target) {
-		return false
-	}
-
-	// Otherwise check if the loop contains the center of "target".
-	return l.iteratorContainsPoint(it, target.Center())
-}
-
-// IntersectsCell reports whether this Loop intersects the given cell.
-func (l *Loop) IntersectsCell(target Cell) bool {
-	it := l.index.Iterator()
-	relation := it.LocateCellID(target.ID())
-
-	// If target does not overlap any index cell, there is no intersection.
-	if relation == Disjoint {
-		return false
-	}
-	// If target is subdivided into one or more index cells, there is an
-	// intersection to within the ShapeIndex error bound (see Contains).
-	if relation == Subdivided {
-		return true
-	}
-	// If target is an index cell, there is an intersection because index cells
-	// are created only if they have at least one edge or they are entirely
-	// contained by the loop.
-	if it.CellID() == target.id {
-		return true
-	}
-	// Otherwise check if any edges intersect target.
-	if l.boundaryApproxIntersects(it, target) {
-		return true
-	}
-	// Otherwise check if the loop contains the center of target.
-	return l.iteratorContainsPoint(it, target.Center())
-}
-
-// CellUnionBound computes a covering of the Loop.
-func (l *Loop) CellUnionBound() []CellID {
-	return l.CapBound().CellUnionBound()
-}
-
-// boundaryApproxIntersects reports if the loop's boundary intersects target.
-// It may also return true when the loop boundary does not intersect target but
-// some edge comes within the worst-case error tolerance.
-//
-// This requires that it.Locate(target) returned Indexed.
-func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool {
-	aClipped := it.IndexCell().findByShapeID(0)
-
-	// If there are no edges, there is no intersection.
-	if len(aClipped.edges) == 0 {
-		return false
-	}
-
-	// We can save some work if target is the index cell itself.
-	if it.CellID() == target.ID() {
-		return true
-	}
-
-	// Otherwise check whether any of the edges intersect target.
-	maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist)
-	bound := target.BoundUV().ExpandedByMargin(maxError)
-	for _, ai := range aClipped.edges {
-		v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError)
-		if ok && edgeIntersectsRect(v0, v1, bound) {
-			return true
-		}
-	}
-	return false
-}
-
-// iteratorContainsPoint reports if the iterator that is positioned at the ShapeIndexCell
-// that may contain p, contains the point p.
-func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool {
-	// Test containment by drawing a line segment from the cell center to the
-	// given point and counting edge crossings.
-	aClipped := it.IndexCell().findByShapeID(0)
-	inside := aClipped.containsCenter
-	if len(aClipped.edges) > 0 {
-		center := it.Center()
-		crosser := NewEdgeCrosser(center, p)
-		aiPrev := -2
-		for _, ai := range aClipped.edges {
-			if ai != aiPrev+1 {
-				crosser.RestartAt(l.Vertex(ai))
-			}
-			aiPrev = ai
-			inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1))
-		}
-	}
-	return inside
-}
-
-// RegularLoop creates a loop with the given number of vertices, all
-// located on a circle of the specified radius around the given center.
-func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop {
-	return RegularLoopForFrame(getFrame(center), radius, numVertices)
-}
-
-// RegularLoopForFrame creates a loop centered around the z-axis of the given
-// coordinate frame, with the first vertex in the direction of the positive x-axis.
-func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop {
-	return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices))
-}
-
-// CanonicalFirstVertex returns a first index and a direction (either +1 or -1)
-// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does
-// not change when the loop vertex order is rotated or inverted. This allows the
-// loop vertices to be traversed in a canonical order. The return values are
-// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as
-// expected by the Vertex method.
-func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) {
-	firstIdx = 0
-	n := len(l.vertices)
-	for i := 1; i < n; i++ {
-		if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 {
-			firstIdx = i
-		}
-	}
-
-	// 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1.
-	if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 {
-		return firstIdx, 1
-	}
-
-	// n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0.
-	firstIdx += n
-	return firstIdx, -1
-}
-
// TurningAngle returns the sum of the turning angles at each vertex. The return
// value is positive if the loop is counter-clockwise, negative if the loop is
// clockwise, and zero if the loop is a great circle. Degenerate and
// nearly-degenerate loops are handled consistently with Sign. So for example,
// if a loop has zero area (i.e., it is a very small CCW loop) then the turning
// angle will always be negative.
//
// This quantity is also called the "geodesic curvature" of the loop.
func (l *Loop) TurningAngle() float64 {
	// For empty and full loops, we return the limit value as the loop area
	// approaches 0 or 4*Pi respectively.
	if l.isEmptyOrFull() {
		if l.ContainsOrigin() {
			return -2 * math.Pi
		}
		return 2 * math.Pi
	}

	// Don't crash even if the loop is not well-defined.
	if len(l.vertices) < 3 {
		return 0
	}

	// To ensure that we get the same result when the vertex order is rotated,
	// and that the result is negated when the vertex order is reversed, we need
	// to add up the individual turn angles in a consistent order. (In general,
	// adding up a set of numbers in a different order can change the sum due to
	// rounding errors.)
	//
	// Furthermore, if we just accumulate an ordinary sum then the worst-case
	// error is quadratic in the number of vertices. (This can happen with
	// spiral shapes, where the partial sum of the turning angles can be linear
	// in the number of vertices.) To avoid this we use the Kahan summation
	// algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm).
	n := len(l.vertices)
	// Start at the canonical first vertex so the summation order is invariant
	// under rotation and inversion of the vertex list.
	i, dir := l.CanonicalFirstVertex()
	sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n))

	// compensation carries the low-order bits lost by each addition
	// (the Kahan correction term).
	compensation := s1.Angle(0)
	for n-1 > 0 {
		i += dir
		angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir))
		oldSum := sum
		angle += compensation
		sum += angle
		compensation = (oldSum - sum) + angle
		n--
	}
	// dir restores the orientation implied by CanonicalFirstVertex.
	return float64(dir) * float64(sum+compensation)
}
-
-// turningAngleMaxError return the maximum error in TurningAngle. The value is not
-// constant; it depends on the loop.
-func (l *Loop) turningAngleMaxError() float64 {
-	// The maximum error can be bounded as follows:
-	//   2.24 * dblEpsilon    for RobustCrossProd(b, a)
-	//   2.24 * dblEpsilon    for RobustCrossProd(c, b)
-	//   3.25 * dblEpsilon    for Angle()
-	//   2.00 * dblEpsilon    for each addition in the Kahan summation
-	//   ------------------
-	//   9.73 * dblEpsilon
-	maxErrorPerVertex := 9.73 * dblEpsilon
-	return maxErrorPerVertex * float64(len(l.vertices))
-}
-
-// IsHole reports whether this loop represents a hole in its containing polygon.
-func (l *Loop) IsHole() bool { return l.depth&1 != 0 }
-
-// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise.
-func (l *Loop) Sign() int {
-	if l.IsHole() {
-		return -1
-	}
-	return 1
-}
-
-// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are
-// handled consistently with Sign, i.e., if a loop can be
-// expressed as the union of degenerate or nearly-degenerate CCW triangles,
-// then it will always be considered normalized.
-func (l *Loop) IsNormalized() bool {
-	// Optimization: if the longitude span is less than 180 degrees, then the
-	// loop covers less than half the sphere and is therefore normalized.
-	if l.bound.Lng.Length() < math.Pi {
-		return true
-	}
-
-	// We allow some error so that hemispheres are always considered normalized.
-	// TODO(roberts): This is no longer required by the Polygon implementation,
-	// so alternatively we could create the invariant that a loop is normalized
-	// if and only if its complement is not normalized.
-	return l.TurningAngle() >= -l.turningAngleMaxError()
-}
-
-// Normalize inverts the loop if necessary so that the area enclosed by the loop
-// is at most 2*pi.
-func (l *Loop) Normalize() {
-	if !l.IsNormalized() {
-		l.Invert()
-	}
-}
-
-// Invert reverses the order of the loop vertices, effectively complementing the
-// region represented by the loop. For example, the loop ABCD (with edges
-// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD).
-// Notice that the last edge is the same in both cases except that its
-// direction has been reversed.
-func (l *Loop) Invert() {
-	l.index.Reset()
-	if l.isEmptyOrFull() {
-		if l.IsFull() {
-			l.vertices[0] = emptyLoopPoint
-		} else {
-			l.vertices[0] = fullLoopPoint
-		}
-	} else {
-		// For non-special loops, reverse the slice of vertices.
-		for i := len(l.vertices)/2 - 1; i >= 0; i-- {
-			opp := len(l.vertices) - 1 - i
-			l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i]
-		}
-	}
-
-	// originInside must be set correctly before building the ShapeIndex.
-	l.originInside = !l.originInside
-	if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 {
-		// The complement of this loop contains both poles.
-		l.bound = FullRect()
-		l.subregionBound = l.bound
-	} else {
-		l.initBound()
-	}
-	l.index.Add(l)
-}
-
-// findVertex returns the index of the vertex at the given Point in the range
-// 1..numVertices, and a boolean indicating if a vertex was found.
-func (l *Loop) findVertex(p Point) (index int, ok bool) {
-	const notFound = 0
-	if len(l.vertices) < 10 {
-		// Exhaustive search for loops below a small threshold.
-		for i := 1; i <= len(l.vertices); i++ {
-			if l.Vertex(i) == p {
-				return i, true
-			}
-		}
-		return notFound, false
-	}
-
-	it := l.index.Iterator()
-	if !it.LocatePoint(p) {
-		return notFound, false
-	}
-
-	aClipped := it.IndexCell().findByShapeID(0)
-	for i := aClipped.numEdges() - 1; i >= 0; i-- {
-		ai := aClipped.edges[i]
-		if l.Vertex(ai) == p {
-			if ai == 0 {
-				return len(l.vertices), true
-			}
-			return ai, true
-		}
-
-		if l.Vertex(ai+1) == p {
-			return ai + 1, true
-		}
-	}
-	return notFound, false
-}
-
-// ContainsNested reports whether the given loops is contained within this loop.
-// This function does not test for edge intersections. The two loops must meet
-// all of the Polygon requirements; for example this implies that their
-// boundaries may not cross or have any shared edges (although they may have
-// shared vertices).
-func (l *Loop) ContainsNested(other *Loop) bool {
-	if !l.subregionBound.Contains(other.bound) {
-		return false
-	}
-
-	// Special cases to handle either loop being empty or full.  Also bail out
-	// when B has no vertices to avoid heap overflow on the vertex(1) call
-	// below.  (This method is called during polygon initialization before the
-	// client has an opportunity to call IsValid().)
-	if l.isEmptyOrFull() || other.NumVertices() < 2 {
-		return l.IsFull() || other.IsEmpty()
-	}
-
-	// We are given that A and B do not share any edges, and that either one
-	// loop contains the other or they do not intersect.
-	m, ok := l.findVertex(other.Vertex(1))
-	if !ok {
-		// Since other.vertex(1) is not shared, we can check whether A contains it.
-		return l.ContainsPoint(other.Vertex(1))
-	}
-
-	// Check whether the edge order around other.Vertex(1) is compatible with
-	// A containing B.
-	return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2))
-}
-
// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x)
// over the loop interior, given a function f(A,B,C) that returns the
// corresponding integral over the spherical triangle ABC. Here "oriented
// surface integral" means:
//
// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise,
//     and the integral of -f if ABC is clockwise.
//
// (2) The result of this function is *either* the integral of f over the
//     loop interior, or the integral of (-f) over the loop exterior.
//
// Note that there are at least two common situations where it easy to work
// around property (2) above:
//
//  - If the integral of f over the entire sphere is zero, then it doesn't
//    matter which case is returned because they are always equal.
//
//  - If f is non-negative, then it is easy to detect when the integral over
//    the loop exterior has been returned, and the integral over the loop
//    interior can be obtained by adding the integral of f over the entire
//    unit sphere (a constant) to the result.
//
// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well.
func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 {
	// We sum f over a collection T of oriented triangles, possibly
	// overlapping. Let the sign of a triangle be +1 if it is CCW and -1
	// otherwise, and let the sign of a point x be the sum of the signs of the
	// triangles containing x. Then the collection of triangles T is chosen
	// such that either:
	//
	//  (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or
	//  (2) Each point in the loop exterior has sign -1, and sign 0 otherwise.
	//
	// The triangles basically consist of a fan from vertex 0 to every loop
	// edge that does not include vertex 0. These triangles will always satisfy
	// either (1) or (2). However, what makes this a bit tricky is that
	// spherical edges become numerically unstable as their length approaches
	// 180 degrees. Of course there is not much we can do if the loop itself
	// contains such edges, but we would like to make sure that all the triangle
	// edges under our control (i.e., the non-loop edges) are stable. For
	// example, consider a loop around the equator consisting of four equally
	// spaced points. This is a well-defined loop, but we cannot just split it
	// into two triangles by connecting vertex 0 to vertex 2.
	//
	// We handle this type of situation by moving the origin of the triangle fan
	// whenever we are about to create an unstable edge. We choose a new
	// location for the origin such that all relevant edges are stable. We also
	// create extra triangles with the appropriate orientation so that the sum
	// of the triangle signs is still correct at every point.

	// The maximum length of an edge for it to be considered numerically stable.
	// The exact value is fairly arbitrary since it depends on the stability of
	// the function f. The value below is quite conservative but could be
	// reduced further if desired.
	const maxLength = math.Pi - 1e-5

	var sum float64
	origin := l.Vertex(0)
	for i := 1; i+1 < len(l.vertices); i++ {
		// Let V_i be vertex(i), let O be the current origin, and let length(A,B)
		// be the length of edge (A,B). At the start of each loop iteration, the
		// "leading edge" of the triangle fan is (O,V_i), and we want to extend
		// the triangle fan so that the leading edge is (O,V_i+1).
		//
		// Invariants:
		//  1. length(O,V_i) < maxLength for all (i > 1).
		//  2. Either O == V_0, or O is approximately perpendicular to V_0.
		//  3. "sum" is the oriented integral of f over the area defined by
		//     (O, V_0, V_1, ..., V_i).
		if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
			// We are about to create an unstable edge, so choose a new origin O'
			// for the triangle fan.
			oldOrigin := origin
			if origin == l.Vertex(0) {
				// The following point is well-separated from V_i and V_0 (and
				// therefore V_i+1 as well).
				origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
			} else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
				// All edges of the triangle (O, V_0, V_i) are stable, so we can
				// revert to using V_0 as the origin.
				origin = l.Vertex(0)
			} else {
				// (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are
				// perpendicular. Therefore V_0.CrossProd(O) is approximately
				// perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose
				// this point O' as the new origin.
				origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}

				// Advance the edge (V_0,O) to (V_0,O').
				sum += f(l.Vertex(0), oldOrigin, origin)
			}
			// Advance the edge (O,V_i) to (O',V_i).
			sum += f(oldOrigin, l.Vertex(i), origin)
		}
		// Advance the edge (O,V_i) to (O,V_i+1).
		sum += f(origin, l.Vertex(i), l.Vertex(i+1))
	}
	// If the origin is not V_0, we need to sum one more triangle.
	if origin != l.Vertex(0) {
		// Advance the edge (O,V_n-1) to (O,V_0), closing the fan.
		sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0))
	}
	return sum
}
-
// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points;
// see that method for commentary. The C++ version uses a templated method.
// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well.
func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point {
	// Edges longer than this are considered numerically unstable.
	const maxLength = math.Pi - 1e-5
	var sum r3.Vector

	origin := l.Vertex(0)
	for i := 1; i+1 < len(l.vertices); i++ {
		if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
			// The next fan edge would be unstable; move the fan origin.
			oldOrigin := origin
			if origin == l.Vertex(0) {
				origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
			} else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
				// It is safe to revert to V_0 as the fan origin.
				origin = l.Vertex(0)
			} else {
				origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}
				// Advance the edge (V_0,O) to (V_0,O').
				sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector)
			}
			// Advance the edge (O,V_i) to (O',V_i).
			sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector)
		}
		// Advance the edge (O,V_i) to (O,V_i+1).
		sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector)
	}
	if origin != l.Vertex(0) {
		// Close the fan back to V_0.
		sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector)
	}
	return Point{sum}
}
-
// Area returns the area of the loop interior, i.e. the region on the left side of
// the loop. The return value is between 0 and 4*pi. (Note that the return
// value is not affected by whether this loop is a "hole" or a "shell".)
func (l *Loop) Area() float64 {
	// It is surprisingly difficult to compute the area of a loop robustly. The
	// main issues are (1) whether degenerate loops are considered to be CCW or
	// not (i.e., whether their area is close to 0 or 4*pi), and (2) computing
	// the areas of small loops with good relative accuracy.
	//
	// With respect to degeneracies, we would like Area to be consistent
	// with ContainsPoint in that loops that contain many points
	// should have large areas, and loops that contain few points should have
	// small areas. For example, if a degenerate triangle is considered CCW
	// according to s2predicates Sign, then it will contain very few points and
	// its area should be approximately zero. On the other hand if it is
	// considered clockwise, then it will contain virtually all points and so
	// its area should be approximately 4*pi.
	//
	// More precisely, let U be the set of Points for which IsUnitLength
	// is true, let P(U) be the projection of those points onto the mathematical
	// unit sphere, and let V(P(U)) be the Voronoi diagram of the projected
	// points. Then for every loop x, we would like Area to approximately
	// equal the sum of the areas of the Voronoi regions of the points p for
	// which x.ContainsPoint(p) is true.
	//
	// The second issue is that we want to compute the area of small loops
	// accurately. This requires having good relative precision rather than
	// good absolute precision. For example, if the area of a loop is 1e-12 and
	// the error is 1e-15, then the area only has 3 digits of accuracy. (For
	// reference, 1e-12 is about 40 square meters on the surface of the earth.)
	// We would like to have good relative accuracy even for small loops.
	//
	// To achieve these goals, we combine two different methods of computing the
	// area. This first method is based on the Gauss-Bonnet theorem, which says
	// that the area enclosed by the loop equals 2*pi minus the total geodesic
	// curvature of the loop (i.e., the sum of the "turning angles" at all the
	// loop vertices). The big advantage of this method is that as long as we
	// use Sign to compute the turning angle at each vertex, then
	// degeneracies are always handled correctly. In other words, if a
	// degenerate loop is CCW according to the symbolic perturbations used by
	// Sign, then its turning angle will be approximately 2*pi.
	//
	// The disadvantage of the Gauss-Bonnet method is that its absolute error is
	// about 2e-15 times the number of vertices (see turningAngleMaxError).
	// So, it cannot compute the area of small loops accurately.
	//
	// The second method is based on splitting the loop into triangles and
	// summing the area of each triangle. To avoid the difficulty and expense
	// of decomposing the loop into a union of non-overlapping triangles,
	// instead we compute a signed sum over triangles that may overlap (see the
	// comments for surfaceIntegral). The advantage of this method
	// is that the area of each triangle can be computed with much better
	// relative accuracy (using l'Huilier's theorem). The disadvantage is that
	// the result is a signed area: CCW loops may yield a small positive value,
	// while CW loops may yield a small negative value (which is converted to a
	// positive area by adding 4*pi). This means that small errors in computing
	// the signed area may translate into a very large error in the result (if
	// the sign of the sum is incorrect).
	//
	// So, our strategy is to combine these two methods as follows. First we
	// compute the area using the "signed sum over triangles" approach (since it
	// is generally more accurate). We also estimate the maximum error in this
	// result. If the signed area is too close to zero (i.e., zero is within
	// the error bounds), then we double-check the sign of the result using the
	// Gauss-Bonnet method. (In fact we just call IsNormalized, which is
	// based on this method.) If the two methods disagree, we return either 0
	// or 4*pi based on the result of IsNormalized. Otherwise we return the
	// area that we computed originally.
	if l.isEmptyOrFull() {
		if l.ContainsOrigin() {
			return 4 * math.Pi
		}
		return 0
	}
	area := l.surfaceIntegralFloat64(SignedArea)

	// TODO(roberts): This error estimate is very approximate. There are two
	// issues: (1) SignedArea needs some improvements to ensure that its error
	// is actually never higher than GirardArea, and (2) although the number of
	// triangles in the sum is typically N-2, in theory it could be as high as
	// 2*N for pathological inputs. But in other respects this error bound is
	// very conservative since it assumes that the maximum error is achieved on
	// every triangle.
	maxError := l.turningAngleMaxError()

	// The signed area should be between approximately -4*pi and 4*pi.
	if area < 0 {
		// We have computed the negative of the area of the loop exterior.
		area += 4 * math.Pi
	}

	// Clamp the result to the mathematically valid range [0, 4*pi].
	if area > 4*math.Pi {
		area = 4 * math.Pi
	}
	if area < 0 {
		area = 0
	}

	// If the area is close enough to zero or 4*pi so that the loop orientation
	// is ambiguous, then we compute the loop orientation explicitly.
	if area < maxError && !l.IsNormalized() {
		return 4 * math.Pi
	} else if area > (4*math.Pi-maxError) && l.IsNormalized() {
		return 0
	}

	return area
}
-
-// Centroid returns the true centroid of the loop multiplied by the area of the
-// loop. The result is not unit length, so you may want to normalize it. Also
-// note that in general, the centroid may not be contained by the loop.
-//
-// We prescale by the loop area for two reasons: (1) it is cheaper to
-// compute this way, and (2) it makes it easier to compute the centroid of
-// more complicated shapes (by splitting them into disjoint regions and
-// adding their centroids).
-//
-// Note that the return value is not affected by whether this loop is a
-// "hole" or a "shell".
-func (l *Loop) Centroid() Point {
-	// surfaceIntegralPoint() returns either the integral of position over loop
-	// interior, or the negative of the integral of position over the loop
-	// exterior. But these two values are the same (!), because the integral of
-	// position over the entire sphere is (0, 0, 0).
-	return l.surfaceIntegralPoint(TrueCentroid)
-}
-
-// Encode encodes the Loop.
-func (l Loop) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	l.encode(e)
-	return e.err
-}
-
-func (l Loop) encode(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeUint32(uint32(len(l.vertices)))
-	for _, v := range l.vertices {
-		e.writeFloat64(v.X)
-		e.writeFloat64(v.Y)
-		e.writeFloat64(v.Z)
-	}
-
-	e.writeBool(l.originInside)
-	e.writeInt32(int32(l.depth))
-
-	// Encode the bound.
-	l.bound.encode(e)
-}
-
-// Decode decodes a loop.
-func (l *Loop) Decode(r io.Reader) error {
-	*l = Loop{}
-	d := &decoder{r: asByteReader(r)}
-	l.decode(d)
-	return d.err
-}
-
-func (l *Loop) decode(d *decoder) {
-	version := int8(d.readUint8())
-	if d.err != nil {
-		return
-	}
-	if version != encodingVersion {
-		d.err = fmt.Errorf("cannot decode version %d", version)
-		return
-	}
-
-	// Empty loops are explicitly allowed here: a newly created loop has zero vertices
-	// and such loops encode and decode properly.
-	nvertices := d.readUint32()
-	if nvertices > maxEncodedVertices {
-		if d.err == nil {
-			d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
-
-		}
-		return
-	}
-	l.vertices = make([]Point, nvertices)
-	for i := range l.vertices {
-		l.vertices[i].X = d.readFloat64()
-		l.vertices[i].Y = d.readFloat64()
-		l.vertices[i].Z = d.readFloat64()
-	}
-	l.originInside = d.readBool()
-	l.depth = int(d.readUint32())
-	l.bound.decode(d)
-	l.subregionBound = ExpandForSubregions(l.bound)
-
-	l.index = NewShapeIndex()
-	l.index.Add(l)
-}
-
// Bitmasks to read from properties.
const (
	// originInside is set when the loop contains the origin point.
	originInside = 1 << iota
	// boundEncoded is set when the loop's bound is included in the encoding.
	boundEncoded
)
-
-func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi {
-	ret := make([]xyzFaceSiTi, len(l.vertices))
-	for i, v := range l.vertices {
-		ret[i].xyz = v
-		ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v)
-	}
-	return ret
-}
-
-func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
-	if len(l.vertices) != len(vertices) {
-		panic("encodeCompressed: vertices must be the same length as l.vertices")
-	}
-	if len(vertices) > maxEncodedVertices {
-		if e.err == nil {
-			e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices)
-		}
-		return
-	}
-	e.writeUvarint(uint64(len(vertices)))
-	encodePointsCompressed(e, vertices, snapLevel)
-
-	props := l.compressedEncodingProperties()
-	e.writeUvarint(props)
-	e.writeUvarint(uint64(l.depth))
-	if props&boundEncoded != 0 {
-		l.bound.encode(e)
-	}
-}
-
-func (l *Loop) compressedEncodingProperties() uint64 {
-	var properties uint64
-	if l.originInside {
-		properties |= originInside
-	}
-
-	// Write whether there is a bound so we can change the threshold later.
-	// Recomputing the bound multiplies the decode time taken per vertex
-	// by a factor of about 3.5.  Without recomputing the bound, decode
-	// takes approximately 125 ns / vertex.  A loop with 63 vertices
-	// encoded without the bound will take ~30us to decode, which is
-	// acceptable.  At ~3.5 bytes / vertex without the bound, adding
-	// the bound will increase the size by <15%, which is also acceptable.
-	const minVerticesForBound = 64
-	if len(l.vertices) >= minVerticesForBound {
-		properties |= boundEncoded
-	}
-
-	return properties
-}
-
-func (l *Loop) decodeCompressed(d *decoder, snapLevel int) {
-	nvertices := d.readUvarint()
-	if d.err != nil {
-		return
-	}
-	if nvertices > maxEncodedVertices {
-		d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
-		return
-	}
-	l.vertices = make([]Point, nvertices)
-	decodePointsCompressed(d, snapLevel, l.vertices)
-	properties := d.readUvarint()
-
-	// Make sure values are valid before using.
-	if d.err != nil {
-		return
-	}
-
-	l.originInside = (properties & originInside) != 0
-
-	l.depth = int(d.readUvarint())
-
-	if (properties & boundEncoded) != 0 {
-		l.bound.decode(d)
-		if d.err != nil {
-			return
-		}
-		l.subregionBound = ExpandForSubregions(l.bound)
-	} else {
-		l.initBound()
-	}
-
-	l.index = NewShapeIndex()
-	l.index.Add(l)
-}
-
// crossingTarget is an enum representing the possible crossing target cases for relations.
type crossingTarget int

const (
	// crossingTargetDontCare indicates the relation has no early-exit test.
	crossingTargetDontCare crossingTarget = iota
	// crossingTargetDontCross indicates the target point must not be contained.
	crossingTargetDontCross
	// crossingTargetCross indicates the target point must be contained.
	crossingTargetCross
)
-
// loopRelation defines the interface for checking a type of relationship between two loops.
// Some examples of relations are Contains, Intersects, or CompareBoundary.
type loopRelation interface {
	// Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit
	// condition for the loop relation. If any point P is found such that
	//
	//   A.ContainsPoint(P) == aCrossingTarget() &&
	//   B.ContainsPoint(P) == bCrossingTarget()
	//
	// then the loop relation is assumed to be the same as if a pair of crossing
	// edges were found. For example, the ContainsPoint relation has
	//
	//   aCrossingTarget() == crossingTargetDontCross
	//   bCrossingTarget() == crossingTargetCross
	//
	// because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true
	// for any point P, then it is equivalent to finding an edge crossing (i.e.,
	// since Contains returns false in both cases).
	//
	// Loop relations that do not have an early-exit condition of this form
	// should return crossingTargetDontCare for both crossing targets.

	// aCrossingTarget reports whether loop A crosses the target point with
	// the given relation type.
	aCrossingTarget() crossingTarget
	// bCrossingTarget reports whether loop B crosses the target point with
	// the given relation type.
	bCrossingTarget() crossingTarget

	// wedgesCross reports if a shared vertex ab1 and the two associated wedges
	// (a0, ab1, a2) and (b0, ab1, b2) are equivalent to an edge crossing.
	// The loop relation is also allowed to maintain its own internal state, and
	// can return true if it observes any sequence of wedges that are equivalent
	// to an edge crossing.
	wedgesCross(a0, ab1, a2, b0, b2 Point) bool
}
-
// loopCrosser is a helper type for determining whether two loops cross.
// It is instantiated twice for each pair of loops to be tested, once for the
// pair (A,B) and once for the pair (B,A), in order to be able to process
// edges in either loop nesting order.
type loopCrosser struct {
	a, b            *Loop        // the loops being tested
	relation        loopRelation // the relation being evaluated
	swapped         bool         // true when this instance represents the (B,A) ordering
	aCrossingTarget crossingTarget
	bCrossingTarget crossingTarget

	// state maintained by startEdge and edgeCrossesCell.
	crosser    *EdgeCrosser
	aj, bjPrev int

	// temporary data declared here to avoid repeated memory allocations.
	bQuery *CrossingEdgeQuery
	bCells []*ShapeIndexCell
}
-
-// newLoopCrosser creates a loopCrosser from the given values. If swapped is true,
-// the loops A and B have been swapped. This affects how arguments are passed to
-// the given loop relation, since for example A.Contains(B) is not the same as
-// B.Contains(A).
-func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser {
-	l := &loopCrosser{
-		a:               a,
-		b:               b,
-		relation:        relation,
-		swapped:         swapped,
-		aCrossingTarget: relation.aCrossingTarget(),
-		bCrossingTarget: relation.bCrossingTarget(),
-		bQuery:          NewCrossingEdgeQuery(b.index),
-	}
-	if swapped {
-		l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget
-	}
-
-	return l
-}
-
-// startEdge sets the crossers state for checking the given edge of loop A.
-func (l *loopCrosser) startEdge(aj int) {
-	l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1))
-	l.aj = aj
-	l.bjPrev = -2
-}
-
-// edgeCrossesCell reports whether the current edge of loop A has any crossings with
-// edges of the index cell of loop B.
-func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool {
-	// Test the current edge of A against all edges of bClipped
-	bNumEdges := bClipped.numEdges()
-	for j := 0; j < bNumEdges; j++ {
-		bj := bClipped.edges[j]
-		if bj != l.bjPrev+1 {
-			l.crosser.RestartAt(l.b.Vertex(bj))
-		}
-		l.bjPrev = bj
-		if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross {
-			continue
-		} else if crossing == Cross {
-			return true
-		}
-
-		// We only need to check each shared vertex once, so we only
-		// consider the case where l.aVertex(l.aj+1) == l.b.Vertex(bj+1).
-		if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) {
-			if l.swapped {
-				if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) {
-					return true
-				}
-			} else {
-				if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) {
-					return true
-				}
-			}
-		}
-	}
-
-	return false
-}
-
-// cellCrossesCell reports whether there are any edge crossings or wedge crossings
-// within the two given cells.
-func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool {
-	// Test all edges of aClipped against all edges of bClipped.
-	for _, edge := range aClipped.edges {
-		l.startEdge(edge)
-		if l.edgeCrossesCell(bClipped) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// cellCrossesAnySubcell reports whether given an index cell of A, if there are any
-// edge or wedge crossings with any index cell of B contained within bID.
-func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool {
-	// Test all edges of aClipped against all edges of B. The relevant B
-	// edges are guaranteed to be children of bID, which lets us find the
-	// correct index cells more efficiently.
-	bRoot := PaddedCellFromCellID(bID, 0)
-	for _, aj := range aClipped.edges {
-		// Use an CrossingEdgeQuery starting at bRoot to find the index cells
-		// of B that might contain crossing edges.
-		l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot)
-		if len(l.bCells) == 0 {
-			continue
-		}
-		l.startEdge(aj)
-		for c := 0; c < len(l.bCells); c++ {
-			if l.edgeCrossesCell(l.bCells[c].shapes[0]) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// hasCrossing reports whether given two iterators positioned such that
-// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing
-// anywhere within ai.cellID(). This function advances bi only past ai.cellID().
-func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
-	// If ai.CellID() intersects many edges of B, then it is faster to use
-	// CrossingEdgeQuery to narrow down the candidates. But if it intersects
-	// only a few edges, it is faster to check all the crossings directly.
-	// We handle this by advancing bi and keeping track of how many edges we
-	// would need to test.
-	const edgeQueryMinEdges = 20 // Tuned from benchmarks.
-	var totalEdges int
-	l.bCells = nil
-
-	for {
-		if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 {
-			totalEdges += n
-			if totalEdges >= edgeQueryMinEdges {
-				// There are too many edges to test them directly, so use CrossingEdgeQuery.
-				if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) {
-					return true
-				}
-				bi.seekBeyond(ai)
-				return false
-			}
-			l.bCells = append(l.bCells, bi.indexCell())
-		}
-		bi.next()
-		if bi.cellID() > ai.rangeMax {
-			break
-		}
-	}
-
-	// Test all the edge crossings directly.
-	for _, c := range l.bCells {
-		if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// containsCenterMatches reports if the clippedShapes containsCenter boolean corresponds
-// to the crossing target type given. (This is to work around C++ allowing false == 0,
-// true == 1 type implicit conversions and comparisons)
-func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
-	return (!a.containsCenter && target == crossingTargetDontCross) ||
-		(a.containsCenter && target == crossingTargetCross)
-}
-
-// hasCrossingRelation reports whether given two iterators positioned such that
-// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship
-// anywhere within ai.cellID(). Specifically, this method returns true if there
-// is an edge crossing, a wedge crossing, or a point P that matches both relations
-// crossing targets. This function advances both iterators past ai.cellID.
-func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
-	aClipped := ai.it.IndexCell().shapes[0]
-	if aClipped.numEdges() != 0 {
-		// The current cell of A has at least one edge, so check for crossings.
-		if l.hasCrossing(ai, bi) {
-			return true
-		}
-		ai.next()
-		return false
-	}
-
-	if containsCenterMatches(aClipped, l.aCrossingTarget) {
-		// The crossing target for A is not satisfied, so we skip over these cells of B.
-		bi.seekBeyond(ai)
-		ai.next()
-		return false
-	}
-
-	// All points within ai.cellID() satisfy the crossing target for A, so it's
-	// worth iterating through the cells of B to see whether any cell
-	// centers also satisfy the crossing target for B.
-	for bi.cellID() <= ai.rangeMax {
-		bClipped := bi.it.IndexCell().shapes[0]
-		if containsCenterMatches(bClipped, l.bCrossingTarget) {
-			return true
-		}
-		bi.next()
-	}
-	ai.next()
-	return false
-}
-
-// hasCrossingRelation checks all edges of loop A for intersection against all edges
-// of loop B and reports if there are any that satisfy the given relation. If there
-// is any shared vertex, the wedges centered at this vertex are sent to the given
-// relation to be tested.
-//
-// If the two loop boundaries cross, this method is guaranteed to return
-// true. It also returns true in certain cases if the loop relationship is
-// equivalent to crossing. For example, if the relation is Contains and a
-// point P is found such that B contains P but A does not contain P, this
-// method will return true to indicate that the result is the same as though
-// a pair of crossing edges were found (since Contains returns false in
-// both cases).
-//
-// See Contains, Intersects and CompareBoundary for the three uses of this function.
-func hasCrossingRelation(a, b *Loop, relation loopRelation) bool {
-	// We look for CellID ranges where the indexes of A and B overlap, and
-	// then test those edges for crossings.
-	ai := newRangeIterator(a.index)
-	bi := newRangeIterator(b.index)
-
-	ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B
-	ba := newLoopCrosser(b, a, relation, true)  // Tests edges of B against A
-
-	for !ai.done() || !bi.done() {
-		if ai.rangeMax < bi.rangeMin {
-			// The A and B cells don't overlap, and A precedes B.
-			ai.seekTo(bi)
-		} else if bi.rangeMax < ai.rangeMin {
-			// The A and B cells don't overlap, and B precedes A.
-			bi.seekTo(ai)
-		} else {
-			// One cell contains the other. Determine which cell is larger.
-			abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb())
-			if abRelation > 0 {
-				// A's index cell is larger.
-				if ab.hasCrossingRelation(ai, bi) {
-					return true
-				}
-			} else if abRelation < 0 {
-				// B's index cell is larger.
-				if ba.hasCrossingRelation(bi, ai) {
-					return true
-				}
-			} else {
-				// The A and B cells are the same. Since the two cells
-				// have the same center point P, check whether P satisfies
-				// the crossing targets.
-				aClipped := ai.it.IndexCell().shapes[0]
-				bClipped := bi.it.IndexCell().shapes[0]
-				if containsCenterMatches(aClipped, ab.aCrossingTarget) &&
-					containsCenterMatches(bClipped, ab.bCrossingTarget) {
-					return true
-				}
-				// Otherwise test all the edge crossings directly.
-				if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) {
-					return true
-				}
-				ai.next()
-				bi.next()
-			}
-		}
-	}
-	return false
-}
-
-// containsRelation implements loopRelation for a contains operation. If
-// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent
-// to having an edge crossing (i.e., Contains returns false).
-type containsRelation struct {
-	foundSharedVertex bool
-}
-
-func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross }
-func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
-func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
-	c.foundSharedVertex = true
-	return !WedgeContains(a0, ab1, a2, b0, b2)
-}
-
-// intersectsRelation implements loopRelation for an intersects operation. Given
-// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true,
-// it is equivalent to having an edge crossing (i.e., Intersects returns true).
-type intersectsRelation struct {
-	foundSharedVertex bool
-}
-
-func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross }
-func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
-func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
-	i.foundSharedVertex = true
-	return WedgeIntersects(a0, ab1, a2, b0, b2)
-}
-
-// compareBoundaryRelation implements loopRelation for comparing boundaries.
-//
-// The compare boundary relation does not have a useful early-exit condition,
-// so we return crossingTargetDontCare for both crossing targets.
-//
-// Aside: A possible early exit condition could be based on the following.
-//   If A contains a point of both B and ~B, then A intersects Boundary(B).
-//   If ~A contains a point of both B and ~B, then ~A intersects Boundary(B).
-//   So if the intersections of {A, ~A} with {B, ~B} are all non-empty,
-//   the return value is 0, i.e., Boundary(A) intersects Boundary(B).
-// Unfortunately it isn't worth detecting this situation because by the
-// time we have seen a point in all four intersection regions, we are also
-// guaranteed to have seen at least one pair of crossing edges.
-type compareBoundaryRelation struct {
-	reverse           bool // True if the other loop should be reversed.
-	foundSharedVertex bool // True if any wedge was processed.
-	containsEdge      bool // True if any edge of the other loop is contained by this loop.
-	excludesEdge      bool // True if any edge of the other loop is excluded by this loop.
-}
-
-func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation {
-	return &compareBoundaryRelation{reverse: reverse}
-}
-
-func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare }
-func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare }
-func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
-	// Because we don't care about the interior of the other, only its boundary,
-	// it is sufficient to check whether this one contains the semiwedge (ab1, b2).
-	c.foundSharedVertex = true
-	if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) {
-		c.containsEdge = true
-	} else {
-		c.excludesEdge = true
-	}
-	return c.containsEdge && c.excludesEdge
-}
-
-// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the
-// "semiwedge" defined as any non-empty open set of rays immediately CCW from
-// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW;
-// this simulates what would happen if the direction of the other loop was reversed.
-func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool {
-	if b2 == a0 || b2 == a2 {
-		// We have a shared or reversed edge.
-		return (b2 == a0) == reverse
-	}
-	return OrderedCCW(a0, a2, b2, ab1)
-}
-
-// containsNonCrossingBoundary reports whether given two loops whose boundaries
-// do not cross (see compareBoundary), if this loop contains the boundary of the
-// other loop. If reverse is true, the boundary of the other loop is reversed
-// first (which only affects the result when there are shared edges). This method
-// is cheaper than compareBoundary because it does not test for edge intersections.
-//
-// This function requires that neither loop is empty, and that if the other is full,
-// then reverse == false.
-func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool {
-	// The bounds must intersect for containment.
-	if !l.bound.Intersects(other.bound) {
-		return false
-	}
-
-	// Full loops are handled as though the loop surrounded the entire sphere.
-	if l.IsFull() {
-		return true
-	}
-	if other.IsFull() {
-		return false
-	}
-
-	m, ok := l.findVertex(other.Vertex(0))
-	if !ok {
-		// Since the other loops vertex 0 is not shared, we can check if this contains it.
-		return l.ContainsPoint(other.Vertex(0))
-	}
-	// Otherwise check whether the edge (b0, b1) is contained by this loop.
-	return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1),
-		other.Vertex(1), reverseOther)
-}
-
-// TODO(roberts): Differences from the C++ version:
-// DistanceToPoint
-// DistanceToBoundary
-// Project
-// ProjectToBoundary
-// BoundaryApproxEqual
-// BoundaryNear

+ 0 - 127
vendor/github.com/golang/geo/s2/matrix3x3.go

@@ -1,127 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-
-	"github.com/golang/geo/r3"
-)
-
-// matrix3x3 represents a traditional 3x3 matrix of floating point values.
-// This is not a full fledged matrix. It only contains the pieces needed
-// to satisfy the computations done within the s2 package.
-type matrix3x3 [3][3]float64
-
-// col returns the given column as a Point.
-func (m *matrix3x3) col(col int) Point {
-	return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}}
-}
-
-// row returns the given row as a Point.
-func (m *matrix3x3) row(row int) Point {
-	return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}}
-}
-
-// setCol sets the specified column to the value in the given Point.
-func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 {
-	m[0][col] = p.X
-	m[1][col] = p.Y
-	m[2][col] = p.Z
-
-	return m
-}
-
-// setRow sets the specified row to the value in the given Point.
-func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 {
-	m[row][0] = p.X
-	m[row][1] = p.Y
-	m[row][2] = p.Z
-
-	return m
-}
-
-// scale multiplies the matrix by the given value.
-func (m *matrix3x3) scale(f float64) *matrix3x3 {
-	return &matrix3x3{
-		[3]float64{f * m[0][0], f * m[0][1], f * m[0][2]},
-		[3]float64{f * m[1][0], f * m[1][1], f * m[1][2]},
-		[3]float64{f * m[2][0], f * m[2][1], f * m[2][2]},
-	}
-}
-
-// mul returns the multiplication of m by the Point p and converts the
-// resulting 1x3 matrix into a Point.
-func (m *matrix3x3) mul(p Point) Point {
-	return Point{r3.Vector{
-		m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z,
-		m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z,
-		m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z,
-	}}
-}
-
-// det returns the determinant of this matrix.
-func (m *matrix3x3) det() float64 {
-	//      | a  b  c |
-	//  det | d  e  f | = aei + bfg + cdh - ceg - bdi - afh
-	//      | g  h  i |
-	return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] -
-		m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1]
-}
-
-// transpose reflects the matrix along its diagonal and returns the result.
-func (m *matrix3x3) transpose() *matrix3x3 {
-	m[0][1], m[1][0] = m[1][0], m[0][1]
-	m[0][2], m[2][0] = m[2][0], m[0][2]
-	m[1][2], m[2][1] = m[2][1], m[1][2]
-
-	return m
-}
-
-// String formats the matrix into an easier to read layout.
-func (m *matrix3x3) String() string {
-	return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]",
-		m[0][0], m[0][1], m[0][2],
-		m[1][0], m[1][1], m[1][2],
-		m[2][0], m[2][1], m[2][2],
-	)
-}
-
-// getFrame returns the orthonormal frame for the given point on the unit sphere.
-func getFrame(p Point) matrix3x3 {
-	// Given the point p on the unit sphere, extend this into a right-handed
-	// coordinate frame of unit-length column vectors m = (x,y,z).  Note that
-	// the vectors (x,y) are an orthonormal frame for the tangent space at point p,
-	// while p itself is an orthonormal frame for the normal space at p.
-	m := matrix3x3{}
-	m.setCol(2, p)
-	m.setCol(1, Point{p.Ortho()})
-	m.setCol(0, Point{m.col(1).Cross(p.Vector)})
-	return m
-}
-
-// toFrame returns the coordinates of the given point with respect to its orthonormal basis m.
-// The resulting point q satisfies the identity (m * q == p).
-func toFrame(m matrix3x3, p Point) Point {
-	// The inverse of an orthonormal matrix is its transpose.
-	return m.transpose().mul(p)
-}
-
-// fromFrame returns the coordinates of the given point in standard axis-aligned basis
-// from its orthonormal basis m.
-// The resulting point p satisfies the identity (p == m * q).
-func fromFrame(m matrix3x3, q Point) Point {
-	return m.mul(q)
-}

+ 0 - 306
vendor/github.com/golang/geo/s2/max_distance_targets.go

@@ -1,306 +0,0 @@
-// Copyright 2019 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// maxDistance implements distance as the supplementary distance (Pi - x) to find
-// results that are the furthest using the distance related algorithms.
-type maxDistance s1.ChordAngle
-
-func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
-func (m maxDistance) zero() distance            { return maxDistance(s1.StraightChordAngle) }
-func (m maxDistance) negative() distance        { return maxDistance(s1.InfChordAngle()) }
-func (m maxDistance) infinity() distance        { return maxDistance(s1.NegativeChordAngle) }
-func (m maxDistance) less(other distance) bool  { return m.chordAngle() > other.chordAngle() }
-func (m maxDistance) sub(other distance) distance {
-	return maxDistance(m.chordAngle() + other.chordAngle())
-}
-func (m maxDistance) chordAngleBound() s1.ChordAngle {
-	return s1.StraightChordAngle - m.chordAngle()
-}
-func (m maxDistance) updateDistance(dist distance) (distance, bool) {
-	if dist.less(m) {
-		m = maxDistance(dist.chordAngle())
-		return m, true
-	}
-	return m, false
-}
-
-func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance {
-	return maxDistance(o)
-}
-
-// MaxDistanceToPointTarget is used for computing the maximum distance to a Point.
-type MaxDistanceToPointTarget struct {
-	point Point
-	dist  distance
-}
-
-// NewMaxDistanceToPointTarget returns a new target for the given Point.
-func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget {
-	m := maxDistance(0)
-	return &MaxDistanceToPointTarget{point: point, dist: &m}
-}
-
-func (m *MaxDistanceToPointTarget) capBound() Cap {
-	return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, (s1.ChordAngle(0)))
-}
-
-func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point)))
-}
-
-func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(maxDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(cell.MaxDistance(m.point)))
-}
-
-func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// For furthest points, we visit the polygons whose interior contains
-	// the antipode of the target point. These are the polygons whose
-	// distance to the target is maxDistance.zero()
-	q := NewContainsPointQuery(index, VertexModelSemiOpen)
-	return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool {
-		return v(shape, m.point)
-	})
-}
-
-func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int           { return 300 }
-func (m *MaxDistanceToPointTarget) distance() distance                    { return m.dist }
-
-// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge.
-type MaxDistanceToEdgeTarget struct {
-	e    Edge
-	dist distance
-}
-
-// NewMaxDistanceToEdgeTarget returns a new target for the given Edge.
-func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget {
-	m := maxDistance(0)
-	return &MaxDistanceToEdgeTarget{e: e, dist: m}
-}
-
-// capBound returns a Cap that bounds the antipode of the target. (This
-// is the set of points whose maxDistance to the target is maxDistance.zero)
-func (m *MaxDistanceToEdgeTarget) capBound() Cap {
-	// The following computes a radius equal to half the edge length in an
-	// efficient and numerically stable way.
-	d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
-	r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
-	return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
-}
-
-func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(maxDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(maxDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1)))
-}
-
-func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// We only need to test one edge point. That is because the method *must*
-	// visit a polygon if it fully contains the target, and *is allowed* to
-	// visit a polygon if it intersects the target. If the tested vertex is not
-	// contained, we know the full edge is not contained; if the tested vertex is
-	// contained, then the edge either is fully contained (must be visited) or it
-	// intersects (is allowed to be visited). We visit the center of the edge so
-	// that edge AB gives identical results to BA.
-	target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
-	return target.visitContainingShapes(index, v)
-}
-
-func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int           { return 110 }
-func (m *MaxDistanceToEdgeTarget) distance() distance                    { return m.dist }
-
-// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell.
-type MaxDistanceToCellTarget struct {
-	cell Cell
-	dist distance
-}
-
-// NewMaxDistanceToCellTarget returns a new target for the given Cell.
-func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget {
-	m := maxDistance(0)
-	return &MaxDistanceToCellTarget{cell: cell, dist: m}
-}
-
-func (m *MaxDistanceToCellTarget) capBound() Cap {
-	c := m.cell.CapBound()
-	return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius())
-}
-
-func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(m.cell.MaxDistance(p)))
-}
-
-func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1)))
-}
-
-func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell)))
-}
-
-func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// We only need to check one point here - cell center is simplest.
-	// See comment at MaxDistanceToEdgeTarget's visitContainingShapes.
-	target := NewMaxDistanceToPointTarget(m.cell.Center())
-	return target.visitContainingShapes(index, v)
-}
-
-func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int           { return 100 }
-func (m *MaxDistanceToCellTarget) distance() distance                    { return m.dist }
-
-// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex.
-type MaxDistanceToShapeIndexTarget struct {
-	index *ShapeIndex
-	query *EdgeQuery
-	dist  distance
-}
-
-// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
-func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget {
-	m := maxDistance(0)
-	return &MaxDistanceToShapeIndexTarget{
-		index: index,
-		dist:  m,
-		query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()),
-	}
-}
-
-// capBound returns a Cap that bounds the antipode of the target. This
-// is the set of points whose maxDistance to the target is maxDistance.zero()
-func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
-	// TODO(roberts): Depends on ShapeIndexRegion
-	// c := makeShapeIndexRegion(m.index).CapBound()
-	// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
-	panic("not implemented yet")
-}
-
-func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMaxDistanceToPointTarget(p)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMaxDistanceToEdgeTarget(edge)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-func (m *MaxDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMaxDistanceToCellTarget(cell)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-// visitContainingShapes returns the polygons containing the antipodal
-// reflection of *any* connected component for target types consisting of
-// multiple connected components. It is sufficient to test containment of
-// one vertex per connected component, since this allows us to also return
-// any polygon whose boundary has distance.zero() to the target.
-func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// It is sufficient to find the set of chain starts in the target index
-	// (i.e., one vertex per connected component of edges) that are contained by
-	// the query index, except for one special case to handle full polygons.
-	//
-	// TODO(roberts): Do this by merge-joining the two ShapeIndexes and share
-	// the code with BooleanOperation.
-	for _, shape := range m.index.shapes {
-		numChains := shape.NumChains()
-		// Shapes that don't have any edges require a special case (below).
-		testedPoint := false
-		for c := 0; c < numChains; c++ {
-			chain := shape.Chain(c)
-			if chain.Length == 0 {
-				continue
-			}
-			testedPoint = true
-			target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
-			if !target.visitContainingShapes(index, v) {
-				return false
-			}
-		}
-		if !testedPoint {
-			// Special case to handle full polygons.
-			ref := shape.ReferencePoint()
-			if !ref.Contained {
-				continue
-			}
-			target := NewMaxDistanceToPointTarget(ref.Point)
-			if !target.visitContainingShapes(index, v) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
-	m.query.opts.maxError = maxErr
-	return true
-}
-func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 70 }
-func (m *MaxDistanceToShapeIndexTarget) distance() distance          { return m.dist }
-func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
-	m.query.opts.includeInteriors = b
-}
-func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
-
-// TODO(roberts): Remaining methods
-//
-// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
-// CellUnionTarget

+ 0 - 164
vendor/github.com/golang/geo/s2/metric.go

@@ -1,164 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// This file implements functions for various S2 measurements.
-
-import "math"
-
-// A Metric is a measure for cells. It is used to describe the shape and size
-// of cells. They are useful for deciding which cell level to use in order to
-// satisfy a given condition (e.g. that cell vertices must be no further than
-// "x" apart). You can use the Value(level) method to compute the corresponding
-// length or area on the unit sphere for cells at a given level. The minimum
-// and maximum bounds are valid for cells at all levels, but they may be
-// somewhat conservative for very large cells (e.g. face cells).
-type Metric struct {
-	// Dim is either 1 or 2, for a 1D or 2D metric respectively.
-	Dim int
-	// Deriv is the scaling factor for the metric.
-	Deriv float64
-}
-
-// Defined metrics.
-// Of the projection methods defined in C++, Go only supports the quadratic projection.
-
-// Each cell is bounded by four planes passing through its four edges and
-// the center of the sphere. These metrics relate to the angle between each
-// pair of opposite bounding planes, or equivalently, between the planes
-// corresponding to two different s-values or two different t-values.
-var (
-	MinAngleSpanMetric = Metric{1, 4.0 / 3}
-	AvgAngleSpanMetric = Metric{1, math.Pi / 2}
-	MaxAngleSpanMetric = Metric{1, 1.704897179199218452}
-)
-
-// The width of geometric figure is defined as the distance between two
-// parallel bounding lines in a given direction. For cells, the minimum
-// width is always attained between two opposite edges, and the maximum
-// width is attained between two opposite vertices. However, for our
-// purposes we redefine the width of a cell as the perpendicular distance
-// between a pair of opposite edges. A cell therefore has two widths, one
-// in each direction. The minimum width according to this definition agrees
-// with the classic geometric one, but the maximum width is different. (The
-// maximum geometric width corresponds to MaxDiag defined below.)
-//
-// The average width in both directions for all cells at level k is approximately
-// AvgWidthMetric.Value(k).
-//
-// The width is useful for bounding the minimum or maximum distance from a
-// point on one edge of a cell to the closest point on the opposite edge.
-// For example, this is useful when growing regions by a fixed distance.
-var (
-	MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3}
-	AvgWidthMetric = Metric{1, 1.434523672886099389}
-	MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv}
-)
-
-// The edge length metrics can be used to bound the minimum, maximum,
-// or average distance from the center of one cell to the center of one of
-// its edge neighbors. In particular, it can be used to bound the distance
-// between adjacent cell centers along the space-filling Hilbert curve for
-// cells at any given level.
-var (
-	MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3}
-	AvgEdgeMetric = Metric{1, 1.459213746386106062}
-	MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv}
-
-	// MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level,
-	// where the edge aspect ratio of a cell is defined as the ratio of its longest
-	// edge length to its shortest edge length.
-	MaxEdgeAspect = 1.442615274452682920
-
-	MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9}
-	AvgAreaMetric = Metric{2, 4 * math.Pi / 6}
-	MaxAreaMetric = Metric{2, 2.635799256963161491}
-)
-
-// The maximum diagonal is also the maximum diameter of any cell,
-// and also the maximum geometric width (see the comment for widths). For
-// example, the distance from an arbitrary point to the closest cell center
-// at a given level is at most half the maximum diagonal length.
-var (
-	MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9}
-	AvgDiagMetric = Metric{1, 2.060422738998471683}
-	MaxDiagMetric = Metric{1, 2.438654594434021032}
-
-	// MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any
-	// level, where the diagonal aspect ratio of a cell is defined as the ratio
-	// of its longest diagonal length to its shortest diagonal length.
-	MaxDiagAspect = math.Sqrt(3)
-)
-
-// Value returns the value of the metric at the given level.
-func (m Metric) Value(level int) float64 {
-	return math.Ldexp(m.Deriv, -m.Dim*level)
-}
-
-// MinLevel returns the minimum level such that the metric is at most
-// the given value, or maxLevel (30) if there is no such level.
-//
-// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal
-// lengths are 0.1 or smaller. The returned value is always a valid level.
-//
-// In C++, this is called GetLevelForMaxValue.
-func (m Metric) MinLevel(val float64) int {
-	if val < 0 {
-		return maxLevel
-	}
-
-	level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1))
-	if level > maxLevel {
-		level = maxLevel
-	}
-	if level < 0 {
-		level = 0
-	}
-	return level
-}
-
-// MaxLevel returns the maximum level such that the metric is at least
-// the given value, or zero if there is no such level.
-//
-// For example, MaxLevel(0.1) returns the maximum level such that all cells have a
-// minimum width of 0.1 or larger. The returned value is always a valid level.
-//
-// In C++, this is called GetLevelForMinValue.
-func (m Metric) MaxLevel(val float64) int {
-	if val <= 0 {
-		return maxLevel
-	}
-
-	level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1)
-	if level > maxLevel {
-		level = maxLevel
-	}
-	if level < 0 {
-		level = 0
-	}
-	return level
-}
-
-// ClosestLevel returns the level at which the metric has approximately the given
-// value. The return value is always a valid level. For example,
-// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge
-// length is approximately 0.1.
-func (m Metric) ClosestLevel(val float64) int {
-	x := math.Sqrt2
-	if m.Dim == 2 {
-		x = 2
-	}
-	return m.MinLevel(x * val)
-}

+ 0 - 362
vendor/github.com/golang/geo/s2/min_distance_targets.go

@@ -1,362 +0,0 @@
-// Copyright 2019 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// minDistance implements distance interface to find closest distance types.
-type minDistance s1.ChordAngle
-
-func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
-func (m minDistance) zero() distance            { return minDistance(0) }
-func (m minDistance) negative() distance        { return minDistance(s1.NegativeChordAngle) }
-func (m minDistance) infinity() distance        { return minDistance(s1.InfChordAngle()) }
-func (m minDistance) less(other distance) bool  { return m.chordAngle() < other.chordAngle() }
-func (m minDistance) sub(other distance) distance {
-	return minDistance(m.chordAngle() - other.chordAngle())
-}
-func (m minDistance) chordAngleBound() s1.ChordAngle {
-	return m.chordAngle().Expanded(m.chordAngle().MaxAngleError())
-}
-
-// updateDistance updates its own value if the other value is less() than it is,
-// and reports if it updated.
-func (m minDistance) updateDistance(dist distance) (distance, bool) {
-	if dist.less(m) {
-		m = minDistance(dist.chordAngle())
-		return m, true
-	}
-	return m, false
-}
-
-func (m minDistance) fromChordAngle(o s1.ChordAngle) distance {
-	return minDistance(o)
-}
-
-// MinDistanceToPointTarget is a type for computing the minimum distance to a Point.
-type MinDistanceToPointTarget struct {
-	point Point
-	dist  distance
-}
-
-// NewMinDistanceToPointTarget returns a new target for the given Point.
-func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget {
-	m := minDistance(0)
-	return &MinDistanceToPointTarget{point: point, dist: &m}
-}
-
-func (m *MinDistanceToPointTarget) capBound() Cap {
-	return CapFromCenterChordAngle(m.point, s1.ChordAngle(0))
-}
-
-func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	var ok bool
-	dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point)))
-	return dist, ok
-}
-
-func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(minDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	var ok bool
-	dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point)))
-	return dist, ok
-}
-
-func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// For furthest points, we visit the polygons whose interior contains
-	// the antipode of the target point. These are the polygons whose
-	// distance to the target is maxDistance.zero()
-	q := NewContainsPointQuery(index, VertexModelSemiOpen)
-	return q.visitContainingShapes(m.point, func(shape Shape) bool {
-		return v(shape, m.point)
-	})
-}
-
-func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int           { return 120 }
-func (m *MinDistanceToPointTarget) distance() distance                    { return m.dist }
-
-// ----------------------------------------------------------
-
-// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge.
-type MinDistanceToEdgeTarget struct {
-	e    Edge
-	dist distance
-}
-
-// NewMinDistanceToEdgeTarget returns a new target for the given Edge.
-func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget {
-	m := minDistance(0)
-	return &MinDistanceToEdgeTarget{e: e, dist: m}
-}
-
-// capBound returns a Cap that bounds the antipode of the target. (This
-// is the set of points whose maxDistance to the target is maxDistance.zero)
-func (m *MinDistanceToEdgeTarget) capBound() Cap {
-	// The following computes a radius equal to half the edge length in an
-	// efficient and numerically stable way.
-	d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
-	r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
-	return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
-}
-
-func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(minDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
-		dist, _ = dist.updateDistance(minDistance(d))
-		return dist, true
-	}
-	return dist, false
-}
-
-func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1)))
-}
-
-func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// We test the center of the edge in order to ensure that edge targets AB
-	// and BA yield identical results (which is not guaranteed by the API but
-	// users might expect).  Other options would be to test both endpoints, or
-	// return different results for AB and BA in some cases.
-	target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
-	return target.visitContainingShapes(index, v)
-}
-
-func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int           { return 60 }
-func (m *MinDistanceToEdgeTarget) distance() distance                    { return m.dist }
-
-// ----------------------------------------------------------
-
-// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell.
-type MinDistanceToCellTarget struct {
-	cell Cell
-	dist distance
-}
-
-// NewMinDistanceToCellTarget returns a new target for the given Cell.
-func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget {
-	m := minDistance(0)
-	return &MinDistanceToCellTarget{cell: cell, dist: m}
-}
-
-func (m *MinDistanceToCellTarget) capBound() Cap {
-	return m.cell.CapBound()
-}
-
-func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	return dist.updateDistance(minDistance(m.cell.Distance(p)))
-}
-
-func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1)))
-}
-
-func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell)))
-}
-
-func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// The simplest approach is simply to return the polygons that contain the
-	// cell center.  Alternatively, if the index cell is smaller than the target
-	// cell then we could return all polygons that are present in the
-	// shapeIndexCell, but since the index is built conservatively this may
-	// include some polygons that don't quite intersect the cell.  So we would
-	// either need to recheck for intersection more accurately, or weaken the
-	// VisitContainingShapes contract so that it only guarantees approximate
-	// intersection, neither of which seems like a good tradeoff.
-	target := NewMinDistanceToPointTarget(m.cell.Center())
-	return target.visitContainingShapes(index, v)
-}
-func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
-func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int           { return 30 }
-func (m *MinDistanceToCellTarget) distance() distance                    { return m.dist }
-
-// ----------------------------------------------------------
-
-/*
-// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion.
-type MinDistanceToCellUnionTarget struct {
-	cu    CellUnion
-	query *ClosestCellQuery
-	dist  distance
-}
-
-// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion.
-func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget {
-	m := minDistance(0)
-	return &MinDistanceToCellUnionTarget{cu: cu, dist: m}
-}
-
-func (m *MinDistanceToCellUnionTarget) capBound() Cap {
-	return m.cu.CapBound()
-}
-
-func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	m.query.opts.DistanceLimit = dist.chordAngle()
-	target := NewMinDistanceToPointTarget(p)
-	r := m.query.findEdge(target)
-	if r.ShapeID < 0 {
-		return dist, false
-	}
-	return minDistance(r.Distance), true
-}
-
-func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// We test the center of the edge in order to ensure that edge targets AB
-	// and BA yield identical results (which is not guaranteed by the API but
-	// users might expect).  Other options would be to test both endpoints, or
-	// return different results for AB and BA in some cases.
-	target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
-	return target.visitContainingShapes(index, v)
-}
-func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool {
-	m.query.opts.MaxError = maxErr
-	return true
-}
-func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int           { return 30 }
-func (m *MinDistanceToCellUnionTarget) distance() distance                    { return m.dist }
-*/
-
-// ----------------------------------------------------------
-
-// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex.
-type MinDistanceToShapeIndexTarget struct {
-	index *ShapeIndex
-	query *EdgeQuery
-	dist  distance
-}
-
-// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
-func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget {
-	m := minDistance(0)
-	return &MinDistanceToShapeIndexTarget{
-		index: index,
-		dist:  m,
-		query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()),
-	}
-}
-
-func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
-	// TODO(roberts): Depends on ShapeIndexRegion existing.
-	// c := makeS2ShapeIndexRegion(m.index).CapBound()
-	// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
-	panic("not implemented yet")
-}
-
-func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMinDistanceToPointTarget(p)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMinDistanceToEdgeTarget(edge)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
-	m.query.opts.distanceLimit = dist.chordAngle()
-	target := NewMinDistanceToCellTarget(cell)
-	r := m.query.findEdge(target, m.query.opts)
-	if r.shapeID < 0 {
-		return dist, false
-	}
-	return r.distance, true
-}
-
-// For target types consisting of multiple connected components (such as this one),
-// this method should return the polygons containing the antipodal reflection of
-// *any* connected component. (It is sufficient to test containment of one vertex per
-// connected component, since this allows us to also return any polygon whose
-// boundary has distance.zero() to the target.)
-func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
-	// It is sufficient to find the set of chain starts in the target index
-	// (i.e., one vertex per connected component of edges) that are contained by
-	// the query index, except for one special case to handle full polygons.
-	//
-	// TODO(roberts): Do this by merge-joining the two ShapeIndexes.
-	for _, shape := range m.index.shapes {
-		numChains := shape.NumChains()
-		// Shapes that don't have any edges require a special case (below).
-		testedPoint := false
-		for c := 0; c < numChains; c++ {
-			chain := shape.Chain(c)
-			if chain.Length == 0 {
-				continue
-			}
-			testedPoint = true
-			target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
-			if !target.visitContainingShapes(index, v) {
-				return false
-			}
-		}
-		if !testedPoint {
-			// Special case to handle full polygons.
-			ref := shape.ReferencePoint()
-			if !ref.Contained {
-				continue
-			}
-			target := NewMinDistanceToPointTarget(ref.Point)
-			if !target.visitContainingShapes(index, v) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
-	m.query.opts.maxError = maxErr
-	return true
-}
-func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 }
-func (m *MinDistanceToShapeIndexTarget) distance() distance          { return m.dist }
-func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
-	m.query.opts.includeInteriors = b
-}
-func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
-
-// TODO(roberts): Remaining methods
-//
-// func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
-// CellUnionTarget

+ 0 - 88
vendor/github.com/golang/geo/s2/nthderivative.go

@@ -1,88 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// nthDerivativeCoder provides Nth Derivative Coding.
-//   (In signal processing disciplines, this is known as N-th Delta Coding.)
-//
-// Good for varint coding integer sequences with polynomial trends.
-//
-// Instead of coding a sequence of values directly, code its nth-order discrete
-// derivative.  Overflow in integer addition and subtraction makes this a
-// lossless transform.
-//
-//                                       constant     linear      quadratic
-//                                        trend       trend         trend
-//                                      /        \  /        \  /           \_
-// input                               |0  0  0  0  1  2  3  4  9  16  25  36
-// 0th derivative(identity)            |0  0  0  0  1  2  3  4  9  16  25  36
-// 1st derivative(delta coding)        |   0  0  0  1  1  1  1  5   7   9  11
-// 2nd derivative(linear prediction)   |      0  0  1  0  0  0  4   2   2   2
-//                                      -------------------------------------
-//                                      0  1  2  3  4  5  6  7  8   9  10  11
-//                                                  n in sequence
-//
-// Higher-order codings can break even or be detrimental on other sequences.
-//
-//                                           random            oscillating
-//                                      /               \  /                  \_
-// input                               |5  9  6  1   8  8  2 -2   4  -4   6  -6
-// 0th derivative(identity)            |5  9  6  1   8  8  2 -2   4  -4   6  -6
-// 1st derivative(delta coding)        |   4 -3 -5   7  0 -6 -4   6  -8  10 -12
-// 2nd derivative(linear prediction)   |     -7 -2  12 -7 -6  2  10 -14  18 -22
-//                                      ---------------------------------------
-//                                      0  1  2  3  4   5  6  7   8   9  10  11
-//                                                  n in sequence
-//
-// Note that the nth derivative isn't available until sequence item n.  Earlier
-// values are coded at lower order.  For the above table, read 5 4 -7 -2 12 ...
-type nthDerivativeCoder struct {
-	n, m   int
-	memory [10]int32
-}
-
-// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative).
-// n must be within [0,10].
-func newNthDerivativeCoder(n int) *nthDerivativeCoder {
-	c := &nthDerivativeCoder{n: n}
-	if n < 0 || n > len(c.memory) {
-		panic("unsupported n. Must be within [0,10].")
-	}
-	return c
-}
-
-func (c *nthDerivativeCoder) encode(k int32) int32 {
-	for i := 0; i < c.m; i++ {
-		delta := k - c.memory[i]
-		c.memory[i] = k
-		k = delta
-	}
-	if c.m < c.n {
-		c.memory[c.m] = k
-		c.m++
-	}
-	return k
-}
-
-func (c *nthDerivativeCoder) decode(k int32) int32 {
-	if c.m < c.n {
-		c.m++
-	}
-	for i := c.m - 1; i >= 0; i-- {
-		c.memory[i] += k
-		k = c.memory[i]
-	}
-	return k
-}

+ 0 - 252
vendor/github.com/golang/geo/s2/paddedcell.go

@@ -1,252 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r2"
-)
-
-// PaddedCell represents a Cell whose (u,v)-range has been expanded on
-// all sides by a given amount of "padding". Unlike Cell, its methods and
-// representation are optimized for clipping edges against Cell boundaries
-// to determine which cells are intersected by a given set of edges.
-type PaddedCell struct {
-	id          CellID
-	padding     float64
-	bound       r2.Rect
-	middle      r2.Rect // A rect in (u, v)-space that belongs to all four children.
-	iLo, jLo    int     // Minimum (i,j)-coordinates of this cell before padding
-	orientation int     // Hilbert curve orientation of this cell.
-	level       int
-}
-
-// PaddedCellFromCellID constructs a padded cell with the given padding.
-func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell {
-	p := &PaddedCell{
-		id:      id,
-		padding: padding,
-		middle:  r2.EmptyRect(),
-	}
-
-	// Fast path for constructing a top-level face (the most common case).
-	if id.isFace() {
-		limit := padding + 1
-		p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}}
-		p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}}
-		p.orientation = id.Face() & 1
-		return p
-	}
-
-	_, p.iLo, p.jLo, p.orientation = id.faceIJOrientation()
-	p.level = id.Level()
-	p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding)
-	ijSize := sizeIJ(p.level)
-	p.iLo &= -ijSize
-	p.jLo &= -ijSize
-
-	return p
-}
-
-// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index.
-// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j
-// indices correspond to increasing u- and v-values respectively.
-func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell {
-	// Compute the position and orientation of the child incrementally from the
-	// orientation of the parent.
-	pos := ijToPos[parent.orientation][2*i+j]
-
-	p := &PaddedCell{
-		id:          parent.id.Children()[pos],
-		padding:     parent.padding,
-		bound:       parent.bound,
-		orientation: parent.orientation ^ posToOrientation[pos],
-		level:       parent.level + 1,
-		middle:      r2.EmptyRect(),
-	}
-
-	ijSize := sizeIJ(p.level)
-	p.iLo = parent.iLo + i*ijSize
-	p.jLo = parent.jLo + j*ijSize
-
-	// For each child, one corner of the bound is taken directly from the parent
-	// while the diagonally opposite corner is taken from middle().
-	middle := parent.Middle()
-	if i == 1 {
-		p.bound.X.Lo = middle.X.Lo
-	} else {
-		p.bound.X.Hi = middle.X.Hi
-	}
-	if j == 1 {
-		p.bound.Y.Lo = middle.Y.Lo
-	} else {
-		p.bound.Y.Hi = middle.Y.Hi
-	}
-
-	return p
-}
-
-// CellID returns the CellID this padded cell represents.
-func (p PaddedCell) CellID() CellID {
-	return p.id
-}
-
-// Padding returns the amount of padding on this cell.
-func (p PaddedCell) Padding() float64 {
-	return p.padding
-}
-
-// Level returns the level this cell is at.
-func (p PaddedCell) Level() int {
-	return p.level
-}
-
-// Center returns the center of this cell.
-func (p PaddedCell) Center() Point {
-	ijSize := sizeIJ(p.level)
-	si := uint32(2*p.iLo + ijSize)
-	ti := uint32(2*p.jLo + ijSize)
-	return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
-}
-
-// Middle returns the rectangle in the middle of this cell that belongs to
-// all four of its children in (u,v)-space.
-func (p *PaddedCell) Middle() r2.Rect {
-	// We compute this field lazily because it is not needed the majority of the
-	// time (i.e., for cells where the recursion terminates).
-	if p.middle.IsEmpty() {
-		ijSize := sizeIJ(p.level)
-		u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
-		v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
-		p.middle = r2.Rect{
-			r1.Interval{u - p.padding, u + p.padding},
-			r1.Interval{v - p.padding, v + p.padding},
-		}
-	}
-	return p.middle
-}
-
-// Bound returns the bounds for this cell in (u,v)-space including padding.
-func (p PaddedCell) Bound() r2.Rect {
-	return p.bound
-}
-
-// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal
-// position. The traversal position corresponds to the order in which child
-// cells are visited by the Hilbert curve.
-func (p PaddedCell) ChildIJ(pos int) (i, j int) {
-	ij := posToIJ[p.orientation][pos]
-	return ij >> 1, ij & 1
-}
-
-// EntryVertex return the vertex where the space-filling curve enters this cell.
-func (p PaddedCell) EntryVertex() Point {
-	// The curve enters at the (0,0) vertex unless the axis directions are
-	// reversed, in which case it enters at the (1,1) vertex.
-	i := p.iLo
-	j := p.jLo
-	if p.orientation&invertMask != 0 {
-		ijSize := sizeIJ(p.level)
-		i += ijSize
-		j += ijSize
-	}
-	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
-}
-
-// ExitVertex returns the vertex where the space-filling curve exits this cell.
-func (p PaddedCell) ExitVertex() Point {
-	// The curve exits at the (1,0) vertex unless the axes are swapped or
-	// inverted but not both, in which case it exits at the (0,1) vertex.
-	i := p.iLo
-	j := p.jLo
-	ijSize := sizeIJ(p.level)
-	if p.orientation == 0 || p.orientation == swapMask+invertMask {
-		i += ijSize
-	} else {
-		j += ijSize
-	}
-	return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
-}
-
-// ShrinkToFit returns the smallest CellID that contains all descendants of this
-// padded cell whose bounds intersect the given rect. For algorithms that use
-// recursive subdivision to find the cells that intersect a particular object, this
-// method can be used to skip all of the initial subdivision steps where only
-// one child needs to be expanded.
-//
-// Note that this method is not the same as returning the smallest cell that contains
-// the intersection of this cell with rect. Because of the padding, even if one child
-// completely contains rect it is still possible that a neighboring child may also
-// intersect the given rect.
-//
-// The provided Rect must intersect the bounds of this cell.
-func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
-	// Quick rejection test: if rect contains the center of this cell along
-	// either axis, then no further shrinking is possible.
-	if p.level == 0 {
-		// Fast path (most calls to this function start with a face cell).
-		if rect.X.Contains(0) || rect.Y.Contains(0) {
-			return p.id
-		}
-	}
-
-	ijSize := sizeIJ(p.level)
-	if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) ||
-		rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) {
-		return p.id
-	}
-
-	// Otherwise we expand rect by the given padding on all sides and find
-	// the range of coordinates that it spans along the i- and j-axes. We then
-	// compute the highest bit position at which the min and max coordinates
-	// differ. This corresponds to the first cell level at which at least two
-	// children intersect rect.
-
-	// Increase the padding to compensate for the error in uvToST.
-	// (The constant below is a provable upper bound on the additional error.)
-	padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon)
-	iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded
-	var iXor, jXor int         // XOR of the min and max i- or j-coordinates
-
-	if iMin < stToIJ(uvToST(padded.X.Lo)) {
-		iMin = stToIJ(uvToST(padded.X.Lo))
-	}
-	if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b {
-		iXor = iMin ^ a
-	} else {
-		iXor = iMin ^ b
-	}
-
-	if jMin < stToIJ(uvToST(padded.Y.Lo)) {
-		jMin = stToIJ(uvToST(padded.Y.Lo))
-	}
-	if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b {
-		jXor = jMin ^ a
-	} else {
-		jXor = jMin ^ b
-	}
-
-	// Compute the highest bit position where the two i- or j-endpoints differ,
-	// and then choose the cell level that includes both of these endpoints. So
-	// if both pairs of endpoints are equal we choose maxLevel; if they differ
-	// only at bit 0, we choose (maxLevel - 1), and so on.
-	levelMSB := uint64(((iXor | jXor) << 1) + 1)
-	level := maxLevel - findMSBSetNonZero64(levelMSB)
-	if level <= p.level {
-		return p.id
-	}
-
-	return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level)
-}

+ 0 - 258
vendor/github.com/golang/geo/s2/point.go

@@ -1,258 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-	"sort"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// Point represents a point on the unit sphere as a normalized 3D vector.
-// Fields should be treated as read-only. Use one of the factory methods for creation.
-type Point struct {
-	r3.Vector
-}
-
-// sortPoints sorts the slice of Points in place.
-func sortPoints(e []Point) {
-	sort.Sort(points(e))
-}
-
-// points implements the Sort interface for slices of Point.
-type points []Point
-
-func (p points) Len() int           { return len(p) }
-func (p points) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 }
-
-// PointFromCoords creates a new normalized point from coordinates.
-//
-// This always returns a valid point. If the given coordinates can not be normalized
-// the origin point will be returned.
-//
-// This behavior is different from the C++ construction of a S2Point from coordinates
-// (i.e. S2Point(x, y, z)) in that in C++ they do not Normalize.
-func PointFromCoords(x, y, z float64) Point {
-	if x == 0 && y == 0 && z == 0 {
-		return OriginPoint()
-	}
-	return Point{r3.Vector{x, y, z}.Normalize()}
-}
-
-// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed
-// reference point. In particular, this is the "point at infinity" used for
-// point-in-polygon testing (by counting the number of edge crossings).
-//
-// It should *not* be a point that is commonly used in edge tests in order
-// to avoid triggering code to handle degenerate cases (this rules out the
-// north and south poles). It should also not be on the boundary of any
-// low-level S2Cell for the same reason.
-func OriginPoint() Point {
-	return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
-}
-
-// PointCross returns a Point that is orthogonal to both p and op. This is similar to
-// p.Cross(op) (the true cross product) except that it does a better job of
-// ensuring orthogonality when the Point is nearly parallel to op, it returns
-// a non-zero result even when p == op or p == -op and the result is a Point.
-//
-// It satisfies the following properties (f == PointCross):
-//
-//   (1) f(p, op) != 0 for all p, op
-//   (2) f(op,p) == -f(p,op) unless p == op or p == -op
-//   (3) f(-p,op) == -f(p,op) unless p == op or p == -op
-//   (4) f(p,-op) == -f(p,op) unless p == op or p == -op
-func (p Point) PointCross(op Point) Point {
-	// NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
-	// but PointCross more accurately describes how this method is used.
-	x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
-
-	// Compare exactly to the 0 vector.
-	if x == (r3.Vector{}) {
-		// The only result that makes sense mathematically is to return zero, but
-		// we find it more convenient to return an arbitrary orthogonal vector.
-		return Point{p.Ortho()}
-	}
-
-	return Point{x}
-}
-
-// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
-// order while sweeping CCW around the point O.
-//
-// You can think of this as testing whether A <= B <= C with respect to the
-// CCW ordering around O that starts at A, or equivalently, whether B is
-// contained in the range of angles (inclusive) that starts at A and extends
-// CCW to C. Properties:
-//
-//  (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
-//  (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
-//  (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
-//  (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
-//  (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
-func OrderedCCW(a, b, c, o Point) bool {
-	sum := 0
-	if RobustSign(b, o, a) != Clockwise {
-		sum++
-	}
-	if RobustSign(c, o, b) != Clockwise {
-		sum++
-	}
-	if RobustSign(a, o, c) == CounterClockwise {
-		sum++
-	}
-	return sum >= 2
-}
-
-// Distance returns the angle between two points.
-func (p Point) Distance(b Point) s1.Angle {
-	return p.Vector.Angle(b.Vector)
-}
-
-// ApproxEqual reports whether the two points are similar enough to be equal.
-func (p Point) ApproxEqual(other Point) bool {
-	return p.approxEqual(other, s1.Angle(epsilon))
-}
-
-// approxEqual reports whether the two points are within the given epsilon.
-func (p Point) approxEqual(other Point, eps s1.Angle) bool {
-	return p.Vector.Angle(other.Vector) <= eps
-}
-
-// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
-// between the two given points. The points must be unit length.
-func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle {
-	return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2()))
-}
-
-// regularPoints generates a slice of points shaped as a regular polygon with
-// the numVertices vertices, all located on a circle of the specified angular radius
-// around the center. The radius is the actual distance from center to each vertex.
-func regularPoints(center Point, radius s1.Angle, numVertices int) []Point {
-	return regularPointsForFrame(getFrame(center), radius, numVertices)
-}
-
-// regularPointsForFrame generates a slice of points shaped as a regular polygon
-// with numVertices vertices, all on a circle of the specified angular radius around
-// the center. The radius is the actual distance from the center to each vertex.
-func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point {
-	// We construct the loop in the given frame coordinates, with the center at
-	// (0, 0, 1). For a loop of radius r, the loop vertices have the form
-	// (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the
-	// sphere (arc length) from each vertex to the center is acos(cos(r)) = r.
-	z := math.Cos(radius.Radians())
-	r := math.Sin(radius.Radians())
-	radianStep := 2 * math.Pi / float64(numVertices)
-	var vertices []Point
-
-	for i := 0; i < numVertices; i++ {
-		angle := float64(i) * radianStep
-		p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}}
-		vertices = append(vertices, Point{fromFrame(frame, p).Normalize()})
-	}
-
-	return vertices
-}
-
-// CapBound returns a bounding cap for this point.
-func (p Point) CapBound() Cap {
-	return CapFromPoint(p)
-}
-
-// RectBound returns a bounding latitude-longitude rectangle from this point.
-func (p Point) RectBound() Rect {
-	return RectFromLatLng(LatLngFromPoint(p))
-}
-
-// ContainsCell returns false as Points do not contain any other S2 types.
-func (p Point) ContainsCell(c Cell) bool { return false }
-
-// IntersectsCell reports whether this Point intersects the given cell.
-func (p Point) IntersectsCell(c Cell) bool {
-	return c.ContainsPoint(p)
-}
-
-// ContainsPoint reports if this Point contains the other Point.
-// (This method is named to satisfy the Region interface.)
-func (p Point) ContainsPoint(other Point) bool {
-	return p.Contains(other)
-}
-
-// CellUnionBound computes a covering of the Point.
-func (p Point) CellUnionBound() []CellID {
-	return p.CapBound().CellUnionBound()
-}
-
-// Contains reports if this Point contains the other Point.
-// (This method matches all other s2 types where the reflexive Contains
-// method does not contain the type's name.)
-func (p Point) Contains(other Point) bool { return p == other }
-
-// Encode encodes the Point.
-func (p Point) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	p.encode(e)
-	return e.err
-}
-
-func (p Point) encode(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeFloat64(p.X)
-	e.writeFloat64(p.Y)
-	e.writeFloat64(p.Z)
-}
-
-// Decode decodes the Point.
-func (p *Point) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	p.decode(d)
-	return d.err
-}
-
-func (p *Point) decode(d *decoder) {
-	version := d.readInt8()
-	if d.err != nil {
-		return
-	}
-	if version != encodingVersion {
-		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
-		return
-	}
-	p.X = d.readFloat64()
-	p.Y = d.readFloat64()
-	p.Z = d.readFloat64()
-}
-
-// Rotate the given point about the given axis by the given angle. p and
-// axis must be unit length; angle has no restrictions (e.g., it can be
-// positive, negative, greater than 360 degrees, etc).
-func Rotate(p, axis Point, angle s1.Angle) Point {
-	// Let M be the plane through P that is perpendicular to axis, and let
-	// center be the point where M intersects axis. We construct a
-	// right-handed orthogonal frame (dx, dy, center) such that dx is the
-	// vector from center to P, and dy has the same length as dx. The
-	// result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center).
-	center := axis.Mul(p.Dot(axis.Vector))
-	dx := p.Sub(center)
-	dy := axis.Cross(p.Vector)
-	// Mathematically the result is unit length, but normalization is necessary
-	// to ensure that numerical errors don't accumulate.
-	return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()}
-}

+ 0 - 149
vendor/github.com/golang/geo/s2/point_measures.go

@@ -1,149 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// PointArea returns the area of triangle ABC. This method combines two different
-// algorithms to get accurate results for both large and small triangles.
-// The maximum error is about 5e-15 (about 0.25 square meters on the Earth's
-// surface), the same as GirardArea below, but unlike that method it is
-// also accurate for small triangles. Example: when the true area is 100
-// square meters, PointArea yields an error about 1 trillion times smaller than
-// GirardArea.
-//
-// All points should be unit length, and no two points should be antipodal.
-// The area is always positive.
-func PointArea(a, b, c Point) float64 {
-	// This method is based on l'Huilier's theorem,
-	//
-	//   tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2))
-	//
-	// where E is the spherical excess of the triangle (i.e. its area),
-	//       a, b, c are the side lengths, and
-	//       s is the semiperimeter (a + b + c) / 2.
-	//
-	// The only significant source of error using l'Huilier's method is the
-	// cancellation error of the terms (s-a), (s-b), (s-c). This leads to a
-	// *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares
-	// to a relative error of about 1e-15 / E using Girard's formula, where E is
-	// the true area of the triangle. Girard's formula can be even worse than
-	// this for very small triangles, e.g. a triangle with a true area of 1e-30
-	// might evaluate to 1e-5.
-	//
-	// So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where
-	// dmin = min(s-a, s-b, s-c). This basically includes all triangles
-	// except for extremely long and skinny ones.
-	//
-	// Since we don't know E, we would like a conservative upper bound on
-	// the triangle area in terms of s and dmin. It's possible to show that
-	// E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1).
-	// Using this, it's easy to show that we should always use l'Huilier's
-	// method if dmin >= k2 * s^5, where k2 is about 1e-2. Furthermore,
-	// if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where
-	// k3 is about 0.1. Since the best case error using Girard's formula
-	// is about 1e-15, this means that we shouldn't even consider it unless
-	// s >= 3e-4 or so.
-	sa := float64(b.Angle(c.Vector))
-	sb := float64(c.Angle(a.Vector))
-	sc := float64(a.Angle(b.Vector))
-	s := 0.5 * (sa + sb + sc)
-	if s >= 3e-4 {
-		// Consider whether Girard's formula might be more accurate.
-		dmin := s - math.Max(sa, math.Max(sb, sc))
-		if dmin < 1e-2*s*s*s*s*s {
-			// This triangle is skinny enough to use Girard's formula.
-			area := GirardArea(a, b, c)
-			if dmin < s*0.1*area {
-				return area
-			}
-		}
-	}
-
-	// Use l'Huilier's formula.
-	return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))*
-		math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
-}
-
-// GirardArea returns the area of the triangle computed using Girard's formula.
-// All points should be unit length, and no two points should be antipodal.
-//
-// This method is about twice as fast as PointArea() but has poor relative
-// accuracy for small triangles. The maximum error is about 5e-15 (about
-// 0.25 square meters on the Earth's surface) and the average error is about
-// 1e-15. These bounds apply to triangles of any size, even as the maximum
-// edge length of the triangle approaches 180 degrees. But note that for
-// such triangles, tiny perturbations of the input points can change the
-// true mathematical area dramatically.
-func GirardArea(a, b, c Point) float64 {
-	// This is equivalent to the usual Girard's formula but is slightly more
-	// accurate, faster to compute, and handles a == b == c without a special
-	// case. PointCross is necessary to get good accuracy when two of
-	// the input points are very close together.
-	ab := a.PointCross(b)
-	bc := b.PointCross(c)
-	ac := a.PointCross(c)
-
-	area := float64(ab.Angle(ac.Vector) - ab.Angle(bc.Vector) + bc.Angle(ac.Vector))
-	if area < 0 {
-		area = 0
-	}
-	return area
-}
-
-// SignedArea returns a positive value for counterclockwise triangles and a negative
-// value otherwise (similar to PointArea).
-func SignedArea(a, b, c Point) float64 {
-	return float64(RobustSign(a, b, c)) * PointArea(a, b, c)
-}
-
-// Angle returns the interior angle at the vertex B in the triangle ABC. The
-// return value is always in the range [0, pi]. All points should be
-// normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c.
-//
-// The angle is undefined if A or C is diametrically opposite from B, and
-// becomes numerically unstable as the length of edge AB or BC approaches
-// 180 degrees.
-func Angle(a, b, c Point) s1.Angle {
-	// PointCross is necessary to get good accuracy when two of the input
-	// points are very close together.
-	return a.PointCross(b).Angle(c.PointCross(b).Vector)
-}
-
-// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The
-// return value is positive if ABC is counterclockwise and negative otherwise.
-// If you imagine an ant walking from A to B to C, this is the angle that the
-// ant turns at vertex B (positive = left = CCW, negative = right = CW).
-// This quantity is also known as the "geodesic curvature" at B.
-//
-// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct
-// a,b,c. The result is undefined if (a == b || b == c), but is either
-// -Pi or Pi if (a == c). All points should be normalized.
-func TurnAngle(a, b, c Point) s1.Angle {
-	// We use PointCross to get good accuracy when two points are very
-	// close together, and RobustSign to ensure that the sign is correct for
-	// turns that are close to 180 degrees.
-	angle := a.PointCross(b).Angle(b.PointCross(c).Vector)
-
-	// Don't return RobustSign * angle because it is legal to have (a == c).
-	if RobustSign(a, b, c) == CounterClockwise {
-		return angle
-	}
-	return -angle
-}

+ 0 - 42
vendor/github.com/golang/geo/s2/point_vector.go

@@ -1,42 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// Shape interface enforcement
-var (
-	_ Shape = (*PointVector)(nil)
-)
-
-// PointVector is a Shape representing a set of Points. Each point
-// is represented as a degenerate edge with the same starting and ending
-// vertices.
-//
-// This type is useful for adding a collection of points to an ShapeIndex.
-//
-// Its methods are on *PointVector due to implementation details of ShapeIndex.
-type PointVector []Point
-
-func (p *PointVector) NumEdges() int                     { return len(*p) }
-func (p *PointVector) Edge(i int) Edge                   { return Edge{(*p)[i], (*p)[i]} }
-func (p *PointVector) ReferencePoint() ReferencePoint    { return OriginReferencePoint(false) }
-func (p *PointVector) NumChains() int                    { return len(*p) }
-func (p *PointVector) Chain(i int) Chain                 { return Chain{i, 1} }
-func (p *PointVector) ChainEdge(i, j int) Edge           { return Edge{(*p)[i], (*p)[j]} }
-func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{e, 0} }
-func (p *PointVector) Dimension() int                    { return 0 }
-func (p *PointVector) IsEmpty() bool                     { return defaultShapeIsEmpty(p) }
-func (p *PointVector) IsFull() bool                      { return defaultShapeIsFull(p) }
-func (p *PointVector) typeTag() typeTag                  { return typeTagPointVector }
-func (p *PointVector) privateInterface()                 {}

+ 0 - 319
vendor/github.com/golang/geo/s2/pointcompression.go

@@ -1,319 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/golang/geo/r3"
-)
-
-// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded.
-// On decode, this defends against malicious encodings that try and have us exceed RAM.
-const maxEncodedVertices = 50000000
-
-// xyzFaceSiTi represents the The XYZ and face,si,ti coordinates of a Point
-// and, if this point is equal to the center of a Cell, the level of this cell
-// (-1 otherwise). This is used for Loops and Polygons to store data in a more
-// compressed format.
-type xyzFaceSiTi struct {
-	xyz    Point
-	face   int
-	si, ti uint32
-	level  int
-}
-
-const derivativeEncodingOrder = 2
-
-func appendFace(faces []faceRun, face int) []faceRun {
-	if len(faces) == 0 || faces[len(faces)-1].face != face {
-		return append(faces, faceRun{face, 1})
-	}
-	faces[len(faces)-1].count++
-	return faces
-}
-
-// encodePointsCompressed uses an optimized compressed format to encode the given values.
-func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) {
-	var faces []faceRun
-	for _, v := range vertices {
-		faces = appendFace(faces, v.face)
-	}
-	encodeFaces(e, faces)
-
-	type piQi struct {
-		pi, qi uint32
-	}
-	verticesPiQi := make([]piQi, len(vertices))
-	for i, v := range vertices {
-		verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)}
-	}
-	piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder)
-	for i, v := range verticesPiQi {
-		f := encodePointCompressed
-		if i == 0 {
-			// The first point will be just the (pi, qi) coordinates
-			// of the Point. NthDerivativeCoder will not save anything
-			// in that case, so we encode in fixed format rather than varint
-			// to avoid the varint overhead.
-			f = encodeFirstPointFixedLength
-		}
-		f(e, v.pi, v.qi, level, piCoder, qiCoder)
-	}
-
-	var offCenter []int
-	for i, v := range vertices {
-		if v.level != level {
-			offCenter = append(offCenter, i)
-		}
-	}
-	e.writeUvarint(uint64(len(offCenter)))
-	for _, idx := range offCenter {
-		e.writeUvarint(uint64(idx))
-		e.writeFloat64(vertices[idx].xyz.X)
-		e.writeFloat64(vertices[idx].xyz.Y)
-		e.writeFloat64(vertices[idx].xyz.Z)
-	}
-}
-
-func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
-	// Do not ZigZagEncode the first point, since it cannot be negative.
-	codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi))
-	// Interleave to reduce overhead from two partial bytes to one.
-	interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi))
-
-	// Write as little endian.
-	bytesRequired := (level + 7) / 8 * 2
-	for i := 0; i < bytesRequired; i++ {
-		e.writeUint8(uint8(interleaved))
-		interleaved >>= 8
-	}
-}
-
-// encodePointCompressed encodes points into e.
-// Given a sequence of Points assumed to be the center of level-k cells,
-// compresses it into a stream using the following method:
-// - decompose the points into (face, si, ti) tuples.
-// - run-length encode the faces, combining face number and count into a
-//     varint32. See the faceRun struct.
-// - right shift the (si, ti) to remove the part that's constant for all cells
-//     of level-k. The result is called the (pi, qi) space.
-// - 2nd derivative encode the pi and qi sequences (linear prediction)
-// - zig-zag encode all derivative values but the first, which cannot be
-//     negative
-// - interleave the zig-zag encoded values
-// - encode the first interleaved value in a fixed length encoding
-//     (varint would make this value larger)
-// - encode the remaining interleaved values as varint64s, as the
-//     derivative encoding should make the values small.
-// In addition, provides a lossless method to compress a sequence of points even
-// if some points are not the center of level-k cells. These points are stored
-// exactly, using 3 double precision values, after the above encoded string,
-// together with their index in the sequence (this leads to some redundancy - it
-// is expected that only a small fraction of the points are not cell centers).
-//
-// To encode leaf cells, this requires 8 bytes for the first vertex plus
-// an average of 3.8 bytes for each additional vertex, when computed on
-// Google's geographic repository.
-func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
-	// ZigZagEncode, as varint requires the maximum number of bytes for
-	// negative numbers.
-	zzPi := zigzagEncode(piCoder.encode(int32(pi)))
-	zzQi := zigzagEncode(qiCoder.encode(int32(qi)))
-	// Interleave to reduce overhead from two partial bytes to one.
-	interleaved := interleaveUint32(zzPi, zzQi)
-	e.writeUvarint(interleaved)
-}
-
-type faceRun struct {
-	face, count int
-}
-
-func decodeFaceRun(d *decoder) faceRun {
-	faceAndCount := d.readUvarint()
-	ret := faceRun{
-		face:  int(faceAndCount % numFaces),
-		count: int(faceAndCount / numFaces),
-	}
-	if ret.count <= 0 && d.err == nil {
-		d.err = errors.New("non-positive count for face run")
-	}
-	return ret
-}
-
-func decodeFaces(numVertices int, d *decoder) []faceRun {
-	var frs []faceRun
-	for nparsed := 0; nparsed < numVertices; {
-		fr := decodeFaceRun(d)
-		if d.err != nil {
-			return nil
-		}
-		frs = append(frs, fr)
-		nparsed += fr.count
-	}
-	return frs
-}
-
-// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face.
-func encodeFaceRun(e *encoder, fr faceRun) {
-	// It isn't necessary to encode the number of faces left for the last run,
-	// but since this would only help if there were more than 21 faces, it will
-	// be a small overall savings, much smaller than the bound encoding.
-	coded := numFaces*uint64(fr.count) + uint64(fr.face)
-	e.writeUvarint(coded)
-}
-
-func encodeFaces(e *encoder, frs []faceRun) {
-	for _, fr := range frs {
-		encodeFaceRun(e, fr)
-	}
-}
-
-type facesIterator struct {
-	faces []faceRun
-	// How often have we yet shown the current face?
-	numCurrentFaceShown int
-	curFace             int
-}
-
-func (fi *facesIterator) next() (ok bool) {
-	if len(fi.faces) == 0 {
-		return false
-	}
-	fi.curFace = fi.faces[0].face
-	fi.numCurrentFaceShown++
-
-	// Advance fs if needed.
-	if fi.faces[0].count <= fi.numCurrentFaceShown {
-		fi.faces = fi.faces[1:]
-		fi.numCurrentFaceShown = 0
-	}
-
-	return true
-}
-
-func decodePointsCompressed(d *decoder, level int, target []Point) {
-	faces := decodeFaces(len(target), d)
-
-	piCoder := newNthDerivativeCoder(derivativeEncodingOrder)
-	qiCoder := newNthDerivativeCoder(derivativeEncodingOrder)
-
-	iter := facesIterator{faces: faces}
-	for i := range target {
-		decodeFn := decodePointCompressed
-		if i == 0 {
-			decodeFn = decodeFirstPointFixedLength
-		}
-		pi, qi := decodeFn(d, level, piCoder, qiCoder)
-		if ok := iter.next(); !ok && d.err == nil {
-			d.err = fmt.Errorf("ran out of faces at target %d", i)
-			return
-		}
-		target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)}
-	}
-
-	numOffCenter := int(d.readUvarint())
-	if d.err != nil {
-		return
-	}
-	if numOffCenter > len(target) {
-		d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target))
-		return
-	}
-	for i := 0; i < numOffCenter; i++ {
-		idx := int(d.readUvarint())
-		if d.err != nil {
-			return
-		}
-		if idx >= len(target) {
-			d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target))
-			return
-		}
-		target[idx].X = d.readFloat64()
-		target[idx].Y = d.readFloat64()
-		target[idx].Z = d.readFloat64()
-	}
-}
-
-func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
-	bytesToRead := (level + 7) / 8 * 2
-	var interleaved uint64
-	for i := 0; i < bytesToRead; i++ {
-		rr := d.readUint8()
-		interleaved |= (uint64(rr) << uint(i*8))
-	}
-
-	piCoded, qiCoded := deinterleaveUint32(interleaved)
-
-	return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded)))
-}
-
-func zigzagEncode(x int32) uint32 {
-	return (uint32(x) << 1) ^ uint32(x>>31)
-}
-
-func zigzagDecode(x uint32) int32 {
-	return int32((x >> 1) ^ uint32((int32(x&1)<<31)>>31))
-}
-
-func decodePointCompressed(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
-	interleavedZigZagEncodedDerivPiQi := d.readUvarint()
-	piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi)
-	return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag)))
-}
-
-// We introduce a new coordinate system (pi, qi), which is (si, ti)
-// with the bits that are constant for cells of that level shifted
-// off to the right.
-// si = round(s * 2^31)
-// pi = si >> (31 - level)
-//    = floor(s * 2^level)
-// If the point has been snapped to the level, the bits that are
-// shifted off will be a 1 in the msb, then 0s after that, so the
-// fractional part discarded by the cast is (close to) 0.5.
-
-// stToPiQi returns the value transformed to the PiQi coordinate space.
-func stToPiQi(s float64, level uint) uint32 {
-	return uint32(s * float64(int(1)<<level))
-}
-
-// siTiToPiQi returns the value transformed into the PiQi coordinate spade.
-// encodeFirstPointFixedLength encodes the return value using level bits,
-// so we clamp si to the range [0, 2**level - 1] before trying to encode
-// it. This is okay because if si == maxSiTi, then it is not a cell center
-// anyway and will be encoded separately as an off-center point.
-func siTitoPiQi(siTi uint32, level int) uint32 {
-	s := uint(siTi)
-	const max = maxSiTi - 1
-	if s > max {
-		s = max
-	}
-
-	return uint32(s >> (maxLevel + 1 - uint(level)))
-}
-
-// piQiToST returns the value transformed to ST space.
-func piQiToST(pi uint32, level int) float64 {
-	// We want to recover the position at the center of the cell. If the point
-	// was snapped to the center of the cell, then math.Modf(s * 2^level) == 0.5.
-	// Inverting STtoPiQi gives:
-	// s = (pi + 0.5) / 2^level.
-	return (float64(pi) + 0.5) / float64(int(1)<<uint(level))
-}
-
-func facePiQitoXYZ(face int, pi, qi uint32, level int) r3.Vector {
-	return faceUVToXYZ(face, stToUV(piQiToST(pi, level)), stToUV(piQiToST(qi, level))).Normalize()
-}

+ 0 - 1212
vendor/github.com/golang/geo/s2/polygon.go

@@ -1,1212 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-)
-
-// Polygon represents a sequence of zero or more loops; recall that the
-// interior of a loop is defined to be its left-hand side (see Loop).
-//
-// When the polygon is initialized, the given loops are automatically converted
-// into a canonical form consisting of "shells" and "holes". Shells and holes
-// are both oriented CCW, and are nested hierarchically. The loops are
-// reordered to correspond to a pre-order traversal of the nesting hierarchy.
-//
-// Polygons may represent any region of the sphere with a polygonal boundary,
-// including the entire sphere (known as the "full" polygon). The full polygon
-// consists of a single full loop (see Loop), whereas the empty polygon has no
-// loops at all.
-//
-// Use FullPolygon() to construct a full polygon. The zero value of Polygon is
-// treated as the empty polygon.
-//
-// Polygons have the following restrictions:
-//
-//  - Loops may not cross, i.e. the boundary of a loop may not intersect
-//    both the interior and exterior of any other loop.
-//
-//  - Loops may not share edges, i.e. if a loop contains an edge AB, then
-//    no other loop may contain AB or BA.
-//
-//  - Loops may share vertices, however no vertex may appear twice in a
-//    single loop (see Loop).
-//
-//  - No loop may be empty. The full loop may appear only in the full polygon.
-type Polygon struct {
-	loops []*Loop
-
-	// index is a spatial index of all the polygon loops.
-	index *ShapeIndex
-
-	// hasHoles tracks if this polygon has at least one hole.
-	hasHoles bool
-
-	// numVertices keeps the running total of all of the vertices of the contained loops.
-	numVertices int
-
-	// numEdges tracks the total number of edges in all the loops in this polygon.
-	numEdges int
-
-	// bound is a conservative bound on all points contained by this loop.
-	// If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
-	bound Rect
-
-	// Since bound is not exact, it is possible that a loop A contains
-	// another loop B whose bounds are slightly larger. subregionBound
-	// has been expanded sufficiently to account for this error, i.e.
-	// if A.Contains(B), then A.subregionBound.Contains(B.bound).
-	subregionBound Rect
-
-	// A slice where element i is the cumulative number of edges in the
-	// preceding loops in the polygon. This field is used for polygons that
-	// have a large number of loops, and may be empty for polygons with few loops.
-	cumulativeEdges []int
-}
-
-// PolygonFromLoops constructs a polygon from the given set of loops. The polygon
-// interior consists of the points contained by an odd number of loops. (Recall
-// that a loop contains the set of points on its left-hand side.)
-//
-// This method determines the loop nesting hierarchy and assigns every loop a
-// depth. Shells have even depths, and holes have odd depths.
-//
-// Note: The given set of loops are reordered by this method so that the hierarchy
-// can be traversed using Parent, LastDescendant and the loops depths.
-func PolygonFromLoops(loops []*Loop) *Polygon {
-	p := &Polygon{}
-	// Empty polygons do not contain any loops, even the Empty loop.
-	if len(loops) == 1 && loops[0].IsEmpty() {
-		p.initLoopProperties()
-		return p
-	}
-	p.loops = loops
-	p.initNested()
-	return p
-}
-
-// PolygonFromOrientedLoops returns a Polygon from the given set of loops,
-// like PolygonFromLoops. It expects loops to be oriented such that the polygon
-// interior is on the left-hand side of all loops. This implies that shells
-// and holes should have opposite orientations in the input to this method.
-// (During initialization, loops representing holes will automatically be
-// inverted.)
-func PolygonFromOrientedLoops(loops []*Loop) *Polygon {
-	// Here is the algorithm:
-	//
-	// 1. Remember which of the given loops contain OriginPoint.
-	//
-	// 2. Invert loops as necessary to ensure that they are nestable (i.e., no
-	//    loop contains the complement of any other loop). This may result in a
-	//    set of loops corresponding to the complement of the given polygon, but
-	//    we will fix that problem later.
-	//
-	//    We make the loops nestable by first normalizing all the loops (i.e.,
-	//    inverting any loops whose turning angle is negative). This handles
-	//    all loops except those whose turning angle is very close to zero
-	//    (within the maximum error tolerance). Any such loops are inverted if
-	//    and only if they contain OriginPoint(). (In theory this step is only
-	//    necessary if there are at least two such loops.) The resulting set of
-	//    loops is guaranteed to be nestable.
-	//
-	// 3. Build the polygon. This yields either the desired polygon or its
-	//    complement.
-	//
-	// 4. If there is at least one loop, we find a loop L that is adjacent to
-	//    OriginPoint() (where "adjacent" means that there exists a path
-	//    connecting OriginPoint() to some vertex of L such that the path does
-	//    not cross any loop). There may be a single such adjacent loop, or
-	//    there may be several (in which case they should all have the same
-	//    contains_origin() value). We choose L to be the loop containing the
-	//    origin whose depth is greatest, or loop(0) (a top-level shell) if no
-	//    such loop exists.
-	//
-	// 5. If (L originally contained origin) != (polygon contains origin), we
-	//    invert the polygon. This is done by inverting a top-level shell whose
-	//    turning angle is minimal and then fixing the nesting hierarchy. Note
-	//    that because we normalized all the loops initially, this step is only
-	//    necessary if the polygon requires at least one non-normalized loop to
-	//    represent it.
-
-	containedOrigin := make(map[*Loop]bool)
-	for _, l := range loops {
-		containedOrigin[l] = l.ContainsOrigin()
-	}
-
-	for _, l := range loops {
-		angle := l.TurningAngle()
-		if math.Abs(angle) > l.turningAngleMaxError() {
-			// Normalize the loop.
-			if angle < 0 {
-				l.Invert()
-			}
-		} else {
-			// Ensure that the loop does not contain the origin.
-			if l.ContainsOrigin() {
-				l.Invert()
-			}
-		}
-	}
-
-	p := PolygonFromLoops(loops)
-
-	if p.NumLoops() > 0 {
-		originLoop := p.Loop(0)
-		polygonContainsOrigin := false
-		for _, l := range p.Loops() {
-			if l.ContainsOrigin() {
-				polygonContainsOrigin = !polygonContainsOrigin
-
-				originLoop = l
-			}
-		}
-		if containedOrigin[originLoop] != polygonContainsOrigin {
-			p.Invert()
-		}
-	}
-
-	return p
-}
-
-// Invert inverts the polygon (replaces it by its complement).
-func (p *Polygon) Invert() {
-	// Inverting any one loop will invert the polygon.  The best loop to invert
-	// is the one whose area is largest, since this yields the smallest area
-	// after inversion. The loop with the largest area is always at depth 0.
-	// The descendents of this loop all have their depth reduced by 1, while the
-	// former siblings of this loop all have their depth increased by 1.
-
-	// The empty and full polygons are handled specially.
-	if p.IsEmpty() {
-		*p = *FullPolygon()
-		return
-	}
-	if p.IsFull() {
-		*p = Polygon{}
-		return
-	}
-
-	// Find the loop whose area is largest (i.e., whose turning angle is
-	// smallest), minimizing calls to TurningAngle(). In particular, for
-	// polygons with a single shell at level 0 there is no need to call
-	// TurningAngle() at all. (This method is relatively expensive.)
-	best := 0
-	const none = 10.0 // Flag that means "not computed yet"
-	bestAngle := none
-	for i := 1; i < p.NumLoops(); i++ {
-		if p.Loop(i).depth != 0 {
-			continue
-		}
-		// We defer computing the turning angle of loop 0 until we discover
-		// that the polygon has another top-level shell.
-		if bestAngle == none {
-			bestAngle = p.Loop(best).TurningAngle()
-		}
-		angle := p.Loop(i).TurningAngle()
-		// We break ties deterministically in order to avoid having the output
-		// depend on the input order of the loops.
-		if angle < bestAngle || (angle == bestAngle && compareLoops(p.Loop(i), p.Loop(best)) < 0) {
-			best = i
-			bestAngle = angle
-		}
-	}
-	// Build the new loops vector, starting with the inverted loop.
-	p.Loop(best).Invert()
-	newLoops := make([]*Loop, 0, p.NumLoops())
-	// Add the former siblings of this loop as descendants.
-	lastBest := p.LastDescendant(best)
-	newLoops = append(newLoops, p.Loop(best))
-	for i, l := range p.Loops() {
-		if i < best || i > lastBest {
-			l.depth++
-			newLoops = append(newLoops, l)
-		}
-	}
-	// Add the former children of this loop as siblings.
-	for i, l := range p.Loops() {
-		if i > best && i <= lastBest {
-			l.depth--
-			newLoops = append(newLoops, l)
-		}
-	}
-	p.loops = newLoops
-	p.initLoopProperties()
-}
-
-// Defines a total ordering on Loops that does not depend on the cyclic
-// order of loop vertices. This function is used to choose which loop to
-// invert in the case where several loops have exactly the same area.
-func compareLoops(a, b *Loop) int {
-	if na, nb := a.NumVertices(), b.NumVertices(); na != nb {
-		return na - nb
-	}
-	ai, aDir := a.CanonicalFirstVertex()
-	bi, bDir := b.CanonicalFirstVertex()
-	if aDir != bDir {
-		return aDir - bDir
-	}
-	for n := a.NumVertices() - 1; n >= 0; n, ai, bi = n-1, ai+aDir, bi+bDir {
-		if cmp := a.Vertex(ai).Cmp(b.Vertex(bi).Vector); cmp != 0 {
-			return cmp
-		}
-	}
-	return 0
-}
-
-// PolygonFromCell returns a Polygon from a single loop created from the given Cell.
-func PolygonFromCell(cell Cell) *Polygon {
-	return PolygonFromLoops([]*Loop{LoopFromCell(cell)})
-}
-
-// initNested takes the set of loops in this polygon and performs the nesting
-// computations to set the proper nesting and parent/child relationships.
-func (p *Polygon) initNested() {
-	if len(p.loops) == 1 {
-		p.initOneLoop()
-		return
-	}
-
-	lm := make(loopMap)
-
-	for _, l := range p.loops {
-		lm.insertLoop(l, nil)
-	}
-	// The loops have all been added to the loopMap for ordering. Clear the
-	// loops slice because we add all the loops in-order in initLoops.
-	p.loops = nil
-
-	// Reorder the loops in depth-first traversal order.
-	p.initLoops(lm)
-	p.initLoopProperties()
-}
-
-// loopMap is a map of a loop to its immediate children with respect to nesting.
-// It is used to determine which loops are shells and which are holes.
-type loopMap map[*Loop][]*Loop
-
-// insertLoop adds the given loop to the loop map under the specified parent.
-// All children of the new entry are checked to see if the need to move up to
-// a different level.
-func (lm loopMap) insertLoop(newLoop, parent *Loop) {
-	var children []*Loop
-	for done := false; !done; {
-		children = lm[parent]
-		done = true
-		for _, child := range children {
-			if child.ContainsNested(newLoop) {
-				parent = child
-				done = false
-				break
-			}
-		}
-	}
-
-	// Now, we have found a parent for this loop, it may be that some of the
-	// children of the parent of this loop may now be children of the new loop.
-	newChildren := lm[newLoop]
-	for i := 0; i < len(children); {
-		child := children[i]
-		if newLoop.ContainsNested(child) {
-			newChildren = append(newChildren, child)
-			children = append(children[0:i], children[i+1:]...)
-		} else {
-			i++
-		}
-	}
-
-	lm[newLoop] = newChildren
-	lm[parent] = append(children, newLoop)
-}
-
-// loopStack simplifies access to the loops while being initialized.
-type loopStack []*Loop
-
-func (s *loopStack) push(v *Loop) {
-	*s = append(*s, v)
-}
-func (s *loopStack) pop() *Loop {
-	l := len(*s)
-	r := (*s)[l-1]
-	*s = (*s)[:l-1]
-	return r
-}
-
-// initLoops walks the mapping of loops to all of their children, and adds them in
-// order into to the polygons set of loops.
-func (p *Polygon) initLoops(lm loopMap) {
-	var stack loopStack
-	stack.push(nil)
-	depth := -1
-
-	for len(stack) > 0 {
-		loop := stack.pop()
-		if loop != nil {
-			depth = loop.depth
-			p.loops = append(p.loops, loop)
-		}
-		children := lm[loop]
-		for i := len(children) - 1; i >= 0; i-- {
-			child := children[i]
-			child.depth = depth + 1
-			stack.push(child)
-		}
-	}
-}
-
-// initOneLoop set the properties for a polygon made of a single loop.
-// TODO(roberts): Can this be merged with initLoopProperties
-func (p *Polygon) initOneLoop() {
-	p.hasHoles = false
-	p.numVertices = len(p.loops[0].vertices)
-	p.bound = p.loops[0].RectBound()
-	p.subregionBound = ExpandForSubregions(p.bound)
-	// Ensure the loops depth is set correctly.
-	p.loops[0].depth = 0
-
-	p.initEdgesAndIndex()
-}
-
-// initLoopProperties sets the properties for polygons with multiple loops.
-func (p *Polygon) initLoopProperties() {
-	// the loops depths are set by initNested/initOriented prior to this.
-	p.bound = EmptyRect()
-	p.hasHoles = false
-	for _, l := range p.loops {
-		if l.IsHole() {
-			p.hasHoles = true
-		} else {
-			p.bound = p.bound.Union(l.RectBound())
-		}
-		p.numVertices += l.NumVertices()
-	}
-	p.subregionBound = ExpandForSubregions(p.bound)
-
-	p.initEdgesAndIndex()
-}
-
-// initEdgesAndIndex performs the shape related initializations and adds the final
-// polygon to the index.
-func (p *Polygon) initEdgesAndIndex() {
-	if p.IsFull() {
-		return
-	}
-	const maxLinearSearchLoops = 12 // Based on benchmarks.
-	if len(p.loops) > maxLinearSearchLoops {
-		p.cumulativeEdges = make([]int, 0, len(p.loops))
-	}
-
-	for _, l := range p.loops {
-		if p.cumulativeEdges != nil {
-			p.cumulativeEdges = append(p.cumulativeEdges, p.numEdges)
-		}
-		p.numEdges += len(l.vertices)
-	}
-
-	p.index = NewShapeIndex()
-	p.index.Add(p)
-}
-
-// FullPolygon returns a special "full" polygon.
-func FullPolygon() *Polygon {
-	ret := &Polygon{
-		loops: []*Loop{
-			FullLoop(),
-		},
-		numVertices:    len(FullLoop().Vertices()),
-		bound:          FullRect(),
-		subregionBound: FullRect(),
-	}
-	ret.initEdgesAndIndex()
-	return ret
-}
-
-// Validate checks whether this is a valid polygon,
-// including checking whether all the loops are themselves valid.
-func (p *Polygon) Validate() error {
-	for i, l := range p.loops {
-		// Check for loop errors that don't require building a ShapeIndex.
-		if err := l.findValidationErrorNoIndex(); err != nil {
-			return fmt.Errorf("loop %d: %v", i, err)
-		}
-		// Check that no loop is empty, and that the full loop only appears in the
-		// full polygon.
-		if l.IsEmpty() {
-			return fmt.Errorf("loop %d: empty loops are not allowed", i)
-		}
-		if l.IsFull() && len(p.loops) > 1 {
-			return fmt.Errorf("loop %d: full loop appears in non-full polygon", i)
-		}
-	}
-
-	// TODO(roberts): Uncomment the remaining checks when they are completed.
-
-	// Check for loop self-intersections and loop pairs that cross
-	// (including duplicate edges and vertices).
-	// if findSelfIntersection(p.index) {
-	//	return fmt.Errorf("polygon has loop pairs that cross")
-	// }
-
-	// Check whether initOriented detected inconsistent loop orientations.
-	// if p.hasInconsistentLoopOrientations {
-	// 	return fmt.Errorf("inconsistent loop orientations detected")
-	// }
-
-	// Finally, verify the loop nesting hierarchy.
-	return p.findLoopNestingError()
-}
-
-// findLoopNestingError reports if there is an error in the loop nesting hierarchy.
-func (p *Polygon) findLoopNestingError() error {
-	// First check that the loop depths make sense.
-	lastDepth := -1
-	for i, l := range p.loops {
-		depth := l.depth
-		if depth < 0 || depth > lastDepth+1 {
-			return fmt.Errorf("loop %d: invalid loop depth (%d)", i, depth)
-		}
-		lastDepth = depth
-	}
-	// Then check that they correspond to the actual loop nesting.  This test
-	// is quadratic in the number of loops but the cost per iteration is small.
-	for i, l := range p.loops {
-		last := p.LastDescendant(i)
-		for j, l2 := range p.loops {
-			if i == j {
-				continue
-			}
-			nested := (j >= i+1) && (j <= last)
-			const reverseB = false
-
-			if l.containsNonCrossingBoundary(l2, reverseB) != nested {
-				nestedStr := ""
-				if !nested {
-					nestedStr = "not "
-				}
-				return fmt.Errorf("invalid nesting: loop %d should %scontain loop %d", i, nestedStr, j)
-			}
-		}
-	}
-	return nil
-}
-
-// IsEmpty reports whether this is the special "empty" polygon (consisting of no loops).
-func (p *Polygon) IsEmpty() bool {
-	return len(p.loops) == 0
-}
-
-// IsFull reports whether this is the special "full" polygon (consisting of a
-// single loop that encompasses the entire sphere).
-func (p *Polygon) IsFull() bool {
-	return len(p.loops) == 1 && p.loops[0].IsFull()
-}
-
-// NumLoops returns the number of loops in this polygon.
-func (p *Polygon) NumLoops() int {
-	return len(p.loops)
-}
-
-// Loops returns the loops in this polygon.
-func (p *Polygon) Loops() []*Loop {
-	return p.loops
-}
-
-// Loop returns the loop at the given index. Note that during initialization,
-// the given loops are reordered according to a pre-order traversal of the loop
-// nesting hierarchy. This implies that every loop is immediately followed by
-// its descendants. This hierarchy can be traversed using the methods Parent,
-// LastDescendant, and Loop.depth.
-func (p *Polygon) Loop(k int) *Loop {
-	return p.loops[k]
-}
-
-// Parent returns the index of the parent of loop k.
-// If the loop does not have a parent, ok=false is returned.
-func (p *Polygon) Parent(k int) (index int, ok bool) {
-	// See where we are on the depth hierarchy.
-	depth := p.loops[k].depth
-	if depth == 0 {
-		return -1, false
-	}
-
-	// There may be several loops at the same nesting level as us that share a
-	// parent loop with us. (Imagine a slice of swiss cheese, of which we are one loop.
-	// we don't know how many may be next to us before we get back to our parent loop.)
-	// Move up one position from us, and then begin traversing back through the set of loops
-	// until we find the one that is our parent or we get to the top of the polygon.
-	for k--; k >= 0 && p.loops[k].depth <= depth; k-- {
-	}
-	return k, true
-}
-
-// LastDescendant returns the index of the last loop that is contained within loop k.
-// If k is negative, it returns the last loop in the polygon.
-// Note that loops are indexed according to a pre-order traversal of the nesting
-// hierarchy, so the immediate children of loop k can be found by iterating over
-// the loops (k+1)..LastDescendant(k) and selecting those whose depth is equal
-// to Loop(k).depth+1.
-func (p *Polygon) LastDescendant(k int) int {
-	if k < 0 {
-		return len(p.loops) - 1
-	}
-
-	depth := p.loops[k].depth
-
-	// Find the next loop immediately past us in the set of loops, and then start
-	// moving down the list until we either get to the end or find the next loop
-	// that is higher up the hierarchy than we are.
-	for k++; k < len(p.loops) && p.loops[k].depth > depth; k++ {
-	}
-	return k - 1
-}
-
-// CapBound returns a bounding spherical cap.
-func (p *Polygon) CapBound() Cap { return p.bound.CapBound() }
-
-// RectBound returns a bounding latitude-longitude rectangle.
-func (p *Polygon) RectBound() Rect { return p.bound }
-
-// ContainsPoint reports whether the polygon contains the point.
-func (p *Polygon) ContainsPoint(point Point) bool {
-	// NOTE: A bounds check slows down this function by about 50%. It is
-	// worthwhile only when it might allow us to delay building the index.
-	if !p.index.IsFresh() && !p.bound.ContainsPoint(point) {
-		return false
-	}
-
-	// For small polygons, and during initial construction, it is faster to just
-	// check all the crossing.
-	const maxBruteForceVertices = 32
-	if p.numVertices < maxBruteForceVertices || p.index == nil {
-		inside := false
-		for _, l := range p.loops {
-			// use loops bruteforce to avoid building the index on each loop.
-			inside = inside != l.bruteForceContainsPoint(point)
-		}
-		return inside
-	}
-
-	// Otherwise, look up the ShapeIndex cell containing this point.
-	it := p.index.Iterator()
-	if !it.LocatePoint(point) {
-		return false
-	}
-
-	return p.iteratorContainsPoint(it, point)
-}
-
-// ContainsCell reports whether the polygon contains the given cell.
-func (p *Polygon) ContainsCell(cell Cell) bool {
-	it := p.index.Iterator()
-	relation := it.LocateCellID(cell.ID())
-
-	// If "cell" is disjoint from all index cells, it is not contained.
-	// Similarly, if "cell" is subdivided into one or more index cells then it
-	// is not contained, since index cells are subdivided only if they (nearly)
-	// intersect a sufficient number of edges.  (But note that if "cell" itself
-	// is an index cell then it may be contained, since it could be a cell with
-	// no edges in the loop interior.)
-	if relation != Indexed {
-		return false
-	}
-
-	// Otherwise check if any edges intersect "cell".
-	if p.boundaryApproxIntersects(it, cell) {
-		return false
-	}
-
-	// Otherwise check if the loop contains the center of "cell".
-	return p.iteratorContainsPoint(it, cell.Center())
-}
-
-// IntersectsCell reports whether the polygon intersects the given cell.
-func (p *Polygon) IntersectsCell(cell Cell) bool {
-	it := p.index.Iterator()
-	relation := it.LocateCellID(cell.ID())
-
-	// If cell does not overlap any index cell, there is no intersection.
-	if relation == Disjoint {
-		return false
-	}
-	// If cell is subdivided into one or more index cells, there is an
-	// intersection to within the S2ShapeIndex error bound (see Contains).
-	if relation == Subdivided {
-		return true
-	}
-	// If cell is an index cell, there is an intersection because index cells
-	// are created only if they have at least one edge or they are entirely
-	// contained by the loop.
-	if it.CellID() == cell.id {
-		return true
-	}
-	// Otherwise check if any edges intersect cell.
-	if p.boundaryApproxIntersects(it, cell) {
-		return true
-	}
-	// Otherwise check if the loop contains the center of cell.
-	return p.iteratorContainsPoint(it, cell.Center())
-}
-
-// CellUnionBound computes a covering of the Polygon.
-func (p *Polygon) CellUnionBound() []CellID {
-	// TODO(roberts): Use ShapeIndexRegion when it's available.
-	return p.CapBound().CellUnionBound()
-}
-
-// boundaryApproxIntersects reports whether the loop's boundary intersects cell.
-// It may also return true when the loop boundary does not intersect cell but
-// some edge comes within the worst-case error tolerance.
-//
-// This requires that it.Locate(cell) returned Indexed.
-func (p *Polygon) boundaryApproxIntersects(it *ShapeIndexIterator, cell Cell) bool {
-	aClipped := it.IndexCell().findByShapeID(0)
-
-	// If there are no edges, there is no intersection.
-	if len(aClipped.edges) == 0 {
-		return false
-	}
-
-	// We can save some work if cell is the index cell itself.
-	if it.CellID() == cell.ID() {
-		return true
-	}
-
-	// Otherwise check whether any of the edges intersect cell.
-	maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist)
-	bound := cell.BoundUV().ExpandedByMargin(maxError)
-	for _, e := range aClipped.edges {
-		edge := p.index.Shape(0).Edge(e)
-		v0, v1, ok := ClipToPaddedFace(edge.V0, edge.V1, cell.Face(), maxError)
-		if ok && edgeIntersectsRect(v0, v1, bound) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// iteratorContainsPoint reports whether the iterator that is positioned at the
-// ShapeIndexCell that may contain p, contains the point p.
-func (p *Polygon) iteratorContainsPoint(it *ShapeIndexIterator, point Point) bool {
-	// Test containment by drawing a line segment from the cell center to the
-	// given point and counting edge crossings.
-	aClipped := it.IndexCell().findByShapeID(0)
-	inside := aClipped.containsCenter
-
-	if len(aClipped.edges) == 0 {
-		return inside
-	}
-
-	// This block requires ShapeIndex.
-	crosser := NewEdgeCrosser(it.Center(), point)
-	shape := p.index.Shape(0)
-	for _, e := range aClipped.edges {
-		edge := shape.Edge(e)
-		inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1)
-	}
-
-	return inside
-}
-
-// Shape Interface
-
-// NumEdges returns the number of edges in this shape.
-func (p *Polygon) NumEdges() int {
-	return p.numEdges
-}
-
-// Edge returns endpoints for the given edge index.
-func (p *Polygon) Edge(e int) Edge {
-	var i int
-
-	if len(p.cumulativeEdges) > 0 {
-		for i = range p.cumulativeEdges {
-			if i+1 >= len(p.cumulativeEdges) || e < p.cumulativeEdges[i+1] {
-				e -= p.cumulativeEdges[i]
-				break
-			}
-		}
-	} else {
-		// When the number of loops is small, use linear search. Most often
-		// there is exactly one loop and the code below executes zero times.
-		for i = 0; e >= len(p.Loop(i).vertices); i++ {
-			e -= len(p.Loop(i).vertices)
-		}
-	}
-
-	return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)}
-}
-
-// ReferencePoint returns the reference point for this polygon.
-func (p *Polygon) ReferencePoint() ReferencePoint {
-	containsOrigin := false
-	for _, l := range p.loops {
-		containsOrigin = containsOrigin != l.ContainsOrigin()
-	}
-	return OriginReferencePoint(containsOrigin)
-}
-
-// NumChains reports the number of contiguous edge chains in the Polygon.
-func (p *Polygon) NumChains() int {
-	return p.NumLoops()
-}
-
-// Chain returns the i-th edge Chain (loop) in the Shape.
-func (p *Polygon) Chain(chainID int) Chain {
-	if p.cumulativeEdges != nil {
-		return Chain{p.cumulativeEdges[chainID], len(p.Loop(chainID).vertices)}
-	}
-	e := 0
-	for j := 0; j < chainID; j++ {
-		e += len(p.Loop(j).vertices)
-	}
-
-	// Polygon represents a full loop as a loop with one vertex, while
-	// Shape represents a full loop as a chain with no vertices.
-	if numVertices := p.Loop(chainID).NumVertices(); numVertices != 1 {
-		return Chain{e, numVertices}
-	}
-	return Chain{e, 0}
-}
-
-// ChainEdge returns the j-th edge of the i-th edge Chain (loop).
-func (p *Polygon) ChainEdge(i, j int) Edge {
-	return Edge{p.Loop(i).OrientedVertex(j), p.Loop(i).OrientedVertex(j + 1)}
-}
-
-// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge
-// of the i-th edge Chain.
-func (p *Polygon) ChainPosition(edgeID int) ChainPosition {
-	var i int
-
-	if len(p.cumulativeEdges) > 0 {
-		for i = range p.cumulativeEdges {
-			if i+1 >= len(p.cumulativeEdges) || edgeID < p.cumulativeEdges[i+1] {
-				edgeID -= p.cumulativeEdges[i]
-				break
-			}
-		}
-	} else {
-		// When the number of loops is small, use linear search. Most often
-		// there is exactly one loop and the code below executes zero times.
-		for i = 0; edgeID >= len(p.Loop(i).vertices); i++ {
-			edgeID -= len(p.Loop(i).vertices)
-		}
-	}
-	// TODO(roberts): unify this and Edge since they are mostly identical.
-	return ChainPosition{i, edgeID}
-}
-
-// Dimension returns the dimension of the geometry represented by this Polygon.
-func (p *Polygon) Dimension() int { return 2 }
-
-func (p *Polygon) typeTag() typeTag { return typeTagPolygon }
-
-func (p *Polygon) privateInterface() {}
-
-// Contains reports whether this polygon contains the other polygon.
-// Specifically, it reports whether all the points in the other polygon
-// are also in this polygon.
-func (p *Polygon) Contains(o *Polygon) bool {
-	// If both polygons have one loop, use the more efficient Loop method.
-	// Note that Loop's Contains does its own bounding rectangle check.
-	if len(p.loops) == 1 && len(o.loops) == 1 {
-		return p.loops[0].Contains(o.loops[0])
-	}
-
-	// Otherwise if neither polygon has holes, we can still use the more
-	// efficient Loop's Contains method (rather than compareBoundary),
-	// but it's worthwhile to do our own bounds check first.
-	if !p.subregionBound.Contains(o.bound) {
-		// Even though Bound(A) does not contain Bound(B), it is still possible
-		// that A contains B. This can only happen when union of the two bounds
-		// spans all longitudes. For example, suppose that B consists of two
-		// shells with a longitude gap between them, while A consists of one shell
-		// that surrounds both shells of B but goes the other way around the
-		// sphere (so that it does not intersect the longitude gap).
-		if !p.bound.Lng.Union(o.bound.Lng).IsFull() {
-			return false
-		}
-	}
-
-	if !p.hasHoles && !o.hasHoles {
-		for _, l := range o.loops {
-			if !p.anyLoopContains(l) {
-				return false
-			}
-		}
-		return true
-	}
-
-	// Polygon A contains B iff B does not intersect the complement of A. From
-	// the intersection algorithm below, this means that the complement of A
-	// must exclude the entire boundary of B, and B must exclude all shell
-	// boundaries of the complement of A. (It can be shown that B must then
-	// exclude the entire boundary of the complement of A.) The first call
-	// below returns false if the boundaries cross, therefore the second call
-	// does not need to check for any crossing edges (which makes it cheaper).
-	return p.containsBoundary(o) && o.excludesNonCrossingComplementShells(p)
-}
-
-// Intersects reports whether this polygon intersects the other polygon, i.e.
-// if there is a point that is contained by both polygons.
-func (p *Polygon) Intersects(o *Polygon) bool {
-	// If both polygons have one loop, use the more efficient Loop method.
-	// Note that Loop Intersects does its own bounding rectangle check.
-	if len(p.loops) == 1 && len(o.loops) == 1 {
-		return p.loops[0].Intersects(o.loops[0])
-	}
-
-	// Otherwise if neither polygon has holes, we can still use the more
-	// efficient Loop.Intersects method. The polygons intersect if and
-	// only if some pair of loop regions intersect.
-	if !p.bound.Intersects(o.bound) {
-		return false
-	}
-
-	if !p.hasHoles && !o.hasHoles {
-		for _, l := range o.loops {
-			if p.anyLoopIntersects(l) {
-				return true
-			}
-		}
-		return false
-	}
-
-	// Polygon A is disjoint from B if A excludes the entire boundary of B and B
-	// excludes all shell boundaries of A. (It can be shown that B must then
-	// exclude the entire boundary of A.) The first call below returns false if
-	// the boundaries cross, therefore the second call does not need to check
-	// for crossing edges.
-	return !p.excludesBoundary(o) || !o.excludesNonCrossingShells(p)
-}
-
-// compareBoundary returns +1 if this polygon contains the boundary of B, -1 if A
-// excludes the boundary of B, and 0 if the boundaries of A and B cross.
-func (p *Polygon) compareBoundary(o *Loop) int {
-	result := -1
-	for i := 0; i < len(p.loops) && result != 0; i++ {
-		// If B crosses any loop of A, the result is 0. Otherwise the result
-		// changes sign each time B is contained by a loop of A.
-		result *= -p.loops[i].compareBoundary(o)
-	}
-	return result
-}
-
-// containsBoundary reports whether this polygon contains the entire boundary of B.
-func (p *Polygon) containsBoundary(o *Polygon) bool {
-	for _, l := range o.loops {
-		if p.compareBoundary(l) <= 0 {
-			return false
-		}
-	}
-	return true
-}
-
-// excludesBoundary reports whether this polygon excludes the entire boundary of B.
-func (p *Polygon) excludesBoundary(o *Polygon) bool {
-	for _, l := range o.loops {
-		if p.compareBoundary(l) >= 0 {
-			return false
-		}
-	}
-	return true
-}
-
-// containsNonCrossingBoundary reports whether polygon A contains the boundary of
-// loop B. Shared edges are handled according to the rule described in loops
-// containsNonCrossingBoundary.
-func (p *Polygon) containsNonCrossingBoundary(o *Loop, reverse bool) bool {
-	var inside bool
-	for _, l := range p.loops {
-		x := l.containsNonCrossingBoundary(o, reverse)
-		inside = (inside != x)
-	}
-	return inside
-}
-
-// excludesNonCrossingShells reports wheterh given two polygons A and B such that the
-// boundary of A does not cross any loop of B, if A excludes all shell boundaries of B.
-func (p *Polygon) excludesNonCrossingShells(o *Polygon) bool {
-	for _, l := range o.loops {
-		if l.IsHole() {
-			continue
-		}
-		if p.containsNonCrossingBoundary(l, false) {
-			return false
-		}
-	}
-	return true
-}
-
-// excludesNonCrossingComplementShells reports whether given two polygons A and B
-// such that the boundary of A does not cross any loop of B, if A excludes all
-// shell boundaries of the complement of B.
-func (p *Polygon) excludesNonCrossingComplementShells(o *Polygon) bool {
-	// Special case to handle the complement of the empty or full polygons.
-	if o.IsEmpty() {
-		return !p.IsFull()
-	}
-	if o.IsFull() {
-		return true
-	}
-
-	// Otherwise the complement of B may be obtained by inverting loop(0) and
-	// then swapping the shell/hole status of all other loops. This implies
-	// that the shells of the complement consist of loop 0 plus all the holes of
-	// the original polygon.
-	for j, l := range o.loops {
-		if j > 0 && !l.IsHole() {
-			continue
-		}
-
-		// The interior of the complement is to the right of loop 0, and to the
-		// left of the loops that were originally holes.
-		if p.containsNonCrossingBoundary(l, j == 0) {
-			return false
-		}
-	}
-	return true
-}
-
-// anyLoopContains reports whether any loop in this polygon contains the given loop.
-func (p *Polygon) anyLoopContains(o *Loop) bool {
-	for _, l := range p.loops {
-		if l.Contains(o) {
-			return true
-		}
-	}
-	return false
-}
-
-// anyLoopIntersects reports whether any loop in this polygon intersects the given loop.
-func (p *Polygon) anyLoopIntersects(o *Loop) bool {
-	for _, l := range p.loops {
-		if l.Intersects(o) {
-			return true
-		}
-	}
-	return false
-}
-
-// Area returns the area of the polygon interior, i.e. the region on the left side
-// of an odd number of loops. The return value is between 0 and 4*Pi.
-func (p *Polygon) Area() float64 {
-	var area float64
-	for _, loop := range p.loops {
-		area += float64(loop.Sign()) * loop.Area()
-	}
-	return area
-}
-
-// Encode encodes the Polygon
-func (p *Polygon) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	p.encode(e)
-	return e.err
-}
-
-// encode only supports lossless encoding and not compressed format.
-func (p *Polygon) encode(e *encoder) {
-	if p.numVertices == 0 {
-		p.encodeCompressed(e, maxLevel, nil)
-		return
-	}
-
-	// Convert all the polygon vertices to XYZFaceSiTi format.
-	vs := make([]xyzFaceSiTi, 0, p.numVertices)
-	for _, l := range p.loops {
-		vs = append(vs, l.xyzFaceSiTiVertices()...)
-	}
-
-	// Computes a histogram of the cell levels at which the vertices are snapped.
-	// (histogram[0] is the number of unsnapped vertices, histogram[i] the number
-	// of vertices snapped at level i-1).
-	histogram := make([]int, maxLevel+2)
-	for _, v := range vs {
-		histogram[v.level+1]++
-	}
-
-	// Compute the level at which most of the vertices are snapped.
-	// If multiple levels have the same maximum number of vertices
-	// snapped to it, the first one (lowest level number / largest
-	// area / smallest encoding length) will be chosen, so this
-	// is desired.
-	var snapLevel, numSnapped int
-	for level, h := range histogram[1:] {
-		if h > numSnapped {
-			snapLevel, numSnapped = level, h
-		}
-	}
-
-	// Choose an encoding format based on the number of unsnapped vertices and a
-	// rough estimate of the encoded sizes.
-	numUnsnapped := p.numVertices - numSnapped // Number of vertices that won't be snapped at snapLevel.
-	const pointSize = 3 * 8                    // s2.Point is an r3.Vector, which is 3 float64s. That's 3*8 = 24 bytes.
-	compressedSize := 4*p.numVertices + (pointSize+2)*numUnsnapped
-	losslessSize := pointSize * p.numVertices
-	if compressedSize < losslessSize {
-		p.encodeCompressed(e, snapLevel, vs)
-	} else {
-		p.encodeLossless(e)
-	}
-}
-
-// encodeLossless encodes the polygon's Points as float64s.
-func (p *Polygon) encodeLossless(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeBool(true) // a legacy c++ value. must be true.
-	e.writeBool(p.hasHoles)
-	e.writeUint32(uint32(len(p.loops)))
-
-	if e.err != nil {
-		return
-	}
-	if len(p.loops) > maxEncodedLoops {
-		e.err = fmt.Errorf("too many loops (%d; max is %d)", len(p.loops), maxEncodedLoops)
-		return
-	}
-	for _, l := range p.loops {
-		l.encode(e)
-	}
-
-	// Encode the bound.
-	p.bound.encode(e)
-}
-
-func (p *Polygon) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
-	e.writeUint8(uint8(encodingCompressedVersion))
-	e.writeUint8(uint8(snapLevel))
-	e.writeUvarint(uint64(len(p.loops)))
-
-	if e.err != nil {
-		return
-	}
-	if l := len(p.loops); l > maxEncodedLoops {
-		e.err = fmt.Errorf("too many loops to encode: %d; max is %d", l, maxEncodedLoops)
-		return
-	}
-
-	for _, l := range p.loops {
-		l.encodeCompressed(e, snapLevel, vertices[:len(l.vertices)])
-		vertices = vertices[len(l.vertices):]
-	}
-	// Do not write the bound, num_vertices, or has_holes_ as they can be
-	// cheaply recomputed by decodeCompressed.  Microbenchmarks show the
-	// speed difference is inconsequential.
-}
-
-// Decode decodes the Polygon.
-func (p *Polygon) Decode(r io.Reader) error {
-	d := &decoder{r: asByteReader(r)}
-	version := int8(d.readUint8())
-	var dec func(*decoder)
-	switch version {
-	case encodingVersion:
-		dec = p.decode
-	case encodingCompressedVersion:
-		dec = p.decodeCompressed
-	default:
-		return fmt.Errorf("unsupported version %d", version)
-	}
-	dec(d)
-	return d.err
-}
-
-// maxEncodedLoops is the biggest supported number of loops in a polygon during encoding.
-// Setting a maximum guards an allocation: it prevents an attacker from easily pushing us OOM.
-const maxEncodedLoops = 10000000
-
-func (p *Polygon) decode(d *decoder) {
-	*p = Polygon{}
-	d.readUint8() // Ignore irrelevant serialized owns_loops_ value.
-
-	p.hasHoles = d.readBool()
-
-	// Polygons with no loops are explicitly allowed here: a newly created
-	// polygon has zero loops and such polygons encode and decode properly.
-	nloops := d.readUint32()
-	if d.err != nil {
-		return
-	}
-	if nloops > maxEncodedLoops {
-		d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops)
-		return
-	}
-	p.loops = make([]*Loop, nloops)
-	for i := range p.loops {
-		p.loops[i] = new(Loop)
-		p.loops[i].decode(d)
-		p.numVertices += len(p.loops[i].vertices)
-	}
-
-	p.bound.decode(d)
-	if d.err != nil {
-		return
-	}
-	p.subregionBound = ExpandForSubregions(p.bound)
-	p.initEdgesAndIndex()
-}
-
-func (p *Polygon) decodeCompressed(d *decoder) {
-	snapLevel := int(d.readUint8())
-
-	if snapLevel > maxLevel {
-		d.err = fmt.Errorf("snaplevel too big: %d", snapLevel)
-		return
-	}
-	// Polygons with no loops are explicitly allowed here: a newly created
-	// polygon has zero loops and such polygons encode and decode properly.
-	nloops := int(d.readUvarint())
-	if nloops > maxEncodedLoops {
-		d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops)
-	}
-	p.loops = make([]*Loop, nloops)
-	for i := range p.loops {
-		p.loops[i] = new(Loop)
-		p.loops[i].decodeCompressed(d, snapLevel)
-	}
-	p.initLoopProperties()
-}
-
-// TODO(roberts): Differences from C++
-// Centroid
-// SnapLevel
-// DistanceToPoint
-// DistanceToBoundary
-// Project
-// ProjectToBoundary
-// ApproxContains/ApproxDisjoint for Polygons
-// InitTo{Intersection/ApproxIntersection/Union/ApproxUnion/Diff/ApproxDiff}
-// InitToSimplified
-// InitToSnapped
-// IntersectWithPolyline
-// ApproxIntersectWithPolyline
-// SubtractFromPolyline
-// ApproxSubtractFromPolyline
-// DestructiveUnion
-// DestructiveApproxUnion
-// InitToCellUnionBorder
-// IsNormalized
-// Equal/BoundaryEqual/BoundaryApproxEqual/BoundaryNear Polygons
-// BreakEdgesAndAddToBuilder
-//
-// clearLoops
-// findLoopNestingError
-// initToSimplifiedInternal
-// internalClipPolyline
-// clipBoundary

+ 0 - 589
vendor/github.com/golang/geo/s2/polyline.go

@@ -1,589 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-// Polyline represents a sequence of zero or more vertices connected by
-// straight edges (geodesics). Edges of length 0 and 180 degrees are not
-// allowed, i.e. adjacent vertices should not be identical or antipodal.
-type Polyline []Point
-
-// PolylineFromLatLngs creates a new Polyline from the given LatLngs.
-func PolylineFromLatLngs(points []LatLng) *Polyline {
-	p := make(Polyline, len(points))
-	for k, v := range points {
-		p[k] = PointFromLatLng(v)
-	}
-	return &p
-}
-
-// Reverse reverses the order of the Polyline vertices.
-func (p *Polyline) Reverse() {
-	for i := 0; i < len(*p)/2; i++ {
-		(*p)[i], (*p)[len(*p)-i-1] = (*p)[len(*p)-i-1], (*p)[i]
-	}
-}
-
-// Length returns the length of this Polyline.
-func (p *Polyline) Length() s1.Angle {
-	var length s1.Angle
-
-	for i := 1; i < len(*p); i++ {
-		length += (*p)[i-1].Distance((*p)[i])
-	}
-	return length
-}
-
-// Centroid returns the true centroid of the polyline multiplied by the length of the
-// polyline. The result is not unit length, so you may wish to normalize it.
-//
-// Scaling by the Polyline length makes it easy to compute the centroid
-// of several Polylines (by simply adding up their centroids).
-func (p *Polyline) Centroid() Point {
-	var centroid Point
-	for i := 1; i < len(*p); i++ {
-		// The centroid (multiplied by length) is a vector toward the midpoint
-		// of the edge, whose length is twice the sin of half the angle between
-		// the two vertices. Defining theta to be this angle, we have:
-		vSum := (*p)[i-1].Add((*p)[i].Vector)  // Length == 2*cos(theta)
-		vDiff := (*p)[i-1].Sub((*p)[i].Vector) // Length == 2*sin(theta)
-
-		// Length == 2*sin(theta)
-		centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))}
-	}
-	return centroid
-}
-
-// Equal reports whether the given Polyline is exactly the same as this one.
-func (p *Polyline) Equal(b *Polyline) bool {
-	if len(*p) != len(*b) {
-		return false
-	}
-	for i, v := range *p {
-		if v != (*b)[i] {
-			return false
-		}
-	}
-
-	return true
-}
-
-// ApproxEqual reports whether two polylines have the same number of vertices,
-// and corresponding vertex pairs are separated by no more the standard margin.
-func (p *Polyline) ApproxEqual(o *Polyline) bool {
-	return p.approxEqual(o, s1.Angle(epsilon))
-}
-
-// approxEqual reports whether two polylines are equal within the given margin.
-func (p *Polyline) approxEqual(o *Polyline, maxError s1.Angle) bool {
-	if len(*p) != len(*o) {
-		return false
-	}
-	for offset, val := range *p {
-		if !val.approxEqual((*o)[offset], maxError) {
-			return false
-		}
-	}
-	return true
-}
-
-// CapBound returns the bounding Cap for this Polyline.
-func (p *Polyline) CapBound() Cap {
-	return p.RectBound().CapBound()
-}
-
-// RectBound returns the bounding Rect for this Polyline.
-func (p *Polyline) RectBound() Rect {
-	rb := NewRectBounder()
-	for _, v := range *p {
-		rb.AddPoint(v)
-	}
-	return rb.RectBound()
-}
-
-// ContainsCell reports whether this Polyline contains the given Cell. Always returns false
-// because "containment" is not numerically well-defined except at the Polyline vertices.
-func (p *Polyline) ContainsCell(cell Cell) bool {
-	return false
-}
-
-// IntersectsCell reports whether this Polyline intersects the given Cell.
-func (p *Polyline) IntersectsCell(cell Cell) bool {
-	if len(*p) == 0 {
-		return false
-	}
-
-	// We only need to check whether the cell contains vertex 0 for correctness,
-	// but these tests are cheap compared to edge crossings so we might as well
-	// check all the vertices.
-	for _, v := range *p {
-		if cell.ContainsPoint(v) {
-			return true
-		}
-	}
-
-	cellVertices := []Point{
-		cell.Vertex(0),
-		cell.Vertex(1),
-		cell.Vertex(2),
-		cell.Vertex(3),
-	}
-
-	for j := 0; j < 4; j++ {
-		crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], (*p)[0])
-		for i := 1; i < len(*p); i++ {
-			if crosser.ChainCrossingSign((*p)[i]) != DoNotCross {
-				// There is a proper crossing, or two vertices were the same.
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// ContainsPoint returns false since Polylines are not closed.
-func (p *Polyline) ContainsPoint(point Point) bool {
-	return false
-}
-
-// CellUnionBound computes a covering of the Polyline.
-func (p *Polyline) CellUnionBound() []CellID {
-	return p.CapBound().CellUnionBound()
-}
-
-// NumEdges returns the number of edges in this shape.
-func (p *Polyline) NumEdges() int {
-	if len(*p) == 0 {
-		return 0
-	}
-	return len(*p) - 1
-}
-
-// Edge returns endpoints for the given edge index.
-func (p *Polyline) Edge(i int) Edge {
-	return Edge{(*p)[i], (*p)[i+1]}
-}
-
-// ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
-func (p *Polyline) ReferencePoint() ReferencePoint {
-	return OriginReferencePoint(false)
-}
-
-// NumChains reports the number of contiguous edge chains in this Polyline.
-func (p *Polyline) NumChains() int {
-	return minInt(1, p.NumEdges())
-}
-
-// Chain returns the i-th edge Chain in the Shape.
-func (p *Polyline) Chain(chainID int) Chain {
-	return Chain{0, p.NumEdges()}
-}
-
-// ChainEdge returns the j-th edge of the i-th edge Chain.
-func (p *Polyline) ChainEdge(chainID, offset int) Edge {
-	return Edge{(*p)[offset], (*p)[offset+1]}
-}
-
-// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge
-func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
-	return ChainPosition{0, edgeID}
-}
-
-// Dimension returns the dimension of the geometry represented by this Polyline.
-func (p *Polyline) Dimension() int { return 1 }
-
-// IsEmpty reports whether this shape contains no points.
-func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
-
-// IsFull reports whether this shape contains all points on the sphere.
-func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
-
-func (p *Polyline) typeTag() typeTag { return typeTagPolyline }
-
-func (p *Polyline) privateInterface() {}
-
-// findEndVertex reports the maximal end index such that the line segment between
-// the start index and this one such that the line segment between these two
-// vertices passes within the given tolerance of all interior vertices, in order.
-func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
-	// The basic idea is to keep track of the "pie wedge" of angles
-	// from the starting vertex such that a ray from the starting
-	// vertex at that angle will pass through the discs of radius
-	// tolerance centered around all vertices processed so far.
-	//
-	// First we define a coordinate frame for the tangent and normal
-	// spaces at the starting vertex. Essentially this means picking
-	// three orthonormal vectors X,Y,Z such that X and Y span the
-	// tangent plane at the starting vertex, and Z is up. We use
-	// the coordinate frame to define a mapping from 3D direction
-	// vectors to a one-dimensional ray angle in the range (-π,
-	// π]. The angle of a direction vector is computed by
-	// transforming it into the X,Y,Z basis, and then calculating
-	// atan2(y,x). This mapping allows us to represent a wedge of
-	// angles as a 1D interval. Since the interval wraps around, we
-	// represent it as an Interval, i.e. an interval on the unit
-	// circle.
-	origin := p[index]
-	frame := getFrame(origin)
-
-	// As we go along, we keep track of the current wedge of angles
-	// and the distance to the last vertex (which must be
-	// non-decreasing).
-	currentWedge := s1.FullInterval()
-	var lastDistance s1.Angle
-
-	for index++; index < len(p); index++ {
-		candidate := p[index]
-		distance := origin.Distance(candidate)
-
-		// We don't allow simplification to create edges longer than
-		// 90 degrees, to avoid numeric instability as lengths
-		// approach 180 degrees. We do need to allow for original
-		// edges longer than 90 degrees, though.
-		if distance > math.Pi/2 && lastDistance > 0 {
-			break
-		}
-
-		// Vertices must be in increasing order along the ray, except
-		// for the initial disc around the origin.
-		if distance < lastDistance && lastDistance > tolerance {
-			break
-		}
-
-		lastDistance = distance
-
-		// Points that are within the tolerance distance of the origin
-		// do not constrain the ray direction, so we can ignore them.
-		if distance <= tolerance {
-			continue
-		}
-
-		// If the current wedge of angles does not contain the angle
-		// to this vertex, then stop right now. Note that the wedge
-		// of possible ray angles is not necessarily empty yet, but we
-		// can't continue unless we are willing to backtrack to the
-		// last vertex that was contained within the wedge (since we
-		// don't create new vertices). This would be more complicated
-		// and also make the worst-case running time more than linear.
-		direction := toFrame(frame, candidate)
-		center := math.Atan2(direction.Y, direction.X)
-		if !currentWedge.Contains(center) {
-			break
-		}
-
-		// To determine how this vertex constrains the possible ray
-		// angles, consider the triangle ABC where A is the origin, B
-		// is the candidate vertex, and C is one of the two tangent
-		// points between A and the spherical cap of radius
-		// tolerance centered at B. Then from the spherical law of
-		// sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are
-		// the lengths of the edges opposite A and C. In our case C
-		// is a 90 degree angle, therefore A = asin(sin(a) / sin(c)).
-		// Angle A is the half-angle of the allowable wedge.
-		halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians()))
-		target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle)
-		currentWedge = currentWedge.Intersection(target)
-	}
-
-	// We break out of the loop when we reach a vertex index that
-	// can't be included in the line segment, so back up by one
-	// vertex.
-	return index - 1
-}
-
-// SubsampleVertices returns a subsequence of vertex indices such that the
-// polyline connecting these vertices is never further than the given tolerance from
-// the original polyline. Provided the first and last vertices are distinct,
-// they are always preserved; if they are not, the subsequence may contain
-// only a single index.
-//
-// Some useful properties of the algorithm:
-//
-//  - It runs in linear time.
-//
-//  - The output always represents a valid polyline. In particular, adjacent
-//    output vertices are never identical or antipodal.
-//
-//  - The method is not optimal, but it tends to produce 2-3% fewer
-//    vertices than the Douglas-Peucker algorithm with the same tolerance.
-//
-//  - The output is parametrically equivalent to the original polyline to
-//    within the given tolerance. For example, if a polyline backtracks on
-//    itself and then proceeds onwards, the backtracking will be preserved
-//    (to within the given tolerance). This is different than the
-//    Douglas-Peucker algorithm which only guarantees geometric equivalence.
-func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int {
-	var result []int
-
-	if len(*p) < 1 {
-		return result
-	}
-
-	result = append(result, 0)
-	clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0))
-
-	for index := 0; index+1 < len(*p); {
-		nextIndex := findEndVertex(*p, clampedTolerance, index)
-		// Don't create duplicate adjacent vertices.
-		if (*p)[nextIndex] != (*p)[index] {
-			result = append(result, nextIndex)
-		}
-		index = nextIndex
-	}
-
-	return result
-}
-
-// Encode encodes the Polyline.
-func (p Polyline) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	p.encode(e)
-	return e.err
-}
-
-func (p Polyline) encode(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeUint32(uint32(len(p)))
-	for _, v := range p {
-		e.writeFloat64(v.X)
-		e.writeFloat64(v.Y)
-		e.writeFloat64(v.Z)
-	}
-}
-
-// Decode decodes the polyline.
-func (p *Polyline) Decode(r io.Reader) error {
-	d := decoder{r: asByteReader(r)}
-	p.decode(d)
-	return d.err
-}
-
-func (p *Polyline) decode(d decoder) {
-	version := d.readInt8()
-	if d.err != nil {
-		return
-	}
-	if int(version) != int(encodingVersion) {
-		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
-		return
-	}
-	nvertices := d.readUint32()
-	if d.err != nil {
-		return
-	}
-	if nvertices > maxEncodedVertices {
-		d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
-		return
-	}
-	*p = make([]Point, nvertices)
-	for i := range *p {
-		(*p)[i].X = d.readFloat64()
-		(*p)[i].Y = d.readFloat64()
-		(*p)[i].Z = d.readFloat64()
-	}
-}
-
-// Project returns a point on the polyline that is closest to the given point,
-// and the index of the next vertex after the projected point. The
-// value of that index is always in the range [1, len(polyline)].
-// The polyline must not be empty.
-func (p *Polyline) Project(point Point) (Point, int) {
-	if len(*p) == 1 {
-		// If there is only one vertex, it is always closest to any given point.
-		return (*p)[0], 1
-	}
-
-	// Initial value larger than any possible distance on the unit sphere.
-	minDist := 10 * s1.Radian
-	minIndex := -1
-
-	// Find the line segment in the polyline that is closest to the point given.
-	for i := 1; i < len(*p); i++ {
-		if dist := DistanceFromSegment(point, (*p)[i-1], (*p)[i]); dist < minDist {
-			minDist = dist
-			minIndex = i
-		}
-	}
-
-	// Compute the point on the segment found that is closest to the point given.
-	closest := Project(point, (*p)[minIndex-1], (*p)[minIndex])
-	if closest == (*p)[minIndex] {
-		minIndex++
-	}
-
-	return closest, minIndex
-}
-
-// IsOnRight reports whether the point given is on the right hand side of the
-// polyline, using a naive definition of "right-hand-sideness" where the point
-// is on the RHS of the polyline iff the point is on the RHS of the line segment
-// in the polyline which it is closest to.
-// The polyline must have at least 2 vertices.
-func (p *Polyline) IsOnRight(point Point) bool {
-	// If the closest point C is an interior vertex of the polyline, let B and D
-	// be the previous and next vertices. The given point P is on the right of
-	// the polyline (locally) if B, P, D are ordered CCW around vertex C.
-	closest, next := p.Project(point)
-	if closest == (*p)[next-1] && next > 1 && next < len(*p) {
-		if point == (*p)[next-1] {
-			// Polyline vertices are not on the RHS.
-			return false
-		}
-		return OrderedCCW((*p)[next-2], point, (*p)[next], (*p)[next-1])
-	}
-	// Otherwise, the closest point C is incident to exactly one polyline edge.
-	// We test the point P against that edge.
-	if next == len(*p) {
-		next--
-	}
-	return Sign(point, (*p)[next], (*p)[next-1])
-}
-
-// Validate checks whether this is a valid polyline or not.
-func (p *Polyline) Validate() error {
-	// All vertices must be unit length.
-	for i, pt := range *p {
-		if !pt.IsUnit() {
-			return fmt.Errorf("vertex %d is not unit length", i)
-		}
-	}
-
-	// Adjacent vertices must not be identical or antipodal.
-	for i := 1; i < len(*p); i++ {
-		prev, cur := (*p)[i-1], (*p)[i]
-		if prev == cur {
-			return fmt.Errorf("vertices %d and %d are identical", i-1, i)
-		}
-		if prev == (Point{cur.Mul(-1)}) {
-			return fmt.Errorf("vertices %d and %d are antipodal", i-1, i)
-		}
-	}
-
-	return nil
-}
-
-// Intersects reports whether this polyline intersects the given polyline. If
-// the polylines share a vertex they are considered to be intersecting. When a
-// polyline endpoint is the only intersection with the other polyline, the
-// function may return true or false arbitrarily.
-//
-// The running time is quadratic in the number of vertices.
-func (p *Polyline) Intersects(o *Polyline) bool {
-	if len(*p) == 0 || len(*o) == 0 {
-		return false
-	}
-
-	if !p.RectBound().Intersects(o.RectBound()) {
-		return false
-	}
-
-	// TODO(roberts): Use ShapeIndex here.
-	for i := 1; i < len(*p); i++ {
-		crosser := NewChainEdgeCrosser((*p)[i-1], (*p)[i], (*o)[0])
-		for j := 1; j < len(*o); j++ {
-			if crosser.ChainCrossingSign((*o)[j]) != DoNotCross {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// Interpolate returns the point whose distance from vertex 0 along the polyline is
-// the given fraction of the polyline's total length, and the index of
-// the next vertex after the interpolated point P. Fractions less than zero
-// or greater than one are clamped. The return value is unit length. The cost of
-// this function is currently linear in the number of vertices.
-//
-// This method allows the caller to easily construct a given suffix of the
-// polyline by concatenating P with the polyline vertices starting at that next
-// vertex. Note that P is guaranteed to be different than the point at the next
-// vertex, so this will never result in a duplicate vertex.
-//
-// The polyline must not be empty. Note that if fraction >= 1.0, then the next
-// vertex will be set to len(p) (indicating that no vertices from the polyline
-// need to be appended). The value of the next vertex is always between 1 and
-// len(p).
-//
-// This method can also be used to construct a prefix of the polyline, by
-// taking the polyline vertices up to next vertex-1 and appending the
-// returned point P if it is different from the last vertex (since in this
-// case there is no guarantee of distinctness).
-func (p *Polyline) Interpolate(fraction float64) (Point, int) {
-	// We intentionally let the (fraction >= 1) case fall through, since
-	// we need to handle it in the loop below in any case because of
-	// possible roundoff errors.
-	if fraction <= 0 {
-		return (*p)[0], 1
-	}
-	target := s1.Angle(fraction) * p.Length()
-
-	for i := 1; i < len(*p); i++ {
-		length := (*p)[i-1].Distance((*p)[i])
-		if target < length {
-			// This interpolates with respect to arc length rather than
-			// straight-line distance, and produces a unit-length result.
-			result := InterpolateAtDistance(target, (*p)[i-1], (*p)[i])
-
-			// It is possible that (result == vertex(i)) due to rounding errors.
-			if result == (*p)[i] {
-				return result, i + 1
-			}
-			return result, i
-		}
-		target -= length
-	}
-
-	return (*p)[len(*p)-1], len(*p)
-}
-
-// Uninterpolate is the inverse operation of Interpolate. Given a point on the
-// polyline, it returns the ratio of the distance to the point from the
-// beginning of the polyline over the length of the polyline. The return
-// value is always betwen 0 and 1 inclusive.
-//
-// The polyline should not be empty.  If it has fewer than 2 vertices, the
-// return value is zero.
-func (p *Polyline) Uninterpolate(point Point, nextVertex int) float64 {
-	if len(*p) < 2 {
-		return 0
-	}
-
-	var sum s1.Angle
-	for i := 1; i < nextVertex; i++ {
-		sum += (*p)[i-1].Distance((*p)[i])
-	}
-	lengthToPoint := sum + (*p)[nextVertex-1].Distance(point)
-	for i := nextVertex; i < len(*p); i++ {
-		sum += (*p)[i-1].Distance((*p)[i])
-	}
-	// The ratio can be greater than 1.0 due to rounding errors or because the
-	// point is not exactly on the polyline.
-	return minFloat64(1.0, float64(lengthToPoint/sum))
-}
-
-// TODO(roberts): Differences from C++.
-// NearlyCoversPolyline
-// InitToSnapped
-// InitToSimplified
-// SnapLevel
-// encode/decode compressed

+ 0 - 53
vendor/github.com/golang/geo/s2/polyline_measures.go

@@ -1,53 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// This file defines various measures for polylines on the sphere. These are
-// low-level methods that work directly with arrays of Points. They are used to
-// implement the methods in various other measures files.
-
-import (
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// polylineLength returns the length of the given Polyline.
-// It returns 0 for polylines with fewer than two vertices.
-func polylineLength(p []Point) s1.Angle {
-	var length s1.Angle
-
-	for i := 1; i < len(p); i++ {
-		length += p[i-1].Distance(p[i])
-	}
-	return length
-}
-
-// polylineCentroid returns the true centroid of the polyline multiplied by the
-// length of the polyline. The result is not unit length, so you may wish to
-// normalize it.
-//
-// Scaling by the Polyline length makes it easy to compute the centroid
-// of several Polylines (by simply adding up their centroids).
-//
-// Note that for degenerate Polylines (e.g., AA) this returns Point(0, 0, 0).
-// (This answer is correct; the result of this function is a line integral over
-// the polyline, whose value is always zero if the polyline is degenerate.)
-func polylineCentroid(p []Point) Point {
-	var centroid r3.Vector
-	for i := 1; i < len(p); i++ {
-		centroid = centroid.Add(EdgeTrueCentroid(p[i-1], p[i]).Vector)
-	}
-	return Point{centroid}
-}

+ 0 - 701
vendor/github.com/golang/geo/s2/predicates.go

@@ -1,701 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// This file contains various predicates that are guaranteed to produce
-// correct, consistent results. They are also relatively efficient. This is
-// achieved by computing conservative error bounds and falling back to high
-// precision or even exact arithmetic when the result is uncertain. Such
-// predicates are useful in implementing robust algorithms.
-//
-// See also EdgeCrosser, which implements various exact
-// edge-crossing predicates more efficiently than can be done here.
-
-import (
-	"math"
-	"math/big"
-
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-const (
-	// If any other machine architectures need to be suppported, these next three
-	// values will need to be updated.
-
-	// epsilon is a small number that represents a reasonable level of noise between two
-	// values that can be considered to be equal.
-	epsilon = 1e-15
-	// dblEpsilon is a smaller number for values that require more precision.
-	// This is the C++ DBL_EPSILON equivalent.
-	dblEpsilon = 2.220446049250313e-16
-	// dblError is the C++ value for S2 rounding_epsilon().
-	dblError = 1.110223024625156e-16
-
-	// maxDeterminantError is the maximum error in computing (AxB).C where all vectors
-	// are unit length. Using standard inequalities, it can be shown that
-	//
-	//  fl(AxB) = AxB + D where |D| <= (|AxB| + (2/sqrt(3))*|A|*|B|) * e
-	//
-	// where "fl()" denotes a calculation done in floating-point arithmetic,
-	// |x| denotes either absolute value or the L2-norm as appropriate, and
-	// e is a reasonably small value near the noise level of floating point
-	// number accuracy. Similarly,
-	//
-	//  fl(B.C) = B.C + d where |d| <= (|B.C| + 2*|B|*|C|) * e .
-	//
-	// Applying these bounds to the unit-length vectors A,B,C and neglecting
-	// relative error (which does not affect the sign of the result), we get
-	//
-	//  fl((AxB).C) = (AxB).C + d where |d| <= (3 + 2/sqrt(3)) * e
-	maxDeterminantError = 1.8274 * dblEpsilon
-
-	// detErrorMultiplier is the factor to scale the magnitudes by when checking
-	// for the sign of set of points with certainty. Using a similar technique to
-	// the one used for maxDeterminantError, the error is at most:
-	//
-	//   |d| <= (3 + 6/sqrt(3)) * |A-C| * |B-C| * e
-	//
-	// If the determinant magnitude is larger than this value then we know
-	// its sign with certainty.
-	detErrorMultiplier = 3.2321 * dblEpsilon
-)
-
-// Direction is an indication of the ordering of a set of points.
-type Direction int
-
-// These are the three options for the direction of a set of points.
-const (
-	Clockwise        Direction = -1
-	Indeterminate    Direction = 0
-	CounterClockwise Direction = 1
-)
-
-// newBigFloat constructs a new big.Float with maximum precision.
-func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) }
-
-// Sign returns true if the points A, B, C are strictly counterclockwise,
-// and returns false if the points are clockwise or collinear (i.e. if they are all
-// contained on some great circle).
-//
-// Due to numerical errors, situations may arise that are mathematically
-// impossible, e.g. ABC may be considered strictly CCW while BCA is not.
-// However, the implementation guarantees the following:
-//
-// If Sign(a,b,c), then !Sign(c,b,a) for all a,b,c.
-func Sign(a, b, c Point) bool {
-	// NOTE(dnadasi): In the C++ API the equivalent method here was known as "SimpleSign".
-
-	// We compute the signed volume of the parallelepiped ABC. The usual
-	// formula for this is (A ⨯ B) · C, but we compute it here using (C ⨯ A) · B
-	// in order to ensure that ABC and CBA are not both CCW. This follows
-	// from the following identities (which are true numerically, not just
-	// mathematically):
-	//
-	//     (1) x ⨯ y == -(y ⨯ x)
-	//     (2) -x · y == -(x · y)
-	return c.Cross(a.Vector).Dot(b.Vector) > 0
-}
-
-// RobustSign returns a Direction representing the ordering of the points.
-// CounterClockwise is returned if the points are in counter-clockwise order,
-// Clockwise for clockwise, and Indeterminate if any two points are the same (collinear),
-// or the sign could not completely be determined.
-//
-// This function has additional logic to make sure that the above properties hold even
-// when the three points are coplanar, and to deal with the limitations of
-// floating-point arithmetic.
-//
-// RobustSign satisfies the following conditions:
-//
-//  (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a
-//  (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
-//  (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
-//
-// In other words:
-//
-//  (1) The result is Indeterminate if and only if two points are the same.
-//  (2) Rotating the order of the arguments does not affect the result.
-//  (3) Exchanging any two arguments inverts the result.
-//
-// On the other hand, note that it is not true in general that
-// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities
-// involving antipodal points.
-func RobustSign(a, b, c Point) Direction {
-	sign := triageSign(a, b, c)
-	if sign == Indeterminate {
-		sign = expensiveSign(a, b, c)
-	}
-	return sign
-}
-
-// stableSign reports the direction sign of the points in a numerically stable way.
-// Unlike triageSign, this method can usually compute the correct determinant sign
-// even when all three points are as collinear as possible. For example if three
-// points are spaced 1km apart along a random line on the Earth's surface using
-// the nearest representable points, there is only a 0.4% chance that this method
-// will not be able to find the determinant sign. The probability of failure
-// decreases as the points get closer together; if the collinear points are 1 meter
-// apart, the failure rate drops to 0.0004%.
-//
-// This method could be extended to also handle nearly-antipodal points, but antipodal
-// points are rare in practice so it seems better to simply fall back to
-// exact arithmetic in that case.
-func stableSign(a, b, c Point) Direction {
-	ab := b.Sub(a.Vector)
-	ab2 := ab.Norm2()
-	bc := c.Sub(b.Vector)
-	bc2 := bc.Norm2()
-	ca := a.Sub(c.Vector)
-	ca2 := ca.Norm2()
-
-	// Now compute the determinant ((A-C)x(B-C)).C, where the vertices have been
-	// cyclically permuted if necessary so that AB is the longest edge. (This
-	// minimizes the magnitude of cross product.)  At the same time we also
-	// compute the maximum error in the determinant.
-
-	// The two shortest edges, pointing away from their common point.
-	var e1, e2, op r3.Vector
-	if ab2 >= bc2 && ab2 >= ca2 {
-		// AB is the longest edge.
-		e1, e2, op = ca, bc, c.Vector
-	} else if bc2 >= ca2 {
-		// BC is the longest edge.
-		e1, e2, op = ab, ca, a.Vector
-	} else {
-		// CA is the longest edge.
-		e1, e2, op = bc, ab, b.Vector
-	}
-
-	det := -e1.Cross(e2).Dot(op)
-	maxErr := detErrorMultiplier * math.Sqrt(e1.Norm2()*e2.Norm2())
-
-	// If the determinant isn't zero, within maxErr, we know definitively the point ordering.
-	if det > maxErr {
-		return CounterClockwise
-	}
-	if det < -maxErr {
-		return Clockwise
-	}
-	return Indeterminate
-}
-
-// triageSign returns the direction sign of the points. It returns Indeterminate if two
-// points are identical or the result is uncertain. Uncertain cases can be resolved, if
-// desired, by calling expensiveSign.
-//
-// The purpose of this method is to allow additional cheap tests to be done without
-// calling expensiveSign.
-func triageSign(a, b, c Point) Direction {
-	det := a.Cross(b.Vector).Dot(c.Vector)
-	if det > maxDeterminantError {
-		return CounterClockwise
-	}
-	if det < -maxDeterminantError {
-		return Clockwise
-	}
-	return Indeterminate
-}
-
-// expensiveSign reports the direction sign of the points. It returns Indeterminate
-// if two of the input points are the same. It uses multiple-precision arithmetic
-// to ensure that its results are always self-consistent.
-func expensiveSign(a, b, c Point) Direction {
-	// Return Indeterminate if and only if two points are the same.
-	// This ensures RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a.
-	// ie. Property 1 of RobustSign.
-	if a == b || b == c || c == a {
-		return Indeterminate
-	}
-
-	// Next we try recomputing the determinant still using floating-point
-	// arithmetic but in a more precise way. This is more expensive than the
-	// simple calculation done by triageSign, but it is still *much* cheaper
-	// than using arbitrary-precision arithmetic. This optimization is able to
-	// compute the correct determinant sign in virtually all cases except when
-	// the three points are truly collinear (e.g., three points on the equator).
-	detSign := stableSign(a, b, c)
-	if detSign != Indeterminate {
-		return detSign
-	}
-
-	// Otherwise fall back to exact arithmetic and symbolic permutations.
-	return exactSign(a, b, c, true)
-}
-
-// exactSign reports the direction sign of the points computed using high-precision
-// arithmetic and/or symbolic perturbations.
-func exactSign(a, b, c Point, perturb bool) Direction {
-	// Sort the three points in lexicographic order, keeping track of the sign
-	// of the permutation. (Each exchange inverts the sign of the determinant.)
-	permSign := CounterClockwise
-	pa := &a
-	pb := &b
-	pc := &c
-	if pa.Cmp(pb.Vector) > 0 {
-		pa, pb = pb, pa
-		permSign = -permSign
-	}
-	if pb.Cmp(pc.Vector) > 0 {
-		pb, pc = pc, pb
-		permSign = -permSign
-	}
-	if pa.Cmp(pb.Vector) > 0 {
-		pa, pb = pb, pa
-		permSign = -permSign
-	}
-
-	// Construct multiple-precision versions of the sorted points and compute
-	// their precise 3x3 determinant.
-	xa := r3.PreciseVectorFromVector(pa.Vector)
-	xb := r3.PreciseVectorFromVector(pb.Vector)
-	xc := r3.PreciseVectorFromVector(pc.Vector)
-	xbCrossXc := xb.Cross(xc)
-	det := xa.Dot(xbCrossXc)
-
-	// The precision of big.Float is high enough that the result should always
-	// be exact enough (no rounding was performed).
-
-	// If the exact determinant is non-zero, we're done.
-	detSign := Direction(det.Sign())
-	if detSign == Indeterminate && perturb {
-		// Otherwise, we need to resort to symbolic perturbations to resolve the
-		// sign of the determinant.
-		detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc)
-	}
-	return permSign * detSign
-}
-
-// symbolicallyPerturbedSign reports the sign of the determinant of three points
-// A, B, C under a model where every possible Point is slightly perturbed by
-// a unique infinitesmal amount such that no three perturbed points are
-// collinear and no four points are coplanar. The perturbations are so small
-// that they do not change the sign of any determinant that was non-zero
-// before the perturbations, and therefore can be safely ignored unless the
-// determinant of three points is exactly zero (using multiple-precision
-// arithmetic). This returns CounterClockwise or Clockwise according to the
-// sign of the determinant after the symbolic perturbations are taken into account.
-//
-// Since the symbolic perturbation of a given point is fixed (i.e., the
-// perturbation is the same for all calls to this method and does not depend
-// on the other two arguments), the results of this method are always
-// self-consistent. It will never return results that would correspond to an
-// impossible configuration of non-degenerate points.
-//
-// This requires that the 3x3 determinant of A, B, C must be exactly zero.
-// And the points must be distinct, with A < B < C in lexicographic order.
-//
-// Reference:
-//   "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
-//   Graphics, 1990).
-//
-func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
-	// This method requires that the points are sorted in lexicographically
-	// increasing order. This is because every possible Point has its own
-	// symbolic perturbation such that if A < B then the symbolic perturbation
-	// for A is much larger than the perturbation for B.
-	//
-	// Alternatively, we could sort the points in this method and keep track of
-	// the sign of the permutation, but it is more efficient to do this before
-	// converting the inputs to the multi-precision representation, and this
-	// also lets us re-use the result of the cross product B x C.
-	//
-	// Every input coordinate x[i] is assigned a symbolic perturbation dx[i].
-	// We then compute the sign of the determinant of the perturbed points,
-	// i.e.
-	//               | a.X+da.X  a.Y+da.Y  a.Z+da.Z |
-	//               | b.X+db.X  b.Y+db.Y  b.Z+db.Z |
-	//               | c.X+dc.X  c.Y+dc.Y  c.Z+dc.Z |
-	//
-	// The perturbations are chosen such that
-	//
-	//   da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X
-	//
-	// where each perturbation is so much smaller than the previous one that we
-	// don't even need to consider it unless the coefficients of all previous
-	// perturbations are zero. In fact, it is so small that we don't need to
-	// consider it unless the coefficient of all products of the previous
-	// perturbations are zero. For example, we don't need to consider the
-	// coefficient of db.Y unless the coefficient of db.Z *da.X is zero.
-	//
-	// The follow code simply enumerates the coefficients of the perturbations
-	// (and products of perturbations) that appear in the determinant above, in
-	// order of decreasing perturbation magnitude. The first non-zero
-	// coefficient determines the sign of the result. The easiest way to
-	// enumerate the coefficients in the correct order is to pretend that each
-	// perturbation is some tiny value "eps" raised to a power of two:
-	//
-	// eps**     1      2      4      8     16     32     64    128    256
-	//        da.Z   da.Y   da.X   db.Z   db.Y   db.X   dc.Z   dc.Y   dc.X
-	//
-	// Essentially we can then just count in binary and test the corresponding
-	// subset of perturbations at each step. So for example, we must test the
-	// coefficient of db.Z*da.X before db.Y because eps**12 > eps**16.
-	//
-	// Of course, not all products of these perturbations appear in the
-	// determinant above, since the determinant only contains the products of
-	// elements in distinct rows and columns. Thus we don't need to consider
-	// da.Z*da.Y, db.Y *da.Y, etc. Furthermore, sometimes different pairs of
-	// perturbations have the same coefficient in the determinant; for example,
-	// da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore
-	// we only need to test this coefficient the first time we encounter it in
-	// the binary order above (which will be db.Y*da.X).
-	//
-	// The sequence of tests below also appears in Table 4-ii of the paper
-	// referenced above, if you just want to look it up, with the following
-	// translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that
-	// some of the signs are different because the opposite cross product is
-	// used (e.g., B x C rather than C x B).
-
-	detSign := bCrossC.Z.Sign() // da.Z
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = bCrossC.Y.Sign() // da.Y
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = bCrossC.X.Sign() // da.X
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-
-	detSign = newBigFloat().Sub(newBigFloat().Mul(c.X, a.Y), newBigFloat().Mul(c.Y, a.X)).Sign() // db.Z
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = c.X.Sign() // db.Z * da.Y
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = -(c.Y.Sign()) // db.Z * da.X
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-
-	detSign = newBigFloat().Sub(newBigFloat().Mul(c.Z, a.X), newBigFloat().Mul(c.X, a.Z)).Sign() // db.Y
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = c.Z.Sign() // db.Y * da.X
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-
-	// The following test is listed in the paper, but it is redundant because
-	// the previous tests guarantee that C == (0, 0, 0).
-	// (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
-
-	detSign = newBigFloat().Sub(newBigFloat().Mul(a.X, b.Y), newBigFloat().Mul(a.Y, b.X)).Sign() // dc.Z
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = -(b.X.Sign()) // dc.Z * da.Y
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = b.Y.Sign() // dc.Z * da.X
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	detSign = a.X.Sign() // dc.Z * db.Y
-	if detSign != 0 {
-		return Direction(detSign)
-	}
-	return CounterClockwise // dc.Z * db.Y * da.X
-}
-
-// CompareDistances returns -1, 0, or +1 according to whether AX < BX, A == B,
-// or AX > BX respectively. Distances are measured with respect to the positions
-// of X, A, and B as though they were reprojected to lie exactly on the surface of
-// the unit sphere. Furthermore, this method uses symbolic perturbations to
-// ensure that the result is non-zero whenever A != B, even when AX == BX
-// exactly, or even when A and B project to the same point on the sphere.
-// Such results are guaranteed to be self-consistent, i.e. if AB < BC and
-// BC < AC, then AB < AC.
-func CompareDistances(x, a, b Point) int {
-	// We start by comparing distances using dot products (i.e., cosine of the
-	// angle), because (1) this is the cheapest technique, and (2) it is valid
-	// over the entire range of possible angles. (We can only use the sin^2
-	// technique if both angles are less than 90 degrees or both angles are
-	// greater than 90 degrees.)
-	sign := triageCompareCosDistances(x, a, b)
-	if sign != 0 {
-		return sign
-	}
-
-	// Optimization for (a == b) to avoid falling back to exact arithmetic.
-	if a == b {
-		return 0
-	}
-
-	// It is much better numerically to compare distances using cos(angle) if
-	// the distances are near 90 degrees and sin^2(angle) if the distances are
-	// near 0 or 180 degrees. We only need to check one of the two angles when
-	// making this decision because the fact that the test above failed means
-	// that angles "a" and "b" are very close together.
-	cosAX := a.Dot(x.Vector)
-	if cosAX > 1/math.Sqrt2 {
-		// Angles < 45 degrees.
-		sign = triageCompareSin2Distances(x, a, b)
-	} else if cosAX < -1/math.Sqrt2 {
-		// Angles > 135 degrees. sin^2(angle) is decreasing in this range.
-		sign = -triageCompareSin2Distances(x, a, b)
-	}
-	// C++ adds an additional check here using 80-bit floats.
-	// This is skipped in Go because we only have 32 and 64 bit floats.
-
-	if sign != 0 {
-		return sign
-	}
-
-	sign = exactCompareDistances(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector))
-	if sign != 0 {
-		return sign
-	}
-	return symbolicCompareDistances(x, a, b)
-}
-
-// cosDistance returns cos(XY) where XY is the angle between X and Y, and the
-// maximum error amount in the result. This requires X and Y be normalized.
-func cosDistance(x, y Point) (cos, err float64) {
-	cos = x.Dot(y.Vector)
-	return cos, 9.5*dblError*math.Abs(cos) + 1.5*dblError
-}
-
-// sin2Distance returns sin**2(XY), where XY is the angle between X and Y,
-// and the maximum error amount in the result. This requires X and Y be normalized.
-func sin2Distance(x, y Point) (sin2, err float64) {
-	// The (x-y).Cross(x+y) trick eliminates almost all of error due to x
-	// and y being not quite unit length. This method is extremely accurate
-	// for small distances; the *relative* error in the result is O(dblError) for
-	// distances as small as dblError.
-	n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
-	sin2 = 0.25 * n.Norm2()
-	err = ((21+4*math.Sqrt(3))*dblError*sin2 +
-		32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) +
-		768*dblError*dblError*dblError*dblError)
-	return sin2, err
-}
-
-// triageCompareCosDistances returns -1, 0, or +1 according to whether AX < BX,
-// A == B, or AX > BX by comparing the distances between them using cosDistance.
-func triageCompareCosDistances(x, a, b Point) int {
-	cosAX, cosAXerror := cosDistance(a, x)
-	cosBX, cosBXerror := cosDistance(b, x)
-	diff := cosAX - cosBX
-	err := cosAXerror + cosBXerror
-	if diff > err {
-		return -1
-	}
-	if diff < -err {
-		return 1
-	}
-	return 0
-}
-
-// triageCompareSin2Distances returns -1, 0, or +1 according to whether AX < BX,
-// A == B, or AX > BX by comparing the distances between them using sin2Distance.
-func triageCompareSin2Distances(x, a, b Point) int {
-	sin2AX, sin2AXerror := sin2Distance(a, x)
-	sin2BX, sin2BXerror := sin2Distance(b, x)
-	diff := sin2AX - sin2BX
-	err := sin2AXerror + sin2BXerror
-	if diff > err {
-		return 1
-	}
-	if diff < -err {
-		return -1
-	}
-	return 0
-}
-
-// exactCompareDistances returns -1, 0, or 1 after comparing using the values as
-// PreciseVectors.
-func exactCompareDistances(x, a, b r3.PreciseVector) int {
-	// This code produces the same result as though all points were reprojected
-	// to lie exactly on the surface of the unit sphere. It is based on testing
-	// whether x.Dot(a.Normalize()) < x.Dot(b.Normalize()), reformulated
-	// so that it can be evaluated using exact arithmetic.
-	cosAX := x.Dot(a)
-	cosBX := x.Dot(b)
-
-	// If the two values have different signs, we need to handle that case now
-	// before squaring them below.
-	aSign := cosAX.Sign()
-	bSign := cosBX.Sign()
-	if aSign != bSign {
-		// If cos(AX) > cos(BX), then AX < BX.
-		if aSign > bSign {
-			return -1
-		}
-		return 1
-	}
-	cosAX2 := newBigFloat().Mul(cosAX, cosAX)
-	cosBX2 := newBigFloat().Mul(cosBX, cosBX)
-	cmp := newBigFloat().Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2()))
-	return aSign * cmp.Sign()
-}
-
-// symbolicCompareDistances returns -1, 0, or +1 given three points such that AX == BX
-// (exactly) according to whether AX < BX, AX == BX, or AX > BX after symbolic
-// perturbations are taken into account.
-func symbolicCompareDistances(x, a, b Point) int {
-	// Our symbolic perturbation strategy is based on the following model.
-	// Similar to "simulation of simplicity", we assign a perturbation to every
-	// point such that if A < B, then the symbolic perturbation for A is much,
-	// much larger than the symbolic perturbation for B. We imagine that
-	// rather than projecting every point to lie exactly on the unit sphere,
-	// instead each point is positioned on its own tiny pedestal that raises it
-	// just off the surface of the unit sphere. This means that the distance AX
-	// is actually the true distance AX plus the (symbolic) heights of the
-	// pedestals for A and X. The pedestals are infinitesmally thin, so they do
-	// not affect distance measurements except at the two endpoints. If several
-	// points project to exactly the same point on the unit sphere, we imagine
-	// that they are placed on separate pedestals placed close together, where
-	// the distance between pedestals is much, much less than the height of any
-	// pedestal. (There are a finite number of Points, and therefore a finite
-	// number of pedestals, so this is possible.)
-	//
-	// If A < B, then A is on a higher pedestal than B, and therefore AX > BX.
-	switch a.Cmp(b.Vector) {
-	case -1:
-		return 1
-	case 1:
-		return -1
-	default:
-		return 0
-	}
-}
-
-var (
-	// ca45Degrees is a predefined ChordAngle representing (approximately) 45 degrees.
-	ca45Degrees = s1.ChordAngleFromSquaredLength(2 - math.Sqrt2)
-)
-
-// CompareDistance returns -1, 0, or +1 according to whether the distance XY is
-// respectively less than, equal to, or greater than the provided chord angle. Distances are measured
-// with respect to the positions of all points as though they are projected to lie
-// exactly on the surface of the unit sphere.
-func CompareDistance(x, y Point, r s1.ChordAngle) int {
-	// As with CompareDistances, we start by comparing dot products because
-	// the sin^2 method is only valid when the distance XY and the limit "r" are
-	// both less than 90 degrees.
-	sign := triageCompareCosDistance(x, y, float64(r))
-	if sign != 0 {
-		return sign
-	}
-
-	// Unlike with CompareDistances, it's not worth using the sin^2 method
-	// when the distance limit is near 180 degrees because the ChordAngle
-	// representation itself has has a rounding error of up to 2e-8 radians for
-	// distances near 180 degrees.
-	if r < ca45Degrees {
-		sign = triageCompareSin2Distance(x, y, float64(r))
-		if sign != 0 {
-			return sign
-		}
-	}
-	return exactCompareDistance(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(y.Vector), big.NewFloat(float64(r)).SetPrec(big.MaxPrec))
-}
-
-// triageCompareCosDistance returns -1, 0, or +1 according to whether the distance XY is
-// less than, equal to, or greater than r2 respectively using cos distance.
-func triageCompareCosDistance(x, y Point, r2 float64) int {
-	cosXY, cosXYError := cosDistance(x, y)
-	cosR := 1.0 - 0.5*r2
-	cosRError := 2.0 * dblError * cosR
-	diff := cosXY - cosR
-	err := cosXYError + cosRError
-	if diff > err {
-		return -1
-	}
-	if diff < -err {
-		return 1
-	}
-	return 0
-}
-
-// triageCompareSin2Distance returns -1, 0, or +1 according to whether the distance XY is
-// less than, equal to, or greater than r2 respectively using sin^2 distance.
-func triageCompareSin2Distance(x, y Point, r2 float64) int {
-	// Only valid for distance limits < 90 degrees.
-	sin2XY, sin2XYError := sin2Distance(x, y)
-	sin2R := r2 * (1.0 - 0.25*r2)
-	sin2RError := 3.0 * dblError * sin2R
-	diff := sin2XY - sin2R
-	err := sin2XYError + sin2RError
-	if diff > err {
-		return 1
-	}
-	if diff < -err {
-		return -1
-	}
-	return 0
-}
-
-var (
-	bigOne  = big.NewFloat(1.0).SetPrec(big.MaxPrec)
-	bigHalf = big.NewFloat(0.5).SetPrec(big.MaxPrec)
-)
-
-// exactCompareDistance returns -1, 0, or +1 after comparing using PreciseVectors.
-func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int {
-	// This code produces the same result as though all points were reprojected
-	// to lie exactly on the surface of the unit sphere.  It is based on
-	// comparing the cosine of the angle XY (when both points are projected to
-	// lie exactly on the sphere) to the given threshold.
-	cosXY := x.Dot(y)
-	cosR := newBigFloat().Sub(bigOne, newBigFloat().Mul(bigHalf, r2))
-
-	// If the two values have different signs, we need to handle that case now
-	// before squaring them below.
-	xySign := cosXY.Sign()
-	rSign := cosR.Sign()
-	if xySign != rSign {
-		if xySign > rSign {
-			return -1
-		}
-		return 1 // If cos(XY) > cos(r), then XY < r.
-	}
-	cmp := newBigFloat().Sub(
-		newBigFloat().Mul(
-			newBigFloat().Mul(cosR, cosR), newBigFloat().Mul(x.Norm2(), y.Norm2())),
-		newBigFloat().Mul(cosXY, cosXY))
-	return xySign * cmp.Sign()
-}
-
-// TODO(roberts): Differences from C++
-// CompareEdgeDistance
-// CompareEdgeDirections
-// EdgeCircumcenterSign
-// GetVoronoiSiteExclusion
-// GetClosestVertex
-// TriageCompareLineSin2Distance
-// TriageCompareLineCos2Distance
-// TriageCompareLineDistance
-// TriageCompareEdgeDistance
-// ExactCompareLineDistance
-// ExactCompareEdgeDistance
-// TriageCompareEdgeDirections
-// ExactCompareEdgeDirections
-// ArePointsAntipodal
-// ArePointsLinearlyDependent
-// GetCircumcenter
-// TriageEdgeCircumcenterSign
-// ExactEdgeCircumcenterSign
-// UnperturbedSign
-// SymbolicEdgeCircumcenterSign
-// ExactVoronoiSiteExclusion

+ 0 - 203
vendor/github.com/golang/geo/s2/projections.go

@@ -1,203 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/r2"
-	"github.com/golang/geo/s1"
-)
-
-// Projection defines an interface for different ways of mapping between s2 and r2 Points.
-// It can also define the coordinate wrapping behavior along each axis.
-type Projection interface {
-	// Project converts a point on the sphere to a projected 2D point.
-	Project(p Point) r2.Point
-
-	// Unproject converts a projected 2D point to a point on the sphere.
-	//
-	// If wrapping is defined for a given axis (see below), then this method
-	// should accept any real number for the corresponding coordinate.
-	Unproject(p r2.Point) Point
-
-	// FromLatLng is a convenience function equivalent to Project(LatLngToPoint(ll)),
-	// but the implementation is more efficient.
-	FromLatLng(ll LatLng) r2.Point
-
-	// ToLatLng is a convenience function equivalent to LatLngFromPoint(Unproject(p)),
-	// but the implementation is more efficient.
-	ToLatLng(p r2.Point) LatLng
-
-	// Interpolate returns the point obtained by interpolating the given
-	// fraction of the distance along the line from A to B.
-	// Fractions < 0 or > 1 result in extrapolation instead.
-	Interpolate(f float64, a, b r2.Point) r2.Point
-
-	// WrapDistance reports the coordinate wrapping distance along each axis.
-	// If this value is non-zero for a given axis, the coordinates are assumed
-	// to "wrap" with the given period. For example, if WrapDistance.Y == 360
-	// then (x, y) and (x, y + 360) should map to the same Point.
-	//
-	// This information is used to ensure that edges takes the shortest path
-	// between two given points. For example, if coordinates represent
-	// (latitude, longitude) pairs in degrees and WrapDistance().Y == 360,
-	// then the edge (5:179, 5:-179) would be interpreted as spanning 2 degrees
-	// of longitude rather than 358 degrees.
-	//
-	// If a given axis does not wrap, its WrapDistance should be set to zero.
-	WrapDistance() r2.Point
-}
-
-// PlateCarreeProjection defines the "plate carree" (square plate) projection,
-// which converts points on the sphere to (longitude, latitude) pairs.
-// Coordinates can be scaled so that they represent radians, degrees, etc, but
-// the projection is always centered around (latitude=0, longitude=0).
-//
-// Note that (x, y) coordinates are backwards compared to the usual (latitude,
-// longitude) ordering, in order to match the usual convention for graphs in
-// which "x" is horizontal and "y" is vertical.
-type PlateCarreeProjection struct {
-	xWrap       float64
-	toRadians   float64 // Multiplier to convert coordinates to radians.
-	fromRadians float64 // Multiplier to convert coordinates from radians.
-}
-
-// NewPlateCarreeProjection constructs a plate carree projection where the
-// x-coordinates (lng) span [-xScale, xScale] and the y coordinates (lat)
-// span [-xScale/2, xScale/2]. For example if xScale==180 then the x
-// range is [-180, 180] and the y range is [-90, 90].
-//
-// By default coordinates are expressed in radians, i.e. the x range is
-// [-Pi, Pi] and the y range is [-Pi/2, Pi/2].
-func NewPlateCarreeProjection(xScale float64) Projection {
-	return &PlateCarreeProjection{
-		xWrap:       2 * xScale,
-		toRadians:   math.Pi / xScale,
-		fromRadians: xScale / math.Pi,
-	}
-}
-
-// Project converts a point on the sphere to a projected 2D point.
-func (p *PlateCarreeProjection) Project(pt Point) r2.Point {
-	return p.FromLatLng(LatLngFromPoint(pt))
-}
-
-// Unproject converts a projected 2D point to a point on the sphere.
-func (p *PlateCarreeProjection) Unproject(pt r2.Point) Point {
-	return PointFromLatLng(p.ToLatLng(pt))
-}
-
-// FromLatLng returns the LatLng projected into an R2 Point.
-func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point {
-	return r2.Point{
-		X: p.fromRadians * ll.Lng.Radians(),
-		Y: p.fromRadians * ll.Lat.Radians(),
-	}
-}
-
-// ToLatLng returns the LatLng projected from the given R2 Point.
-func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng {
-	return LatLng{
-		Lat: s1.Angle(p.toRadians * pt.Y),
-		Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)),
-	}
-}
-
-// Interpolate returns the point obtained by interpolating the given
-// fraction of the distance along the line from A to B.
-func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
-	return a.Mul(1 - f).Add(b.Mul(f))
-}
-
-// WrapDistance reports the coordinate wrapping distance along each axis.
-func (p *PlateCarreeProjection) WrapDistance() r2.Point {
-	return r2.Point{p.xWrap, 0}
-}
-
-// MercatorProjection defines the spherical Mercator projection. Google Maps
-// uses this projection together with WGS84 coordinates, in which case it is
-// known as the "Web Mercator" projection (see Wikipedia). This class makes
-// no assumptions regarding the coordinate system of its input points, but
-// simply applies the spherical Mercator projection to them.
-//
-// The Mercator projection is finite in width (x) but infinite in height (y).
-// "x" corresponds to longitude, and spans a finite range such as [-180, 180]
-// (with coordinate wrapping), while "y" is a function of latitude and spans
-// an infinite range. (As "y" coordinates get larger, points get closer to
-// the north pole but never quite reach it.) The north and south poles have
-// infinite "y" values. (Note that this will cause problems if you tessellate
-// a Mercator edge where one endpoint is a pole. If you need to do this, clip
-// the edge first so that the "y" coordinate is no more than about 5 * maxX.)
-type MercatorProjection struct {
-	xWrap       float64
-	toRadians   float64 // Multiplier to convert coordinates to radians.
-	fromRadians float64 // Multiplier to convert coordinates from radians.
-}
-
-// NewMercatorProjection constructs a Mercator projection with the given maximum
-// longitude axis value corresponding to a range of [-maxLng, maxLng].
-// The horizontal and vertical axes are scaled equally.
-func NewMercatorProjection(maxLng float64) Projection {
-	return &MercatorProjection{
-		xWrap:       2 * maxLng,
-		toRadians:   math.Pi / maxLng,
-		fromRadians: maxLng / math.Pi,
-	}
-}
-
-// Project converts a point on the sphere to a projected 2D point.
-func (p *MercatorProjection) Project(pt Point) r2.Point {
-	return p.FromLatLng(LatLngFromPoint(pt))
-}
-
-// Unproject converts a projected 2D point to a point on the sphere.
-func (p *MercatorProjection) Unproject(pt r2.Point) Point {
-	return PointFromLatLng(p.ToLatLng(pt))
-}
-
-// FromLatLng returns the LatLng projected into an R2 Point.
-func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point {
-	// This formula is more accurate near zero than the log(tan()) version.
-	// Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity.
-	sinPhi := math.Sin(float64(ll.Lat))
-	y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi))
-	return r2.Point{p.fromRadians * float64(ll.Lng), p.fromRadians * y}
-}
-
-// ToLatLng returns the LatLng projected from the given R2 Point.
-func (p *MercatorProjection) ToLatLng(pt r2.Point) LatLng {
-	// This formula is more accurate near zero than the atan(exp()) version.
-	x := p.toRadians * math.Remainder(pt.X, p.xWrap)
-	k := math.Exp(2 * p.toRadians * pt.Y)
-	var y float64
-	if math.IsInf(k, 0) {
-		y = math.Pi / 2
-	} else {
-		y = math.Asin((k - 1) / (k + 1))
-	}
-	return LatLng{s1.Angle(y), s1.Angle(x)}
-}
-
-// Interpolate returns the point obtained by interpolating the given
-// fraction of the distance along the line from A to B.
-func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
-	return a.Mul(1 - f).Add(b.Mul(f))
-}
-
-// WrapDistance reports the coordinate wrapping distance along each axis.
-func (p *MercatorProjection) WrapDistance() r2.Point {
-	return r2.Point{p.xWrap, 0}
-}

+ 0 - 196
vendor/github.com/golang/geo/s2/query_options.go

@@ -1,196 +0,0 @@
-// Copyright 2019 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/s1"
-)
-
-const maxQueryResults = math.MaxInt32
-
-// queryOptions represents the set of all configurable parameters used by all of
-// the Query types. Most of these fields have non-zero defaults, so initialization
-// is handled within each Query type. All of the exported methods accept user
-// supplied sets of options to set or adjust as necessary.
-//
-// Several of the defaults depend on the distance interface type being used
-// (e.g. minDistance, maxDistance, etc.)
-//
-// If a user sets an option value that a given query type doesn't use, it is ignored.
-type queryOptions struct {
-	// maxResults specifies that at most MaxResults edges should be returned.
-	// This must be at least 1.
-	//
-	// The default value is to return all results.
-	maxResults int
-
-	// distanceLimit specifies that only edges whose distance to the target is
-	// within this distance should be returned.
-	//
-	// Note that edges whose distance is exactly equal to this are
-	// not returned. In most cases this doesn't matter (since distances are
-	// not computed exactly in the first place), but if such edges are needed
-	// then you can retrieve them by specifying the distance as the next
-	// largest representable distance. i.e. distanceLimit.Successor().
-	//
-	// The default value is the infinity value, such that all results will be
-	// returned.
-	distanceLimit s1.ChordAngle
-
-	// maxError specifies that edges up to MaxError further away than the true
-	// closest edges may be substituted in the result set, as long as such
-	// edges satisfy all the remaining search criteria (such as DistanceLimit).
-	// This option only has an effect if MaxResults is also specified;
-	// otherwise all edges closer than MaxDistance will always be returned.
-	//
-	// Note that this does not affect how the distance between edges is
-	// computed; it simply gives the algorithm permission to stop the search
-	// early as soon as the best possible improvement drops below MaxError.
-	//
-	// This can be used to implement distance predicates efficiently. For
-	// example, to determine whether the minimum distance is less than D, set
-	// MaxResults == 1 and MaxDistance == MaxError == D. This causes
-	// the algorithm to terminate as soon as it finds any edge whose distance
-	// is less than D, rather than continuing to search for an edge that is
-	// even closer.
-	//
-	// The default value is zero.
-	maxError s1.ChordAngle
-
-	// includeInteriors specifies that polygon interiors should be included
-	// when measuring distances. In other words, polygons that contain the target
-	// should have a distance of zero. (For targets consisting of multiple connected
-	// components, the distance is zero if any component is contained.) This
-	// is indicated in the results by returning a (ShapeID, EdgeID) pair
-	// with EdgeID == -1, i.e. this value denotes the polygons's interior.
-	//
-	// Note that for efficiency, any polygon that intersects the target may or
-	// may not have an EdgeID == -1 result. Such results are optional
-	// because in that case the distance to the polygon is already zero.
-	//
-	// The default value is true.
-	includeInteriors bool
-
-	// specifies that distances should be computed by examining every edge
-	// rather than using the ShapeIndex.
-	//
-	// TODO(roberts): When optimized is implemented, update the default to false.
-	// The default value is true.
-	useBruteForce bool
-
-	// region specifies that results must intersect the given Region.
-	//
-	// Note that if you want to set the region to a disc around a target
-	// point, it is faster to use a PointTarget with distanceLimit set
-	// instead. You can also set a distance limit and also require that results
-	// lie within a given rectangle.
-	//
-	// The default is nil (no region limits).
-	region Region
-}
-
-// UseBruteForce sets or disables the use of brute force in a query.
-func (q *queryOptions) UseBruteForce(x bool) *queryOptions {
-	q.useBruteForce = x
-	return q
-}
-
-// IncludeInteriors specifies whether polygon interiors should be
-// included when measuring distances.
-func (q *queryOptions) IncludeInteriors(x bool) *queryOptions {
-	q.includeInteriors = x
-	return q
-}
-
-// MaxError specifies that edges up to dist away than the true
-// matching edges may be substituted in the result set, as long as such
-// edges satisfy all the remaining search criteria (such as DistanceLimit).
-// This option only has an effect if MaxResults is also specified;
-// otherwise all edges closer than MaxDistance will always be returned.
-func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions {
-	q.maxError = x
-	return q
-}
-
-// MaxResults specifies that at most MaxResults edges should be returned.
-// This must be at least 1.
-func (q *queryOptions) MaxResults(x int) *queryOptions {
-	// TODO(roberts): What should be done if the value is <= 0?
-	q.maxResults = int(x)
-	return q
-}
-
-// DistanceLimit specifies that only edges whose distance to the target is
-// within, this distance should be returned. Edges whose distance is equal
-// are not returned.
-//
-// To include values that are equal, specify the limit with the next largest
-// representable distance such as limit.Successor(), or set the option with
-// Furthest/ClosestInclusiveDistanceLimit.
-func (q *queryOptions) DistanceLimit(x s1.ChordAngle) *queryOptions {
-	q.distanceLimit = x
-	return q
-}
-
-// ClosestInclusiveDistanceLimit sets the distance limit such that results whose
-// distance is exactly equal to the limit are also returned.
-func (q *queryOptions) ClosestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
-	q.distanceLimit = limit.Successor()
-	return q
-}
-
-// FurthestInclusiveDistanceLimit sets the distance limit such that results whose
-// distance is exactly equal to the limit are also returned.
-func (q *queryOptions) FurthestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
-	q.distanceLimit = limit.Predecessor()
-	return q
-}
-
-// ClosestConservativeDistanceLimit sets the distance limit such that results
-// also incorporates the error in distance calculations. This ensures that all
-// edges whose true distance is less than or equal to limit will be returned
-// (along with some edges whose true distance is slightly greater).
-//
-// Algorithms that need to do exact distance comparisons can use this
-// option to find a set of candidate edges that can then be filtered
-// further (e.g., using CompareDistance).
-func (q *queryOptions) ClosestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
-	q.distanceLimit = limit.Expanded(minUpdateDistanceMaxError(limit))
-	return q
-}
-
-// FurthestConservativeDistanceLimit sets the distance limit such that results
-// also incorporates the error in distance calculations. This ensures that all
-// edges whose true distance is greater than or equal to limit will be returned
-// (along with some edges whose true distance is slightly less).
-func (q *queryOptions) FurthestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
-	q.distanceLimit = limit.Expanded(-minUpdateDistanceMaxError(limit))
-	return q
-}
-
-// newQueryOptions returns a set of options using the given distance type
-// with the proper default values.
-func newQueryOptions(d distance) *queryOptions {
-	return &queryOptions{
-		maxResults:       maxQueryResults,
-		distanceLimit:    d.infinity().chordAngle(),
-		maxError:         0,
-		includeInteriors: true,
-		useBruteForce:    false,
-		region:           nil,
-	}
-}

+ 0 - 710
vendor/github.com/golang/geo/s2/rect.go

@@ -1,710 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"fmt"
-	"io"
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// Rect represents a closed latitude-longitude rectangle.
-type Rect struct {
-	Lat r1.Interval
-	Lng s1.Interval
-}
-
-var (
-	validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2}
-	validRectLngRange = s1.FullInterval()
-)
-
-// EmptyRect returns the empty rectangle.
-func EmptyRect() Rect { return Rect{r1.EmptyInterval(), s1.EmptyInterval()} }
-
-// FullRect returns the full rectangle.
-func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} }
-
-// RectFromLatLng constructs a rectangle containing a single point p.
-func RectFromLatLng(p LatLng) Rect {
-	return Rect{
-		Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()},
-		Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()},
-	}
-}
-
-// RectFromCenterSize constructs a rectangle with the given size and center.
-// center needs to be normalized, but size does not. The latitude
-// interval of the result is clamped to [-90,90] degrees, and the longitude
-// interval of the result is FullRect() if and only if the longitude size is
-// 360 degrees or more.
-//
-// Examples of clamping (in degrees):
-//   center=(80,170),  size=(40,60)   -> lat=[60,90],   lng=[140,-160]
-//   center=(10,40),   size=(210,400) -> lat=[-90,90],  lng=[-180,180]
-//   center=(-90,180), size=(20,50)   -> lat=[-90,-80], lng=[155,-155]
-func RectFromCenterSize(center, size LatLng) Rect {
-	half := LatLng{size.Lat / 2, size.Lng / 2}
-	return RectFromLatLng(center).expanded(half)
-}
-
-// IsValid returns true iff the rectangle is valid.
-// This requires Lat ⊆ [-π/2,π/2] and Lng ⊆ [-π,π], and Lat = ∅ iff Lng = ∅
-func (r Rect) IsValid() bool {
-	return math.Abs(r.Lat.Lo) <= math.Pi/2 &&
-		math.Abs(r.Lat.Hi) <= math.Pi/2 &&
-		r.Lng.IsValid() &&
-		r.Lat.IsEmpty() == r.Lng.IsEmpty()
-}
-
-// IsEmpty reports whether the rectangle is empty.
-func (r Rect) IsEmpty() bool { return r.Lat.IsEmpty() }
-
-// IsFull reports whether the rectangle is full.
-func (r Rect) IsFull() bool { return r.Lat.Equal(validRectLatRange) && r.Lng.IsFull() }
-
-// IsPoint reports whether the rectangle is a single point.
-func (r Rect) IsPoint() bool { return r.Lat.Lo == r.Lat.Hi && r.Lng.Lo == r.Lng.Hi }
-
-// Vertex returns the i-th vertex of the rectangle (i = 0,1,2,3) in CCW order
-// (lower left, lower right, upper right, upper left).
-func (r Rect) Vertex(i int) LatLng {
-	var lat, lng float64
-
-	switch i {
-	case 0:
-		lat = r.Lat.Lo
-		lng = r.Lng.Lo
-	case 1:
-		lat = r.Lat.Lo
-		lng = r.Lng.Hi
-	case 2:
-		lat = r.Lat.Hi
-		lng = r.Lng.Hi
-	case 3:
-		lat = r.Lat.Hi
-		lng = r.Lng.Lo
-	}
-	return LatLng{s1.Angle(lat) * s1.Radian, s1.Angle(lng) * s1.Radian}
-}
-
-// Lo returns one corner of the rectangle.
-func (r Rect) Lo() LatLng {
-	return LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(r.Lng.Lo) * s1.Radian}
-}
-
-// Hi returns the other corner of the rectangle.
-func (r Rect) Hi() LatLng {
-	return LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(r.Lng.Hi) * s1.Radian}
-}
-
-// Center returns the center of the rectangle.
-func (r Rect) Center() LatLng {
-	return LatLng{s1.Angle(r.Lat.Center()) * s1.Radian, s1.Angle(r.Lng.Center()) * s1.Radian}
-}
-
-// Size returns the size of the Rect.
-func (r Rect) Size() LatLng {
-	return LatLng{s1.Angle(r.Lat.Length()) * s1.Radian, s1.Angle(r.Lng.Length()) * s1.Radian}
-}
-
-// Area returns the surface area of the Rect.
-func (r Rect) Area() float64 {
-	if r.IsEmpty() {
-		return 0
-	}
-	capDiff := math.Abs(math.Sin(r.Lat.Hi) - math.Sin(r.Lat.Lo))
-	return r.Lng.Length() * capDiff
-}
-
-// AddPoint increases the size of the rectangle to include the given point.
-func (r Rect) AddPoint(ll LatLng) Rect {
-	if !ll.IsValid() {
-		return r
-	}
-	return Rect{
-		Lat: r.Lat.AddPoint(ll.Lat.Radians()),
-		Lng: r.Lng.AddPoint(ll.Lng.Radians()),
-	}
-}
-
-// expanded returns a rectangle that has been expanded by margin.Lat on each side
-// in the latitude direction, and by margin.Lng on each side in the longitude
-// direction. If either margin is negative, then it shrinks the rectangle on
-// the corresponding sides instead. The resulting rectangle may be empty.
-//
-// The latitude-longitude space has the topology of a cylinder. Longitudes
-// "wrap around" at +/-180 degrees, while latitudes are clamped to range [-90, 90].
-// This means that any expansion (positive or negative) of the full longitude range
-// remains full (since the "rectangle" is actually a continuous band around the
-// cylinder), while expansion of the full latitude range remains full only if the
-// margin is positive.
-//
-// If either the latitude or longitude interval becomes empty after
-// expansion by a negative margin, the result is empty.
-//
-// Note that if an expanded rectangle contains a pole, it may not contain
-// all possible lat/lng representations of that pole, e.g., both points [π/2,0]
-// and [π/2,1] represent the same pole, but they might not be contained by the
-// same Rect.
-//
-// If you are trying to grow a rectangle by a certain distance on the
-// sphere (e.g. 5km), refer to the ExpandedByDistance() C++ method implementation
-// instead.
-func (r Rect) expanded(margin LatLng) Rect {
-	lat := r.Lat.Expanded(margin.Lat.Radians())
-	lng := r.Lng.Expanded(margin.Lng.Radians())
-
-	if lat.IsEmpty() || lng.IsEmpty() {
-		return EmptyRect()
-	}
-
-	return Rect{
-		Lat: lat.Intersection(validRectLatRange),
-		Lng: lng,
-	}
-}
-
-func (r Rect) String() string { return fmt.Sprintf("[Lo%v, Hi%v]", r.Lo(), r.Hi()) }
-
-// PolarClosure returns the rectangle unmodified if it does not include either pole.
-// If it includes either pole, PolarClosure returns an expansion of the rectangle along
-// the longitudinal range to include all possible representations of the contained poles.
-func (r Rect) PolarClosure() Rect {
-	if r.Lat.Lo == -math.Pi/2 || r.Lat.Hi == math.Pi/2 {
-		return Rect{r.Lat, s1.FullInterval()}
-	}
-	return r
-}
-
-// Union returns the smallest Rect containing the union of this rectangle and the given rectangle.
-func (r Rect) Union(other Rect) Rect {
-	return Rect{
-		Lat: r.Lat.Union(other.Lat),
-		Lng: r.Lng.Union(other.Lng),
-	}
-}
-
-// Intersection returns the smallest rectangle containing the intersection of
-// this rectangle and the given rectangle. Note that the region of intersection
-// may consist of two disjoint rectangles, in which case a single rectangle
-// spanning both of them is returned.
-func (r Rect) Intersection(other Rect) Rect {
-	lat := r.Lat.Intersection(other.Lat)
-	lng := r.Lng.Intersection(other.Lng)
-
-	if lat.IsEmpty() || lng.IsEmpty() {
-		return EmptyRect()
-	}
-	return Rect{lat, lng}
-}
-
-// Intersects reports whether this rectangle and the other have any points in common.
-func (r Rect) Intersects(other Rect) bool {
-	return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng)
-}
-
-// CapBound returns a cap that contains Rect.
-func (r Rect) CapBound() Cap {
-	// We consider two possible bounding caps, one whose axis passes
-	// through the center of the lat-long rectangle and one whose axis
-	// is the north or south pole.  We return the smaller of the two caps.
-
-	if r.IsEmpty() {
-		return EmptyCap()
-	}
-
-	var poleZ, poleAngle float64
-	if r.Lat.Hi+r.Lat.Lo < 0 {
-		// South pole axis yields smaller cap.
-		poleZ = -1
-		poleAngle = math.Pi/2 + r.Lat.Hi
-	} else {
-		poleZ = 1
-		poleAngle = math.Pi/2 - r.Lat.Lo
-	}
-	poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian)
-
-	// For bounding rectangles that span 180 degrees or less in longitude, the
-	// maximum cap size is achieved at one of the rectangle vertices.  For
-	// rectangles that are larger than 180 degrees, we punt and always return a
-	// bounding cap centered at one of the two poles.
-	if math.Remainder(r.Lng.Hi-r.Lng.Lo, 2*math.Pi) >= 0 && r.Lng.Hi-r.Lng.Lo < 2*math.Pi {
-		midCap := CapFromPoint(PointFromLatLng(r.Center())).AddPoint(PointFromLatLng(r.Lo())).AddPoint(PointFromLatLng(r.Hi()))
-		if midCap.Height() < poleCap.Height() {
-			return midCap
-		}
-	}
-	return poleCap
-}
-
-// RectBound returns itself.
-func (r Rect) RectBound() Rect {
-	return r
-}
-
-// Contains reports whether this Rect contains the other Rect.
-func (r Rect) Contains(other Rect) bool {
-	return r.Lat.ContainsInterval(other.Lat) && r.Lng.ContainsInterval(other.Lng)
-}
-
-// ContainsCell reports whether the given Cell is contained by this Rect.
-func (r Rect) ContainsCell(c Cell) bool {
-	// A latitude-longitude rectangle contains a cell if and only if it contains
-	// the cell's bounding rectangle. This test is exact from a mathematical
-	// point of view, assuming that the bounds returned by Cell.RectBound()
-	// are tight. However, note that there can be a loss of precision when
-	// converting between representations -- for example, if an s2.Cell is
-	// converted to a polygon, the polygon's bounding rectangle may not contain
-	// the cell's bounding rectangle. This has some slightly unexpected side
-	// effects; for instance, if one creates an s2.Polygon from an s2.Cell, the
-	// polygon will contain the cell, but the polygon's bounding box will not.
-	return r.Contains(c.RectBound())
-}
-
-// ContainsLatLng reports whether the given LatLng is within the Rect.
-func (r Rect) ContainsLatLng(ll LatLng) bool {
-	if !ll.IsValid() {
-		return false
-	}
-	return r.Lat.Contains(ll.Lat.Radians()) && r.Lng.Contains(ll.Lng.Radians())
-}
-
-// ContainsPoint reports whether the given Point is within the Rect.
-func (r Rect) ContainsPoint(p Point) bool {
-	return r.ContainsLatLng(LatLngFromPoint(p))
-}
-
-// CellUnionBound computes a covering of the Rect.
-func (r Rect) CellUnionBound() []CellID {
-	return r.CapBound().CellUnionBound()
-}
-
-// intersectsLatEdge reports whether the edge AB intersects the given edge of constant
-// latitude. Requires the points to have unit length.
-func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
-	// Unfortunately, lines of constant latitude are curves on
-	// the sphere. They can intersect a straight edge in 0, 1, or 2 points.
-
-	// First, compute the normal to the plane AB that points vaguely north.
-	z := Point{a.PointCross(b).Normalize()}
-	if z.Z < 0 {
-		z = Point{z.Mul(-1)}
-	}
-
-	// Extend this to an orthonormal frame (x,y,z) where x is the direction
-	// where the great circle through AB achieves its maximium latitude.
-	y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()}
-	x := y.Cross(z.Vector)
-
-	// Compute the angle "theta" from the x-axis (in the x-y plane defined
-	// above) where the great circle intersects the given line of latitude.
-	sinLat := math.Sin(float64(lat))
-	if math.Abs(sinLat) >= x.Z {
-		// The great circle does not reach the given latitude.
-		return false
-	}
-
-	cosTheta := sinLat / x.Z
-	sinTheta := math.Sqrt(1 - cosTheta*cosTheta)
-	theta := math.Atan2(sinTheta, cosTheta)
-
-	// The candidate intersection points are located +/- theta in the x-y
-	// plane. For an intersection to be valid, we need to check that the
-	// intersection point is contained in the interior of the edge AB and
-	// also that it is contained within the given longitude interval "lng".
-
-	// Compute the range of theta values spanned by the edge AB.
-	abTheta := s1.IntervalFromPointPair(
-		math.Atan2(a.Dot(y.Vector), a.Dot(x)),
-		math.Atan2(b.Dot(y.Vector), b.Dot(x)))
-
-	if abTheta.Contains(theta) {
-		// Check if the intersection point is also in the given lng interval.
-		isect := x.Mul(cosTheta).Add(y.Mul(sinTheta))
-		if lng.Contains(math.Atan2(isect.Y, isect.X)) {
-			return true
-		}
-	}
-
-	if abTheta.Contains(-theta) {
-		// Check if the other intersection point is also in the given lng interval.
-		isect := x.Mul(cosTheta).Sub(y.Mul(sinTheta))
-		if lng.Contains(math.Atan2(isect.Y, isect.X)) {
-			return true
-		}
-	}
-	return false
-}
-
-// intersectsLngEdge reports whether the edge AB intersects the given edge of constant
-// longitude. Requires the points to have unit length.
-func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
-	// The nice thing about edges of constant longitude is that
-	// they are straight lines on the sphere (geodesics).
-	return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
-		PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross
-}
-
-// IntersectsCell reports whether this rectangle intersects the given cell. This is an
-// exact test and may be fairly expensive.
-func (r Rect) IntersectsCell(c Cell) bool {
-	// First we eliminate the cases where one region completely contains the
-	// other. Once these are disposed of, then the regions will intersect
-	// if and only if their boundaries intersect.
-	if r.IsEmpty() {
-		return false
-	}
-	if r.ContainsPoint(Point{c.id.rawPoint()}) {
-		return true
-	}
-	if c.ContainsPoint(PointFromLatLng(r.Center())) {
-		return true
-	}
-
-	// Quick rejection test (not required for correctness).
-	if !r.Intersects(c.RectBound()) {
-		return false
-	}
-
-	// Precompute the cell vertices as points and latitude-longitudes. We also
-	// check whether the Cell contains any corner of the rectangle, or
-	// vice-versa, since the edge-crossing tests only check the edge interiors.
-	vertices := [4]Point{}
-	latlngs := [4]LatLng{}
-
-	for i := range vertices {
-		vertices[i] = c.Vertex(i)
-		latlngs[i] = LatLngFromPoint(vertices[i])
-		if r.ContainsLatLng(latlngs[i]) {
-			return true
-		}
-		if c.ContainsPoint(PointFromLatLng(r.Vertex(i))) {
-			return true
-		}
-	}
-
-	// Now check whether the boundaries intersect. Unfortunately, a
-	// latitude-longitude rectangle does not have straight edges: two edges
-	// are curved, and at least one of them is concave.
-	for i := range vertices {
-		edgeLng := s1.IntervalFromEndpoints(latlngs[i].Lng.Radians(), latlngs[(i+1)&3].Lng.Radians())
-		if !r.Lng.Intersects(edgeLng) {
-			continue
-		}
-
-		a := vertices[i]
-		b := vertices[(i+1)&3]
-		if edgeLng.Contains(r.Lng.Lo) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Lo)) {
-			return true
-		}
-		if edgeLng.Contains(r.Lng.Hi) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Hi)) {
-			return true
-		}
-		if intersectsLatEdge(a, b, s1.Angle(r.Lat.Lo), r.Lng) {
-			return true
-		}
-		if intersectsLatEdge(a, b, s1.Angle(r.Lat.Hi), r.Lng) {
-			return true
-		}
-	}
-	return false
-}
-
-// Encode encodes the Rect.
-func (r Rect) Encode(w io.Writer) error {
-	e := &encoder{w: w}
-	r.encode(e)
-	return e.err
-}
-
-func (r Rect) encode(e *encoder) {
-	e.writeInt8(encodingVersion)
-	e.writeFloat64(r.Lat.Lo)
-	e.writeFloat64(r.Lat.Hi)
-	e.writeFloat64(r.Lng.Lo)
-	e.writeFloat64(r.Lng.Hi)
-}
-
-// Decode decodes a rectangle.
-func (r *Rect) Decode(rd io.Reader) error {
-	d := &decoder{r: asByteReader(rd)}
-	r.decode(d)
-	return d.err
-}
-
-func (r *Rect) decode(d *decoder) {
-	if version := d.readUint8(); int(version) != int(encodingVersion) && d.err == nil {
-		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
-		return
-	}
-	r.Lat.Lo = d.readFloat64()
-	r.Lat.Hi = d.readFloat64()
-	r.Lng.Lo = d.readFloat64()
-	r.Lng.Hi = d.readFloat64()
-	return
-}
-
-// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere)
-// from a given point to the rectangle (both its boundary and its interior).
-// If r is empty, the result is meaningless.
-// The latlng must be valid.
-func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle {
-	if r.Lng.Contains(float64(ll.Lng)) {
-		return maxAngle(0, ll.Lat-s1.Angle(r.Lat.Hi), s1.Angle(r.Lat.Lo)-ll.Lat)
-	}
-
-	i := s1.IntervalFromEndpoints(r.Lng.Hi, r.Lng.ComplementCenter())
-	rectLng := r.Lng.Lo
-	if i.Contains(float64(ll.Lng)) {
-		rectLng = r.Lng.Hi
-	}
-
-	lo := LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
-	hi := LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
-	return DistanceFromSegment(PointFromLatLng(ll), PointFromLatLng(lo), PointFromLatLng(hi))
-}
-
-// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the
-// surface of the sphere) to the given Rect. The directed Hausdorff
-// distance from rectangle A to rectangle B is given by
-//     h(A, B) = max_{p in A} min_{q in B} d(p, q).
-func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle {
-	if r.IsEmpty() {
-		return 0 * s1.Radian
-	}
-	if other.IsEmpty() {
-		return math.Pi * s1.Radian
-	}
-
-	lng := r.Lng.DirectedHausdorffDistance(other.Lng)
-	return directedHausdorffDistance(lng, r.Lat, other.Lat)
-}
-
-// HausdorffDistance returns the undirected Hausdorff distance (measured along the
-// surface of the sphere) to the given Rect.
-// The Hausdorff distance between rectangle A and rectangle B is given by
-//     H(A, B) = max{h(A, B), h(B, A)}.
-func (r Rect) HausdorffDistance(other Rect) s1.Angle {
-	return maxAngle(r.DirectedHausdorffDistance(other),
-		other.DirectedHausdorffDistance(r))
-}
-
-// ApproxEqual reports whether the latitude and longitude intervals of the two rectangles
-// are the same up to a small tolerance.
-func (r Rect) ApproxEqual(other Rect) bool {
-	return r.Lat.ApproxEqual(other.Lat) && r.Lng.ApproxEqual(other.Lng)
-}
-
-// directedHausdorffDistance returns the directed Hausdorff distance
-// from one longitudinal edge spanning latitude range 'a' to the other
-// longitudinal edge spanning latitude range 'b', with their longitudinal
-// difference given by 'lngDiff'.
-func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle {
-	// By symmetry, we can assume a's longitude is 0 and b's longitude is
-	// lngDiff. Call b's two endpoints bLo and bHi. Let H be the hemisphere
-	// containing a and delimited by the longitude line of b. The Voronoi diagram
-	// of b on H has three edges (portions of great circles) all orthogonal to b
-	// and meeting at bLo cross bHi.
-	// E1: (bLo, bLo cross bHi)
-	// E2: (bHi, bLo cross bHi)
-	// E3: (-bMid, bLo cross bHi), where bMid is the midpoint of b
-	//
-	// They subdivide H into three Voronoi regions. Depending on how longitude 0
-	// (which contains edge a) intersects these regions, we distinguish two cases:
-	// Case 1: it intersects three regions. This occurs when lngDiff <= π/2.
-	// Case 2: it intersects only two regions. This occurs when lngDiff > π/2.
-	//
-	// In the first case, the directed Hausdorff distance to edge b can only be
-	// realized by the following points on a:
-	// A1: two endpoints of a.
-	// A2: intersection of a with the equator, if b also intersects the equator.
-	//
-	// In the second case, the directed Hausdorff distance to edge b can only be
-	// realized by the following points on a:
-	// B1: two endpoints of a.
-	// B2: intersection of a with E3
-	// B3: farthest point from bLo to the interior of D, and farthest point from
-	//     bHi to the interior of U, if any, where D (resp. U) is the portion
-	//     of edge a below (resp. above) the intersection point from B2.
-
-	if lngDiff < 0 {
-		panic("impossible: negative lngDiff")
-	}
-	if lngDiff > math.Pi {
-		panic("impossible: lngDiff > Pi")
-	}
-
-	if lngDiff == 0 {
-		return s1.Angle(a.DirectedHausdorffDistance(b))
-	}
-
-	// Assumed longitude of b.
-	bLng := lngDiff
-	// Two endpoints of b.
-	bLo := PointFromLatLng(LatLng{s1.Angle(b.Lo), bLng})
-	bHi := PointFromLatLng(LatLng{s1.Angle(b.Hi), bLng})
-
-	// Cases A1 and B1.
-	aLo := PointFromLatLng(LatLng{s1.Angle(a.Lo), 0})
-	aHi := PointFromLatLng(LatLng{s1.Angle(a.Hi), 0})
-	maxDistance := maxAngle(
-		DistanceFromSegment(aLo, bLo, bHi),
-		DistanceFromSegment(aHi, bLo, bHi))
-
-	if lngDiff <= math.Pi/2 {
-		// Case A2.
-		if a.Contains(0) && b.Contains(0) {
-			maxDistance = maxAngle(maxDistance, lngDiff)
-		}
-		return maxDistance
-	}
-
-	// Case B2.
-	p := bisectorIntersection(b, bLng)
-	pLat := LatLngFromPoint(p).Lat
-	if a.Contains(float64(pLat)) {
-		maxDistance = maxAngle(maxDistance, p.Angle(bLo.Vector))
-	}
-
-	// Case B3.
-	if pLat > s1.Angle(a.Lo) {
-		intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo)
-		if ok {
-			maxDistance = maxAngle(maxDistance, intDist)
-		}
-	}
-	if pLat < s1.Angle(a.Hi) {
-		intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi)
-		if ok {
-			maxDistance = maxAngle(maxDistance, intDist)
-		}
-	}
-
-	return maxDistance
-}
-
-// interiorMaxDistance returns the max distance from a point b to the segment spanning latitude range
-// aLat on longitude 0 if the max occurs in the interior of aLat. Otherwise, returns (0, false).
-func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) {
-	// Longitude 0 is in the y=0 plane. b.X >= 0 implies that the maximum
-	// does not occur in the interior of aLat.
-	if aLat.IsEmpty() || b.X >= 0 {
-		return 0, false
-	}
-
-	// Project b to the y=0 plane. The antipodal of the normalized projection is
-	// the point at which the maxium distance from b occurs, if it is contained
-	// in aLat.
-	intersectionPoint := PointFromCoords(-b.X, 0, -b.Z)
-	if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) {
-		return 0, false
-	}
-	return b.Angle(intersectionPoint.Vector), true
-}
-
-// bisectorIntersection return the intersection of longitude 0 with the bisector of an edge
-// on longitude 'lng' and spanning latitude range 'lat'.
-func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point {
-	lng = s1.Angle(math.Abs(float64(lng)))
-	latCenter := s1.Angle(lat.Center())
-
-	// A vector orthogonal to the bisector of the given longitudinal edge.
-	orthoBisector := LatLng{latCenter - math.Pi/2, lng}
-	if latCenter < 0 {
-		orthoBisector = LatLng{-latCenter - math.Pi/2, lng - math.Pi}
-	}
-
-	// A vector orthogonal to longitude 0.
-	orthoLng := Point{r3.Vector{0, -1, 0}}
-
-	return orthoLng.PointCross(PointFromLatLng(orthoBisector))
-}
-
-// Centroid returns the true centroid of the given Rect multiplied by its
-// surface area. The result is not unit length, so you may want to normalize it.
-// Note that in general the centroid is *not* at the center of the rectangle, and
-// in fact it may not even be contained by the rectangle. (It is the "center of
-// mass" of the rectangle viewed as subset of the unit sphere, i.e. it is the
-// point in space about which this curved shape would rotate.)
-//
-// The reason for multiplying the result by the rectangle area is to make it
-// easier to compute the centroid of more complicated shapes. The centroid
-// of a union of disjoint regions can be computed simply by adding their
-// Centroid results.
-func (r Rect) Centroid() Point {
-	// When a sphere is divided into slices of constant thickness by a set
-	// of parallel planes, all slices have the same surface area. This
-	// implies that the z-component of the centroid is simply the midpoint
-	// of the z-interval spanned by the Rect.
-	//
-	// Similarly, it is easy to see that the (x,y) of the centroid lies in
-	// the plane through the midpoint of the rectangle's longitude interval.
-	// We only need to determine the distance "d" of this point from the
-	// z-axis.
-	//
-	// Let's restrict our attention to a particular z-value. In this
-	// z-plane, the Rect is a circular arc. The centroid of this arc
-	// lies on a radial line through the midpoint of the arc, and at a
-	// distance from the z-axis of
-	//
-	//     r * (sin(alpha) / alpha)
-	//
-	// where r = sqrt(1-z^2) is the radius of the arc, and "alpha" is half
-	// of the arc length (i.e., the arc covers longitudes [-alpha, alpha]).
-	//
-	// To find the centroid distance from the z-axis for the entire
-	// rectangle, we just need to integrate over the z-interval. This gives
-	//
-	//    d = Integrate[sqrt(1-z^2)*sin(alpha)/alpha, z1..z2] / (z2 - z1)
-	//
-	// where [z1, z2] is the range of z-values covered by the rectangle.
-	// This simplifies to
-	//
-	//    d = sin(alpha)/(2*alpha*(z2-z1))*(z2*r2 - z1*r1 + theta2 - theta1)
-	//
-	// where [theta1, theta2] is the latitude interval, z1=sin(theta1),
-	// z2=sin(theta2), r1=cos(theta1), and r2=cos(theta2).
-	//
-	// Finally, we want to return not the centroid itself, but the centroid
-	// scaled by the area of the rectangle. The area of the rectangle is
-	//
-	//    A = 2 * alpha * (z2 - z1)
-	//
-	// which fortunately appears in the denominator of "d".
-
-	if r.IsEmpty() {
-		return Point{}
-	}
-
-	z1 := math.Sin(r.Lat.Lo)
-	z2 := math.Sin(r.Lat.Hi)
-	r1 := math.Cos(r.Lat.Lo)
-	r2 := math.Cos(r.Lat.Hi)
-
-	alpha := 0.5 * r.Lng.Length()
-	r0 := math.Sin(alpha) * (r2*z2 - r1*z1 + r.Lat.Length())
-	lng := r.Lng.Center()
-	z := alpha * (z2 + z1) * (z2 - z1) // scaled by the area
-
-	return Point{r3.Vector{r0 * math.Cos(lng), r0 * math.Sin(lng), z}}
-}
-
-// BUG: The major differences from the C++ version are:
-//  - Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)

+ 0 - 352
vendor/github.com/golang/geo/s2/rect_bounder.go

@@ -1,352 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"math"
-
-	"github.com/golang/geo/r1"
-	"github.com/golang/geo/r3"
-	"github.com/golang/geo/s1"
-)
-
-// RectBounder is used to compute a bounding rectangle that contains all edges
-// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
-// Note that the bounding rectangle of an edge can be larger than the bounding
-// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
-//
-// The bounds are calculated conservatively to account for numerical errors
-// when points are converted to LatLngs. More precisely, this function
-// guarantees the following:
-// Let L be a closed edge chain (Loop) such that the interior of the loop does
-// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
-// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
-type RectBounder struct {
-	// The previous vertex in the chain.
-	a Point
-	// The previous vertex latitude longitude.
-	aLL   LatLng
-	bound Rect
-}
-
-// NewRectBounder returns a new instance of a RectBounder.
-func NewRectBounder() *RectBounder {
-	return &RectBounder{
-		bound: EmptyRect(),
-	}
-}
-
-// maxErrorForTests returns the maximum error in RectBound provided that the
-// result does not include either pole. It is only used for testing purposes
-func (r *RectBounder) maxErrorForTests() LatLng {
-	// The maximum error in the latitude calculation is
-	//    3.84 * dblEpsilon   for the PointCross calculation
-	//    0.96 * dblEpsilon   for the Latitude calculation
-	//    5    * dblEpsilon   added by AddPoint/RectBound to compensate for error
-	//    -----------------
-	//    9.80 * dblEpsilon   maximum error in result
-	//
-	// The maximum error in the longitude calculation is dblEpsilon. RectBound
-	// does not do any expansion because this isn't necessary in order to
-	// bound the *rounded* longitudes of contained points.
-	return LatLng{10 * dblEpsilon * s1.Radian, 1 * dblEpsilon * s1.Radian}
-}
-
-// AddPoint adds the given point to the chain. The Point must be unit length.
-func (r *RectBounder) AddPoint(b Point) {
-	bLL := LatLngFromPoint(b)
-
-	if r.bound.IsEmpty() {
-		r.a = b
-		r.aLL = bLL
-		r.bound = r.bound.AddPoint(bLL)
-		return
-	}
-
-	// First compute the cross product N = A x B robustly. This is the normal
-	// to the great circle through A and B. We don't use RobustSign
-	// since that method returns an arbitrary vector orthogonal to A if the two
-	// vectors are proportional, and we want the zero vector in that case.
-	n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)
-
-	// The relative error in N gets large as its norm gets very small (i.e.,
-	// when the two points are nearly identical or antipodal). We handle this
-	// by choosing a maximum allowable error, and if the error is greater than
-	// this we fall back to a different technique. Since it turns out that
-	// the other sources of error in converting the normal to a maximum
-	// latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
-	// have the total error be a multiple of dblEpsilon, we have chosen to
-	// limit the maximum error in the normal to be 3.84 * dblEpsilon.
-	// It is possible to show that the error is less than this when
-	//
-	// n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
-	//          = 1.91346e-15 (about 8.618 * dblEpsilon)
-	nNorm := n.Norm()
-	if nNorm < 1.91346e-15 {
-		// A and B are either nearly identical or nearly antipodal (to within
-		// 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
-		if r.a.Dot(b.Vector) < 0 {
-			// The two points are nearly antipodal. The easiest solution is to
-			// assume that the edge between A and B could go in any direction
-			// around the sphere.
-			r.bound = FullRect()
-		} else {
-			// The two points are nearly identical (to within 4.309 * dblEpsilon).
-			// In this case we can just use the bounding rectangle of the points,
-			// since after the expansion done by GetBound this Rect is
-			// guaranteed to include the (lat,lng) values of all points along AB.
-			r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
-		}
-		r.a = b
-		r.aLL = bLL
-		return
-	}
-
-	// Compute the longitude range spanned by AB.
-	lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
-	if lngAB.Length() >= math.Pi-2*dblEpsilon {
-		// The points lie on nearly opposite lines of longitude to within the
-		// maximum error of the calculation. The easiest solution is to assume
-		// that AB could go on either side of the pole.
-		lngAB = s1.FullInterval()
-	}
-
-	// Next we compute the latitude range spanned by the edge AB. We start
-	// with the range spanning the two endpoints of the edge:
-	latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())
-
-	// This is the desired range unless the edge AB crosses the plane
-	// through N and the Z-axis (which is where the great circle through A
-	// and B attains its minimum and maximum latitudes). To test whether AB
-	// crosses this plane, we compute a vector M perpendicular to this
-	// plane and then project A and B onto it.
-	m := n.Cross(r3.Vector{0, 0, 1})
-	mA := m.Dot(r.a.Vector)
-	mB := m.Dot(b.Vector)
-
-	// We want to test the signs of "mA" and "mB", so we need to bound
-	// the error in these calculations. It is possible to show that the
-	// total error is bounded by
-	//
-	// (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
-	//   = 6.06638e-16 * nNorm + 6.83174e-31
-
-	mError := 6.06638e-16*nNorm + 6.83174e-31
-	if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
-		// Minimum/maximum latitude *may* occur in the edge interior.
-		//
-		// The maximum latitude is 90 degrees minus the latitude of N. We
-		// compute this directly using atan2 in order to get maximum accuracy
-		// near the poles.
-		//
-		// Our goal is compute a bound that contains the computed latitudes of
-		// all S2Points P that pass the point-in-polygon containment test.
-		// There are three sources of error we need to consider:
-		// - the directional error in N (at most 3.84 * dblEpsilon)
-		// - converting N to a maximum latitude
-		// - computing the latitude of the test point P
-		// The latter two sources of error are at most 0.955 * dblEpsilon
-		// individually, but it is possible to show by a more complex analysis
-		// that together they can add up to at most 1.16 * dblEpsilon, for a
-		// total error of 5 * dblEpsilon.
-		//
-		// We add 3 * dblEpsilon to the bound here, and GetBound() will pad
-		// the bound by another 2 * dblEpsilon.
-		maxLat := math.Min(
-			math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
-			math.Pi/2)
-
-		// In order to get tight bounds when the two points are close together,
-		// we also bound the min/max latitude relative to the latitudes of the
-		// endpoints A and B. First we compute the distance between A and B,
-		// and then we compute the maximum change in latitude between any two
-		// points along the great circle that are separated by this distance.
-		// This gives us a latitude change "budget". Some of this budget must
-		// be spent getting from A to B; the remainder bounds the round-trip
-		// distance (in latitude) from A or B to the min or max latitude
-		// attained along the edge AB.
-		latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
-		maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon
-
-		// Test whether AB passes through the point of maximum latitude or
-		// minimum latitude. If the dot product(s) are small enough then the
-		// result may be ambiguous.
-		if mA <= mError && mB >= -mError {
-			latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
-		}
-		if mB <= mError && mA >= -mError {
-			latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
-		}
-	}
-	r.a = b
-	r.aLL = bLL
-	r.bound = r.bound.Union(Rect{latAB, lngAB})
-}
-
-// RectBound returns the bounding rectangle of the edge chain that connects the
-// vertices defined so far. This bound satisfies the guarantee made
-// above, i.e. if the edge chain defines a Loop, then the bound contains
-// the LatLng coordinates of all Points contained by the loop.
-func (r *RectBounder) RectBound() Rect {
-	return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
-}
-
-// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
-// contain the bounds of any subregion whose bounds are computed using
-// ComputeRectBound. For example, consider a loop L that defines a square.
-// GetBound ensures that if a point P is contained by this square, then
-// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
-// shaped loop S contained by L. It is possible that GetBound returns a
-// *larger* bound for S than it does for L, due to rounding errors. This
-// method expands the bound for L so that it is guaranteed to contain the
-// bounds of any subregion S.
-//
-// More precisely, if L is a loop that does not contain either pole, and S
-// is a loop such that L.Contains(S), then
-//
-//   ExpandForSubregions(L.RectBound).Contains(S.RectBound).
-//
-func ExpandForSubregions(bound Rect) Rect {
-	// Empty bounds don't need expansion.
-	if bound.IsEmpty() {
-		return bound
-	}
-
-	// First we need to check whether the bound B contains any nearly-antipodal
-	// points (to within 4.309 * dblEpsilon). If so then we need to return
-	// FullRect, since the subregion might have an edge between two
-	// such points, and AddPoint returns Full for such edges. Note that
-	// this can happen even if B is not Full for example, consider a loop
-	// that defines a 10km strip straddling the equator extending from
-	// longitudes -100 to +100 degrees.
-	//
-	// It is easy to check whether B contains any antipodal points, but checking
-	// for nearly-antipodal points is trickier. Essentially we consider the
-	// original bound B and its reflection through the origin B', and then test
-	// whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.
-
-	// lngGap is a lower bound on the longitudinal distance between B and its
-	// reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
-	// endpoint longitude calculations and the Length call.)
-	lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)
-
-	// minAbsLat is the minimum distance from B to the equator (if zero or
-	// negative, then B straddles the equator).
-	minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)
-
-	// latGapSouth and latGapNorth measure the minimum distance from B to the
-	// south and north poles respectively.
-	latGapSouth := math.Pi/2 + bound.Lat.Lo
-	latGapNorth := math.Pi/2 - bound.Lat.Hi
-
-	if minAbsLat >= 0 {
-		// The bound B does not straddle the equator. In this case the minimum
-		// distance is between one endpoint of the latitude edge in B closest to
-		// the equator and the other endpoint of that edge in B'. The latitude
-		// distance between these two points is 2*minAbsLat, and the longitude
-		// distance is lngGap. We could compute the distance exactly using the
-		// Haversine formula, but then we would need to bound the errors in that
-		// calculation. Since we only need accuracy when the distance is very
-		// small (close to 4.309 * dblEpsilon), we substitute the Euclidean
-		// distance instead. This gives us a right triangle XYZ with two edges of
-		// length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
-		// length of the third edge z, and we have
-		//
-		//         z  ~=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
-		//
-		// Therefore the region may contain nearly antipodal points only if
-		//
-		//  2*minAbsLat + lngGap  <  sqrt(2) * 4.309 * dblEpsilon
-		//                        ~= 1.354e-15
-		//
-		// Note that because the given bound B is conservative, minAbsLat and
-		// lngGap are both lower bounds on their true values so we do not need
-		// to make any adjustments for their errors.
-		if 2*minAbsLat+lngGap < 1.354e-15 {
-			return FullRect()
-		}
-	} else if lngGap >= math.Pi/2 {
-		// B spans at most Pi/2 in longitude. The minimum distance is always
-		// between one corner of B and the diagonally opposite corner of B'. We
-		// use the same distance approximation that we used above; in this case
-		// we have an obtuse triangle XYZ with two edges of length x = latGapSouth
-		// and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
-		//
-		//         z  >=  sqrt(x^2 + y^2)  >=  (x + y) / sqrt(2)
-		//
-		// Unlike the case above, latGapSouth and latGapNorth are not lower bounds
-		// (because of the extra addition operation, and because math.Pi/2 is not
-		// exactly equal to Pi/2); they can exceed their true values by up to
-		// 0.75 * dblEpsilon. Putting this all together, the region may contain
-		// nearly antipodal points only if
-		//
-		//   latGapSouth + latGapNorth  <  (sqrt(2) * 4.309 + 1.5) * dblEpsilon
-		//                              ~= 1.687e-15
-		if latGapSouth+latGapNorth < 1.687e-15 {
-			return FullRect()
-		}
-	} else {
-		// Otherwise we know that (1) the bound straddles the equator and (2) its
-		// width in longitude is at least Pi/2. In this case the minimum
-		// distance can occur either between a corner of B and the diagonally
-		// opposite corner of B' (as in the case above), or between a corner of B
-		// and the opposite longitudinal edge reflected in B'. It is sufficient
-		// to only consider the corner-edge case, since this distance is also a
-		// lower bound on the corner-corner distance when that case applies.
-
-		// Consider the spherical triangle XYZ where X is a corner of B with
-		// minimum absolute latitude, Y is the closest pole to X, and Z is the
-		// point closest to X on the opposite longitudinal edge of B'. This is a
-		// right triangle (Z = Pi/2), and from the spherical law of sines we have
-		//
-		//     sin(z) / sin(Z)  =  sin(y) / sin(Y)
-		//     sin(maxLatGap) / 1  =  sin(dMin) / sin(lngGap)
-		//     sin(dMin)  =  sin(maxLatGap) * sin(lngGap)
-		//
-		// where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
-		// desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
-		// for 0 <= t <= Pi/2, that we only need an accurate approximation when
-		// at least one of "maxLatGap" or lngGap is extremely small (in which
-		// case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
-		// to 0.75 * dblEpsilon, we want to test whether
-		//
-		//   maxLatGap * lngGap  <  (4.309 + 0.75) * (Pi/2) * dblEpsilon
-		//                       ~= 1.765e-15
-		if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
-			return FullRect()
-		}
-	}
-	// Next we need to check whether the subregion might contain any edges that
-	// span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
-	// sets the longitude bound to Full in that case. This corresponds to
-	// testing whether (lngGap <= 0) in lngExpansion below.
-
-	// Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
-	// In the worst case, the errors when computing the latitude bound for a
-	// subregion could go in the opposite direction as the errors when computing
-	// the bound for the original region, so we need to double this value.
-	// (More analysis shows that it's okay to round down to a multiple of
-	// dblEpsilon.)
-	//
-	// For longitude, we rely on the fact that atan2 is correctly rounded and
-	// therefore no additional bounds expansion is necessary.
-
-	latExpansion := 9 * dblEpsilon
-	lngExpansion := 0.0
-	if lngGap <= 0 {
-		lngExpansion = math.Pi
-	}
-	return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
-}

+ 0 - 71
vendor/github.com/golang/geo/s2/region.go

@@ -1,71 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-// A Region represents a two-dimensional region on the unit sphere.
-//
-// The purpose of this interface is to allow complex regions to be
-// approximated as simpler regions. The interface is restricted to methods
-// that are useful for computing approximations.
-type Region interface {
-	// CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
-	CapBound() Cap
-
-	// RectBound returns a bounding latitude-longitude rectangle that contains
-	// the region. The bounds are not guaranteed to be tight.
-	RectBound() Rect
-
-	// ContainsCell reports whether the region completely contains the given region.
-	// It returns false if containment could not be determined.
-	ContainsCell(c Cell) bool
-
-	// IntersectsCell reports whether the region intersects the given cell or
-	// if intersection could not be determined. It returns false if the region
-	// does not intersect.
-	IntersectsCell(c Cell) bool
-
-	// ContainsPoint reports whether the region contains the given point or not.
-	// The point should be unit length, although some implementations may relax
-	// this restriction.
-	ContainsPoint(p Point) bool
-
-	// CellUnionBound returns a small collection of CellIDs whose union covers
-	// the region. The cells are not sorted, may have redundancies (such as cells
-	// that contain other cells), and may cover much more area than necessary.
-	//
-	// This method is not intended for direct use by client code. Clients
-	// should typically use Covering, which has options to control the size and
-	// accuracy of the covering. Alternatively, if you want a fast covering and
-	// don't care about accuracy, consider calling FastCovering (which returns a
-	// cleaned-up version of the covering computed by this method).
-	//
-	// CellUnionBound implementations should attempt to return a small
-	// covering (ideally 4 cells or fewer) that covers the region and can be
-	// computed quickly. The result is used by RegionCoverer as a starting
-	// point for further refinement.
-	CellUnionBound() []CellID
-}
-
-// Enforce Region interface satisfaction.
-var (
-	_ Region = Cap{}
-	_ Region = Cell{}
-	_ Region = (*CellUnion)(nil)
-	_ Region = (*Loop)(nil)
-	_ Region = Point{}
-	_ Region = (*Polygon)(nil)
-	_ Region = (*Polyline)(nil)
-	_ Region = Rect{}
-)

+ 0 - 477
vendor/github.com/golang/geo/s2/regioncoverer.go

@@ -1,477 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package s2
-
-import (
-	"container/heap"
-)
-
-// RegionCoverer allows arbitrary regions to be approximated as unions of cells (CellUnion).
-// This is useful for implementing various sorts of search and precomputation operations.
-//
-// Typical usage:
-//
-//	rc := &s2.RegionCoverer{MaxLevel: 30, MaxCells: 5}
-//	r := s2.Region(CapFromCenterArea(center, area))
-//	covering := rc.Covering(r)
-//
-// This yields a CellUnion of at most 5 cells that is guaranteed to cover the
-// given region (a disc-shaped region on the sphere).
-//
-// For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used.
-// This effectively allows the branching factor of the S2 CellID hierarchy to be increased.
-// Currently the only parameter values allowed are 1, 2, or 3, corresponding to
-// branching factors of 4, 16, and 64 respectively.
-//
-// Note the following:
-//
-//  - MinLevel takes priority over MaxCells, i.e. cells below the given level will
-//    never be used even if this causes a large number of cells to be returned.
-//
-//  - For any setting of MaxCells, up to 6 cells may be returned if that
-//    is the minimum number of cells required (e.g. if the region intersects
-//    all six face cells).  Up to 3 cells may be returned even for very tiny
-//    convex regions if they happen to be located at the intersection of
-//    three cube faces.
-//
-//  - For any setting of MaxCells, an arbitrary number of cells may be
-//    returned if MinLevel is too high for the region being approximated.
-//
-//  - If MaxCells is less than 4, the area of the covering may be
-//    arbitrarily large compared to the area of the original region even if
-//    the region is convex (e.g. a Cap or Rect).
-//
-// The approximation algorithm is not optimal but does a pretty good job in
-// practice. The output does not always use the maximum number of cells
-// allowed, both because this would not always yield a better approximation,
-// and because MaxCells is a limit on how much work is done exploring the
-// possible covering as well as a limit on the final output size.
-//
-// Because it is an approximation algorithm, one should not rely on the
-// stability of the output. In particular, the output of the covering algorithm
-// may change across different versions of the library.
-//
-// One can also generate interior coverings, which are sets of cells which
-// are entirely contained within a region. Interior coverings can be
-// empty, even for non-empty regions, if there are no cells that satisfy
-// the provided constraints and are contained by the region. Note that for
-// performance reasons, it is wise to specify a MaxLevel when computing
-// interior coverings - otherwise for regions with small or zero area, the
-// algorithm may spend a lot of time subdividing cells all the way to leaf
-// level to try to find contained cells.
-type RegionCoverer struct {
-	MinLevel int // the minimum cell level to be used.
-	MaxLevel int // the maximum cell level to be used.
-	LevelMod int // the LevelMod to be used.
-	MaxCells int // the maximum desired number of cells in the approximation.
-}
-
-type coverer struct {
-	minLevel         int // the minimum cell level to be used.
-	maxLevel         int // the maximum cell level to be used.
-	levelMod         int // the LevelMod to be used.
-	maxCells         int // the maximum desired number of cells in the approximation.
-	region           Region
-	result           CellUnion
-	pq               priorityQueue
-	interiorCovering bool
-}
-
-type candidate struct {
-	cell        Cell
-	terminal    bool         // Cell should not be expanded further.
-	numChildren int          // Number of children that intersect the region.
-	children    []*candidate // Actual size may be 0, 4, 16, or 64 elements.
-	priority    int          // Priority of the candidate.
-}
-
-type priorityQueue []*candidate
-
-func (pq priorityQueue) Len() int {
-	return len(pq)
-}
-
-func (pq priorityQueue) Less(i, j int) bool {
-	// We want Pop to give us the highest, not lowest, priority so we use greater than here.
-	return pq[i].priority > pq[j].priority
-}
-
-func (pq priorityQueue) Swap(i, j int) {
-	pq[i], pq[j] = pq[j], pq[i]
-}
-
-func (pq *priorityQueue) Push(x interface{}) {
-	item := x.(*candidate)
-	*pq = append(*pq, item)
-}
-
-func (pq *priorityQueue) Pop() interface{} {
-	item := (*pq)[len(*pq)-1]
-	*pq = (*pq)[:len(*pq)-1]
-	return item
-}
-
-func (pq *priorityQueue) Reset() {
-	*pq = (*pq)[:0]
-}
-
-// newCandidate returns a new candidate with no children if the cell intersects the given region.
-// The candidate is marked as terminal if it should not be expanded further.
-func (c *coverer) newCandidate(cell Cell) *candidate {
-	if !c.region.IntersectsCell(cell) {
-		return nil
-	}
-	cand := &candidate{cell: cell}
-	level := int(cell.level)
-	if level >= c.minLevel {
-		if c.interiorCovering {
-			if c.region.ContainsCell(cell) {
-				cand.terminal = true
-			} else if level+c.levelMod > c.maxLevel {
-				return nil
-			}
-		} else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) {
-			cand.terminal = true
-		}
-	}
-	return cand
-}
-
-// expandChildren populates the children of the candidate by expanding the given number of
-// levels from the given cell.  Returns the number of children that were marked "terminal".
-func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int {
-	numLevels--
-	var numTerminals int
-	last := cell.id.ChildEnd()
-	for ci := cell.id.ChildBegin(); ci != last; ci = ci.Next() {
-		childCell := CellFromCellID(ci)
-		if numLevels > 0 {
-			if c.region.IntersectsCell(childCell) {
-				numTerminals += c.expandChildren(cand, childCell, numLevels)
-			}
-			continue
-		}
-		if child := c.newCandidate(childCell); child != nil {
-			cand.children = append(cand.children, child)
-			cand.numChildren++
-			if child.terminal {
-				numTerminals++
-			}
-		}
-	}
-	return numTerminals
-}
-
-// addCandidate adds the given candidate to the result if it is marked as "terminal",
-// otherwise expands its children and inserts it into the priority queue.
-// Passing an argument of nil does nothing.
-func (c *coverer) addCandidate(cand *candidate) {
-	if cand == nil {
-		return
-	}
-
-	if cand.terminal {
-		c.result = append(c.result, cand.cell.id)
-		return
-	}
-
-	// Expand one level at a time until we hit minLevel to ensure that we don't skip over it.
-	numLevels := c.levelMod
-	level := int(cand.cell.level)
-	if level < c.minLevel {
-		numLevels = 1
-	}
-
-	numTerminals := c.expandChildren(cand, cand.cell, numLevels)
-	maxChildrenShift := uint(2 * c.levelMod)
-	if cand.numChildren == 0 {
-		return
-	} else if !c.interiorCovering && numTerminals == 1<<maxChildrenShift && level >= c.minLevel {
-		// Optimization: add the parent cell rather than all of its children.
-		// We can't do this for interior coverings, since the children just
-		// intersect the region, but may not be contained by it - we need to
-		// subdivide them further.
-		cand.terminal = true
-		c.addCandidate(cand)
-	} else {
-		// We negate the priority so that smaller absolute priorities are returned
-		// first. The heuristic is designed to refine the largest cells first,
-		// since those are where we have the largest potential gain. Among cells
-		// of the same size, we prefer the cells with the fewest children.
-		// Finally, among cells with equal numbers of children we prefer those
-		// with the smallest number of children that cannot be refined further.
-		cand.priority = -(((level<<maxChildrenShift)+cand.numChildren)<<maxChildrenShift + numTerminals)
-		heap.Push(&c.pq, cand)
-	}
-}
-
-// adjustLevel returns the reduced "level" so that it satisfies levelMod. Levels smaller than minLevel
-// are not affected (since cells at these levels are eventually expanded).
-func (c *coverer) adjustLevel(level int) int {
-	if c.levelMod > 1 && level > c.minLevel {
-		level -= (level - c.minLevel) % c.levelMod
-	}
-	return level
-}
-
-// adjustCellLevels ensures that all cells with level > minLevel also satisfy levelMod,
-// by replacing them with an ancestor if necessary. Cell levels smaller
-// than minLevel are not modified (see AdjustLevel). The output is
-// then normalized to ensure that no redundant cells are present.
-func (c *coverer) adjustCellLevels(cells *CellUnion) {
-	if c.levelMod == 1 {
-		return
-	}
-
-	var out int
-	for _, ci := range *cells {
-		level := ci.Level()
-		newLevel := c.adjustLevel(level)
-		if newLevel != level {
-			ci = ci.Parent(newLevel)
-		}
-		if out > 0 && (*cells)[out-1].Contains(ci) {
-			continue
-		}
-		for out > 0 && ci.Contains((*cells)[out-1]) {
-			out--
-		}
-		(*cells)[out] = ci
-		out++
-	}
-	*cells = (*cells)[:out]
-}
-
-// initialCandidates computes a set of initial candidates that cover the given region.
-func (c *coverer) initialCandidates() {
-	// Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
-	temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
-
-	cells := temp.FastCovering(c.region)
-	c.adjustCellLevels(&cells)
-	for _, ci := range cells {
-		c.addCandidate(c.newCandidate(CellFromCellID(ci)))
-	}
-}
-
-// coveringInternal generates a covering and stores it in result.
-// Strategy: Start with the 6 faces of the cube.  Discard any
-// that do not intersect the shape.  Then repeatedly choose the
-// largest cell that intersects the shape and subdivide it.
-//
-// result contains the cells that will be part of the output, while pq
-// contains cells that we may still subdivide further. Cells that are
-// entirely contained within the region are immediately added to the output,
-// while cells that do not intersect the region are immediately discarded.
-// Therefore pq only contains cells that partially intersect the region.
-// Candidates are prioritized first according to cell size (larger cells
-// first), then by the number of intersecting children they have (fewest
-// children first), and then by the number of fully contained children
-// (fewest children first).
-func (c *coverer) coveringInternal(region Region) {
-	c.region = region
-
-	c.initialCandidates()
-	for c.pq.Len() > 0 && (!c.interiorCovering || len(c.result) < c.maxCells) {
-		cand := heap.Pop(&c.pq).(*candidate)
-
-		// For interior covering we keep subdividing no matter how many children
-		// candidate has. If we reach MaxCells before expanding all children,
-		// we will just use some of them.
-		// For exterior covering we cannot do this, because result has to cover the
-		// whole region, so all children have to be used.
-		// candidate.numChildren == 1 case takes care of the situation when we
-		// already have more than MaxCells in result (minLevel is too high).
-		// Subdividing of the candidate with one child does no harm in this case.
-		if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells {
-			for _, child := range cand.children {
-				if !c.interiorCovering || len(c.result) < c.maxCells {
-					c.addCandidate(child)
-				}
-			}
-		} else {
-			cand.terminal = true
-			c.addCandidate(cand)
-		}
-	}
-	c.pq.Reset()
-	c.region = nil
-}
-
-// newCoverer returns an instance of coverer.
-func (rc *RegionCoverer) newCoverer() *coverer {
-	return &coverer{
-		minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)),
-		maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)),
-		levelMod: maxInt(1, minInt(3, rc.LevelMod)),
-		maxCells: rc.MaxCells,
-	}
-}
-
-// Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
-func (rc *RegionCoverer) Covering(region Region) CellUnion {
-	covering := rc.CellUnion(region)
-	covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
-	return covering
-}
-
-// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
-func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
-	intCovering := rc.InteriorCellUnion(region)
-	intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
-	return intCovering
-}
-
-// CellUnion returns a normalized CellUnion that covers the given region and
-// satisfies the restrictions except for minLevel and levelMod. These criteria
-// cannot be satisfied using a cell union because cell unions are
-// automatically normalized by replacing four child cells with their parent
-// whenever possible. (Note that the list of cell ids passed to the CellUnion
-// constructor does in fact satisfy all the given restrictions.)
-func (rc *RegionCoverer) CellUnion(region Region) CellUnion {
-	c := rc.newCoverer()
-	c.coveringInternal(region)
-	cu := c.result
-	cu.Normalize()
-	return cu
-}
-
-// InteriorCellUnion returns a normalized CellUnion that is contained within the given region and
-// satisfies the restrictions except for minLevel and levelMod. These criteria
-// cannot be satisfied using a cell union because cell unions are
-// automatically normalized by replacing four child cells with their parent
-// whenever possible. (Note that the list of cell ids passed to the CellUnion
-// constructor does in fact satisfy all the given restrictions.)
-func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion {
-	c := rc.newCoverer()
-	c.interiorCovering = true
-	c.coveringInternal(region)
-	cu := c.result
-	cu.Normalize()
-	return cu
-}
-
-// FastCovering returns a CellUnion that covers the given region similar to Covering,
-// except that this method is much faster and the coverings are not as tight.
-// All of the usual parameters are respected (MaxCells, MinLevel, MaxLevel, and LevelMod),
-// except that the implementation makes no attempt to take advantage of large values of
-// MaxCells.  (A small number of cells will always be returned.)
-//
-// This function is useful as a starting point for algorithms that
-// recursively subdivide cells.
-func (rc *RegionCoverer) FastCovering(region Region) CellUnion {
-	c := rc.newCoverer()
-	cu := CellUnion(region.CellUnionBound())
-	c.normalizeCovering(&cu)
-	return cu
-}
-
-// normalizeCovering normalizes the "covering" so that it conforms to the current covering
-// parameters (MaxCells, minLevel, maxLevel, and levelMod).
-// This method makes no attempt to be optimal. In particular, if
-// minLevel > 0 or levelMod > 1 then it may return more than the
-// desired number of cells even when this isn't necessary.
-//
-// Note that when the covering parameters have their default values, almost
-// all of the code in this function is skipped.
-func (c *coverer) normalizeCovering(covering *CellUnion) {
-	// If any cells are too small, or don't satisfy levelMod, then replace them with ancestors.
-	if c.maxLevel < maxLevel || c.levelMod > 1 {
-		for i, ci := range *covering {
-			level := ci.Level()
-			newLevel := c.adjustLevel(minInt(level, c.maxLevel))
-			if newLevel != level {
-				(*covering)[i] = ci.Parent(newLevel)
-			}
-		}
-	}
-	// Sort the cells and simplify them.
-	covering.Normalize()
-
-	// If there are still too many cells, then repeatedly replace two adjacent
-	// cells in CellID order by their lowest common ancestor.
-	for len(*covering) > c.maxCells {
-		bestIndex := -1
-		bestLevel := -1
-		for i := 0; i+1 < len(*covering); i++ {
-			level, ok := (*covering)[i].CommonAncestorLevel((*covering)[i+1])
-			if !ok {
-				continue
-			}
-			level = c.adjustLevel(level)
-			if level > bestLevel {
-				bestLevel = level
-				bestIndex = i
-			}
-		}
-
-		if bestLevel < c.minLevel {
-			break
-		}
-		(*covering)[bestIndex] = (*covering)[bestIndex].Parent(bestLevel)
-		covering.Normalize()
-	}
-	// Make sure that the covering satisfies minLevel and levelMod,
-	// possibly at the expense of satisfying MaxCells.
-	if c.minLevel > 0 || c.levelMod > 1 {
-		covering.Denormalize(c.minLevel, c.levelMod)
-	}
-}
-
-// SimpleRegionCovering returns a set of cells at the given level that cover
-// the connected region and a starting point on the boundary or inside the
-// region. The cells are returned in arbitrary order.
-//
-// Note that this method is not faster than the regular Covering
-// method for most region types, such as Cap or Polygon, and in fact it
-// can be much slower when the output consists of a large number of cells.
-// Currently it can be faster at generating coverings of long narrow regions
-// such as polylines, but this may change in the future.
-func SimpleRegionCovering(region Region, start Point, level int) []CellID {
-	return FloodFillRegionCovering(region, cellIDFromPoint(start).Parent(level))
-}
-
-// FloodFillRegionCovering returns all edge-connected cells at the same level as
-// the given CellID that intersect the given region, in arbitrary order.
-func FloodFillRegionCovering(region Region, start CellID) []CellID {
-	var output []CellID
-	all := map[CellID]bool{
-		start: true,
-	}
-	frontier := []CellID{start}
-	for len(frontier) > 0 {
-		id := frontier[len(frontier)-1]
-		frontier = frontier[:len(frontier)-1]
-		if !region.IntersectsCell(CellFromCellID(id)) {
-			continue
-		}
-		output = append(output, id)
-		for _, nbr := range id.EdgeNeighbors() {
-			if !all[nbr] {
-				all[nbr] = true
-				frontier = append(frontier, nbr)
-			}
-		}
-	}
-
-	return output
-}
-
-// TODO(roberts): The differences from the C++ version
-// finish up FastCovering to match C++
-// IsCanonical
-// CanonicalizeCovering
-// containsAllChildren
-// replaceCellsWithAncestor

Some files were not shown because too many files changed in this diff