
staticcheck (#313)

* CI: use staticcheck for linting

This commit switches the linter for Go code from golint to staticcheck.
Golint has been deprecated since last year and staticcheck is a
recommended replacement.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* revendor

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* cmd,pkg: fix lint warnings

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
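The `cmd` and `pkg` fixes below line up with common staticcheck diagnostics. As a rough illustration only (this is not code from the repository, and the check IDs in the comments are cited from staticcheck's documentation as best understood), the patterns look like this:

```go
package main

import "errors"

// S1023 ("omit redundant control flow"): a trailing bare `return` in a
// function without results is redundant; compare its removal in
// cmd/kgctl/connect_linux.go below.
func cleanUp() {
	// ... work ...
	// (no trailing `return` needed)
}

// S1008 ("simplify returning boolean expression"):
// `if cond { return true }; return false` becomes `return cond`,
// as in the sortPeers/sortPeerConfigs helpers in pkg/wireguard/conf.go below.
func less(a, b string) bool {
	return a < b
}

// ST1005: error strings should not be capitalized, hence
// "Enpoint is not ready" -> "endpoint is not ready" in conf.go
// (which also fixes the "Enpoint" typo).
var errNotReady = errors.New("endpoint is not ready")

func main() {
	cleanUp()
	_ = less("a", "b")
	_ = errNotReady
}
```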
Lucas Servén Marín 4 years ago
parent
commit
50fbc2eec2
100 changed files with 13,581 additions and 2,683 deletions
  1. Makefile (+8 -8)
  2. cmd/kgctl/connect_linux.go (+0 -2)
  3. go.mod (+5 -3)
  4. go.sum (+10 -3)
  5. pkg/mesh/mesh.go (+3 -1)
  6. pkg/mesh/topology_test.go (+0 -5)
  7. pkg/wireguard/conf.go (+4 -10)
  8. tools.go (+1 -1)
  9. vendor/github.com/BurntSushi/toml/.gitignore (+2 -0)
  10. vendor/github.com/BurntSushi/toml/COMPATIBLE (+1 -0)
  11. vendor/github.com/BurntSushi/toml/COPYING (+21 -0)
  12. vendor/github.com/BurntSushi/toml/README.md (+220 -0)
  13. vendor/github.com/BurntSushi/toml/decode.go (+511 -0)
  14. vendor/github.com/BurntSushi/toml/decode_go116.go (+18 -0)
  15. vendor/github.com/BurntSushi/toml/decode_meta.go (+123 -0)
  16. vendor/github.com/BurntSushi/toml/deprecated.go (+33 -0)
  17. vendor/github.com/BurntSushi/toml/doc.go (+13 -0)
  18. vendor/github.com/BurntSushi/toml/encode.go (+650 -0)
  19. vendor/github.com/BurntSushi/toml/internal/tz.go (+36 -0)
  20. vendor/github.com/BurntSushi/toml/lex.go (+1225 -0)
  21. vendor/github.com/BurntSushi/toml/parse.go (+739 -0)
  22. vendor/github.com/BurntSushi/toml/type_check.go (+70 -0)
  23. vendor/github.com/BurntSushi/toml/type_fields.go (+242 -0)
  24. vendor/golang.org/x/exp/typeparams/LICENSE (+1 -1)
  25. vendor/golang.org/x/exp/typeparams/common.go (+186 -0)
  26. vendor/golang.org/x/exp/typeparams/normalize.go (+200 -0)
  27. vendor/golang.org/x/exp/typeparams/termlist.go (+172 -0)
  28. vendor/golang.org/x/exp/typeparams/typeparams_go117.go (+202 -0)
  29. vendor/golang.org/x/exp/typeparams/typeparams_go118.go (+148 -0)
  30. vendor/golang.org/x/exp/typeparams/typeterm.go (+170 -0)
  31. vendor/golang.org/x/lint/.travis.yml (+0 -19)
  32. vendor/golang.org/x/lint/CONTRIBUTING.md (+0 -15)
  33. vendor/golang.org/x/lint/README.md (+0 -93)
  34. vendor/golang.org/x/lint/golint/golint.go (+0 -159)
  35. vendor/golang.org/x/lint/golint/import.go (+0 -309)
  36. vendor/golang.org/x/lint/golint/importcomment.go (+0 -13)
  37. vendor/golang.org/x/lint/lint.go (+0 -1615)
  38. vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go (+78 -0)
  39. vendor/golang.org/x/mod/module/module.go (+41 -22)
  40. vendor/golang.org/x/mod/module/pseudo.go (+250 -0)
  41. vendor/golang.org/x/mod/semver/semver.go (+20 -10)
  42. vendor/golang.org/x/tools/go/analysis/analysis.go (+242 -0)
  43. vendor/golang.org/x/tools/go/analysis/diagnostic.go (+65 -0)
  44. vendor/golang.org/x/tools/go/analysis/doc.go (+321 -0)
  45. vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go (+49 -0)
  46. vendor/golang.org/x/tools/go/analysis/validate.go (+130 -0)
  47. vendor/golang.org/x/tools/go/ast/astutil/enclosing.go (+16 -4)
  48. vendor/golang.org/x/tools/go/ast/astutil/rewrite.go (+5 -7)
  49. vendor/golang.org/x/tools/go/ast/inspector/inspector.go (+186 -0)
  50. vendor/golang.org/x/tools/go/ast/inspector/typeof.go (+227 -0)
  51. vendor/golang.org/x/tools/go/buildutil/allpackages.go (+198 -0)
  52. vendor/golang.org/x/tools/go/buildutil/fakecontext.go (+113 -0)
  53. vendor/golang.org/x/tools/go/buildutil/overlay.go (+103 -0)
  54. vendor/golang.org/x/tools/go/buildutil/tags.go (+79 -0)
  55. vendor/golang.org/x/tools/go/buildutil/util.go (+212 -0)
  56. vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go (+18 -5)
  57. vendor/golang.org/x/tools/go/internal/cgo/cgo.go (+222 -0)
  58. vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go (+39 -0)
  59. vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go (+11 -12)
  60. vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go (+33 -19)
  61. vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go (+11 -5)
  62. vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go (+9 -3)
  63. vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go (+274 -48)
  64. vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go (+250 -30)
  65. vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go (+16 -0)
  66. vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go (+23 -0)
  67. vendor/golang.org/x/tools/go/loader/doc.go (+204 -0)
  68. vendor/golang.org/x/tools/go/loader/loader.go (+1080 -0)
  69. vendor/golang.org/x/tools/go/loader/util.go (+124 -0)
  70. vendor/golang.org/x/tools/go/packages/packages.go (+5 -0)
  71. vendor/golang.org/x/tools/go/types/objectpath/objectpath.go (+617 -0)
  72. vendor/golang.org/x/tools/go/types/typeutil/callee.go (+69 -0)
  73. vendor/golang.org/x/tools/go/types/typeutil/imports.go (+31 -0)
  74. vendor/golang.org/x/tools/go/types/typeutil/map.go (+443 -0)
  75. vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go (+72 -0)
  76. vendor/golang.org/x/tools/go/types/typeutil/ui.go (+52 -0)
  77. vendor/golang.org/x/tools/internal/analysisinternal/analysis.go (+428 -0)
  78. vendor/golang.org/x/tools/internal/gocommand/invoke.go (+12 -4)
  79. vendor/golang.org/x/tools/internal/gocommand/vendor.go (+12 -10)
  80. vendor/golang.org/x/tools/internal/imports/imports.go (+1 -1)
  81. vendor/golang.org/x/tools/internal/imports/mod.go (+14 -11)
  82. vendor/golang.org/x/tools/internal/imports/sortimports.go (+13 -2)
  83. vendor/golang.org/x/tools/internal/imports/zstdlib.go (+23 -0)
  84. vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go (+183 -0)
  85. vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go (+407 -0)
  86. vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go (+236 -0)
  87. vendor/golang.org/x/tools/internal/typeparams/common.go (+167 -12)
  88. vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go (+12 -0)
  89. vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go (+15 -0)
  90. vendor/golang.org/x/tools/internal/typeparams/normalize.go (+216 -0)
  91. vendor/golang.org/x/tools/internal/typeparams/notypeparams.go (+0 -93)
  92. vendor/golang.org/x/tools/internal/typeparams/termlist.go (+172 -0)
  93. vendor/golang.org/x/tools/internal/typeparams/typeparams.go (+0 -125)
  94. vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go (+197 -0)
  95. vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go (+151 -0)
  96. vendor/golang.org/x/tools/internal/typeparams/typeterm.go (+170 -0)
  97. vendor/golang.org/x/tools/internal/typesinternal/errorcode.go (+158 -0)
  98. vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go (+14 -1)
  99. vendor/golang.org/x/tools/internal/typesinternal/types.go (+9 -2)
  100. vendor/golang.org/x/tools/internal/typesinternal/types_118.go (+19 -0)

+ 8 - 8
Makefile

@@ -38,14 +38,14 @@ DOCS_GEN_BINARY := bin/docs-gen
 DEEPCOPY_GEN_BINARY := bin/deepcopy-gen
 INFORMER_GEN_BINARY := bin/informer-gen
 LISTER_GEN_BINARY := bin/lister-gen
-GOLINT_BINARY := bin/golint
+STATICCHECK_BINARY := bin/staticcheck
 EMBEDMD_BINARY := bin/embedmd
 KIND_BINARY := $(shell pwd)/bin/kind
 KUBECTL_BINARY := $(shell pwd)/bin/kubectl
 BASH_UNIT := $(shell pwd)/bin/bash_unit
 BASH_UNIT_FLAGS :=

-BUILD_IMAGE ?= golang:1.18
+BUILD_IMAGE ?= golang:1.18.0
 BASE_IMAGE ?= alpine:3.15

 build: $(BINS)
@@ -165,7 +165,7 @@ fmt:
 	@echo $(GO_PKGS)
 	gofmt -w -s $(GO_FILES)

-lint: header $(GOLINT_BINARY)
+lint: header $(STATICCHECK_BINARY)
 	@echo 'go vet $(GO_PKGS)'
 	@vet_res=$$(GO111MODULE=on go vet -mod=vendor $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \
 		echo ""; \
@@ -174,10 +174,10 @@ lint: header $(GOLINT_BINARY)
 		echo "$$vet_res"; \
 		exit 1; \
 	fi
-	@echo '$(GOLINT_BINARY) $(GO_PKGS)'
-	@lint_res=$$($(GOLINT_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
+	@echo '$(STATICCHECK_BINARY) $(GO_PKGS)'
+	@lint_res=$$($(STATICCHECK_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
 		echo ""; \
-		echo "Golint found style issues. Please check the reported issues"; \
+		echo "Staticcheck found style issues. Please check the reported issues"; \
 		echo "and fix them if necessary before submitting the code for review:"; \
 		echo "$$lint_res"; \
 		exit 1; \
@@ -358,8 +358,8 @@ $(LISTER_GEN_BINARY):
 $(DOCS_GEN_BINARY): cmd/docs-gen/main.go
 	go build -mod=vendor -o $@ ./cmd/docs-gen

-$(GOLINT_BINARY):
-	go build -mod=vendor -o $@ golang.org/x/lint/golint
+$(STATICCHECK_BINARY):
+	go build -mod=vendor -o $@ honnef.co/go/tools/cmd/staticcheck

 $(EMBEDMD_BINARY):
 	go build -mod=vendor -o $@ github.com/campoy/embedmd

+ 0 - 2
cmd/kgctl/connect_linux.go

@@ -276,8 +276,6 @@ func cleanUp(iface int, t *route.Table, logger log.Logger) {
 	if err := t.CleanUp(); err != nil {
 		level.Error(logger).Log("failed to clean up routes: %v", err)
 	}
-
-	return
 }

 func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int, logger log.Logger) error {

+ 5 - 3
go.mod

@@ -15,9 +15,9 @@ require (
 	github.com/prometheus/client_golang v1.11.0
 	github.com/spf13/cobra v1.2.1
 	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
-	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
 	golang.org/x/sys v0.0.0-20211124211545-fe61309f8881
 	golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211124212657-dd7407c86d22
+	honnef.co/go/tools v0.3.1
 	k8s.io/api v0.23.6
 	k8s.io/apiextensions-apiserver v0.23.6
 	k8s.io/apimachinery v0.23.6
@@ -27,6 +27,7 @@ require (
 )

 require (
+	github.com/BurntSushi/toml v0.4.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -62,13 +63,14 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
 	golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect
-	golang.org/x/mod v0.4.2 // indirect
+	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
+	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
 	golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
 	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
 	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
+	golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	golang.zx2c4.com/wireguard v0.0.0-20211123210315-387f7c461a16 // indirect
 	google.golang.org/appengine v1.6.7 // indirect

+ 10 - 3
go.sum

@@ -47,6 +47,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
 github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -574,6 +576,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
+golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -587,7 +591,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -599,8 +602,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -839,8 +843,9 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q=
 golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
+golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II=
+golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1003,6 +1008,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc=
+honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70=
 k8s.io/api v0.23.6 h1:yOK34wbYECH4RsJbQ9sfkFK3O7f/DUHRlzFehkqZyVw=
 k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g=
 k8s.io/apiextensions-apiserver v0.23.6 h1:v58cQ6Z0/GK1IXYr+oW0fnYl52o9LTY0WgoWvI8uv5Q=

+ 3 - 1
pkg/mesh/mesh.go

@@ -62,7 +62,6 @@ type Mesh struct {
 	ipTables            *iptables.Controller
 	kiloIface           int
 	kiloIfaceName       string
-	key                 []byte
 	local               bool
 	port                int
 	priv                wgtypes.Key
@@ -94,6 +93,9 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 		return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
 	}
 	privateB, err := ioutil.ReadFile(privateKeyPath)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, fmt.Errorf("failed to read private key file: %v", err)
+	}
 	privateB = bytes.Trim(privateB, "\n")
 	private, err := wgtypes.ParseKey(string(privateB))
 	if err != nil {
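
The hunk above fixes a real bug: the error returned by ioutil.ReadFile was previously never inspected before being overwritten by the later wgtypes.ParseKey assignment (the kind of mistake staticcheck flags, e.g. its "value assigned but never read" check). A minimal, self-contained sketch of the resulting pattern — readKeyFile is a hypothetical helper for illustration, not part of Kilo:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// readKeyFile tolerates a missing key file (a new key can be generated
// later), but propagates any other read error instead of silently
// dropping it.
func readKeyFile(path string) ([]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, fmt.Errorf("failed to read private key file: %v", err)
	}
	return b, nil // b is nil when the file does not exist
}

func main() {
	key, err := readKeyFile("/tmp/does-not-exist")
	fmt.Printf("key=%q err=%v\n", key, err)
}
```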

+ 0 - 5
pkg/mesh/topology_test.go

@@ -16,7 +16,6 @@ package mesh

 import (
 	"net"
-	"strings"
 	"testing"
 	"time"

@@ -27,10 +26,6 @@ import (
 	"github.com/squat/kilo/pkg/wireguard"
 )

-func allowedIPs(ips ...string) string {
-	return strings.Join(ips, ", ")
-}
-
 func mustParseCIDR(s string) (r net.IPNet) {
 	if _, ip, err := net.ParseCIDR(s); err != nil {
 		panic("failed to parse CIDR")

+ 4 - 10
pkg/wireguard/conf.go

@@ -183,7 +183,7 @@ func (e *Endpoint) Resolved() bool {
 // UDPAddr() will not try to resolve a DN name, if the Endpoint is not yet resolved.
 func (e *Endpoint) UDPAddr(resolve bool) (*net.UDPAddr, error) {
 	if !e.Ready() {
-		return nil, errors.New("Enpoint is not ready")
+		return nil, errors.New("endpoint is not ready")
 	}
 	if e.udpAddr != nil {
 		// Make a copy of the UDPAddr to protect it from modification outside this package.
@@ -191,7 +191,7 @@ func (e *Endpoint) UDPAddr(resolve bool) (*net.UDPAddr, error) {
 		return &h, nil
 	}
 	if !resolve {
-		return nil, errors.New("Endpoint is not resolved")
+		return nil, errors.New("endpoint is not resolved")
 	}
 	var err error
 	if e.udpAddr, err = net.ResolveUDPAddr("udp", e.addr); err != nil {
@@ -358,19 +358,13 @@ func (c *Conf) Equal(d *wgtypes.Device) (bool, string) {

 func sortPeerConfigs(peers []wgtypes.Peer) {
 	sort.Slice(peers, func(i, j int) bool {
-		if peers[i].PublicKey.String() < peers[j].PublicKey.String() {
-			return true
-		}
-		return false
+		return peers[i].PublicKey.String() < peers[j].PublicKey.String()
 	})
 }

 func sortPeers(peers []Peer) {
 	sort.Slice(peers, func(i, j int) bool {
-		if peers[i].PublicKey.String() < peers[j].PublicKey.String() {
-			return true
-		}
-		return false
+		return peers[i].PublicKey.String() < peers[j].PublicKey.String()
 	})
 }

+ 1 - 1
tools.go

@@ -19,7 +19,7 @@ package main

 import (
 	_ "github.com/campoy/embedmd"
-	_ "golang.org/x/lint/golint"
+	_ "honnef.co/go/tools/cmd/staticcheck"
 	_ "k8s.io/code-generator/cmd/client-gen"
 	_ "k8s.io/code-generator/cmd/deepcopy-gen"
 	_ "k8s.io/code-generator/cmd/informer-gen"

+ 2 - 0
vendor/github.com/BurntSushi/toml/.gitignore

@@ -0,0 +1,2 @@
+toml.test
+/toml-test

+ 1 - 0
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -0,0 +1 @@
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

+ 21 - 0
vendor/github.com/BurntSushi/toml/COPYING

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 220 - 0
vendor/github.com/BurntSushi/toml/README.md

@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; install it with:
+
+    $ go get github.com/BurntSushi/toml
+
+It also comes with a TOML validator CLI tool:
+
+    $ go get github.com/BurntSushi/toml/cmd/tomlv
+    $ tomlv some-toml-file.toml
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles XML and
+JSON. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+  // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+  ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that like other most other decoders **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+	Name     string
+	Duration duration
+}
+type songs struct {
+	Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+	log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
+a similar way.
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
+}
+
+type database struct {
+	Server  string
+	Ports   []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data  [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
+

+ 511 - 0
vendor/github.com/BurntSushi/toml/decode.go

@@ -0,0 +1,511 @@
+package toml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+	_, err := Decode(string(p), v)
+	return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+//
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
+//
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
+//
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md.context = primValue.context
+	defer func() { md.context = nil }()
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decoder decodes TOML data.
+//
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
+//
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
+//
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
+//
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
+//
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// Decode TOML data in to the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+	}
+
+	// TODO: have parser should read from io.Reader? Or at the very least, make
+	// it read from []byte rather than string
+	data, err := ioutil.ReadAll(dec.r)
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	p, err := parse(string(data))
+	if err != nil {
+		return MetaData{}, err
+	}
+	md := MetaData{
+		p.mapping, p.types, p.ordered,
+		make(map[string]bool, len(p.ordered)), nil,
+	}
+	return md, md.unify(p.mapping, indirect(rv))
+}
+
+// Decode the TOML data in to the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+	// Special case. Look for a `Primitive` value.
+	// TODO: #76 would make this superfluous after implemented.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// TODO:
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+	// array. In particular, the unmarshaler should only be applied to primitive
+	// TOML values. But at this point, it will be applied to all kinds of values
+	// and produce an incorrect error whenever those values are hashes or arrays
+	// (including arrays of tables).
+
+	k := rv.Kind()
+
+	// laziness
+	if k >= reflect.Int && k <= reflect.Uint64 {
+		return md.unifyInt(data, rv)
+	}
+	switch k {
+	case reflect.Ptr:
+		elem := reflect.New(rv.Type().Elem())
+		err := md.unify(data, reflect.Indirect(elem))
+		if err != nil {
+			return err
+		}
+		rv.Set(elem)
+		return nil
+	case reflect.Struct:
+		return md.unifyStruct(data, rv)
+	case reflect.Map:
+		return md.unifyMap(data, rv)
+	case reflect.Array:
+		return md.unifyArray(data, rv)
+	case reflect.Slice:
+		return md.unifySlice(data, rv)
+	case reflect.String:
+		return md.unifyString(data, rv)
+	case reflect.Bool:
+		return md.unifyBool(data, rv)
+	case reflect.Interface:
+		// we only support empty interfaces.
+		if rv.NumMethod() > 0 {
+			return e("unsupported type %s", rv.Type())
+		}
+		return md.unifyAnything(data, rv)
+	case reflect.Float32:
+		fallthrough
+	case reflect.Float64:
+		return md.unifyFloat64(data, rv)
+	}
+	return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if mapping == nil {
+			return nil
+		}
+		return e("type mismatch for %s: expected table but found %T",
+			rv.Type().String(), mapping)
+	}
+
+	for key, datum := range tmap {
+		var f *field
+		fields := cachedTypeFields(rv.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if ff.name == key {
+				f = ff
+				break
+			}
+			if f == nil && strings.EqualFold(ff.name, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			subv := rv
+			for _, i := range f.index {
+				subv = indirect(subv.Field(i))
+			}
+			if isUnifiable(subv) {
+				md.decoded[md.context.add(key).String()] = true
+				md.context = append(md.context, key)
+				if err := md.unify(datum, subv); err != nil {
+					return err
+				}
+				md.context = md.context[0 : len(md.context)-1]
+			} else if f.name != "" {
+				// Bad user! No soup for you!
+				return e("cannot write unexported field %s.%s",
+					rv.Type().String(), f.name)
+			}
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+	if k := rv.Type().Key().Kind(); k != reflect.String {
+		return fmt.Errorf(
+			"toml: cannot decode to a map with non-string key type (%s in %q)",
+			k, rv.Type())
+	}
+
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		if tmap == nil {
+			return nil
+		}
+		return badtype("map", mapping)
+	}
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(rv.Type()))
+	}
+	for k, v := range tmap {
+		md.decoded[md.context.add(k).String()] = true
+		md.context = append(md.context, k)
+
+		rvkey := indirect(reflect.New(rv.Type().Key()))
+		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+		if err := md.unify(v, rvval); err != nil {
+			return err
+		}
+		md.context = md.context[0 : len(md.context)-1]
+
+		rvkey.SetString(k)
+		rv.SetMapIndex(rvkey, rvval)
+	}
+	return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return badtype("slice", data)
+	}
+	if l := datav.Len(); l != rv.Len() {
+		return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		if !datav.IsValid() {
+			return nil
+		}
+		return badtype("slice", data)
+	}
+	n := datav.Len()
+	if rv.IsNil() || rv.Cap() < n {
+		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+	}
+	rv.SetLen(n)
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+	l := data.Len()
+	for i := 0; i < l; i++ {
+		err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+	if _, ok := data.(time.Time); ok {
+		rv.Set(reflect.ValueOf(data))
+		return nil
+	}
+	return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+	if s, ok := data.(string); ok {
+		rv.SetString(s)
+		return nil
+	}
+	return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(float64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			fallthrough
+		case reflect.Float64:
+			rv.SetFloat(num)
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+	return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(int64); ok {
+		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+			switch rv.Kind() {
+			case reflect.Int, reflect.Int64:
+				// No bounds checking necessary.
+			case reflect.Int8:
+				if num < math.MinInt8 || num > math.MaxInt8 {
+					return e("value %d is out of range for int8", num)
+				}
+			case reflect.Int16:
+				if num < math.MinInt16 || num > math.MaxInt16 {
+					return e("value %d is out of range for int16", num)
+				}
+			case reflect.Int32:
+				if num < math.MinInt32 || num > math.MaxInt32 {
+					return e("value %d is out of range for int32", num)
+				}
+			}
+			rv.SetInt(num)
+		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+			unum := uint64(num)
+			switch rv.Kind() {
+			case reflect.Uint, reflect.Uint64:
+				// No bounds checking necessary.
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("value %d is out of range for uint8", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("value %d is out of range for uint16", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("value %d is out of range for uint32", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanSet() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+func e(format string, args ...interface{}) error {
+	return fmt.Errorf("toml: "+format, args...)
+}
+
+func badtype(expected string, data interface{}) error {
+	return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}

+ 18 - 0
vendor/github.com/BurntSushi/toml/decode_go116.go

@@ -0,0 +1,18 @@
+// +build go1.16
+
+package toml
+
+import (
+	"io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}

+ 123 - 0
vendor/github.com/BurntSushi/toml/decode_meta.go

@@ -0,0 +1,123 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
+//
+//	IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var hash map[string]interface{}
+	var ok bool
+	var hashOrVal interface{} = md.mapping
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+	fullkey := strings.Join(key, ".")
+	if typ, ok := md.types[fullkey]; ok {
+		return typ.typeString()
+	}
+	return ""
+}
+
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string { return strings.Join(k, ".") }
+
+func (k Key) maybeQuotedAll() string {
+	var ss []string
+	for i := range k {
+		ss = append(ss, k.maybeQuoted(i))
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	if k[i] == "" {
+		return `""`
+	}
+	quote := false
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			quote = true
+			break
+		}
+	}
+	if quote {
+		return `"` + quotedReplacer.Replace(k[i]) + `"`
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+//
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+	return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if !md.decoded[key.String()] {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}

+ 33 - 0
vendor/github.com/BurntSushi/toml/deprecated.go

@@ -0,0 +1,33 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]bool)}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+	return NewDecoder(r).Decode(v)
+}

+ 13 - 0
vendor/github.com/BurntSushi/toml/doc.go

@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify if TOML document is valid. It can also be used to
+print the type of each key.
+*/
+package toml

+ 650 - 0
vendor/github.com/BurntSushi/toml/encode.go

@@ -0,0 +1,650 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errAnonNonStruct   = errors.New("toml: cannot encode an anonymous field that is not a struct")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Encoder encodes a Go to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this includes maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// The string to use for a single indentation level. The default is two
+	// spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	w          *bufio.Writer
+}
+
+// NewEncoder create a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special cases: time.Time must be written in ISO 8601 format, and if a
+	// type implements encoding.TextMarshaler we use that. This prevents the
+	// encoder from handling these types as generic structs (or whatever the
+	// underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
+		return
+	case encoding.TextMarshaler:
+		// Use text marshaler if it's available for this value.
+		if s, err := v.MarshalText(); err != nil {
+			encPanic(err)
+		} else {
+			enc.writeQuoted(string(s))
+		}
+		return
+	}
+
+	switch rv.Kind() {
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+		}
+	case reflect.Float64:
+		f := rv.Float()
+		if math.IsNaN(f) {
+			enc.wf("nan")
+		} else if math.IsInf(f, 0) {
+			enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+		} else {
+			enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+		}
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Struct:
+		enc.eStruct(nil, rv, true)
+	case reflect.Map:
+		enc.eMap(nil, rv, true)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	default:
+		encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal point with at least one
+// digit on either side of it.
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
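
To see the replacer above at work, a small illustrative snippet (not vendored code): control characters in a string value come out as the escape sequences from the table.

package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(map[string]string{"s": "a\tb\nc"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // s = "a\tb\nc" (with literal backslash escapes)
}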
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+		enc.eMapOrStruct(key, trv, false)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	if len(key) == 1 {
+		// Output an extra newline between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv, false)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv, inline)
+	case reflect.Struct:
+		enc.eStruct(key, rv, inline)
+	default:
+		// Should never happen?
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string, trailC bool) {
+		sort.Strings(mapKeys)
+		for i, mapKey := range mapKeys {
+			val := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(val) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{mapKey}, val, true)
+				if trailC || i != len(mapKeys)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(mapKey), val)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+	writeMapKeys(mapKeysSub, false)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table then all keys under it will be in that
+	// table (not the one we're writing here).
+	//
+	// Fields is a [][]int: for fieldsDirect this always has one entry (the
+	// struct field index). For fieldsSub it contains two entries: the parent
+	// field index from rv, and the field indexes of the sub-struct's fields.
+	var (
+		rt                      = rv.Type()
+		fieldsDirect, fieldsSub [][]int
+		addFields               func(rt reflect.Type, rv reflect.Value, start []int)
+	)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
+				continue
+			}
+
+			frv := rv.Field(i)
+
+			// Treat anonymous struct fields with tag names as though they are
+			// not anonymous, like encoding/json does.
+			//
+			// Non-struct anonymous fields use the normal encoding logic.
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					if getOptions(f.Tag).name == "" {
+						addFields(t, frv, append(start, f.Index...))
+						continue
+					}
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
+						}
+						continue
+					}
+				}
+			}
+
+			if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	writeFields := func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			fieldType := rt.FieldByIndex(fieldIndex)
+			fieldVal := rv.FieldByIndex(fieldIndex)
+
+			if isNil(fieldVal) { /// Don't write anything for nil fields.
+				continue
+			}
+
+			opts := getOptions(fieldType.Tag)
+			if opts.skip {
+				continue
+			}
+			keyName := fieldType.Name
+			if opts.name != "" {
+				keyName = opts.name
+			}
+			if opts.omitempty && isEmpty(fieldVal) {
+				continue
+			}
+			if opts.omitzero && isZero(fieldVal) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{keyName}, fieldVal, true)
+				if fieldIndex[0] != len(fields)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(keyName), fieldVal)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The returned type may be
+// nil, which means no concrete TOML type could be found. It is used to
+// determine whether the types of array elements are mixed (which is
+// forbidden); a nil Go value is illegal as an array element.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case encoding.TextMarshaler:
+			return tomlString
+		default:
+			// Someone used a pointer receiver: we can make it work for pointer
+			// values.
+			if rv.CanAddr() {
+				_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+				if ok {
+					return tomlString
+				}
+			}
+			return tomlHash
+		}
+	default:
+		_, ok := rv.Interface().(encoding.TextMarshaler)
+		if ok {
+			return tomlString
+		}
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("") // Need *some* return value
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+
+	/// Don't allow nil.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		if tomlTypeOfGo(rv.Index(i)) == nil {
+			encPanic(errArrayNilElement)
+		}
+	}
+
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+	return firstType
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
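
An illustrative struct showing how these tag options combine (type and field names invented):

type server struct {
	Host    string   `toml:"host"`          // rename the key to "host"
	Port    int      `toml:"port,omitzero"` // omit when the value is 0
	Aliases []string `toml:",omitempty"`    // keep the field name; omit when empty
	Secret  string   `toml:"-"`             // never encoded
}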
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//   key = <any value>
+//
+// If inline is true it won't add a newline at the end.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}

+ 36 - 0
vendor/github.com/BurntSushi/toml/internal/tz.go

@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// defaults to current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// They are exported because they are referred to from e.g. internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
+	LocalDatetime = time.FixedZone("datetime-local", localOffset)
+	LocalDate     = time.FixedZone("date-local", localOffset)
+	LocalTime     = time.FixedZone("time-local", localOffset)
+)
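
A sketch of these zones from the decoder's side (illustrative; the key name is invented): a date-only TOML value parses into the "date-local" fixed zone defined above, which is what later lets the encoder write it back without a time part.

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ D time.Time }
	if _, err := toml.Decode("d = 2021-05-01", &v); err != nil {
		panic(err)
	}
	fmt.Println(v.D.Location()) // date-local
}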

+ 1225 - 0
vendor/github.com/BurntSushi/toml/lex.go

@@ -0,0 +1,1225 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemNIL            // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemKeyEnd
+	itemCommentStart
+	itemInlineTableStart
+	itemInlineTableEnd
+)
+
+const (
+	eof              = 0
+	comma            = ','
+	tableStart       = '['
+	tableEnd         = ']'
+	arrayTableStart  = '['
+	arrayTableEnd    = ']'
+	tableSep         = '.'
+	keySep           = '='
+	arrayStart       = '['
+	arrayEnd         = ']'
+	commentStart     = '#'
+	stringStart      = '"'
+	stringEnd        = '"'
+	rawStringStart   = '\''
+	rawStringEnd     = '\''
+	inlineTableStart = '{'
+	inlineTableEnd   = '}'
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+	input string
+	start int
+	pos   int
+	line  int
+	state stateFn
+	items chan item
+
+	// Allow backing up by as many as four runes. This is necessary because
+	// TOML contains 3-rune tokens (""" and ''').
+	prevWidths [4]int
+	nprev      int // how many of prevWidths are in use
+	// If we emit an eof, we can still back up, but it is not OK to call
+	// next again.
+	atEOF bool
+
+	// A stack of state functions used to maintain context.
+	// The idea is to reuse parts of the state machine in various places.
+	// For example, values can appear at the top level or within arbitrarily
+	// nested arrays. The last state on the stack is used after a value has
+	// been lexed. Similarly for comments.
+	stack []stateFn
+}
+
+type item struct {
+	typ  itemType
+	val  string
+	line int
+}
+
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+			//fmt.Printf("     STATE %-24s   current: %-10q   stack: %s\n", lx.state, lx.current(), lx.stack)
+		}
+	}
+}
+
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input,
+		state: lexTop,
+		line:  1,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+	}
+	return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
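
The push/pop pair is what lets shared states (values, comments) be reused from many contexts: a caller pushes the state to resume and hands control to the shared state, which finishes with lx.pop(). Schematically (lexSomethingStart and lexSomethingEnd are invented names; lexValue is the real shared state):

// Schematic only: how a new context would reuse the shared value lexer.
func lexSomethingStart(lx *lexer) stateFn {
	lx.push(lexSomethingEnd) // resume here once the value has been lexed
	return lexValue          // shared state; it finishes with lx.pop()
}

func lexSomethingEnd(lx *lexer) stateFn {
	lx.ignore()
	return lx.pop()
}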
+
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+	lx.items <- item{typ, lx.current(), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+	if lx.atEOF {
+		panic("BUG in lexer: next called after EOF")
+	}
+	if lx.pos >= len(lx.input) {
+		lx.atEOF = true
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	lx.prevWidths[3] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[0]
+	if lx.nprev < 4 {
+		lx.nprev++
+	}
+
+	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	if r == utf8.RuneError {
+		lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
+		return utf8.RuneError
+	}
+
+	lx.prevWidths[0] = w
+	lx.pos += w
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called 4 times between calls to next.
+func (lx *lexer) backup() {
+	if lx.atEOF {
+		lx.atEOF = false
+		return
+	}
+	if lx.nprev < 1 {
+		panic("BUG in lexer: backed up too far")
+	}
+	w := lx.prevWidths[0]
+	lx.prevWidths[0] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[3]
+	lx.nprev--
+	lx.pos -= w
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
+// errorf stops all lexing by emitting an error and returning nil.
+// Note that any rune passed as a format value is escaped in the message if
+// it is a special character (newlines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	lx.items <- item{
+		itemError,
+		fmt.Sprintf(format, values...),
+		lx.line,
+	}
+	return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf(
+		"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+		r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables,
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf(
+			"expected end of table array name delimiter %q, but got %q instead",
+			arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == tableSep:
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table name.
+//
+// It assumes that the opening quote has not yet been consumed.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == stringStart:
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == rawStringStart:
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case inlineTableStart:
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	case 'i', 'n':
+		if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+			lx.emit(itemFloat)
+			return lx.pop()
+		}
+	case '-', '+':
+		return lexDecimalNumberStart
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	if r == eof {
+		return lx.errorf("unexpected EOF; expected value")
+	}
+	return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == commentStart:
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == comma:
+		return lx.errorf("unexpected comma")
+	case r == arrayEnd:
+		// NOTE(caleb): The spec isn't clear about whether you can have
+		// a trailing comma or not, so we'll allow it.
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValueEnd)
+	case r == commentStart:
+		lx.push(lexArrayValueEnd)
+		return lexCommentStart
+	case r == comma:
+		lx.ignore()
+		return lexArrayValue // move on to the next value
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+	return lx.errorf(
+		"expected a comma or array terminator %q, but got %s instead",
+		arrayEnd, runeOrEOF(r))
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemArrayEnd)
+	return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValue)
+	case isNL(r):
+		return lx.errorf("newlines not allowed within inline tables")
+	case r == commentStart:
+		lx.push(lexInlineTableValue)
+		return lexCommentStart
+	case r == comma:
+		return lx.errorf("unexpected comma")
+	case r == inlineTableEnd:
+		return lexInlineTableEnd
+	}
+	lx.backup()
+	lx.push(lexInlineTableValueEnd)
+	return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexInlineTableValueEnd)
+	case isNL(r):
+		return lx.errorf("newlines not allowed within inline tables")
+	case r == commentStart:
+		lx.push(lexInlineTableValueEnd)
+		return lexCommentStart
+	case r == comma:
+		lx.ignore()
+		lx.skip(isWhitespace)
+		if lx.peek() == '}' {
+			return lx.errorf("trailing comma not allowed in inline tables")
+		}
+		return lexInlineTableValue
+	case r == inlineTableEnd:
+		return lexInlineTableEnd
+	default:
+		return lx.errorf(
+			"expected a comma or an inline table terminator %q, but got %s instead",
+			inlineTableEnd, runeOrEOF(r))
+	}
+}
+
+func runeOrEOF(r rune) string {
+	if r == eof {
+		return "end of file"
+	}
+	return "'" + string(r) + "'"
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemInlineTableEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected '"'`)
+	case isControl(r) || r == '\r':
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == stringEnd:
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case eof:
+		return lx.errorf(`unexpected EOF; expected '"""'`)
+	case '\r':
+		if lx.peek() != '\n' {
+			return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+		}
+		return lexMultilineString
+	case '\\':
+		return lexMultilineStringEscape
+	case stringEnd:
+		/// Found " → try to read two more "".
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				/// Peek ahead: the string can contain " and "", including at the
+				/// end: """str"""""
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == stringEnd {
+					/// Check if we already lexed five quotes; if so we now
+					/// have six, which is an error.
+					if strings.HasSuffix(lx.current(), `"""""`) {
+						return lx.errorf(`unexpected '""""""'`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineString
+				}
+
+				lx.backup() /// backup: don't include the """ in the item.
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next() /// Read over """ again and discard it.
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+
+	if isControl(r) {
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected "'"`)
+	case isControl(r) || r == '\r':
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case eof:
+		return lx.errorf(`unexpected EOF; expected "'''"`)
+	case '\r':
+		if lx.peek() != '\n' {
+			return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+		}
+		return lexMultilineRawString
+	case rawStringEnd:
+		/// Found ' → try to read two more ''.
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				/// Peek ahead: the string can contain ' and '', including at the
+				/// end: '''str'''''
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == rawStringEnd {
+					/// Check if we already lexed five quotes; if so we now
+					/// have six, which is an error.
+					if strings.HasSuffix(lx.current(), "'''''") {
+						return lx.errorf(`unexpected "''''''"`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineRawString
+				}
+
+				lx.backup() /// backup: don't include the ''' in the item.
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next() /// Read over ''' again and discard it.
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+
+	if isControl(r) {
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		return lexMultilineString
+	}
+	lx.backup()
+	lx.push(lexMultilineString)
+	return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b', 't', 'n', 'f', 'r', '"', '\\':
+		return lx.pop()
+	case ' ', '\t':
+		// Inside """ .. """ strings you can use \ to escape newlines, and any
+		// amount of whitespace can be between the \ and \n.
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
+		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected four hexadecimal digits after '\u', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf(
+				`expected eight hexadecimal digits after '\U', but got %q instead`,
+				lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case '0':
+		return lexBaseNumberOrDate
+	}
+
+	if !isDigit(r) {
+		// The only way to reach this state is if the value starts
+		// with a digit, so specifically treat anything else as an
+		// error.
+		return lx.errorf("expected a digit but got %q", r)
+	}
+
+	return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '-', ':':
+		return lexDatetime
+	case '_':
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDatetime
+	}
+	switch r {
+	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
+		return lexDatetime
+	}
+
+	lx.backup()
+	lx.emitTrim(itemDatetime)
+	return lx.pop()
+}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a sign.
+// It assumes the sign has already been consumed. Values which start with a sign
+// are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+	r := lx.next()
+
+	// Special error cases to give users better error messages
+	switch r {
+	case 'i':
+		if !lx.accept('n') || !lx.accept('f') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case 'n':
+		if !lx.accept('a') || !lx.accept('n') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case '0':
+		p := lx.peek()
+		switch p {
+		case 'b', 'o', 'x':
+			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+		}
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+
+	return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			return lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			return lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	case isControl(r):
+		return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	lx.ignore()
+	return nextState
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+	return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// Control characters except \t, \r, and \n.
+func isControl(r rune) bool {
+	switch r {
+	case '\t', '\r', '\n':
+		return false
+	default:
+		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+	}
+}
+
+func isDigit(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func isOctal(r rune) bool {
+	return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+	return r == '0' || r == '1'
+}
+
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' ||
+		r == '-'
+}
+
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
+}
+
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemKeyEnd:
+		return "KeyEnd"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	case itemInlineTableStart:
+		return "InlineTableStart"
+	case itemInlineTableEnd:
+		return "InlineTableEnd"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}

+ 739 - 0
vendor/github.com/BurntSushi/toml/parse.go

@@ -0,0 +1,739 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type parser struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	lx      *lexer
+
+	ordered    []Key           // List of keys in the order that they appear in the TOML data.
+	context    Key             // Full key for the current hash in scope.
+	currentKey string          // Base key name for everything except hashes.
+	approxLine int             // Rough approximation of line number
+	implicits  map[string]bool // Record implied keys (e.g. 'key.group.names').
+}
+
+// ParseError is used when a file can't be parsed: for example invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+	Message string
+	Line    int
+	LastKey string
+}
+
+func (pe ParseError) Error() string {
+	return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+		pe.Line, pe.LastKey, pe.Message)
+}
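
A concrete example of this format (illustrative program; the text after the colon comes from the integer validation further down in this file, and the output shown is approximate):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	_, err := toml.Decode("bad = 1__2", new(map[string]interface{}))
	fmt.Println(err)
	// Prints roughly:
	// Near line 1 (last key parsed 'bad'): Invalid integer "1__2":
	// underscores must be surrounded by digits
}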
+
+func parse(data string) (p *parser, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(ParseError); ok {
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	// Read over the BOM; do this here as the lexer calls
+	// utf8.DecodeRuneInString(), which would mangle the BOM bytes.
+	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+		data = data[2:]
+	}
+
+	// Examine the first few bytes for NULL bytes; this probably means it's a
+	// UTF-16 file (for ASCII text the second byte of each UTF-16 code unit is
+	// NULL). Again, do this here to avoid having to deal with UTF-8/16 stuff
+	// in the lexer.
+	ex := 6
+	if len(data) < 6 {
+		ex = len(data)
+	}
+	if strings.ContainsRune(data[:ex], 0) {
+		return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
+	}
+
+	p = &parser{
+		mapping:   make(map[string]interface{}),
+		types:     make(map[string]tomlType),
+		lx:        lex(data),
+		ordered:   make([]Key, 0),
+		implicits: make(map[string]bool),
+	}
+	for {
+		item := p.next()
+		if item.typ == itemEOF {
+			break
+		}
+		p.topLevel(item)
+	}
+
+	return p, nil
+}
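
The NULL-byte guard can be exercised end to end with a small illustrative snippet (input constructed to look like UTF-16LE):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// "a=1" encoded UTF-16LE style: every other byte is NULL.
	_, err := toml.Decode("a\x00=\x001\x00", new(map[string]interface{}))
	fmt.Println(err)
	// files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8
}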
+
+func (p *parser) panicf(format string, v ...interface{}) {
+	msg := fmt.Sprintf(format, v...)
+	panic(ParseError{
+		Message: msg,
+		Line:    p.approxLine,
+		LastKey: p.current(),
+	})
+}
+
+func (p *parser) next() item {
+	it := p.lx.nextItem()
+	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
+	if it.typ == itemError {
+		p.panicf("%s", it.val)
+	}
+	return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+	it := p.next()
+	p.assertEqual(typ, it.typ)
+	return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+	if expected != got {
+		p.bug("Expected '%s' but got '%s'.", expected, got)
+	}
+}
+
+func (p *parser) topLevel(item item) {
+	switch item.typ {
+	case itemCommentStart: // # ..
+		p.approxLine = item.line
+		p.expect(itemText)
+	case itemTableStart: // [ .. ]
+		name := p.next()
+		p.approxLine = name.line
+
+		var key Key
+		for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
+			key = append(key, p.keyString(name))
+		}
+		p.assertEqual(itemTableEnd, name.typ)
+
+		p.addContext(key, false)
+		p.setType("", tomlHash)
+		p.ordered = append(p.ordered, key)
+	case itemArrayTableStart: // [[ .. ]]
+		name := p.next()
+		p.approxLine = name.line
+
+		var key Key
+		for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
+			key = append(key, p.keyString(name))
+		}
+		p.assertEqual(itemArrayTableEnd, name.typ)
+
+		p.addContext(key, true)
+		p.setType("", tomlArrayHash)
+		p.ordered = append(p.ordered, key)
+	case itemKeyStart: // key = ..
+		outerContext := p.context
+		/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
+		k := p.next()
+		p.approxLine = k.line
+		var key Key
+		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+			key = append(key, p.keyString(k))
+		}
+		p.assertEqual(itemKeyEnd, k.typ)
+
+		/// The current key is the last part.
+		p.currentKey = key[len(key)-1]
+
+		/// All the other parts (if any) are the context; need to set each part
+		/// as implicit.
+		context := key[:len(key)-1]
+		for i := range context {
+			p.addImplicitContext(append(p.context, context[i:i+1]...))
+		}
+
+		/// Set value.
+		val, typ := p.value(p.next(), false)
+		p.set(p.currentKey, val, typ)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+
+		/// Remove the context we added (preserving any context from [tbl] lines).
+		p.context = outerContext
+		p.currentKey = ""
+	default:
+		p.bug("Unexpected type at top level: %s", item.typ)
+	}
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+	switch it.typ {
+	case itemText:
+		return it.val
+	case itemString, itemMultilineString,
+		itemRawString, itemRawMultilineString:
+		s, _ := p.value(it, false)
+		return s.(string)
+	default:
+		p.bug("Unexpected key type: %s", it.typ)
+	}
+	panic("unreachable")
+}
+
+var datetimeRepl = strings.NewReplacer(
+	"z", "Z",
+	"t", "T",
+	" ", "T")
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
+	switch it.typ {
+	case itemString:
+		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+	case itemMultilineString:
+		return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+	case itemRawString:
+		return it.val, p.typeOfPrimitive(it)
+	case itemRawMultilineString:
+		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+	case itemInteger:
+		return p.valueInteger(it)
+	case itemFloat:
+		return p.valueFloat(it)
+	case itemBool:
+		switch it.val {
+		case "true":
+			return true, p.typeOfPrimitive(it)
+		case "false":
+			return false, p.typeOfPrimitive(it)
+		default:
+			p.bug("Expected boolean value, but got '%s'.", it.val)
+		}
+	case itemDatetime:
+		return p.valueDatetime(it)
+	case itemArray:
+		return p.valueArray(it)
+	case itemInlineTableStart:
+		return p.valueInlineTable(it, parentIsArray)
+	default:
+		p.bug("Unexpected value type: %s", it.typ)
+	}
+	panic("unreachable")
+}
+
+func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+	if !numUnderscoresOK(it.val) {
+		p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
+	}
+	if numHasLeadingZero(it.val) {
+		p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
+	}
+
+	num, err := strconv.ParseInt(it.val, 0, 64)
+	if err != nil {
+		// Distinguish integer values. Normally, it'd be a bug if the lexer
+		// provides an invalid integer, but it's possible that the number is
+		// out of range of valid values (which the lexer cannot determine).
+		// So mark the former as a bug but the latter as a legitimate user
+		// error.
+		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+			p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
+		} else {
+			p.bug("Expected integer value, but got '%s'.", it.val)
+		}
+	}
+	return num, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+	parts := strings.FieldsFunc(it.val, func(r rune) bool {
+		switch r {
+		case '.', 'e', 'E':
+			return true
+		}
+		return false
+	})
+	for _, part := range parts {
+		if !numUnderscoresOK(part) {
+			p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
+		}
+	}
+	if len(parts) > 0 && numHasLeadingZero(parts[0]) {
+		p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
+	}
+	if !numPeriodsOK(it.val) {
+		// As a special case, numbers like '123.' or '1.e2',
+		// which are valid as far as Go/strconv are concerned,
+		// must be rejected because TOML says that a fractional
+		// part consists of '.' followed by 1+ digits.
+		p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
+	}
+	val := strings.Replace(it.val, "_", "", -1)
+	if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+		val = "nan"
+	}
+	num, err := strconv.ParseFloat(val, 64)
+	if err != nil {
+		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+			p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
+		} else {
+			p.panicf("Invalid float value: %q", it.val)
+		}
+	}
+	return num, p.typeOfPrimitive(it)
+}
+
+var dtTypes = []struct {
+	fmt  string
+	zone *time.Location
+}{
+	{time.RFC3339Nano, time.Local},
+	{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
+	{"2006-01-02", internal.LocalDate},
+	{"15:04:05.999999999", internal.LocalTime},
+}
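
The table maps datetime shapes to zones roughly as follows (example values invented):

// 1979-05-27T07:32:00Z   → time.RFC3339Nano (offset datetime)
// 1979-05-27T07:32:00    → internal.LocalDatetime ("datetime-local")
// 1979-05-27             → internal.LocalDate ("date-local")
// 07:32:00               → internal.LocalTime ("time-local")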
+
+func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+	it.val = datetimeRepl.Replace(it.val)
+	var (
+		t   time.Time
+		ok  bool
+		err error
+	)
+	for _, dt := range dtTypes {
+		t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
+		if err == nil {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		p.panicf("Invalid TOML Datetime: %q.", it.val)
+	}
+	return t, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueArray(it item) (interface{}, tomlType) {
+	p.setType(p.currentKey, tomlArray)
+
+	// p.setType(p.currentKey, typ)
+	var (
+		array []interface{}
+		types []tomlType
+	)
+	for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+		if it.typ == itemCommentStart {
+			p.expect(itemText)
+			continue
+		}
+
+		val, typ := p.value(it, true)
+		array = append(array, val)
+		types = append(types, typ)
+	}
+	return array, tomlArray
+}
+
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+	var (
+		hash         = make(map[string]interface{})
+		outerContext = p.context
+		outerKey     = p.currentKey
+	)
+
+	p.context = append(p.context, p.currentKey)
+	prevContext := p.context
+	p.currentKey = ""
+
+	p.addImplicit(p.context)
+	p.addContext(p.context, parentIsArray)
+
+	/// Loop over all table key/value pairs.
+	for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+		if it.typ == itemCommentStart {
+			p.expect(itemText)
+			continue
+		}
+
+		/// Read all key parts.
+		k := p.next()
+		p.approxLine = k.line
+		var key Key
+		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+			key = append(key, p.keyString(k))
+		}
+		p.assertEqual(itemKeyEnd, k.typ)
+
+		/// The current key is the last part.
+		p.currentKey = key[len(key)-1]
+
+		/// All the other parts (if any) are the context; need to set each part
+		/// as implicit.
+		context := key[:len(key)-1]
+		for i := range context {
+			p.addImplicitContext(append(p.context, context[i:i+1]...))
+		}
+
+		/// Set the value.
+		val, typ := p.value(p.next(), false)
+		p.set(p.currentKey, val, typ)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		hash[p.currentKey] = val
+
+		/// Restore context.
+		p.context = prevContext
+	}
+	p.context = outerContext
+	p.currentKey = outerKey
+	return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+	if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
+		return true
+	}
+	if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+		return true
+	}
+	return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+		}
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+	p.setValue(p.currentKey, val)
+	p.setType(p.currentKey, typ)
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var (
+		tmpHash    interface{}
+		ok         bool
+		hash       = p.mapping
+		keyContext Key
+	)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.panicf("Key '%s' has already been defined.", keyContext)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Normally redefining keys isn't allowed, but the key could have been
+		// defined implicitly and it's allowed to be redefined concretely. (See
+		// the `valid/implicit-and-explicit-after.toml` in toml-test)
+		//
+		// But we have to make sure to stop marking it as an implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isArray(keyContext) {
+			p.removeImplicit(keyContext)
+			hash[key] = value
+			return
+		}
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+	keyContext := make(Key, 0, len(p.context)+1)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+	}
+	if len(key) > 0 { // allow type setting for hashes
+		keyContext = append(keyContext, key)
+	}
+	p.types[keyContext.String()] = typ
+}
+
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = true }
+func (p *parser) removeImplicit(key Key)  { p.implicits[key.String()] = false }
+func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
+func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+	p.addImplicit(key)
+	p.addContext(key, false)
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) > 0 && s[0] == '\n' {
+		return s[1:]
+	}
+	if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+		return s[2:]
+	}
+	return s
+}
+
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+		}
+	}
+	return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case ' ', '\t':
+			p.panicf("invalid escape: '\\%c'", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+			"lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}

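The number and datetime rules in parse.go are what the decoder ultimately enforces: underscores must sit between digits, a `.` needs at least one digit after it, and datetimes are tried against the four layouts in dtTypes. A minimal sketch of how these rules surface through the package's public `toml.Decode` API (the document and struct below are illustrative, not part of this commit):

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Big  int64
		Pi   float64
		When time.Time
	}
	doc := `
big  = 1_000_000
pi   = 3.141_5
when = 2006-01-02T15:04:05Z
`
	if _, err := toml.Decode(doc, &v); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(v.Big, v.Pi, v.When)

	// numUnderscoresOK rejects consecutive or trailing underscores, so
	// this parse fails with "underscores must be surrounded by digits".
	if _, err := toml.Decode(`bad = 1__000`, &map[string]interface{}{}); err != nil {
		fmt.Println("as expected:", err)
	}
}
```
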
+ 70 - 0
vendor/github.com/BurntSushi/toml/type_check.go

@@ -0,0 +1,70 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be gravitating
+// toward adding real composite types.
+type tomlType interface {
+	typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+	if t1 == nil || t2 == nil {
+		return false
+	}
+	return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+	return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+	return btype.typeString()
+}
+
+var (
+	tomlInteger   tomlBaseType = "Integer"
+	tomlFloat     tomlBaseType = "Float"
+	tomlDatetime  tomlBaseType = "Datetime"
+	tomlString    tomlBaseType = "String"
+	tomlBool      tomlBaseType = "Bool"
+	tomlArray     tomlBaseType = "Array"
+	tomlHash      tomlBaseType = "Hash"
+	tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+	switch lexItem.typ {
+	case itemInteger:
+		return tomlInteger
+	case itemFloat:
+		return tomlFloat
+	case itemDatetime:
+		return tomlDatetime
+	case itemString:
+		return tomlString
+	case itemMultilineString:
+		return tomlString
+	case itemRawString:
+		return tomlString
+	case itemRawMultilineString:
+		return tomlString
+	case itemBool:
+		return tomlBool
+	}
+	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+	panic("unreachable")
+}

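These tomlBaseType strings are not purely internal: they are exactly what `MetaData.Type` reports for decoded keys. A short sketch (the document is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	md, err := toml.Decode(`
n    = 42
name = "kilo"

[server]
port = 8080
`, &v)
	if err != nil {
		panic(err)
	}
	// Each string printed below is one of the tomlBaseType values above.
	fmt.Println(md.Type("n"))              // Integer
	fmt.Println(md.Type("name"))           // String
	fmt.Println(md.Type("server"))         // Hash
	fmt.Println(md.Type("server", "port")) // Integer
}
```
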
+ 242 - 0
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+	name  string       // the name of the field (`toml` tag included)
+	tag   bool         // whether field has a `toml` tag
+	index []int        // represents the depth of an anonymous field
+	typ   reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" && !sf.Anonymous { // unexported
+					continue
+				}
+				opts := getOptions(sf.Tag)
+				if opts.skip {
+					continue
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := opts.name != ""
+					name := opts.name
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, field{name, tagged, index, ft})
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					f := field{name: ft.Name(), index: index, typ: ft}
+					next = append(next, f)
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with TOML tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}

+ 1 - 1
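typeFields and dominantField give TOML decoding the same embedded-field precedence as encoding/json: shallower fields win, a `toml`-tagged field beats an untagged one at the same depth, and unresolved ties are dropped. A sketch of those rules in action (the types are invented for illustration; key matching against field names is case-insensitive in this package):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string // shadowed: Node.Name is shallower, so it dominates
	Addr string
}

type Node struct {
	Base
	Name string `toml:"name"`
}

func main() {
	var n Node
	if _, err := toml.Decode("name = \"node-1\"\naddr = \"10.0.0.1\"", &n); err != nil {
		panic(err)
	}
	// "name" fills Node.Name while Base.Name stays empty; "addr" exists
	// only on the embedded Base, so the promoted field is filled.
	fmt.Printf("%q %q %q\n", n.Name, n.Base.Name, n.Addr)
}
```
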
vendor/golang.org/x/lint/LICENSE → vendor/golang.org/x/exp/typeparams/LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2013 The Go Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
 
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are

+ 186 - 0
vendor/golang.org/x/exp/typeparams/common.go

@@ -0,0 +1,186 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that interact
+// with generic Go code, as introduced with Go 1.18.
+//
+// THIS PACKAGE IS CURRENTLY EXPERIMENTAL AND MAY CHANGE. While the API is
+// being tested, we may find the need for improvement. This caveat will be
+// removed shortly.
+//
+// Many of the types and functions in this package are proxies for the new APIs
+// introduced in the standard library with Go 1.18. For example, the
+// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
+// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
+// versions older than 1.18 these helpers are implemented as stubs, allowing
+// users of this package to write code that handles generic constructs inline,
+// even if the Go version being used to compile does not support generics.
+//
+// Additionally, this package contains common utilities for working with the
+// new generic constructs, to supplement the standard library APIs. Notably,
+// the NormalTerms API computes a minimal representation of the structural
+// restrictions on a type parameter. In the future, these supplemental APIs may
+// be available in the standard library.
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+func Enabled() bool {
+	return enabled
+}
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+	case *IndexListExpr:
+		return e.X, e.Lbrack, e.Indices, e.Rbrack
+	}
+	return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(indices) {
+	case 0:
+		panic("empty indices")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  indices[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: indices,
+			Rbrack:  rbrack,
+		}
+	}
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+	_, ok := t.(*TypeParam)
+	return ok
+}
+
+// OriginMethod returns the origin method associated with the method fn. For
+// methods on a non-generic receiver base type, this is just fn. However, for
+// methods with a generic receiver, OriginMethod returns the corresponding
+// method in the method set of the origin type.
+//
+// As a special case, if fn is not a method (has no receiver), OriginMethod
+// returns fn.
+func OriginMethod(fn *types.Func) *types.Func {
+	recv := fn.Type().(*types.Signature).Recv()
+	if recv == nil {
+		return fn
+	}
+	base := recv.Type()
+	p, isPtr := base.(*types.Pointer)
+	if isPtr {
+		base = p.Elem()
+	}
+	named, isNamed := base.(*types.Named)
+	if !isNamed {
+		// Receiver is a *types.Interface.
+		return fn
+	}
+	if ForNamed(named).Len() == 0 {
+		// Receiver base has no type parameters, so we can avoid the lookup below.
+		return fn
+	}
+	orig := NamedTypeOrigin(named)
+	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+	return gfn.(*types.Func)
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+//  type Interface[T any] interface {
+//  	Accept(T)
+//  }
+//
+//  type Container[T any] struct {
+//  	Element T
+//  }
+//
+//  func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
+	// If V and T are not both named, or do not have matching non-empty type
+	// parameter lists, fall back on types.AssignableTo.
+
+	VN, Vnamed := V.(*types.Named)
+	TN, Tnamed := T.(*types.Named)
+	if !Vnamed || !Tnamed {
+		return types.AssignableTo(V, T)
+	}
+
+	vtparams := ForNamed(VN)
+	ttparams := ForNamed(TN)
+	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
+		return types.AssignableTo(V, T)
+	}
+
+	// V and T have the same (non-zero) number of type params. Instantiate both
+	// with the type parameters of V. This must always succeed for V, and will
+	// succeed for T if and only if the type set of each type parameter of V is a
+	// subset of the type set of the corresponding type parameter of T, meaning
+	// that every instantiation of V corresponds to a valid instantiation of T.
+
+	// Minor optimization: ensure we share a context across the two
+	// instantiations below.
+	if ctxt == nil {
+		ctxt = NewContext()
+	}
+
+	var targs []types.Type
+	for i := 0; i < vtparams.Len(); i++ {
+		targs = append(targs, vtparams.At(i))
+	}
+
+	vinst, err := Instantiate(ctxt, V, targs, true)
+	if err != nil {
+		panic("type parameters should satisfy their own constraints")
+	}
+
+	tinst, err := Instantiate(ctxt, T, targs, true)
+	if err != nil {
+		return false
+	}
+
+	return types.AssignableTo(vinst, tinst)
+}

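GenericAssignableTo is easiest to see with the Container/Interface pair from its own doc comment. A sketch that type-checks that snippet and runs the check; it assumes a Go 1.18+ toolchain, since older parsers reject the generic syntax:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/exp/typeparams"
)

func main() {
	const src = `package p

type Interface[T any] interface{ Accept(T) }

type Container[T any] struct{ Element T }

func (c Container[T]) Accept(t T) { c.Element = t }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	V := pkg.Scope().Lookup("Container").Type()
	T := pkg.Scope().Lookup("Interface").Type()
	// Both types are uninstantiated generics, so this applies the
	// parameter-wise instantiation rule described in the doc comment.
	fmt.Println(typeparams.GenericAssignableTo(nil, V, T)) // true
}
```
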
+ 200 - 0
vendor/golang.org/x/exp/typeparams/normalize.go

@@ -0,0 +1,200 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"errors"
+	"fmt"
+	"go/types"
+	"os"
+	"strings"
+)
+
+const debug = false
+
+// ErrEmptyTypeSet is returned if a type set computation results in a type set
+// with no types.
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//  type A interface{ ~string|~[]byte }
+//
+//  type B interface{ int|string }
+//
+//  type C interface { ~string|~int }
+//
+//  type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(typ types.Type) ([]*Term, error) {
+	if tparam, ok := typ.(*TypeParam); ok {
+		constraint := tparam.Constraint()
+		if constraint == nil {
+			return nil, fmt.Errorf("%s has nil constraint", tparam)
+		}
+		iface, _ := constraint.Underlying().(*types.Interface)
+		if iface == nil {
+			return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+		}
+		typ = iface
+	}
+	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+	if err != nil {
+		return nil, err
+	}
+	if tset.terms.isEmpty() {
+		return nil, ErrEmptyTypeSet
+	}
+	if tset.terms.isAll() {
+		return nil, nil
+	}
+	var terms []*Term
+	for _, term := range tset.terms {
+		terms = append(terms, NewTerm(term.tilde, term.typ))
+	}
+	return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+	complete bool
+	terms    termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+	if t == nil {
+		panic("nil type")
+	}
+
+	if debug {
+		indentf(depth, "%s", t.String())
+		defer func() {
+			if err != nil {
+				indentf(depth, "=> %s", err)
+			} else {
+				indentf(depth, "=> %s", res.terms.String())
+			}
+		}()
+	}
+
+	const maxTermCount = 100
+	if tset, ok := seen[t]; ok {
+		if !tset.complete {
+			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+		}
+		return tset, nil
+	}
+
+	// Mark the current type as seen to avoid infinite recursion.
+	tset := new(termSet)
+	defer func() {
+		tset.complete = true
+	}()
+	seen[t] = tset
+
+	switch u := t.Underlying().(type) {
+	case *types.Interface:
+		// The term set of an interface is the intersection of the term sets of its
+		// embedded types.
+		tset.terms = allTermlist
+		for i := 0; i < u.NumEmbeddeds(); i++ {
+			embedded := u.EmbeddedType(i)
+			if _, ok := embedded.Underlying().(*TypeParam); ok {
+				return nil, fmt.Errorf("invalid embedded type %T", embedded)
+			}
+			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+			if err != nil {
+				return nil, err
+			}
+			tset.terms = tset.terms.intersect(tset2.terms)
+		}
+	case *Union:
+		// The term set of a union is the union of term sets of its terms.
+		tset.terms = nil
+		for i := 0; i < u.Len(); i++ {
+			t := u.Term(i)
+			var terms termlist
+			switch t.Type().Underlying().(type) {
+			case *types.Interface:
+				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+				if err != nil {
+					return nil, err
+				}
+				terms = tset2.terms
+			case *TypeParam, *Union:
+				// A stand-alone type parameter or union is not permitted as union
+				// term.
+				return nil, fmt.Errorf("invalid union term %T", t)
+			default:
+				if t.Type() == types.Typ[types.Invalid] {
+					continue
+				}
+				terms = termlist{{t.Tilde(), t.Type()}}
+			}
+			tset.terms = tset.terms.union(terms)
+			if len(tset.terms) > maxTermCount {
+				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+			}
+		}
+	case *TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}

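NormalTerms can be exercised directly on a constraint-style interface built with the Union/Term proxies; on Go 1.18+ these are the real go/types constructors, so the following is a hedged sketch assuming that toolchain:

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/exp/typeparams"
)

func main() {
	// Build interface{ ~string | ~[]byte | int } programmatically.
	u := typeparams.NewUnion([]*typeparams.Term{
		typeparams.NewTerm(true, types.Typ[types.String]),
		typeparams.NewTerm(true, types.NewSlice(types.Typ[types.Byte])),
		typeparams.NewTerm(false, types.Typ[types.Int]),
	})
	iface := types.NewInterfaceType(nil, []types.Type{u})
	iface.Complete()

	terms, err := typeparams.NormalTerms(iface)
	if err != nil {
		panic(err) // e.g. ErrEmptyTypeSet for an unsatisfiable constraint
	}
	for _, t := range terms {
		fmt.Println(t) // ~string, ~[]byte, int (deterministic order)
	}
}
```
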
+ 172 - 0
vendor/golang.org/x/exp/typeparams/termlist.go

@@ -0,0 +1,172 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" ∪ ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// If the type set represented by xl is specified by a single (non-𝓤) term,
+// singleType returns that type. Otherwise it returns nil.
+func (xl termlist) singleType() types.Type {
+	if nl := xl.norm(); len(nl) == 1 {
+		return nl[0].typ // if nl.isAll() then typ is nil, which is ok
+	}
+	return nil
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset yl
+			return false // x is not a subset of yl
+	}
+	return true
+}

+ 202 - 0
vendor/golang.org/x/exp/typeparams/typeparams_go117.go

@@ -0,0 +1,202 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+const enabled = false
+
+func unsupported() {
+	panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+	ast.Expr
+	X       ast.Expr   // expression
+	Lbrack  token.Pos  // position of "["
+	Indices []ast.Expr // index expressions
+	Rbrack  token.Pos  // position of "]"
+}
+
+func (*IndexListExpr) Pos() token.Pos { unsupported(); return token.NoPos }
+func (*IndexListExpr) End() token.Pos { unsupported(); return token.NoPos }
+
+// ForTypeSpec returns an empty field list, as type parameters are not supported
+// at this Go version.
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
+	return nil
+}
+
+// ForFuncType returns an empty field list, as type parameters are not
+// supported at this Go version.
+func ForFuncType(*ast.FuncType) *ast.FieldList {
+	return nil
+}
+
+// TypeParam is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type TypeParam struct{ types.Type }
+
+func (*TypeParam) String() string           { unsupported(); return "" }
+func (*TypeParam) Underlying() types.Type   { unsupported(); return nil }
+func (*TypeParam) Index() int               { unsupported(); return 0 }
+func (*TypeParam) Constraint() types.Type   { unsupported(); return nil }
+func (*TypeParam) SetConstraint(types.Type) { unsupported() }
+func (*TypeParam) Obj() *types.TypeName     { unsupported(); return nil }
+
+// TypeParamList is a placeholder for an empty type parameter list.
+type TypeParamList struct{}
+
+func (*TypeParamList) Len() int          { return 0 }
+func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
+
+// TypeList is a placeholder for an empty type list.
+type TypeList struct{}
+
+func (*TypeList) Len() int          { return 0 }
+func (*TypeList) At(int) types.Type { unsupported(); return nil }
+
+// NewTypeParam is unsupported at this Go version, and panics.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+	unsupported()
+	return nil
+}
+
+// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
+// typeParams is non-empty.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+	if len(recvTypeParams) != 0 || len(typeParams) != 0 {
+		unsupported()
+	}
+	return types.NewSignature(recv, params, results, variadic)
+}
+
+// ForSignature returns an empty slice.
+func ForSignature(*types.Signature) *TypeParamList {
+	return nil
+}
+
+// RecvTypeParams returns a nil slice.
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+	return nil
+}
+
+// IsComparable returns false, as no interfaces are type-restricted at this Go
+// version.
+func IsComparable(*types.Interface) bool {
+	return false
+}
+
+// IsMethodSet returns true, as no interfaces are type-restricted at this Go
+// version.
+func IsMethodSet(*types.Interface) bool {
+	return true
+}
+
+// IsImplicit returns false, as no interfaces are implicit at this Go version.
+func IsImplicit(*types.Interface) bool {
+	return false
+}
+
+// MarkImplicit does nothing, because this Go version does not have implicit
+// interfaces.
+func MarkImplicit(*types.Interface) {}
+
+// ForNamed returns an empty type parameter list, as type parameters are not
+// supported at this Go version.
+func ForNamed(*types.Named) *TypeParamList {
+	return nil
+}
+
+// SetForNamed panics if tparams is non-empty.
+func SetForNamed(_ *types.Named, tparams []*TypeParam) {
+	if len(tparams) > 0 {
+		unsupported()
+	}
+}
+
+// NamedTypeArgs returns nil.
+func NamedTypeArgs(*types.Named) *TypeList {
+	return nil
+}
+
+// NamedTypeOrigin is the identity method at this Go version.
+func NamedTypeOrigin(named *types.Named) types.Type {
+	return named
+}
+
+// Term holds information about a structural type restriction.
+type Term struct {
+	tilde bool
+	typ   types.Type
+}
+
+func (m *Term) Tilde() bool      { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+	pre := ""
+	if m.tilde {
+		pre = "~"
+	}
+	return pre + m.typ.String()
+}
+
+// NewTerm creates a new placeholder term type.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) String() string         { unsupported(); return "" }
+func (*Union) Underlying() types.Type { unsupported(); return nil }
+func (*Union) Len() int               { return 0 }
+func (*Union) Term(i int) *Term       { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+	unsupported()
+	return nil
+}
+
+// InitInstances is a noop at this Go version.
+func InitInstances(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+	TypeArgs *TypeList
+	Type     types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// NewContext returns a placeholder Context instance.
+func NewContext() *Context {
+	return &Context{}
+}
+
+// Instantiate is unsupported on this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	unsupported()
+	return nil, nil
+}

+ 148 - 0
vendor/golang.org/x/exp/typeparams/typeparams_go118.go

@@ -0,0 +1,148 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+const enabled = true
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+	return types.NewTypeParam(name, constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+	return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams()
+func ForSignature(sig *types.Signature) *TypeParamList {
+	return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+	return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+	return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+	return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+	return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+	iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+	return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+	n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+	return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) types.Type {
+	return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+	return types.NewUnion(terms)
+}
+
+// InitInstances initializes info to record information about type and function
+// instances.
+func InitInstances(info *types.Info) {
+	info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+	return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// NewContext calls types.NewContext.
+func NewContext() *Context {
+	return types.NewContext()
+}
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	return types.Instantiate(ctxt, typ, targs, validate)
+}

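Together, the go117 and go118 files let a tool compile against one API on any supported toolchain. A sketch of the usual pattern: on Go 1.18+, ForTypeSpec yields the real type parameter list, while on older versions the stub returns nil (and the parse below would already fail on the generic syntax):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/exp/typeparams"
)

func main() {
	const src = `package p

type Pair[K comparable, V any] struct {
	Key K
	Val V
}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if ts, ok := n.(*ast.TypeSpec); ok {
			if tp := typeparams.ForTypeSpec(ts); tp != nil {
				fmt.Printf("%s has %d type parameter(s)\n", ts.Name, tp.NumFields())
			}
		}
		return true
	})
}
```
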
+ 170 - 0
vendor/golang.org/x/exp/typeparams/typeterm.go

@@ -0,0 +1,170 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//   ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//   𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//   T:  &term{false, T}  == {T}                    // set of type T
+//  ~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+//
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆ T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+	if debug && (x.typ == nil || y.typ == nil) {
+		panic("invalid argument(s)")
+	}
+	ux := x.typ
+	if y.tilde {
+		ux = under(ux)
+	}
+	uy := y.typ
+	if x.tilde {
+		uy = under(uy)
+	}
+	return !types.Identical(ux, uy)
+}

+ 0 - 19
vendor/golang.org/x/lint/.travis.yml

@@ -1,19 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.10.x
-  - 1.11.x
-  - master
-
-go_import_path: golang.org/x/lint
-
-install:
-  - go get -t -v ./...
-
-script:
-  - go test -v -race ./...
-
-matrix:
-  allow_failures:
-    - go: master
-  fast_finish: true

+ 0 - 15
vendor/golang.org/x/lint/CONTRIBUTING.md

@@ -1,15 +0,0 @@
-# Contributing to Golint
-
-## Before filing an issue:
-
-### Are you having trouble building golint?
-
-Check you have the latest version of its dependencies. Run
-```
-go get -u golang.org/x/lint/golint
-```
-If you still have problems, consider searching for existing issues before filing a new issue.
-
-## Before sending a pull request:
-
-Have you understood the purpose of golint? Make sure to carefully read `README`.

+ 0 - 93
vendor/golang.org/x/lint/README.md

@@ -1,93 +0,0 @@
-**NOTE:** Golint is [deprecated and frozen](https://github.com/golang/go/issues/38968).
-There's no drop-in replacement for it, but tools such as [Staticcheck](https://staticcheck.io/)
-and `go vet` should be used instead.
-
-Golint is a linter for Go source code.
-
-[![Go Reference](https://pkg.go.dev/badge/golang.org/x/lint.svg)](https://pkg.go.dev/golang.org/x/lint)
-[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
-
-## Installation
-
-Golint requires a
-[supported release of Go](https://golang.org/doc/devel/release.html#policy).
-
-    go get -u golang.org/x/lint/golint
-
-To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting.
-
-## Usage
-
-Invoke `golint` with one or more filenames, directories, or packages named
-by its import path. Golint uses the same
-[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
-the `go` command and therefore
-also supports relative import paths like `./...`. Additionally the `...`
-wildcard can be used as suffix on relative and absolute file paths to recurse
-into them.
-
-The output of this tool is a list of suggestions in Vim quickfix format,
-which is accepted by lots of different editors.
-
-## Purpose
-
-Golint differs from gofmt. Gofmt reformats Go source code, whereas
-golint prints out style mistakes.
-
-Golint differs from govet. Govet is concerned with correctness, whereas
-golint is concerned with coding style. Golint is in use at Google, and it
-seeks to match the accepted style of the open source Go project.
-
-The suggestions made by golint are exactly that: suggestions.
-Golint is not perfect, and has both false positives and false negatives.
-Do not treat its output as a gold standard. We will not be adding pragmas
-or other knobs to suppress specific warnings, so do not expect or require
-code to be completely "lint-free".
-In short, this tool is not, and will never be, trustworthy enough for its
-suggestions to be enforced automatically, for example as part of a build process.
-Golint makes suggestions for many of the mechanically checkable items listed in
-[Effective Go](https://golang.org/doc/effective_go.html) and the
-[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
-
-## Scope
-
-Golint is meant to carry out the stylistic conventions put forth in
-[Effective Go](https://golang.org/doc/effective_go.html) and
-[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
-Changes that are not aligned with those documents will not be considered.
-
-## Contributions
-
-Contributions to this project are welcome provided they are [in scope](#scope),
-though please send mail before starting work on anything major.
-Contributors retain their copyright, so we need you to fill out
-[a short form](https://developers.google.com/open-source/cla/individual)
-before we can accept your contribution.
-
-## Vim
-
-Add this to your ~/.vimrc:
-
-    set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim
-
-If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
-
-Running `:Lint` will run golint on the current file and populate the quickfix list.
-
-Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`
-
-    autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
-
-
-## Emacs
-
-Add this to your `.emacs` file:
-
-    (add-to-list 'load-path (concat (getenv "GOPATH")  "/src/golang.org/x/lint/misc/emacs/"))
-    (require 'golint)
-
-If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
-
-Running M-x golint will run golint on the current file.
-
-For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).

+ 0 - 159
vendor/golang.org/x/lint/golint/golint.go

@@ -1,159 +0,0 @@
-// Copyright (c) 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// golint lints the Go source files named on its command line.
-package main
-
-import (
-	"flag"
-	"fmt"
-	"go/build"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"golang.org/x/lint"
-)
-
-var (
-	minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
-	setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
-	suggestions   int
-)
-
-func usage() {
-	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-	fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
-	fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
-	fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
-	fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
-	fmt.Fprintf(os.Stderr, "Flags:\n")
-	flag.PrintDefaults()
-}
-
-func main() {
-	flag.Usage = usage
-	flag.Parse()
-
-	if flag.NArg() == 0 {
-		lintDir(".")
-	} else {
-		// dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
-		// directory, file or package targets. The distinction affects which
-		// checks are run. It is not valid to mix target types.
-		var dirsRun, filesRun, pkgsRun int
-		var args []string
-		for _, arg := range flag.Args() {
-			if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
-				dirsRun = 1
-				for _, dirname := range allPackagesInFS(arg) {
-					args = append(args, dirname)
-				}
-			} else if isDir(arg) {
-				dirsRun = 1
-				args = append(args, arg)
-			} else if exists(arg) {
-				filesRun = 1
-				args = append(args, arg)
-			} else {
-				pkgsRun = 1
-				args = append(args, arg)
-			}
-		}
-
-		if dirsRun+filesRun+pkgsRun != 1 {
-			usage()
-			os.Exit(2)
-		}
-		switch {
-		case dirsRun == 1:
-			for _, dir := range args {
-				lintDir(dir)
-			}
-		case filesRun == 1:
-			lintFiles(args...)
-		case pkgsRun == 1:
-			for _, pkg := range importPaths(args) {
-				lintPackage(pkg)
-			}
-		}
-	}
-
-	if *setExitStatus && suggestions > 0 {
-		fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
-		os.Exit(1)
-	}
-}
-
-func isDir(filename string) bool {
-	fi, err := os.Stat(filename)
-	return err == nil && fi.IsDir()
-}
-
-func exists(filename string) bool {
-	_, err := os.Stat(filename)
-	return err == nil
-}
-
-func lintFiles(filenames ...string) {
-	files := make(map[string][]byte)
-	for _, filename := range filenames {
-		src, err := ioutil.ReadFile(filename)
-		if err != nil {
-			fmt.Fprintln(os.Stderr, err)
-			continue
-		}
-		files[filename] = src
-	}
-
-	l := new(lint.Linter)
-	ps, err := l.LintFiles(files)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "%v\n", err)
-		return
-	}
-	for _, p := range ps {
-		if p.Confidence >= *minConfidence {
-			fmt.Printf("%v: %s\n", p.Position, p.Text)
-			suggestions++
-		}
-	}
-}
-
-func lintDir(dirname string) {
-	pkg, err := build.ImportDir(dirname, 0)
-	lintImportedPackage(pkg, err)
-}
-
-func lintPackage(pkgname string) {
-	pkg, err := build.Import(pkgname, ".", 0)
-	lintImportedPackage(pkg, err)
-}
-
-func lintImportedPackage(pkg *build.Package, err error) {
-	if err != nil {
-		if _, nogo := err.(*build.NoGoError); nogo {
-			// Don't complain if the failure is due to no Go source files.
-			return
-		}
-		fmt.Fprintln(os.Stderr, err)
-		return
-	}
-
-	var files []string
-	files = append(files, pkg.GoFiles...)
-	files = append(files, pkg.CgoFiles...)
-	files = append(files, pkg.TestGoFiles...)
-	if pkg.Dir != "." {
-		for i, f := range files {
-			files[i] = filepath.Join(pkg.Dir, f)
-		}
-	}
-	// TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
-
-	lintFiles(files...)
-}
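
The removed command above classifies each argument as a directory tree, directory, file, or package import path, and exits if target types are mixed. A minimal, self-contained sketch of that classification rule (illustrative only; not part of this diff, and simplified from the original):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// classify mirrors golint's per-argument dispatch: a "/..." suffix names a
// directory tree, an existing path is a directory or file, and anything
// else is treated as a package import path.
func classify(arg string) string {
	if strings.HasSuffix(arg, "/...") {
		return "directory tree"
	}
	if fi, err := os.Stat(arg); err == nil {
		if fi.IsDir() {
			return "directory"
		}
		return "file"
	}
	return "package"
}

func main() {
	for _, arg := range os.Args[1:] {
		fmt.Printf("%s: %s\n", arg, classify(arg))
	}
}
```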

+ 0 - 309
vendor/golang.org/x/lint/golint/import.go

@@ -1,309 +0,0 @@
-package main
-
-/*
-
-This file holds a direct copy of the import path matching code of
-https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
-replaced when https://golang.org/issue/8768 is resolved.
-
-It has been updated to follow upstream changes in a few ways.
-
-*/
-
-import (
-	"fmt"
-	"go/build"
-	"log"
-	"os"
-	"path"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"strings"
-)
-
-var (
-	buildContext = build.Default
-	goroot       = filepath.Clean(runtime.GOROOT())
-	gorootSrc    = filepath.Join(goroot, "src")
-)
-
-// importPathsNoDotExpansion returns the import paths to use for the given
-// command line, but it does no ... expansion.
-func importPathsNoDotExpansion(args []string) []string {
-	if len(args) == 0 {
-		return []string{"."}
-	}
-	var out []string
-	for _, a := range args {
-		// Arguments are supposed to be import paths, but
-		// as a courtesy to Windows developers, rewrite \ to /
-		// in command-line arguments.  Handles .\... and so on.
-		if filepath.Separator == '\\' {
-			a = strings.Replace(a, `\`, `/`, -1)
-		}
-
-		// Put argument in canonical form, but preserve leading ./.
-		if strings.HasPrefix(a, "./") {
-			a = "./" + path.Clean(a)
-			if a == "./." {
-				a = "."
-			}
-		} else {
-			a = path.Clean(a)
-		}
-		if a == "all" || a == "std" {
-			out = append(out, allPackages(a)...)
-			continue
-		}
-		out = append(out, a)
-	}
-	return out
-}
-
-// importPaths returns the import paths to use for the given command line.
-func importPaths(args []string) []string {
-	args = importPathsNoDotExpansion(args)
-	var out []string
-	for _, a := range args {
-		if strings.Contains(a, "...") {
-			if build.IsLocalImport(a) {
-				out = append(out, allPackagesInFS(a)...)
-			} else {
-				out = append(out, allPackages(a)...)
-			}
-			continue
-		}
-		out = append(out, a)
-	}
-	return out
-}
-
-// matchPattern(pattern)(name) reports whether
-// name matches pattern.  Pattern is a limited glob
-// pattern in which '...' means 'any string' and there
-// is no other special syntax.
-func matchPattern(pattern string) func(name string) bool {
-	re := regexp.QuoteMeta(pattern)
-	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
-	// Special case: foo/... matches foo too.
-	if strings.HasSuffix(re, `/.*`) {
-		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
-	}
-	reg := regexp.MustCompile(`^` + re + `$`)
-	return func(name string) bool {
-		return reg.MatchString(name)
-	}
-}
-
-// hasPathPrefix reports whether the path s begins with the
-// elements in prefix.
-func hasPathPrefix(s, prefix string) bool {
-	switch {
-	default:
-		return false
-	case len(s) == len(prefix):
-		return s == prefix
-	case len(s) > len(prefix):
-		if prefix != "" && prefix[len(prefix)-1] == '/' {
-			return strings.HasPrefix(s, prefix)
-		}
-		return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
-	}
-}
-
-// treeCanMatchPattern(pattern)(name) reports whether
-// name or children of name can possibly match pattern.
-// Pattern is the same limited glob accepted by matchPattern.
-func treeCanMatchPattern(pattern string) func(name string) bool {
-	wildCard := false
-	if i := strings.Index(pattern, "..."); i >= 0 {
-		wildCard = true
-		pattern = pattern[:i]
-	}
-	return func(name string) bool {
-		return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
-			wildCard && strings.HasPrefix(name, pattern)
-	}
-}
-
-// allPackages returns all the packages that can be found
-// under the $GOPATH directories and $GOROOT matching pattern.
-// The pattern is either "all" (all packages), "std" (standard packages)
-// or a path including "...".
-func allPackages(pattern string) []string {
-	pkgs := matchPackages(pattern)
-	if len(pkgs) == 0 {
-		fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
-	}
-	return pkgs
-}
-
-func matchPackages(pattern string) []string {
-	match := func(string) bool { return true }
-	treeCanMatch := func(string) bool { return true }
-	if pattern != "all" && pattern != "std" {
-		match = matchPattern(pattern)
-		treeCanMatch = treeCanMatchPattern(pattern)
-	}
-
-	have := map[string]bool{
-		"builtin": true, // ignore pseudo-package that exists only for documentation
-	}
-	if !buildContext.CgoEnabled {
-		have["runtime/cgo"] = true // ignore during walk
-	}
-	var pkgs []string
-
-	// Commands
-	cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
-	filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
-		if err != nil || !fi.IsDir() || path == cmd {
-			return nil
-		}
-		name := path[len(cmd):]
-		if !treeCanMatch(name) {
-			return filepath.SkipDir
-		}
-		// Commands are all in cmd/, not in subdirectories.
-		if strings.Contains(name, string(filepath.Separator)) {
-			return filepath.SkipDir
-		}
-
-		// We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
-		name = "cmd/" + name
-		if have[name] {
-			return nil
-		}
-		have[name] = true
-		if !match(name) {
-			return nil
-		}
-		_, err = buildContext.ImportDir(path, 0)
-		if err != nil {
-			if _, noGo := err.(*build.NoGoError); !noGo {
-				log.Print(err)
-			}
-			return nil
-		}
-		pkgs = append(pkgs, name)
-		return nil
-	})
-
-	for _, src := range buildContext.SrcDirs() {
-		if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
-			continue
-		}
-		src = filepath.Clean(src) + string(filepath.Separator)
-		root := src
-		if pattern == "cmd" {
-			root += "cmd" + string(filepath.Separator)
-		}
-		filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
-			if err != nil || !fi.IsDir() || path == src {
-				return nil
-			}
-
-			// Avoid .foo, _foo, and testdata directory trees.
-			_, elem := filepath.Split(path)
-			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
-				return filepath.SkipDir
-			}
-
-			name := filepath.ToSlash(path[len(src):])
-			if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
-				// The name "std" is only the standard library.
-				// If the name is cmd, it's the root of the command tree.
-				return filepath.SkipDir
-			}
-			if !treeCanMatch(name) {
-				return filepath.SkipDir
-			}
-			if have[name] {
-				return nil
-			}
-			have[name] = true
-			if !match(name) {
-				return nil
-			}
-			_, err = buildContext.ImportDir(path, 0)
-			if err != nil {
-				if _, noGo := err.(*build.NoGoError); noGo {
-					return nil
-				}
-			}
-			pkgs = append(pkgs, name)
-			return nil
-		})
-	}
-	return pkgs
-}
-
-// allPackagesInFS is like allPackages but is passed a pattern
-// beginning ./ or ../, meaning it should scan the tree rooted
-// at the given directory. The pattern may also contain "...".
-func allPackagesInFS(pattern string) []string {
-	pkgs := matchPackagesInFS(pattern)
-	if len(pkgs) == 0 {
-		fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
-	}
-	return pkgs
-}
-
-func matchPackagesInFS(pattern string) []string {
-	// Find directory to begin the scan.
-	// Could be smarter but this one optimization
-	// is enough for now, since ... is usually at the
-	// end of a path.
-	i := strings.Index(pattern, "...")
-	dir, _ := path.Split(pattern[:i])
-
-	// pattern begins with ./ or ../.
-	// path.Clean will discard the ./ but not the ../.
-	// We need to preserve the ./ for pattern matching
-	// and in the returned import paths.
-	prefix := ""
-	if strings.HasPrefix(pattern, "./") {
-		prefix = "./"
-	}
-	match := matchPattern(pattern)
-
-	var pkgs []string
-	filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
-		if err != nil || !fi.IsDir() {
-			return nil
-		}
-		if path == dir {
-			// filepath.Walk starts at dir and recurses. For the recursive case,
-			// the path is the result of filepath.Join, which calls filepath.Clean.
-			// The initial case is not Cleaned, though, so we do this explicitly.
-			//
-			// This converts a path like "./io/" to "io". Without this step, running
-			// "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
-			// package, because prepending the prefix "./" to the unclean path would
-			// result in "././io", and match("././io") returns false.
-			path = filepath.Clean(path)
-		}
-
-		// Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
-		_, elem := filepath.Split(path)
-		dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
-		if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
-			return filepath.SkipDir
-		}
-
-		name := prefix + filepath.ToSlash(path)
-		if !match(name) {
-			return nil
-		}
-		if _, err = build.ImportDir(path, 0); err != nil {
-			if _, noGo := err.(*build.NoGoError); !noGo {
-				log.Print(err)
-			}
-			return nil
-		}
-		pkgs = append(pkgs, name)
-		return nil
-	})
-	return pkgs
-}
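
The heart of the path matching deleted above is matchPattern, a limited glob in which "..." means "any string". A runnable distillation of that function (copied from the code above; the example paths are assumptions for illustration):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchPattern compiles the limited glob into an anchored regexp.
// The special case makes "foo/..." match "foo" itself as well.
func matchPattern(pattern string) func(string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.ReplaceAll(re, `\.\.\.`, `.*`)
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	return regexp.MustCompile(`^` + re + `$`).MatchString
}

func main() {
	m := matchPattern("golang.org/x/...")
	fmt.Println(m("golang.org/x"))      // true: "foo/..." matches "foo" too
	fmt.Println(m("golang.org/x/lint")) // true
	fmt.Println(m("golang.org/tools"))  // false
}
```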

+ 0 - 13
vendor/golang.org/x/lint/golint/importcomment.go

@@ -1,13 +0,0 @@
-// Copyright (c) 2018 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// +build go1.12
-
-// Require use of the correct import path only for Go 1.12+ users, so
-// any breakages coincide with people updating their CI configs or
-// whatnot.
-
-package main // import "golang.org/x/lint/golint"
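
For readers unfamiliar with the two mechanisms in this small file: the build tag restricts compilation to Go 1.12+, and the import comment pins the canonical import path. A hedged illustration with assumed names (`//go:build` is the modern spelling of the same constraint):

```go
//go:build go1.12
// +build go1.12

// Package demo illustrates an import comment: in GOPATH mode, the toolchain
// rejects importing this package under any path other than the one declared.
package demo // import "example.com/demo"
```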

+ 0 - 1615
vendor/golang.org/x/lint/lint.go

@@ -1,1615 +0,0 @@
-// Copyright (c) 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package lint contains a linter for Go source code.
-package lint // import "golang.org/x/lint"
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"go/ast"
-	"go/parser"
-	"go/printer"
-	"go/token"
-	"go/types"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"golang.org/x/tools/go/ast/astutil"
-	"golang.org/x/tools/go/gcexportdata"
-)
-
-const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
-
-// A Linter lints Go source code.
-type Linter struct {
-}
-
-// Problem represents a problem in some source code.
-type Problem struct {
-	Position   token.Position // position in source file
-	Text       string         // the prose that describes the problem
-	Link       string         // (optional) the link to the style guide for the problem
-	Confidence float64        // a value in (0,1] estimating the confidence in this problem's correctness
-	LineText   string         // the source line
-	Category   string         // a short name for the general category of the problem
-
-	// If the problem has a suggested fix (the minority case),
-	// ReplacementLine is a full replacement for the relevant line of the source file.
-	ReplacementLine string
-}
-
-func (p *Problem) String() string {
-	if p.Link != "" {
-		return p.Text + "\n\n" + p.Link
-	}
-	return p.Text
-}
-
-type byPosition []Problem
-
-func (p byPosition) Len() int      { return len(p) }
-func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p byPosition) Less(i, j int) bool {
-	pi, pj := p[i].Position, p[j].Position
-
-	if pi.Filename != pj.Filename {
-		return pi.Filename < pj.Filename
-	}
-	if pi.Line != pj.Line {
-		return pi.Line < pj.Line
-	}
-	if pi.Column != pj.Column {
-		return pi.Column < pj.Column
-	}
-
-	return p[i].Text < p[j].Text
-}
-
-// Lint lints src.
-func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
-	return l.LintFiles(map[string][]byte{filename: src})
-}
-
-// LintFiles lints a set of files of a single package.
-// The argument is a map of filename to source.
-func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
-	pkg := &pkg{
-		fset:  token.NewFileSet(),
-		files: make(map[string]*file),
-	}
-	var pkgName string
-	for filename, src := range files {
-		if isGenerated(src) {
-			continue // See issue #239
-		}
-		f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
-		if err != nil {
-			return nil, err
-		}
-		if pkgName == "" {
-			pkgName = f.Name.Name
-		} else if f.Name.Name != pkgName {
-			return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
-		}
-		pkg.files[filename] = &file{
-			pkg:      pkg,
-			f:        f,
-			fset:     pkg.fset,
-			src:      src,
-			filename: filename,
-		}
-	}
-	if len(pkg.files) == 0 {
-		return nil, nil
-	}
-	return pkg.lint(), nil
-}
-
-var (
-	genHdr = []byte("// Code generated ")
-	genFtr = []byte(" DO NOT EDIT.")
-)
-
-// isGenerated reports whether the source file is generated code
-// according to the rules from https://golang.org/s/generatedcode.
-func isGenerated(src []byte) bool {
-	sc := bufio.NewScanner(bytes.NewReader(src))
-	for sc.Scan() {
-		b := sc.Bytes()
-		if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
-			return true
-		}
-	}
-	return false
-}
-
-// pkg represents a package being linted.
-type pkg struct {
-	fset  *token.FileSet
-	files map[string]*file
-
-	typesPkg  *types.Package
-	typesInfo *types.Info
-
-	// sortable is the set of types in the package that implement sort.Interface.
-	sortable map[string]bool
-	// main is whether this is a "main" package.
-	main bool
-
-	problems []Problem
-}
-
-func (p *pkg) lint() []Problem {
-	if err := p.typeCheck(); err != nil {
-		/* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages.
-		if e, ok := err.(types.Error); ok {
-			pos := p.fset.Position(e.Pos)
-			conf := 1.0
-			if strings.Contains(e.Msg, "can't find import: ") {
-				// Golint is probably being run in a context that doesn't support
-				// typechecking (e.g. package files aren't found), so don't warn about it.
-				conf = 0
-			}
-			if conf > 0 {
-				p.errorfAt(pos, conf, category("typechecking"), e.Msg)
-			}
-
-			// TODO(dsymonds): Abort if !e.Soft?
-		}
-		*/
-	}
-
-	p.scanSortable()
-	p.main = p.isMain()
-
-	for _, f := range p.files {
-		f.lint()
-	}
-
-	sort.Sort(byPosition(p.problems))
-
-	return p.problems
-}
-
-// file represents a file being linted.
-type file struct {
-	pkg      *pkg
-	f        *ast.File
-	fset     *token.FileSet
-	src      []byte
-	filename string
-}
-
-func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }
-
-func (f *file) lint() {
-	f.lintPackageComment()
-	f.lintImports()
-	f.lintBlankImports()
-	f.lintExported()
-	f.lintNames()
-	f.lintElses()
-	f.lintRanges()
-	f.lintErrorf()
-	f.lintErrors()
-	f.lintErrorStrings()
-	f.lintReceiverNames()
-	f.lintIncDec()
-	f.lintErrorReturn()
-	f.lintUnexportedReturn()
-	f.lintTimeNames()
-	f.lintContextKeyTypes()
-	f.lintContextArgs()
-}
-
-type link string
-type category string
-
-// The variadic arguments may start with link and category types,
-// and must end with a format string and any arguments.
-// It returns the new Problem.
-func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem {
-	pos := f.fset.Position(n.Pos())
-	if pos.Filename == "" {
-		pos.Filename = f.filename
-	}
-	return f.pkg.errorfAt(pos, confidence, args...)
-}
-
-func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem {
-	problem := Problem{
-		Position:   pos,
-		Confidence: confidence,
-	}
-	if pos.Filename != "" {
-		// The file might not exist in our mapping if a //line directive was encountered.
-		if f, ok := p.files[pos.Filename]; ok {
-			problem.LineText = srcLine(f.src, pos)
-		}
-	}
-
-argLoop:
-	for len(args) > 1 { // always leave at least the format string in args
-		switch v := args[0].(type) {
-		case link:
-			problem.Link = string(v)
-		case category:
-			problem.Category = string(v)
-		default:
-			break argLoop
-		}
-		args = args[1:]
-	}
-
-	problem.Text = fmt.Sprintf(args[0].(string), args[1:]...)
-
-	p.problems = append(p.problems, problem)
-	return &p.problems[len(p.problems)-1]
-}
-
-var newImporter = func(fset *token.FileSet) types.ImporterFrom {
-	return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
-}
-
-func (p *pkg) typeCheck() error {
-	config := &types.Config{
-		// By setting a no-op error reporter, the type checker does as much work as possible.
-		Error:    func(error) {},
-		Importer: newImporter(p.fset),
-	}
-	info := &types.Info{
-		Types:  make(map[ast.Expr]types.TypeAndValue),
-		Defs:   make(map[*ast.Ident]types.Object),
-		Uses:   make(map[*ast.Ident]types.Object),
-		Scopes: make(map[ast.Node]*types.Scope),
-	}
-	var anyFile *file
-	var astFiles []*ast.File
-	for _, f := range p.files {
-		anyFile = f
-		astFiles = append(astFiles, f.f)
-	}
-	pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info)
-	// Remember the typechecking info, even if config.Check failed,
-	// since we will get partial information.
-	p.typesPkg = pkg
-	p.typesInfo = info
-	return err
-}
-
-func (p *pkg) typeOf(expr ast.Expr) types.Type {
-	if p.typesInfo == nil {
-		return nil
-	}
-	return p.typesInfo.TypeOf(expr)
-}
-
-func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool {
-	n, ok := typ.(*types.Named)
-	if !ok {
-		return false
-	}
-	tn := n.Obj()
-	return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name
-}
-
-// scopeOf returns the tightest scope encompassing id.
-func (p *pkg) scopeOf(id *ast.Ident) *types.Scope {
-	var scope *types.Scope
-	if obj := p.typesInfo.ObjectOf(id); obj != nil {
-		scope = obj.Parent()
-	}
-	if scope == p.typesPkg.Scope() {
-		// We were given a top-level identifier.
-		// Use the file-level scope instead of the package-level scope.
-		pos := id.Pos()
-		for _, f := range p.files {
-			if f.f.Pos() <= pos && pos < f.f.End() {
-				scope = p.typesInfo.Scopes[f.f]
-				break
-			}
-		}
-	}
-	return scope
-}
-
-func (p *pkg) scanSortable() {
-	p.sortable = make(map[string]bool)
-
-	// bitfield for which methods exist on each type.
-	const (
-		Len = 1 << iota
-		Less
-		Swap
-	)
-	nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
-	has := make(map[string]int)
-	for _, f := range p.files {
-		f.walk(func(n ast.Node) bool {
-			fn, ok := n.(*ast.FuncDecl)
-			if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
-				return true
-			}
-			// TODO(dsymonds): We could check the signature to be more precise.
-			recv := receiverType(fn)
-			if i, ok := nmap[fn.Name.Name]; ok {
-				has[recv] |= i
-			}
-			return false
-		})
-	}
-	for typ, ms := range has {
-		if ms == Len|Less|Swap {
-			p.sortable[typ] = true
-		}
-	}
-}
-
-func (p *pkg) isMain() bool {
-	for _, f := range p.files {
-		if f.isMain() {
-			return true
-		}
-	}
-	return false
-}
-
-func (f *file) isMain() bool {
-	return f.f.Name.Name == "main"
-}
-
-// lintPackageComment checks package comments. It complains if
-// there is no package comment, or if it is not of the right form.
-// This has a notable false positive in that a package comment
-// could rightfully appear in a different file of the same package,
-// but that's not easy to fix since this linter is file-oriented.
-func (f *file) lintPackageComment() {
-	if f.isTest() {
-		return
-	}
-
-	const ref = styleGuideBase + "#package-comments"
-	prefix := "Package " + f.f.Name.Name + " "
-
-	// Look for a detached package comment.
-	// First, scan for the last comment that occurs before the "package" keyword.
-	var lastCG *ast.CommentGroup
-	for _, cg := range f.f.Comments {
-		if cg.Pos() > f.f.Package {
-			// Gone past "package" keyword.
-			break
-		}
-		lastCG = cg
-	}
-	if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) {
-		endPos := f.fset.Position(lastCG.End())
-		pkgPos := f.fset.Position(f.f.Package)
-		if endPos.Line+1 < pkgPos.Line {
-			// There isn't a great place to anchor this error;
-			// the start of the blank lines between the doc and the package statement
-			// is at least pointing at the location of the problem.
-			pos := token.Position{
-				Filename: endPos.Filename,
-				// Offset not set; it is non-trivial, and doesn't appear to be needed.
-				Line:   endPos.Line + 1,
-				Column: 1,
-			}
-			f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement")
-			return
-		}
-	}
-
-	if f.f.Doc == nil {
-		f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package")
-		return
-	}
-	s := f.f.Doc.Text()
-	if ts := strings.TrimLeft(s, " \t"); ts != s {
-		f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space")
-		s = ts
-	}
-	// Only non-main packages need to keep to this form.
-	if !f.pkg.main && !strings.HasPrefix(s, prefix) {
-		f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix)
-	}
-}
-
-// lintBlankImports complains if a non-main package has blank imports that are
-// not documented.
-func (f *file) lintBlankImports() {
-	// In package main and in tests, we don't complain about blank imports.
-	if f.pkg.main || f.isTest() {
-		return
-	}
-
-	// The first element of each contiguous group of blank imports should have
-	// an explanatory comment of some kind.
-	for i, imp := range f.f.Imports {
-		pos := f.fset.Position(imp.Pos())
-
-		if !isBlank(imp.Name) {
-			continue // Ignore non-blank imports.
-		}
-		if i > 0 {
-			prev := f.f.Imports[i-1]
-			prevPos := f.fset.Position(prev.Pos())
-			if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
-				continue // A subsequent blank in a group.
-			}
-		}
-
-		// This is the first blank import of a group.
-		if imp.Doc == nil && imp.Comment == nil {
-			ref := ""
-			f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it")
-		}
-	}
-}
-
-// lintImports examines import blocks.
-func (f *file) lintImports() {
-	for _, is := range f.f.Imports {
-		if is.Name != nil && is.Name.Name == "." && !f.isTest() {
-			f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports")
-		}
-
-	}
-}
-
-const docCommentsLink = styleGuideBase + "#doc-comments"
-
-// lintExported examines the exported names.
-// It complains if any required doc comments are missing,
-// or if they are not of the right form. The exact rules are in
-// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
-// also tracks the GenDecl structure being traversed to permit
-// doc comments for constants to be on top of the const block.
-// It also complains if the names stutter when combined with
-// the package name.
-func (f *file) lintExported() {
-	if f.isTest() {
-		return
-	}
-
-	var lastGen *ast.GenDecl // last GenDecl entered.
-
-	// Set of GenDecls that have already had missing comments flagged.
-	genDeclMissingComments := make(map[*ast.GenDecl]bool)
-
-	f.walk(func(node ast.Node) bool {
-		switch v := node.(type) {
-		case *ast.GenDecl:
-			if v.Tok == token.IMPORT {
-				return false
-			}
-			// token.CONST, token.TYPE or token.VAR
-			lastGen = v
-			return true
-		case *ast.FuncDecl:
-			f.lintFuncDoc(v)
-			if v.Recv == nil {
-				// Only check for stutter on functions, not methods.
-				// Method names are not used package-qualified.
-				f.checkStutter(v.Name, "func")
-			}
-			// Don't proceed inside funcs.
-			return false
-		case *ast.TypeSpec:
-			// inside a GenDecl, which usually has the doc
-			doc := v.Doc
-			if doc == nil {
-				doc = lastGen.Doc
-			}
-			f.lintTypeDoc(v, doc)
-			f.checkStutter(v.Name, "type")
-			// Don't proceed inside types.
-			return false
-		case *ast.ValueSpec:
-			f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
-			return false
-		}
-		return true
-	})
-}
-
-var (
-	allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
-	anyCapsRE = regexp.MustCompile(`[A-Z]`)
-)
-
-// knownNameExceptions is a set of names that are known to be exempt from naming checks.
-// This is usually because they are constrained by having to match names in the
-// standard library.
-var knownNameExceptions = map[string]bool{
-	"LastInsertId": true, // must match database/sql
-	"kWh":          true,
-}
-
-func isInTopLevel(f *ast.File, ident *ast.Ident) bool {
-	path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End())
-	for _, f := range path {
-		switch f.(type) {
-		case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident:
-			continue
-		}
-		return false
-	}
-	return true
-}
-
-// lintNames examines all names in the file.
-// It complains if any use underscores or incorrect known initialisms.
-func (f *file) lintNames() {
-	// Package names need slightly different handling than other names.
-	if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
-		f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name")
-	}
-	if anyCapsRE.MatchString(f.f.Name.Name) {
-		f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name))
-	}
-
-	check := func(id *ast.Ident, thing string) {
-		if id.Name == "_" {
-			return
-		}
-		if knownNameExceptions[id.Name] {
-			return
-		}
-
-		// Handle two common styles from other languages that don't belong in Go.
-		if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
-			capCount := 0
-			for _, c := range id.Name {
-				if 'A' <= c && c <= 'Z' {
-					capCount++
-				}
-			}
-			if capCount >= 2 {
-				f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
-				return
-			}
-		}
-		if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) {
-			if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
-				should := string(id.Name[1]+'a'-'A') + id.Name[2:]
-				f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
-			}
-		}
-
-		should := lintName(id.Name)
-		if id.Name == should {
-			return
-		}
-
-		if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
-			f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
-			return
-		}
-		f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should)
-	}
-	checkList := func(fl *ast.FieldList, thing string) {
-		if fl == nil {
-			return
-		}
-		for _, f := range fl.List {
-			for _, id := range f.Names {
-				check(id, thing)
-			}
-		}
-	}
-	f.walk(func(node ast.Node) bool {
-		switch v := node.(type) {
-		case *ast.AssignStmt:
-			if v.Tok == token.ASSIGN {
-				return true
-			}
-			for _, exp := range v.Lhs {
-				if id, ok := exp.(*ast.Ident); ok {
-					check(id, "var")
-				}
-			}
-		case *ast.FuncDecl:
-			if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
-				return true
-			}
-
-			thing := "func"
-			if v.Recv != nil {
-				thing = "method"
-			}
-
-			// Exclude naming warnings for functions that are exported to C but
-			// not exported in the Go API.
-			// See https://github.com/golang/lint/issues/144.
-			if ast.IsExported(v.Name.Name) || !isCgoExported(v) {
-				check(v.Name, thing)
-			}
-
-			checkList(v.Type.Params, thing+" parameter")
-			checkList(v.Type.Results, thing+" result")
-		case *ast.GenDecl:
-			if v.Tok == token.IMPORT {
-				return true
-			}
-			var thing string
-			switch v.Tok {
-			case token.CONST:
-				thing = "const"
-			case token.TYPE:
-				thing = "type"
-			case token.VAR:
-				thing = "var"
-			}
-			for _, spec := range v.Specs {
-				switch s := spec.(type) {
-				case *ast.TypeSpec:
-					check(s.Name, thing)
-				case *ast.ValueSpec:
-					for _, id := range s.Names {
-						check(id, thing)
-					}
-				}
-			}
-		case *ast.InterfaceType:
-			// Do not check interface method names.
-			// They are often constrained by the method names of concrete types.
-			for _, x := range v.Methods.List {
-				ft, ok := x.Type.(*ast.FuncType)
-				if !ok { // might be an embedded interface name
-					continue
-				}
-				checkList(ft.Params, "interface method parameter")
-				checkList(ft.Results, "interface method result")
-			}
-		case *ast.RangeStmt:
-			if v.Tok == token.ASSIGN {
-				return true
-			}
-			if id, ok := v.Key.(*ast.Ident); ok {
-				check(id, "range var")
-			}
-			if id, ok := v.Value.(*ast.Ident); ok {
-				check(id, "range var")
-			}
-		case *ast.StructType:
-			for _, f := range v.Fields.List {
-				for _, id := range f.Names {
-					check(id, "struct field")
-				}
-			}
-		}
-		return true
-	})
-}
-
-// lintName returns a different name if it should be different.
-func lintName(name string) (should string) {
-	// Fast path for simple cases: "_" and all lowercase.
-	if name == "_" {
-		return name
-	}
-	allLower := true
-	for _, r := range name {
-		if !unicode.IsLower(r) {
-			allLower = false
-			break
-		}
-	}
-	if allLower {
-		return name
-	}
-
-	// Split camelCase at any lower->upper transition, and split on underscores.
-	// Check each word for common initialisms.
-	runes := []rune(name)
-	w, i := 0, 0 // index of start of word, scan
-	for i+1 <= len(runes) {
-		eow := false // whether we hit the end of a word
-		if i+1 == len(runes) {
-			eow = true
-		} else if runes[i+1] == '_' {
-			// underscore; shift the remainder forward over any run of underscores
-			eow = true
-			n := 1
-			for i+n+1 < len(runes) && runes[i+n+1] == '_' {
-				n++
-			}
-
-			// Leave at most one underscore if the underscore is between two digits
-			if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
-				n--
-			}
-
-			copy(runes[i+1:], runes[i+n+1:])
-			runes = runes[:len(runes)-n]
-		} else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
-			// lower->non-lower
-			eow = true
-		}
-		i++
-		if !eow {
-			continue
-		}
-
-		// [w,i) is a word.
-		word := string(runes[w:i])
-		if u := strings.ToUpper(word); commonInitialisms[u] {
-			// Keep consistent case, which is lowercase only at the start.
-			if w == 0 && unicode.IsLower(runes[w]) {
-				u = strings.ToLower(u)
-			}
-			// All the common initialisms are ASCII,
-			// so we can replace the bytes exactly.
-			copy(runes[w:], []rune(u))
-		} else if w > 0 && strings.ToLower(word) == word {
-			// already all lowercase, and not the first word, so uppercase the first character.
-			runes[w] = unicode.ToUpper(runes[w])
-		}
-		w = i
-	}
-	return string(runes)
-}
-
-// commonInitialisms is a set of common initialisms.
-// Only add entries that are highly unlikely to be non-initialisms.
-// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
-var commonInitialisms = map[string]bool{
-	"ACL":   true,
-	"API":   true,
-	"ASCII": true,
-	"CPU":   true,
-	"CSS":   true,
-	"DNS":   true,
-	"EOF":   true,
-	"GUID":  true,
-	"HTML":  true,
-	"HTTP":  true,
-	"HTTPS": true,
-	"ID":    true,
-	"IP":    true,
-	"JSON":  true,
-	"LHS":   true,
-	"QPS":   true,
-	"RAM":   true,
-	"RHS":   true,
-	"RPC":   true,
-	"SLA":   true,
-	"SMTP":  true,
-	"SQL":   true,
-	"SSH":   true,
-	"TCP":   true,
-	"TLS":   true,
-	"TTL":   true,
-	"UDP":   true,
-	"UI":    true,
-	"UID":   true,
-	"UUID":  true,
-	"URI":   true,
-	"URL":   true,
-	"UTF8":  true,
-	"VM":    true,
-	"XML":   true,
-	"XMPP":  true,
-	"XSRF":  true,
-	"XSS":   true,
-}
-
-// lintTypeDoc examines the doc comment on a type.
-// It complains if the comment is missing from an exported type,
-// or if it is not of the standard form.
-func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) {
-	if !ast.IsExported(t.Name.Name) {
-		return
-	}
-	if doc == nil {
-		f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name)
-		return
-	}
-
-	s := doc.Text()
-	articles := [...]string{"A", "An", "The"}
-	for _, a := range articles {
-		if strings.HasPrefix(s, a+" ") {
-			s = s[len(a)+1:]
-			break
-		}
-	}
-	if !strings.HasPrefix(s, t.Name.Name+" ") {
-		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name)
-	}
-}
-
-var commonMethods = map[string]bool{
-	"Error":     true,
-	"Read":      true,
-	"ServeHTTP": true,
-	"String":    true,
-	"Write":     true,
-	"Unwrap":    true,
-}
-
-// lintFuncDoc examines doc comments on functions and methods.
-// It complains if they are missing, or not of the right form.
-// It has specific exclusions for well-known methods (see commonMethods above).
-func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
-	if !ast.IsExported(fn.Name.Name) {
-		// func is unexported
-		return
-	}
-	kind := "function"
-	name := fn.Name.Name
-	if fn.Recv != nil && len(fn.Recv.List) > 0 {
-		// method
-		kind = "method"
-		recv := receiverType(fn)
-		if !ast.IsExported(recv) {
-			// receiver is unexported
-			return
-		}
-		if commonMethods[name] {
-			return
-		}
-		switch name {
-		case "Len", "Less", "Swap":
-			if f.pkg.sortable[recv] {
-				return
-			}
-		}
-		name = recv + "." + name
-	}
-	if fn.Doc == nil {
-		f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name)
-		return
-	}
-	s := fn.Doc.Text()
-	prefix := fn.Name.Name + " "
-	if !strings.HasPrefix(s, prefix) {
-		f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
-	}
-}
-
-// lintValueSpecDoc examines package-global variables and constants.
-// It complains if they are not individually declared,
-// or if they are not suitably documented in the right form (unless they are in a block that is commented).
-func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
-	kind := "var"
-	if gd.Tok == token.CONST {
-		kind = "const"
-	}
-
-	if len(vs.Names) > 1 {
-		// Check that none are exported except for the first.
-		for _, n := range vs.Names[1:] {
-			if ast.IsExported(n.Name) {
-				f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name)
-				return
-			}
-		}
-	}
-
-	// Only one name.
-	name := vs.Names[0].Name
-	if !ast.IsExported(name) {
-		return
-	}
-
-	if vs.Doc == nil && gd.Doc == nil {
-		if genDeclMissingComments[gd] {
-			return
-		}
-		block := ""
-		if kind == "const" && gd.Lparen.IsValid() {
-			block = " (or a comment on this block)"
-		}
-		f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block)
-		genDeclMissingComments[gd] = true
-		return
-	}
-	// If this GenDecl has parens and a comment, we don't check its comment form.
-	if gd.Lparen.IsValid() && gd.Doc != nil {
-		return
-	}
-	// The relevant text to check will be on either vs.Doc or gd.Doc.
-	// Use vs.Doc preferentially.
-	doc := vs.Doc
-	if doc == nil {
-		doc = gd.Doc
-	}
-	prefix := name + " "
-	if !strings.HasPrefix(doc.Text(), prefix) {
-		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
-	}
-}
-
-func (f *file) checkStutter(id *ast.Ident, thing string) {
-	pkg, name := f.f.Name.Name, id.Name
-	if !ast.IsExported(name) {
-		// unexported name
-		return
-	}
-	// A name stutters if the package name is a strict prefix
-	// and the next character of the name starts a new word.
-	if len(name) <= len(pkg) {
-		// name is too short to stutter.
-		// This permits the name to be the same as the package name.
-		return
-	}
-	if !strings.EqualFold(pkg, name[:len(pkg)]) {
-		return
-	}
-	// We can assume the name is well-formed UTF-8.
-	// If the next rune after the package name is uppercase or an underscore
-// then it's starting a new word, and thus this name stutters.
-	rem := name[len(pkg):]
-	if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) {
-		f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem)
-	}
-}
-
-// zeroLiteral is a set of ast.BasicLit values that are zero values.
-// It is not exhaustive.
-var zeroLiteral = map[string]bool{
-	"false": true, // bool
-	// runes
-	`'\x00'`: true,
-	`'\000'`: true,
-	// strings
-	`""`: true,
-	"``": true,
-	// numerics
-	"0":   true,
-	"0.":  true,
-	"0.0": true,
-	"0i":  true,
-}
-
-// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
-func (f *file) lintElses() {
-	// We don't want to flag if { } else if { } else { } constructions.
-	// They will appear as an IfStmt whose Else field is also an IfStmt.
-	// Record such a node so we ignore it when we visit it.
-	ignore := make(map[*ast.IfStmt]bool)
-
-	f.walk(func(node ast.Node) bool {
-		ifStmt, ok := node.(*ast.IfStmt)
-		if !ok || ifStmt.Else == nil {
-			return true
-		}
-		if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
-			ignore[elseif] = true
-			return true
-		}
-		if ignore[ifStmt] {
-			return true
-		}
-		if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
-			// only care about elses without conditions
-			return true
-		}
-		if len(ifStmt.Body.List) == 0 {
-			return true
-		}
-		shortDecl := false // does the if statement have a ":=" initialization statement?
-		if ifStmt.Init != nil {
-			if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE {
-				shortDecl = true
-			}
-		}
-		lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1]
-		if _, ok := lastStmt.(*ast.ReturnStmt); ok {
-			extra := ""
-			if shortDecl {
-				extra = " (move short variable declaration to its own line if necessary)"
-			}
-			f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra)
-		}
-		return true
-	})
-}
-
-// lintRanges examines range clauses. It complains about redundant constructions.
-func (f *file) lintRanges() {
-	f.walk(func(node ast.Node) bool {
-		rs, ok := node.(*ast.RangeStmt)
-		if !ok {
-			return true
-		}
-
-		if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) {
-			p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`")
-
-			newRS := *rs // shallow copy
-			newRS.Value = nil
-			newRS.Key = nil
-			p.ReplacementLine = f.firstLineOf(&newRS, rs)
-
-			return true
-		}
-
-		if isIdent(rs.Value, "_") {
-			p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
-
-			newRS := *rs // shallow copy
-			newRS.Value = nil
-			p.ReplacementLine = f.firstLineOf(&newRS, rs)
-		}
-
-		return true
-	})
-}
-
-// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation.
-func (f *file) lintErrorf() {
-	f.walk(func(node ast.Node) bool {
-		ce, ok := node.(*ast.CallExpr)
-		if !ok || len(ce.Args) != 1 {
-			return true
-		}
-		isErrorsNew := isPkgDot(ce.Fun, "errors", "New")
-		var isTestingError bool
-		se, ok := ce.Fun.(*ast.SelectorExpr)
-		if ok && se.Sel.Name == "Error" {
-			if typ := f.pkg.typeOf(se.X); typ != nil {
-				isTestingError = typ.String() == "*testing.T"
-			}
-		}
-		if !isErrorsNew && !isTestingError {
-			return true
-		}
-		if !f.imports("errors") {
-			return true
-		}
-		arg := ce.Args[0]
-		ce, ok = arg.(*ast.CallExpr)
-		if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
-			return true
-		}
-		errorfPrefix := "fmt"
-		if isTestingError {
-			errorfPrefix = f.render(se.X)
-		}
-		p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix)
-
-		m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`)
-		if m != nil {
-			p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3]
-		}
-
-		return true
-	})
-}
-
-// lintErrors examines global error vars. It complains if they aren't named in the standard way.
-func (f *file) lintErrors() {
-	for _, decl := range f.f.Decls {
-		gd, ok := decl.(*ast.GenDecl)
-		if !ok || gd.Tok != token.VAR {
-			continue
-		}
-		for _, spec := range gd.Specs {
-			spec := spec.(*ast.ValueSpec)
-			if len(spec.Names) != 1 || len(spec.Values) != 1 {
-				continue
-			}
-			ce, ok := spec.Values[0].(*ast.CallExpr)
-			if !ok {
-				continue
-			}
-			if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
-				continue
-			}
-
-			id := spec.Names[0]
-			prefix := "err"
-			if id.IsExported() {
-				prefix = "Err"
-			}
-			if !strings.HasPrefix(id.Name, prefix) {
-				f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix)
-			}
-		}
-	}
-}
-
-func lintErrorString(s string) (isClean bool, conf float64) {
-	const basicConfidence = 0.8
-	const capConfidence = basicConfidence - 0.2
-	first, firstN := utf8.DecodeRuneInString(s)
-	last, _ := utf8.DecodeLastRuneInString(s)
-	if last == '.' || last == ':' || last == '!' || last == '\n' {
-		return false, basicConfidence
-	}
-	if unicode.IsUpper(first) {
-		// People use proper nouns and exported Go identifiers in error strings,
-		// so decrease the confidence of warnings for capitalization.
-		if len(s) <= firstN {
-			return false, capConfidence
-		}
-		// Flag strings starting with something that doesn't look like an initialism.
-		if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) {
-			return false, capConfidence
-		}
-	}
-	return true, 0
-}
-
-// lintErrorStrings examines error strings.
-// It complains if they are capitalized or end in punctuation or a newline.
-func (f *file) lintErrorStrings() {
-	f.walk(func(node ast.Node) bool {
-		ce, ok := node.(*ast.CallExpr)
-		if !ok {
-			return true
-		}
-		if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
-			return true
-		}
-		if len(ce.Args) < 1 {
-			return true
-		}
-		str, ok := ce.Args[0].(*ast.BasicLit)
-		if !ok || str.Kind != token.STRING {
-			return true
-		}
-		s, _ := strconv.Unquote(str.Value) // can assume well-formed Go
-		if s == "" {
-			return true
-		}
-		clean, conf := lintErrorString(s)
-		if clean {
-			return true
-		}
-
-		f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"),
-			"error strings should not be capitalized or end with punctuation or a newline")
-		return true
-	})
-}
-
-// lintReceiverNames examines receiver names. It complains about inconsistent
-// names used for the same type and names such as "this".
-func (f *file) lintReceiverNames() {
-	typeReceiver := map[string]string{}
-	f.walk(func(n ast.Node) bool {
-		fn, ok := n.(*ast.FuncDecl)
-		if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
-			return true
-		}
-		names := fn.Recv.List[0].Names
-		if len(names) < 1 {
-			return true
-		}
-		name := names[0].Name
-		const ref = styleGuideBase + "#receiver-names"
-		if name == "_" {
-			f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`)
-			return true
-		}
-		if name == "this" || name == "self" {
-			f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
-			return true
-		}
-		recv := receiverType(fn)
-		if prev, ok := typeReceiver[recv]; ok && prev != name {
-			f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv)
-			return true
-		}
-		typeReceiver[recv] = name
-		return true
-	})
-}
-
-// lintIncDec examines statements that increment or decrement a variable.
-// It complains if they don't use x++ or x--.
-func (f *file) lintIncDec() {
-	f.walk(func(n ast.Node) bool {
-		as, ok := n.(*ast.AssignStmt)
-		if !ok {
-			return true
-		}
-		if len(as.Lhs) != 1 {
-			return true
-		}
-		if !isOne(as.Rhs[0]) {
-			return true
-		}
-		var suffix string
-		switch as.Tok {
-		case token.ADD_ASSIGN:
-			suffix = "++"
-		case token.SUB_ASSIGN:
-			suffix = "--"
-		default:
-			return true
-		}
-		f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix)
-		return true
-	})
-}
-
-// lintErrorReturn examines function declarations that return an error.
-// It complains if the error isn't the last parameter.
-func (f *file) lintErrorReturn() {
-	f.walk(func(n ast.Node) bool {
-		fn, ok := n.(*ast.FuncDecl)
-		if !ok || fn.Type.Results == nil {
-			return true
-		}
-		ret := fn.Type.Results.List
-		if len(ret) <= 1 {
-			return true
-		}
-		if isIdent(ret[len(ret)-1].Type, "error") {
-			return true
-		}
-		// An error return parameter should be the last parameter.
-		// Flag any error parameters found before the last.
-		for _, r := range ret[:len(ret)-1] {
-			if isIdent(r.Type, "error") {
-				f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items")
-				break // only flag one
-			}
-		}
-		return true
-	})
-}
-
-// lintUnexportedReturn examines exported function declarations.
-// It complains if any return an unexported type.
-func (f *file) lintUnexportedReturn() {
-	f.walk(func(n ast.Node) bool {
-		fn, ok := n.(*ast.FuncDecl)
-		if !ok {
-			return true
-		}
-		if fn.Type.Results == nil {
-			return false
-		}
-		if !fn.Name.IsExported() {
-			return false
-		}
-		thing := "func"
-		if fn.Recv != nil && len(fn.Recv.List) > 0 {
-			thing = "method"
-			if !ast.IsExported(receiverType(fn)) {
-				// Don't report exported methods of unexported types,
-				// such as private implementations of sort.Interface.
-				return false
-			}
-		}
-		for _, ret := range fn.Type.Results.List {
-			typ := f.pkg.typeOf(ret.Type)
-			if exportedType(typ) {
-				continue
-			}
-			f.errorf(ret.Type, 0.8, category("unexported-type-in-api"),
-				"exported %s %s returns unexported type %s, which can be annoying to use",
-				thing, fn.Name.Name, typ)
-			break // only flag one
-		}
-		return false
-	})
-}
-
-// exportedType reports whether typ is an exported type.
-// It is imprecise, and will err on the side of returning true,
-// such as for composite types.
-func exportedType(typ types.Type) bool {
-	switch T := typ.(type) {
-	case *types.Named:
-		// Builtin types have no package.
-		return T.Obj().Pkg() == nil || T.Obj().Exported()
-	case *types.Map:
-		return exportedType(T.Key()) && exportedType(T.Elem())
-	case interface {
-		Elem() types.Type
-	}: // array, slice, pointer, chan
-		return exportedType(T.Elem())
-	}
-	// Be conservative about other types, such as struct, interface, etc.
-	return true
-}
-
-// timeSuffixes is a list of name suffixes that imply a time unit.
-// This is not an exhaustive list.
-var timeSuffixes = []string{
-	"Sec", "Secs", "Seconds",
-	"Msec", "Msecs",
-	"Milli", "Millis", "Milliseconds",
-	"Usec", "Usecs", "Microseconds",
-	"MS", "Ms",
-}
-
-func (f *file) lintTimeNames() {
-	f.walk(func(node ast.Node) bool {
-		v, ok := node.(*ast.ValueSpec)
-		if !ok {
-			return true
-		}
-		for _, name := range v.Names {
-			origTyp := f.pkg.typeOf(name)
-			// Look for time.Duration or *time.Duration;
-			// the latter is common when using flag.Duration.
-			typ := origTyp
-			if pt, ok := typ.(*types.Pointer); ok {
-				typ = pt.Elem()
-			}
-			if !f.pkg.isNamedType(typ, "time", "Duration") {
-				continue
-			}
-			suffix := ""
-			for _, suf := range timeSuffixes {
-				if strings.HasSuffix(name.Name, suf) {
-					suffix = suf
-					break
-				}
-			}
-			if suffix == "" {
-				continue
-			}
-			f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix)
-		}
-		return true
-	})
-}
-
-// lintContextKeyTypes checks for call expressions to context.WithValue with
-// basic types used for the key argument.
-// See: https://golang.org/issue/17293
-func (f *file) lintContextKeyTypes() {
-	f.walk(func(node ast.Node) bool {
-		switch node := node.(type) {
-		case *ast.CallExpr:
-			f.checkContextKeyType(node)
-		}
-
-		return true
-	})
-}
-
-// checkContextKeyType reports an error if the call expression calls
-// context.WithValue with a key argument of basic type.
-func (f *file) checkContextKeyType(x *ast.CallExpr) {
-	sel, ok := x.Fun.(*ast.SelectorExpr)
-	if !ok {
-		return
-	}
-	pkg, ok := sel.X.(*ast.Ident)
-	if !ok || pkg.Name != "context" {
-		return
-	}
-	if sel.Sel.Name != "WithValue" {
-		return
-	}
-
-	// key is second argument to context.WithValue
-	if len(x.Args) != 3 {
-		return
-	}
-	key := f.pkg.typesInfo.Types[x.Args[1]]
-
-	if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid {
-		f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type))
-	}
-}
-
-// lintContextArgs examines function declarations that contain an
-// argument with a type of context.Context
-// It complains if that argument isn't the first parameter.
-func (f *file) lintContextArgs() {
-	f.walk(func(n ast.Node) bool {
-		fn, ok := n.(*ast.FuncDecl)
-		if !ok || len(fn.Type.Params.List) <= 1 {
-			return true
-		}
-		// A context.Context should be the first parameter of a function.
-		// Flag any that show up after the first.
-		for _, arg := range fn.Type.Params.List[1:] {
-			if isPkgDot(arg.Type, "context", "Context") {
-				f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function")
-				break // only flag one
-			}
-		}
-		return true
-	})
-}
-
-// containsComments returns whether the interval [start, end) contains any
-// comments without "// MATCH " prefix.
-func (f *file) containsComments(start, end token.Pos) bool {
-	for _, cgroup := range f.f.Comments {
-		comments := cgroup.List
-		if comments[0].Slash >= end {
-			// All comments starting with this group are after end pos.
-			return false
-		}
-		if comments[len(comments)-1].Slash < start {
-			// Comments group ends before start pos.
-			continue
-		}
-		for _, c := range comments {
-			if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// receiverType returns the named type of the method receiver, sans "*",
-// or "invalid-type" if fn.Recv is ill formed.
-func receiverType(fn *ast.FuncDecl) string {
-	switch e := fn.Recv.List[0].Type.(type) {
-	case *ast.Ident:
-		return e.Name
-	case *ast.StarExpr:
-		if id, ok := e.X.(*ast.Ident); ok {
-			return id.Name
-		}
-	}
-	// The parser accepts much more than just the legal forms.
-	return "invalid-type"
-}
-
-func (f *file) walk(fn func(ast.Node) bool) {
-	ast.Walk(walker(fn), f.f)
-}
-
-func (f *file) render(x interface{}) string {
-	var buf bytes.Buffer
-	if err := printer.Fprint(&buf, f.fset, x); err != nil {
-		panic(err)
-	}
-	return buf.String()
-}
-
-func (f *file) debugRender(x interface{}) string {
-	var buf bytes.Buffer
-	if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
-		panic(err)
-	}
-	return buf.String()
-}
-
-// walker adapts a function to satisfy the ast.Visitor interface.
-// The function returns whether the walk should proceed into the node's children.
-type walker func(ast.Node) bool
-
-func (w walker) Visit(node ast.Node) ast.Visitor {
-	if w(node) {
-		return w
-	}
-	return nil
-}
-
-func isIdent(expr ast.Expr, ident string) bool {
-	id, ok := expr.(*ast.Ident)
-	return ok && id.Name == ident
-}
-
-// isBlank returns whether id is the blank identifier "_".
-// If id == nil, the answer is false.
-func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
-
-func isPkgDot(expr ast.Expr, pkg, name string) bool {
-	sel, ok := expr.(*ast.SelectorExpr)
-	return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
-}
-
-func isOne(expr ast.Expr) bool {
-	lit, ok := expr.(*ast.BasicLit)
-	return ok && lit.Kind == token.INT && lit.Value == "1"
-}
-
-func isCgoExported(f *ast.FuncDecl) bool {
-	if f.Recv != nil || f.Doc == nil {
-		return false
-	}
-
-	cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
-	for _, c := range f.Doc.List {
-		if cgoExport.MatchString(c.Text) {
-			return true
-		}
-	}
-	return false
-}
-
-var basicTypeKinds = map[types.BasicKind]string{
-	types.UntypedBool:    "bool",
-	types.UntypedInt:     "int",
-	types.UntypedRune:    "rune",
-	types.UntypedFloat:   "float64",
-	types.UntypedComplex: "complex128",
-	types.UntypedString:  "string",
-}
-
-// isUntypedConst reports whether expr is an untyped constant,
-// and indicates what its default type is.
-func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) {
-	// Re-evaluate expr outside of its context to see if it's untyped.
-	// (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
-	exprStr := f.render(expr)
-	tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr)
-	if err != nil {
-		return "", false
-	}
-	if b, ok := tv.Type.(*types.Basic); ok {
-		if dt, ok := basicTypeKinds[b.Kind()]; ok {
-			return dt, true
-		}
-	}
-
-	return "", false
-}
-
-// firstLineOf renders the given node and returns its first line.
-// It will also match the indentation of another node.
-func (f *file) firstLineOf(node, match ast.Node) string {
-	line := f.render(node)
-	if i := strings.Index(line, "\n"); i >= 0 {
-		line = line[:i]
-	}
-	return f.indentOf(match) + line
-}
-
-func (f *file) indentOf(node ast.Node) string {
-	line := srcLine(f.src, f.fset.Position(node.Pos()))
-	for i, r := range line {
-		switch r {
-		case ' ', '\t':
-		default:
-			return line[:i]
-		}
-	}
-	return line // unusual or empty line
-}
-
-func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) {
-	line := srcLine(f.src, f.fset.Position(node.Pos()))
-	line = strings.TrimSuffix(line, "\n")
-	rx := regexp.MustCompile(pattern)
-	return rx.FindStringSubmatch(line)
-}
-
-// imports returns true if the current file imports the specified package path.
-func (f *file) imports(importPath string) bool {
-	all := astutil.Imports(f.fset, f.f)
-	for _, p := range all {
-		for _, i := range p {
-			uq, err := strconv.Unquote(i.Path.Value)
-			if err == nil && importPath == uq {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// srcLine returns the complete line at p, including the terminating newline.
-func srcLine(src []byte, p token.Position) string {
-	// Run to end of line in both directions if not at line start/end.
-	lo, hi := p.Offset, p.Offset+1
-	for lo > 0 && src[lo-1] != '\n' {
-		lo--
-	}
-	for hi < len(src) && src[hi-1] != '\n' {
-		hi++
-	}
-	return string(src[lo:hi])
-}

+ 78 - 0
vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go

@@ -0,0 +1,78 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+	"os"
+	"regexp"
+	"strings"
+	"sync"
+)
+
+// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+	str  string
+	once sync.Once
+	rx   *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+	r.once.Do(r.build)
+	return r.rx
+}
+
+func (r *Regexp) build() {
+	r.rx = regexp.MustCompile(r.str)
+	r.str = ""
+}
+
+func (r *Regexp) FindSubmatch(s []byte) [][]byte {
+	return r.re().FindSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatch(s string) []string {
+	return r.re().FindStringSubmatch(s)
+}
+
+func (r *Regexp) FindStringSubmatchIndex(s string) []int {
+	return r.re().FindStringSubmatchIndex(s)
+}
+
+func (r *Regexp) ReplaceAllString(src, repl string) string {
+	return r.re().ReplaceAllString(src, repl)
+}
+
+func (r *Regexp) FindString(s string) string {
+	return r.re().FindString(s)
+}
+
+func (r *Regexp) FindAllString(s string, n int) []string {
+	return r.re().FindAllString(s, n)
+}
+
+func (r *Regexp) MatchString(s string) bool {
+	return r.re().MatchString(s)
+}
+
+func (r *Regexp) SubexpNames() []string {
+	return r.re().SubexpNames()
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+	lr := &Regexp{str: str}
+	if inTest {
+		// In tests, always compile the regexps early.
+		lr.re()
+	}
+	return lr
+}
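The lazy-compilation pattern above can be exercised as in the following sketch. It is illustrative only: lazyregexp is an internal package of golang.org/x/mod, so code outside that module cannot import it and would have to copy the pattern instead.

	package main

	import (
		"fmt"

		"golang.org/x/mod/internal/lazyregexp" // internal: importable only from within x/mod itself
	)

	// The package-level variable costs nothing at init time;
	// regexp.MustCompile runs on first use.
	var semverish = lazyregexp.New(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

	func main() {
		fmt.Println(semverish.MatchString("v1.2.3")) // true; compiles the regexp here
		fmt.Println(semverish.MatchString("1.2.3"))  // false; reuses the compiled regexp
	}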

+ 41 - 22
vendor/golang.org/x/mod/module/module.go

@@ -192,6 +192,21 @@ func (e *InvalidVersionError) Error() string {
 
 func (e *InvalidVersionError) Unwrap() error { return e.Err }
 
+// An InvalidPathError indicates a module, import, or file path doesn't
+// satisfy all naming constraints. See CheckPath, CheckImportPath,
+// and CheckFilePath for specific restrictions.
+type InvalidPathError struct {
+	Kind string // "module", "import", or "file"
+	Path string
+	Err  error
+}
+
+func (e *InvalidPathError) Error() string {
+	return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err)
+}
+
+func (e *InvalidPathError) Unwrap() error { return e.Err }
+
 // Check checks that a given module path, version pair is valid.
 // In addition to the path being a valid module path
 // and the version being a valid semantic version,
@@ -271,12 +286,7 @@ func fileNameOK(r rune) bool {
 		if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
 		if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
 			return true
 			return true
 		}
 		}
-		for i := 0; i < len(allowed); i++ {
-			if rune(allowed[i]) == r {
-				return true
-			}
-		}
-		return false
+		return strings.ContainsRune(allowed, r)
 	}
 	}
 	// It may be OK to add more ASCII punctuation here, but only carefully.
 	// It may be OK to add more ASCII punctuation here, but only carefully.
 	// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
 	// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
@@ -296,30 +306,36 @@ func fileNameOK(r rune) bool {
 // this second requirement is replaced by a requirement that the path
 // follow the gopkg.in server's conventions.
 // Third, no path element may begin with a dot.
-func CheckPath(path string) error {
+func CheckPath(path string) (err error) {
+	defer func() {
+		if err != nil {
+			err = &InvalidPathError{Kind: "module", Path: path, Err: err}
+		}
+	}()
+
 	if err := checkPath(path, modulePath); err != nil {
-		return fmt.Errorf("malformed module path %q: %v", path, err)
+		return err
 	}
 	i := strings.Index(path, "/")
 	if i < 0 {
 		i = len(path)
 	}
 	if i == 0 {
-		return fmt.Errorf("malformed module path %q: leading slash", path)
+		return fmt.Errorf("leading slash")
 	}
 	if !strings.Contains(path[:i], ".") {
-		return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
+		return fmt.Errorf("missing dot in first path element")
 	}
 	if path[0] == '-' {
-		return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
+		return fmt.Errorf("leading dash in first path element")
 	}
 	for _, r := range path[:i] {
 		if !firstPathOK(r) {
-			return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
+			return fmt.Errorf("invalid char %q in first path element", r)
 		}
 	}
 	if _, _, ok := SplitPathVersion(path); !ok {
-		return fmt.Errorf("malformed module path %q: invalid version", path)
+		return fmt.Errorf("invalid version")
 	}
 	return nil
 }
@@ -343,7 +359,7 @@ func CheckPath(path string) error {
 // subtleties of Unicode.
 func CheckImportPath(path string) error {
 	if err := checkPath(path, importPath); err != nil {
-		return fmt.Errorf("malformed import path %q: %v", path, err)
+		return &InvalidPathError{Kind: "import", Path: path, Err: err}
 	}
 	return nil
 }
@@ -358,12 +374,13 @@ const (
 	filePath
 )
 
-// checkPath checks that a general path is valid.
-// It returns an error describing why but not mentioning path.
-// Because these checks apply to both module paths and import paths,
-// the caller is expected to add the "malformed ___ path %q: " prefix.
-// fileName indicates whether the final element of the path is a file name
-// (as opposed to a directory name).
+// checkPath checks that a general path is valid. kind indicates what
+// specific constraints should be applied.
+//
+// checkPath returns an error describing why the path is not valid.
+// Because these checks apply to module, import, and file paths,
+// and because other checks may be applied, the caller is expected to wrap
+// this error with InvalidPathError.
 func checkPath(path string, kind pathKind) error {
 	if !utf8.ValidString(path) {
 		return fmt.Errorf("invalid UTF-8")
@@ -371,7 +388,7 @@ func checkPath(path string, kind pathKind) error {
 	if path == "" {
 	if path == "" {
 		return fmt.Errorf("empty string")
 		return fmt.Errorf("empty string")
 	}
 	}
-	if path[0] == '-' {
+	if path[0] == '-' && kind != filePath {
 		return fmt.Errorf("leading dash")
 		return fmt.Errorf("leading dash")
 	}
 	}
 	if strings.Contains(path, "//") {
 	if strings.Contains(path, "//") {
@@ -477,7 +494,7 @@ func checkElem(elem string, kind pathKind) error {
 // subtleties of Unicode.
 func CheckFilePath(path string) error {
 	if err := checkPath(path, filePath); err != nil {
-		return fmt.Errorf("malformed file path %q: %v", path, err)
+		return &InvalidPathError{Kind: "file", Path: path, Err: err}
 	}
 	return nil
 }
@@ -781,6 +798,7 @@ func unescapeString(escaped string) (string, bool) {
 // GOPRIVATE environment variable, as described by 'go help module-private'.
 //
 // It ignores any empty or malformed patterns in the list.
+// Trailing slashes on patterns are ignored.
 func MatchPrefixPatterns(globs, target string) bool {
 	for globs != "" {
 		// Extract next non-empty glob in comma-separated list.
@@ -790,6 +808,7 @@ func MatchPrefixPatterns(globs, target string) bool {
 		} else {
 			glob, globs = globs, ""
 		}
+		glob = strings.TrimSuffix(glob, "/")
 		if glob == "" {
 			continue
 		}
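With the new InvalidPathError, callers can distinguish malformed-path failures programmatically instead of matching error strings. A minimal sketch (the sample path is arbitrary; uppercase letters are not allowed in module paths, so CheckPath rejects it):

	package main

	import (
		"errors"
		"fmt"

		"golang.org/x/mod/module"
	)

	func main() {
		err := module.CheckPath("example.com/Foo") // 'F' is invalid in a module path
		var pathErr *module.InvalidPathError
		if errors.As(err, &pathErr) {
			// Kind is "module"; Err holds the underlying cause.
			fmt.Printf("kind=%s path=%s cause=%v\n", pathErr.Kind, pathErr.Path, pathErr.Err)
		}
	}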

+ 250 - 0
vendor/golang.org/x/mod/module/pseudo.go

@@ -0,0 +1,250 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Pseudo-versions
+//
+// Code authors are expected to tag the revisions they want users to use,
+// including prereleases. However, not all authors tag versions at all,
+// and not all commits a user might want to try will have tags.
+// A pseudo-version is a version with a special form that allows us to
+// address an untagged commit and order that version with respect to
+// other versions we might encounter.
+//
+// A pseudo-version takes one of the general forms:
+//
+//	(1) vX.0.0-yyyymmddhhmmss-abcdef123456
+//	(2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456
+//	(3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible
+//	(4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456
+//	(5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible
+//
+// If there is no recently tagged version with the right major version vX,
+// then form (1) is used, creating a space of pseudo-versions at the bottom
+// of the vX version range, less than any tagged version, including the unlikely v0.0.0.
+//
+// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible,
+// then the pseudo-version uses form (2) or (3), making it a prerelease for the next
+// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string
+// ensures that the pseudo-version compares less than possible future explicit prereleases
+// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1.
+//
+// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible,
+// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease.
+
+package module
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/internal/lazyregexp"
+	"golang.org/x/mod/semver"
+)
+
+var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`)
+
+const PseudoVersionTimestampFormat = "20060102150405"
+
+// PseudoVersion returns a pseudo-version for the given major version ("v1"),
+// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time,
+// and revision identifier (usually a 12-byte commit hash prefix).
+func PseudoVersion(major, older string, t time.Time, rev string) string {
+	if major == "" {
+		major = "v0"
+	}
+	segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev)
+	build := semver.Build(older)
+	older = semver.Canonical(older)
+	if older == "" {
+		return major + ".0.0-" + segment // form (1)
+	}
+	if semver.Prerelease(older) != "" {
+		return older + ".0." + segment + build // form (4), (5)
+	}
+
+	// Form (2), (3).
+	// Extract patch from vMAJOR.MINOR.PATCH
+	i := strings.LastIndex(older, ".") + 1
+	v, patch := older[:i], older[i:]
+
+	// Reassemble.
+	return v + incDecimal(patch) + "-0." + segment + build
+}
+
+// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and
+// revision, which may be used as a placeholder.
+func ZeroPseudoVersion(major string) string {
+	return PseudoVersion(major, "", time.Time{}, "000000000000")
+}
+
+// incDecimal returns the decimal string incremented by 1.
+func incDecimal(decimal string) string {
+	// Scan right to left turning 9s to 0s until you find a digit to increment.
+	digits := []byte(decimal)
+	i := len(digits) - 1
+	for ; i >= 0 && digits[i] == '9'; i-- {
+		digits[i] = '0'
+	}
+	if i >= 0 {
+		digits[i]++
+	} else {
+		// digits is all zeros
+		digits[0] = '1'
+		digits = append(digits, '0')
+	}
+	return string(digits)
+}
+
+// decDecimal returns the decimal string decremented by 1, or the empty string
+// if the decimal is all zeroes.
+func decDecimal(decimal string) string {
+	// Scan right to left turning 0s to 9s until you find a digit to decrement.
+	digits := []byte(decimal)
+	i := len(digits) - 1
+	for ; i >= 0 && digits[i] == '0'; i-- {
+		digits[i] = '9'
+	}
+	if i < 0 {
+		// decimal is all zeros
+		return ""
+	}
+	if i == 0 && digits[i] == '1' && len(digits) > 1 {
+		digits = digits[1:]
+	} else {
+		digits[i]--
+	}
+	return string(digits)
+}
+
+// IsPseudoVersion reports whether v is a pseudo-version.
+func IsPseudoVersion(v string) bool {
+	return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v)
+}
+
+// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base,
+// timestamp, and revision, as returned by ZeroPseudoVersion.
+func IsZeroPseudoVersion(v string) bool {
+	return v == ZeroPseudoVersion(semver.Major(v))
+}
+
+// PseudoVersionTime returns the time stamp of the pseudo-version v.
+// It returns an error if v is not a pseudo-version or if the time stamp
+// embedded in the pseudo-version is not a valid time.
+func PseudoVersionTime(v string) (time.Time, error) {
+	_, timestamp, _, _, err := parsePseudoVersion(v)
+	if err != nil {
+		return time.Time{}, err
+	}
+	t, err := time.Parse("20060102150405", timestamp)
+	if err != nil {
+		return time.Time{}, &InvalidVersionError{
+			Version: v,
+			Pseudo:  true,
+			Err:     fmt.Errorf("malformed time %q", timestamp),
+		}
+	}
+	return t, nil
+}
+
+// PseudoVersionRev returns the revision identifier of the pseudo-version v.
+// It returns an error if v is not a pseudo-version.
+func PseudoVersionRev(v string) (rev string, err error) {
+	_, _, rev, _, err = parsePseudoVersion(v)
+	return
+}
+
+// PseudoVersionBase returns the canonical parent version, if any, upon which
+// the pseudo-version v is based.
+//
+// If v has no parent version (that is, if it is "vX.0.0-[…]"),
+// PseudoVersionBase returns the empty string and a nil error.
+func PseudoVersionBase(v string) (string, error) {
+	base, _, _, build, err := parsePseudoVersion(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch pre := semver.Prerelease(base); pre {
+	case "":
+		// vX.0.0-yyyymmddhhmmss-abcdef123456 → ""
+		if build != "" {
+			// Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible
+			// are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag,
+			// but the "+incompatible" suffix implies that the major version of
+			// the parent tag is not compatible with the module's import path.
+			//
+			// There are a few such entries in the index generated by proxy.golang.org,
+			// but we believe those entries were generated by the proxy itself.
+			return "", &InvalidVersionError{
+				Version: v,
+				Pseudo:  true,
+				Err:     fmt.Errorf("lacks base version, but has build metadata %q", build),
+			}
+		}
+		return "", nil
+
+	case "-0":
+		// vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z
+		// vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible
+		base = strings.TrimSuffix(base, pre)
+		i := strings.LastIndexByte(base, '.')
+		if i < 0 {
+			panic("base from parsePseudoVersion missing patch number: " + base)
+		}
+		patch := decDecimal(base[i+1:])
+		if patch == "" {
+			// vX.0.0-0 is invalid, but has been observed in the wild in the index
+			// generated by requests to proxy.golang.org.
+			//
+			// NOTE(bcmills): I cannot find a historical bug that accounts for
+			// pseudo-versions of this form, nor have I seen such versions in any
+			// actual go.mod files. If we find actual examples of this form and a
+			// reasonable theory of how they came into existence, it seems fine to
+			// treat them as equivalent to vX.0.0 (especially since the invalid
+			// pseudo-versions have lower precedence than the real ones). For now, we
+			// reject them.
+			return "", &InvalidVersionError{
+				Version: v,
+				Pseudo:  true,
+				Err:     fmt.Errorf("version before %s would have negative patch number", base),
+			}
+		}
+		return base[:i+1] + patch + build, nil
+
+	default:
+		// vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre
+		// vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible
+		if !strings.HasSuffix(base, ".0") {
+			panic(`base from parsePseudoVersion missing ".0" before date: ` + base)
+		}
+		return strings.TrimSuffix(base, ".0") + build, nil
+	}
+}
+
+var errPseudoSyntax = errors.New("syntax error")
+
+func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) {
+	if !IsPseudoVersion(v) {
+		return "", "", "", "", &InvalidVersionError{
+			Version: v,
+			Pseudo:  true,
+			Err:     errPseudoSyntax,
+		}
+	}
+	build = semver.Build(v)
+	v = strings.TrimSuffix(v, build)
+	j := strings.LastIndex(v, "-")
+	v, rev = v[:j], v[j+1:]
+	i := strings.LastIndex(v, "-")
+	if j := strings.LastIndex(v, "."); j > i {
+		base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0"
+		timestamp = v[j+1:]
+	} else {
+		base = v[:i] // "vX.0.0"
+		timestamp = v[i+1:]
+	}
+	return base, timestamp, rev, build, nil
+}
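A short sketch of the two most common pseudo-version forms, using a fixed timestamp (the revision hash and date below are made up):

	package main

	import (
		"fmt"
		"time"

		"golang.org/x/mod/module"
	)

	func main() {
		t := time.Date(2021, 10, 1, 12, 0, 0, 0, time.UTC)

		// Form (1): no tagged version exists for the major version.
		fmt.Println(module.PseudoVersion("v1", "", t, "abcdef123456"))
		// v1.0.0-20211001120000-abcdef123456

		// Form (2): the most recent tag was v1.2.3, so the result is a
		// prerelease of the next patch version, v1.2.4.
		fmt.Println(module.PseudoVersion("v1", "v1.2.3", t, "abcdef123456"))
		// v1.2.4-0.20211001120000-abcdef123456
	}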

+ 20 - 10
vendor/golang.org/x/mod/semver/semver.go

@@ -22,6 +22,8 @@
 // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
 package semver
 
+import "sort"
+
 // parsed returns the parsed form of a semantic version string.
 type parsed struct {
 	major      string
@@ -30,7 +32,6 @@ type parsed struct {
 	short      string
 	prerelease string
 	build      string
-	err        string
 }
 
 // IsValid reports whether v is a valid semantic version string.
@@ -150,14 +151,30 @@ func Max(v, w string) string {
 	return w
 }
 
+// ByVersion implements sort.Interface for sorting semantic version strings.
+type ByVersion []string
+
+func (vs ByVersion) Len() int      { return len(vs) }
+func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
+func (vs ByVersion) Less(i, j int) bool {
+	cmp := Compare(vs[i], vs[j])
+	if cmp != 0 {
+		return cmp < 0
+	}
+	return vs[i] < vs[j]
+}
+
+// Sort sorts a list of semantic version strings using ByVersion.
+func Sort(list []string) {
+	sort.Sort(ByVersion(list))
+}
+
 func parse(v string) (p parsed, ok bool) {
 	if v == "" || v[0] != 'v' {
-		p.err = "missing v prefix"
 		return
 	}
 	p.major, v, ok = parseInt(v[1:])
 	if !ok {
-		p.err = "bad major version"
 		return
 	}
 	if v == "" {
@@ -167,13 +184,11 @@ func parse(v string) (p parsed, ok bool) {
 		return
 	}
 	if v[0] != '.' {
-		p.err = "bad minor prefix"
 		ok = false
 		return
 	}
 	p.minor, v, ok = parseInt(v[1:])
 	if !ok {
-		p.err = "bad minor version"
 		return
 	}
 	if v == "" {
@@ -182,31 +197,26 @@ func parse(v string) (p parsed, ok bool) {
 		return
 	}
 	if v[0] != '.' {
-		p.err = "bad patch prefix"
 		ok = false
 		return
 	}
 	p.patch, v, ok = parseInt(v[1:])
 	if !ok {
-		p.err = "bad patch version"
 		return
 	}
 	if len(v) > 0 && v[0] == '-' {
 		p.prerelease, v, ok = parsePrerelease(v)
 		if !ok {
-			p.err = "bad prerelease"
 			return
 		}
 	}
 	if len(v) > 0 && v[0] == '+' {
 		p.build, v, ok = parseBuild(v)
 		if !ok {
-			p.err = "bad build"
 			return
 		}
 	}
 	if v != "" {
-		p.err = "junk on end"
 		ok = false
 		return
 	}
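The new ByVersion ordering sorts by semantic-version precedence, with invalid strings first and a lexical tie-break. For example:

	package main

	import (
		"fmt"

		"golang.org/x/mod/semver"
	)

	func main() {
		list := []string{"v1.10.0", "v1.2.0", "v1.2.0-rc.1", "not-semver"}
		semver.Sort(list)
		fmt.Println(list)
		// [not-semver v1.2.0-rc.1 v1.2.0 v1.10.0]
	}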

+ 242 - 0
vendor/golang.org/x/tools/go/analysis/analysis.go

@@ -0,0 +1,242 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+import (
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"reflect"
+
+	"golang.org/x/tools/internal/analysisinternal"
+)
+
+// An Analyzer describes an analysis function and its options.
+type Analyzer struct {
+	// The Name of the analyzer must be a valid Go identifier
+	// as it may appear in command-line flags, URLs, and so on.
+	Name string
+
+	// Doc is the documentation for the analyzer.
+	// The part before the first "\n\n" is the title
+	// (no capital or period, max ~60 letters).
+	Doc string
+
+	// Flags defines any flags accepted by the analyzer.
+	// The manner in which these flags are exposed to the user
+	// depends on the driver which runs the analyzer.
+	Flags flag.FlagSet
+
+	// Run applies the analyzer to a package.
+	// It returns an error if the analyzer failed.
+	//
+	// On success, the Run function may return a result
+	// computed by the Analyzer; its type must match ResultType.
+	// The driver makes this result available as an input to
+	// another Analyzer that depends directly on this one (see
+	// Requires) when it analyzes the same package.
+	//
+	// To pass analysis results between packages (and thus
+	// potentially between address spaces), use Facts, which are
+	// serializable.
+	Run func(*Pass) (interface{}, error)
+
+	// RunDespiteErrors allows the driver to invoke
+	// the Run method of this analyzer even on a
+	// package that contains parse or type errors.
+	RunDespiteErrors bool
+
+	// Requires is a set of analyzers that must run successfully
+	// before this one on a given package. This analyzer may inspect
+	// the outputs produced by each analyzer in Requires.
+	// The graph over analyzers implied by Requires edges must be acyclic.
+	//
+	// Requires establishes a "horizontal" dependency between
+	// analysis passes (different analyzers, same package).
+	Requires []*Analyzer
+
+	// ResultType is the type of the optional result of the Run function.
+	ResultType reflect.Type
+
+	// FactTypes indicates that this analyzer imports and exports
+	// Facts of the specified concrete types.
+	// An analyzer that uses facts may assume that its import
+	// dependencies have been similarly analyzed before it runs.
+	// Facts must be pointers.
+	//
+	// FactTypes establishes a "vertical" dependency between
+	// analysis passes (same analyzer, different packages).
+	FactTypes []Fact
+}
+
+func (a *Analyzer) String() string { return a.Name }
+
+func init() {
+	// Set the analysisinternal functions to be able to pass type errors
+	// to the Pass type without modifying the go/analysis API.
+	analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
+		p.(*Pass).typeErrors = errors
+	}
+	analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
+		return p.(*Pass).typeErrors
+	}
+}
+
+// A Pass provides information to the Run function that
+// applies a specific analyzer to a single Go package.
+//
+// It forms the interface between the analysis logic and the driver
+// program, and has both input and output components.
+//
+// As in a compiler, one pass may depend on the result computed by another.
+//
+// The Run function should not call any of the Pass functions concurrently.
+type Pass struct {
+	Analyzer *Analyzer // the identity of the current analyzer
+
+	// syntax and type information
+	Fset         *token.FileSet // file position information
+	Files        []*ast.File    // the abstract syntax tree of each file
+	OtherFiles   []string       // names of non-Go files of this package
+	IgnoredFiles []string       // names of ignored source files in this package
+	Pkg          *types.Package // type information about the package
+	TypesInfo    *types.Info    // type information about the syntax trees
+	TypesSizes   types.Sizes    // function for computing sizes of types
+
+	// Report reports a Diagnostic, a finding about a specific location
+	// in the analyzed source code such as a potential mistake.
+	// It may be called by the Run function.
+	Report func(Diagnostic)
+
+	// ResultOf provides the inputs to this analysis pass, which are
+	// the corresponding results of its prerequisite analyzers.
+	// The map keys are the elements of Analyzer.Requires,
+	// and the type of each corresponding value is the required
+	// analysis's ResultType.
+	ResultOf map[*Analyzer]interface{}
+
+	// -- facts --
+
+	// ImportObjectFact retrieves a fact associated with obj.
+	// Given a value ptr of type *T, where *T satisfies Fact,
+	// ImportObjectFact copies the value to *ptr.
+	//
+	// ImportObjectFact panics if called after the pass is complete.
+	// ImportObjectFact is not concurrency-safe.
+	ImportObjectFact func(obj types.Object, fact Fact) bool
+
+	// ImportPackageFact retrieves a fact associated with package pkg,
+	// which must be this package or one of its dependencies.
+	// See comments for ImportObjectFact.
+	ImportPackageFact func(pkg *types.Package, fact Fact) bool
+
+	// ExportObjectFact associates a fact of type *T with the obj,
+	// replacing any previous fact of that type.
+	//
+	// ExportObjectFact panics if it is called after the pass is
+	// complete, or if obj does not belong to the package being analyzed.
+	// ExportObjectFact is not concurrency-safe.
+	ExportObjectFact func(obj types.Object, fact Fact)
+
+	// ExportPackageFact associates a fact with the current package.
+	// See comments for ExportObjectFact.
+	ExportPackageFact func(fact Fact)
+
+	// AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
+	// in unspecified order.
+	// WARNING: This is an experimental API and may change in the future.
+	AllPackageFacts func() []PackageFact
+
+	// AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
+	// in unspecified order.
+	// WARNING: This is an experimental API and may change in the future.
+	AllObjectFacts func() []ObjectFact
+
+	// typeErrors contains types.Errors that are associated with the pkg.
+	typeErrors []types.Error
+
+	/* Further fields may be added in future. */
+	// For example, suggested or applied refactorings.
+}
+
+// PackageFact is a package together with an associated fact.
+// WARNING: This is an experimental API and may change in the future.
+type PackageFact struct {
+	Package *types.Package
+	Fact    Fact
+}
+
+// ObjectFact is an object together with an associated fact.
+// WARNING: This is an experimental API and may change in the future.
+type ObjectFact struct {
+	Object types.Object
+	Fact   Fact
+}
+
+// Reportf is a helper function that reports a Diagnostic using the
+// specified position and formatted error message.
+func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	pass.Report(Diagnostic{Pos: pos, Message: msg})
+}
+
+// The Range interface provides a range. It's equivalent to and satisfied by
+// ast.Node.
+type Range interface {
+	Pos() token.Pos // position of first character belonging to the node
+	End() token.Pos // position of first character immediately after the node
+}
+
+// ReportRangef is a helper function that reports a Diagnostic using the
+// range provided. ast.Node values can be passed in as the range because
+// they satisfy the Range interface.
+func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg})
+}
+
+func (pass *Pass) String() string {
+	return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path())
+}
+
+// A Fact is an intermediate fact produced during analysis.
+//
+// Each fact is associated with a named declaration (a types.Object) or
+// with a package as a whole. A single object or package may have
+// multiple associated facts, but only one of any particular fact type.
+//
+// A Fact represents a predicate such as "never returns", but does not
+// represent the subject of the predicate such as "function F" or "package P".
+//
+// Facts may be produced in one analysis pass and consumed by another
+// analysis pass even if these are in different address spaces.
+// If package P imports Q, all facts about Q produced during
+// analysis of that package will be available during later analysis of P.
+// Facts are analogous to type export data in a build system:
+// just as export data enables separate compilation of several passes,
+// facts enable "separate analysis".
+//
+// Each pass (a, p) starts with the set of facts produced by the
+// same analyzer a applied to the packages directly imported by p.
+// The analysis may add facts to the set, and they may be exported in turn.
+// An analysis's Run function may retrieve facts by calling
+// Pass.Import{Object,Package}Fact and update them using
+// Pass.Export{Object,Package}Fact.
+//
+// A fact is logically private to its Analysis. To pass values
+// between different analyzers, use the results mechanism;
+// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf.
+//
+// A Fact type must be a pointer.
+// Facts are encoded and decoded using encoding/gob.
+// A Fact may implement the GobEncoder/GobDecoder interfaces
+// to customize its encoding. Fact encoding should not fail.
+//
+// A Fact should not be modified once exported.
+type Fact interface {
+	AFact() // dummy method to avoid type errors
+}
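For orientation, a complete (if useless) analyzer is tiny. The sketch below only demonstrates the Analyzer/Pass wiring; the name, doc string, and check are invented for the example:

	package pkgclause

	import "golang.org/x/tools/go/analysis"

	// Analyzer flags every package clause. It exists purely to show the
	// minimal wiring: a driver calls run once per package with a Pass.
	var Analyzer = &analysis.Analyzer{
		Name: "pkgclause",
		Doc:  "report the position of each package clause (demo only)",
		Run:  run,
	}

	func run(pass *analysis.Pass) (interface{}, error) {
		for _, f := range pass.Files {
			pass.Reportf(f.Package, "package %s declared here", f.Name.Name)
		}
		return nil, nil
	}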

+ 65 - 0
vendor/golang.org/x/tools/go/analysis/diagnostic.go

@@ -0,0 +1,65 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+import "go/token"
+
+// A Diagnostic is a message associated with a source location or range.
+//
+// An Analyzer may return a variety of diagnostics; the optional Category,
+// which should be a constant, may be used to classify them.
+// It is primarily intended to make it easy to look up documentation.
+//
+// If End is provided, the diagnostic is specified to apply to the range between
+// Pos and End.
+type Diagnostic struct {
+	Pos      token.Pos
+	End      token.Pos // optional
+	Category string    // optional
+	Message  string
+
+	// SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
+	// edits to a file that address the diagnostic.
+	// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
+	// Diagnostics should not contain SuggestedFixes that overlap.
+	// Experimental: This API is experimental and may change in the future.
+	SuggestedFixes []SuggestedFix // optional
+
+	// Experimental: This API is experimental and may change in the future.
+	Related []RelatedInformation // optional
+}
+
+// RelatedInformation contains information related to a diagnostic.
+// For example, a diagnostic that flags duplicated declarations of a
+// variable may include one RelatedInformation per existing
+// declaration.
+type RelatedInformation struct {
+	Pos     token.Pos
+	End     token.Pos
+	Message string
+}
+
+// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
+// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
+// by the diagnostic.
+// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
+// should not contain edits for other packages.
+// Experimental: This API is experimental and may change in the future.
+type SuggestedFix struct {
+	// A description for this suggested fix to be shown to a user deciding
+	// whether to accept it.
+	Message   string
+	TextEdits []TextEdit
+}
+
+// A TextEdit represents the replacement of the code between Pos and End with the new text.
+// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
+// Experimental: This API is experimental and may change in the future.
+type TextEdit struct {
+	// For a pure insertion, End can either be set to Pos or token.NoPos.
+	Pos     token.Pos
+	End     token.Pos
+	NewText []byte
+}
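As a sketch of how these pieces fit together, a checker might attach a machine-applicable fix to a report as below. The function and message are invented for the example; pass and n would come from the surrounding analyzer:

	package demo

	import (
		"go/ast"

		"golang.org/x/tools/go/analysis"
	)

	// reportAlwaysTrue reports n with a SuggestedFix that replaces the
	// flagged expression by the literal "true".
	func reportAlwaysTrue(pass *analysis.Pass, n ast.Node) {
		pass.Report(analysis.Diagnostic{
			Pos:     n.Pos(),
			End:     n.End(),
			Message: "condition is always true",
			SuggestedFixes: []analysis.SuggestedFix{{
				Message: "replace the condition with true",
				TextEdits: []analysis.TextEdit{{
					Pos:     n.Pos(),
					End:     n.End(),
					NewText: []byte("true"),
				}},
			}},
		})
	}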

+ 321 - 0
vendor/golang.org/x/tools/go/analysis/doc.go

@@ -0,0 +1,321 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package analysis defines the interface between a modular static
+analysis and an analysis driver program.
+
+
+Background
+
+A static analysis is a function that inspects a package of Go code and
+reports a set of diagnostics (typically mistakes in the code), and
+perhaps produces other results as well, such as suggested refactorings
+or other facts. An analysis that reports mistakes is informally called a
+"checker". For example, the printf checker reports mistakes in
+fmt.Printf format strings.
+
+A "modular" analysis is one that inspects one package at a time but can
+save information from a lower-level package and use it when inspecting a
+higher-level package, analogous to separate compilation in a toolchain.
+The printf checker is modular: when it discovers that a function such as
+log.Fatalf delegates to fmt.Printf, it records this fact, and checks
+calls to that function too, including calls made from another package.
+
+By implementing a common interface, checkers from a variety of sources
+can be easily selected, incorporated, and reused in a wide range of
+driver programs including command-line tools (such as vet), text editors and
+IDEs, build and test systems (such as go build, Bazel, or Buck), test
+frameworks, code review tools, code-base indexers (such as SourceGraph),
+documentation viewers (such as godoc), batch pipelines for large code
+bases, and so on.
+
+
+Analyzer
+
+The primary type in the API is Analyzer. An Analyzer statically
+describes an analysis function: its name, documentation, flags,
+relationship to other analyzers, and of course, its logic.
+
+To define an analysis, a user declares a (logically constant) variable
+of type Analyzer. Here is a typical example from one of the analyzers in
+the go/analysis/passes/ subdirectory:
+
+	package unusedresult
+
+	var Analyzer = &analysis.Analyzer{
+		Name: "unusedresult",
+		Doc:  "check for unused results of calls to some functions",
+		Run:  run,
+		...
+	}
+
+	func run(pass *analysis.Pass) (interface{}, error) {
+		...
+	}
+
+An analysis driver is a program such as vet that runs a set of
+analyses and prints the diagnostics that they report.
+The driver program must import the list of Analyzers it needs.
+Typically each Analyzer resides in a separate package.
+To add a new Analyzer to an existing driver, add another item to the list:
+
+	import ( "unusedresult"; "nilness"; "printf" )
+
+	var analyses = []*analysis.Analyzer{
+		unusedresult.Analyzer,
+		nilness.Analyzer,
+		printf.Analyzer,
+	}
+
+A driver may use the name, flags, and documentation to provide on-line
+help that describes the analyses it performs.
+The doc comment contains a brief one-line summary,
+optionally followed by paragraphs of explanation.
+
+The Analyzer type has more fields besides those shown above:
+
+	type Analyzer struct {
+		Name             string
+		Doc              string
+		Flags            flag.FlagSet
+		Run              func(*Pass) (interface{}, error)
+		RunDespiteErrors bool
+		ResultType       reflect.Type
+		Requires         []*Analyzer
+		FactTypes        []Fact
+	}
+
+The Flags field declares a set of named (global) flag variables that
+control analysis behavior. Unlike vet, analysis flags are not declared
+directly in the command line FlagSet; it is up to the driver to set the
+flag variables. A driver for a single analysis, a, might expose its flag
+f directly on the command line as -f, whereas a driver for multiple
+analyses might prefix the flag name by the analysis name (-a.f) to avoid
+ambiguity. An IDE might expose the flags through a graphical interface,
+and a batch pipeline might configure them from a config file.
+See the "findcall" analyzer for an example of flags in action.
+
+The RunDespiteErrors flag indicates whether the analysis is equipped to
+handle ill-typed code. If not, the driver will skip the analysis if
+there were parse or type errors.
+The optional ResultType field specifies the type of the result value
+computed by this analysis and made available to other analyses.
+The Requires field specifies a list of analyses upon which
+this one depends and whose results it may access, and it constrains the
+order in which a driver may run analyses.
+The FactTypes field is discussed in the section on Modularity.
+The analysis package provides a Validate function to perform basic
+sanity checks on an Analyzer, such as that its Requires graph is
+acyclic, its fact and result types are unique, and so on.
+
+Finally, the Run field contains a function to be called by the driver to
+execute the analysis on a single package. The driver passes it an
+instance of the Pass type.
+
+
+Pass
+
+A Pass describes a single unit of work: the application of a particular
+Analyzer to a particular package of Go code.
+The Pass provides information to the Analyzer's Run function about the
+package being analyzed, and provides operations to the Run function for
+reporting diagnostics and other information back to the driver.
+
+	type Pass struct {
+		Fset         *token.FileSet
+		Files        []*ast.File
+		OtherFiles   []string
+		IgnoredFiles []string
+		Pkg          *types.Package
+		TypesInfo    *types.Info
+		ResultOf     map[*Analyzer]interface{}
+		Report       func(Diagnostic)
+		...
+	}
+
+The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees,
+type information, and source positions for a single package of Go code.
+
+The OtherFiles field provides the names, but not the contents, of non-Go
+files such as assembly that are part of this package. See the "asmdecl"
+or "buildtags" analyzers for examples of loading non-Go files and reporting
+diagnostics against them.
+
+The IgnoredFiles field provides the names, but not the contents,
+of ignored Go and non-Go source files that are not part of this package
+with the current build configuration but may be part of other build
+configurations. See the "buildtags" analyzer for an example of loading
+and checking IgnoredFiles.
+
+The ResultOf field provides the results computed by the analyzers
+required by this one, as expressed in its Analyzer.Requires field. The
+driver runs the required analyzers first and makes their results
+available in this map. Each Analyzer must return a value of the type
+described in its Analyzer.ResultType field.
+For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which
+provides a control-flow graph for each function in the package (see
+golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that
+enables other Analyzers to traverse the syntax trees of the package more
+efficiently; and the "buildssa" analyzer constructs an SSA-form
+intermediate representation.
+Each of these Analyzers extends the capabilities of later Analyzers
+without adding a dependency to the core API, so an analysis tool pays
+only for the extensions it needs.
+
+The Report function emits a diagnostic, a message associated with a
+source position. For most analyses, diagnostics are their primary
+result.
+For convenience, Pass provides a helper method, Reportf, to report a new
+diagnostic by formatting a string.
+Diagnostic is defined as:
+
+	type Diagnostic struct {
+		Pos      token.Pos
+		Category string // optional
+		Message  string
+	}
+
+The optional Category field is a short identifier that classifies the
+kind of message when an analysis produces several kinds of diagnostic.
+
+Many analyses want to associate diagnostics with a severity level.
+Because Diagnostic does not have a severity level field, an Analyzer's
+diagnostics effectively all have the same severity level. To separate which
+diagnostics are high severity and which are low severity, expose multiple
+Analyzers instead. Analyzers should also be separated when their
+diagnostics belong in different groups, or could be tagged differently
+before being shown to the end user. Analyzers should document their severity
+level to help downstream tools surface diagnostics properly.
+
+Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
+and buildtag, inspect the raw text of Go source files or even non-Go
+files such as assembly. To report a diagnostic against a line of a
+raw text file, use the following sequence:
+
+	content, err := ioutil.ReadFile(filename)
+	if err != nil { ... }
+	tf := fset.AddFile(filename, -1, len(content))
+	tf.SetLinesForContent(content)
+	...
+	pass.Reportf(tf.LineStart(line), "oops")
+
+
+Modular analysis with Facts
+
+To improve efficiency and scalability, large programs are routinely
+built using separate compilation: units of the program are compiled
+separately, and recompiled only when one of their dependencies changes;
+independent modules may be compiled in parallel. The same technique may
+be applied to static analyses, for the same benefits. Such analyses are
+described as "modular".
+
+A compiler’s type checker is an example of a modular static analysis.
+Many other checkers we would like to apply to Go programs can be
+understood as alternative or non-standard type systems. For example,
+vet's printf checker infers whether a function has the "printf wrapper"
+type, and it applies stricter checks to calls of such functions. In
+addition, it records which functions are printf wrappers for use by
+later analysis passes to identify other printf wrappers by induction.
+A result such as “f is a printf wrapper” that is not interesting by
+itself but serves as a stepping stone to an interesting result (such as
+a diagnostic) is called a "fact".
+
+The analysis API allows an analysis to define new types of facts, to
+associate facts of these types with objects (named entities) declared
+within the current package, or with the package as a whole, and to query
+for an existing fact of a given type associated with an object or
+package.
+
+An Analyzer that uses facts must declare their types:
+
+	var Analyzer = &analysis.Analyzer{
+		Name:      "printf",
+		FactTypes: []analysis.Fact{new(isWrapper)},
+		...
+	}
+
+	type isWrapper struct{} // => *types.Func f “is a printf wrapper”
+
+The driver program ensures that facts for a pass’s dependencies are
+generated before analyzing the package and is responsible for propagating
+facts from one package to another, possibly across address spaces.
+Consequently, Facts must be serializable. The API requires that drivers
+use the gob encoding, an efficient, robust, self-describing binary
+protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
+if the default encoding is unsuitable. Facts should be stateless.
+
+The Pass type has functions to import and export facts,
+associated either with an object or with a package:
+
+	type Pass struct {
+		...
+		ExportObjectFact func(types.Object, Fact)
+		ImportObjectFact func(types.Object, Fact) bool
+
+		ExportPackageFact func(fact Fact)
+		ImportPackageFact func(*types.Package, Fact) bool
+	}
+
+An Analyzer may only export facts associated with the current package or
+its objects, though it may import facts from any package or object that
+is an import dependency of the current package.
+
+Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by
+the pair (obj, TypeOf(fact)), and the ImportObjectFact function
+retrieves the entry from this map and copies its value into the variable
+pointed to by fact. This scheme assumes that the concrete type of fact
+is a pointer; this assumption is checked by the Validate function.
+See the "printf" analyzer for an example of object facts in action.
+
+Some driver implementations (such as those based on Bazel and Blaze) do
+not currently apply analyzers to packages of the standard library.
+Therefore, for best results, analyzer authors should not rely on
+analysis facts being available for standard packages.
+For example, although the printf checker is capable of deducing during
+analysis of the log package that log.Printf is a printf wrapper,
+this fact is built in to the analyzer so that it correctly checks
+calls to log.Printf even when run in a driver that does not apply
+it to standard packages. We would like to remove this limitation in future.
+
+
+Testing an Analyzer
+
+The analysistest subpackage provides utilities for testing an Analyzer.
+In a few lines of code, it is possible to run an analyzer on a package
+of testdata files and check that it reported all the expected
+diagnostics and facts (and no more). Expectations are expressed using
+"// want ..." comments in the input code.
+
+
+Standalone commands
+
+Analyzers are provided in the form of packages that a driver program is
+expected to import. The vet command imports a set of several analyzers,
+but users may wish to define their own analysis commands that perform
+additional checks. To simplify the task of creating an analysis command,
+either for a single analyzer or for a whole suite, we provide the
+singlechecker and multichecker subpackages.
+
+The singlechecker package provides the main function for a command that
+runs one analyzer. By convention, each analyzer such as
+go/passes/findcall should be accompanied by a singlechecker-based
+command such as go/analysis/passes/findcall/cmd/findcall, defined in its
+entirety as:
+
+	package main
+
+	import (
+		"golang.org/x/tools/go/analysis/passes/findcall"
+		"golang.org/x/tools/go/analysis/singlechecker"
+	)
+
+	func main() { singlechecker.Main(findcall.Analyzer) }
+
+A tool that provides multiple analyzers can use multichecker in a
+similar way, giving it the list of Analyzers.
+
+*/
+package analysis

+ 49 - 0
vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go

@@ -0,0 +1,49 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package inspect defines an Analyzer that provides an AST inspector
+// (golang.org/x/tools/go/ast/inspector.Inspector) for the syntax trees
+// of a package. It is only a building block for other analyzers.
+//
+// Example of use in another analysis:
+//
+//	import (
+//		"golang.org/x/tools/go/analysis"
+//		"golang.org/x/tools/go/analysis/passes/inspect"
+//		"golang.org/x/tools/go/ast/inspector"
+//	)
+//
+//	var Analyzer = &analysis.Analyzer{
+//		...
+//		Requires:       []*analysis.Analyzer{inspect.Analyzer},
+//	}
+//
+// 	func run(pass *analysis.Pass) (interface{}, error) {
+// 		inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+// 		inspect.Preorder(nil, func(n ast.Node) {
+// 			...
+// 		})
+// 		return nil
+// 	}
+//
+package inspect
+
+import (
+	"reflect"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+var Analyzer = &analysis.Analyzer{
+	Name:             "inspect",
+	Doc:              "optimize AST traversal for later passes",
+	Run:              run,
+	RunDespiteErrors: true,
+	ResultType:       reflect.TypeOf(new(inspector.Inspector)),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	return inspector.New(pass.Files), nil
+}

+ 130 - 0
vendor/golang.org/x/tools/go/analysis/validate.go

@@ -0,0 +1,130 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+)
+
+// Validate reports an error if any of the analyzers are misconfigured.
+// Checks include:
+// that the name is a valid identifier;
+// that the Requires graph is acyclic;
+// that analyzer fact types are unique;
+// that each fact type is a pointer.
+func Validate(analyzers []*Analyzer) error {
+	// Map each fact type to its sole generating analyzer.
+	factTypes := make(map[reflect.Type]*Analyzer)
+
+	// Traverse the Requires graph, depth first.
+	const (
+		white = iota
+		grey
+		black
+		finished
+	)
+	color := make(map[*Analyzer]uint8)
+	var visit func(a *Analyzer) error
+	visit = func(a *Analyzer) error {
+		if a == nil {
+			return fmt.Errorf("nil *Analyzer")
+		}
+		if color[a] == white {
+			color[a] = grey
+
+			// names
+			if !validIdent(a.Name) {
+				return fmt.Errorf("invalid analyzer name %q", a)
+			}
+
+			if a.Doc == "" {
+				return fmt.Errorf("analyzer %q is undocumented", a)
+			}
+
+			// fact types
+			for _, f := range a.FactTypes {
+				if f == nil {
+					return fmt.Errorf("analyzer %s has nil FactType", a)
+				}
+				t := reflect.TypeOf(f)
+				if prev := factTypes[t]; prev != nil {
+					return fmt.Errorf("fact type %s registered by two analyzers: %v, %v",
+						t, a, prev)
+				}
+				if t.Kind() != reflect.Ptr {
+					return fmt.Errorf("%s: fact type %s is not a pointer", a, t)
+				}
+				factTypes[t] = a
+			}
+
+			// recursion
+			for _, req := range a.Requires {
+				if err := visit(req); err != nil {
+					return err
+				}
+			}
+			color[a] = black
+		}
+
+		if color[a] == grey {
+			stack := []*Analyzer{a}
+			inCycle := map[string]bool{}
+			for len(stack) > 0 {
+				current := stack[len(stack)-1]
+				stack = stack[:len(stack)-1]
+				if color[current] == grey && !inCycle[current.Name] {
+					inCycle[current.Name] = true
+					stack = append(stack, current.Requires...)
+				}
+			}
+			return &CycleInRequiresGraphError{AnalyzerNames: inCycle}
+		}
+
+		return nil
+	}
+	for _, a := range analyzers {
+		if err := visit(a); err != nil {
+			return err
+		}
+	}
+
+	// Reject duplicates among analyzers.
+	// Precondition:  color[a] == black.
+	// Postcondition: color[a] == finished.
+	for _, a := range analyzers {
+		if color[a] == finished {
+			return fmt.Errorf("duplicate analyzer: %s", a.Name)
+		}
+		color[a] = finished
+	}
+
+	return nil
+}
+
+func validIdent(name string) bool {
+	for i, r := range name {
+		if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) {
+			return false
+		}
+	}
+	return name != ""
+}
+
+type CycleInRequiresGraphError struct {
+	AnalyzerNames map[string]bool
+}
+
+func (e *CycleInRequiresGraphError) Error() string {
+	var b strings.Builder
+	b.WriteString("cycle detected involving the following analyzers:")
+	for n := range e.AnalyzerNames {
+		b.WriteByte(' ')
+		b.WriteString(n)
+	}
+	return b.String()
+}
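Drivers typically call Validate once at startup over their full analyzer list. A minimal sketch, using the inspect analyzer vendored in this same change:

	package main

	import (
		"fmt"

		"golang.org/x/tools/go/analysis"
		"golang.org/x/tools/go/analysis/passes/inspect"
	)

	func main() {
		// A real driver would pass its complete set of analyzers here.
		if err := analysis.Validate([]*analysis.Analyzer{inspect.Analyzer}); err != nil {
			panic(err)
		}
		fmt.Println("analyzers OK")
	}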

+ 16 - 4
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go

@@ -11,6 +11,8 @@ import (
 	"go/ast"
 	"go/ast"
 	"go/token"
 	"go/token"
 	"sort"
 	"sort"
+
+	"golang.org/x/tools/internal/typeparams"
 )
 )
 
 
 // PathEnclosingInterval returns the node that encloses the source
 // PathEnclosingInterval returns the node that encloses the source
@@ -294,8 +296,8 @@ func childrenOf(n ast.Node) []ast.Node {
 
 
 	case *ast.FieldList:
 	case *ast.FieldList:
 		children = append(children,
 		children = append(children,
-			tok(n.Opening, len("(")),
-			tok(n.Closing, len(")")))
+			tok(n.Opening, len("(")), // or len("[")
+			tok(n.Closing, len(")"))) // or len("]")
 
 
 	case *ast.File:
 	case *ast.File:
 		// TODO test: Doc
 		// TODO test: Doc
@@ -322,6 +324,9 @@ func childrenOf(n ast.Node) []ast.Node {
 			children = append(children, n.Recv)
 		}
 		children = append(children, n.Name)
+		if tparams := typeparams.ForFuncType(n.Type); tparams != nil {
+			children = append(children, tparams)
+		}
 		if n.Type.Params != nil {
 			children = append(children, n.Type.Params)
 		}
@@ -371,8 +376,13 @@ func childrenOf(n ast.Node) []ast.Node {
 
 
 	case *ast.IndexExpr:
 		children = append(children,
-			tok(n.Lbrack, len("{")),
-			tok(n.Rbrack, len("}")))
+			tok(n.Lbrack, len("[")),
+			tok(n.Rbrack, len("]")))
+
+	case *typeparams.IndexListExpr:
+		children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Rbrack, len("]")))
 
 	case *ast.InterfaceType:
 		children = append(children,
@@ -581,6 +591,8 @@ func NodeDescription(n ast.Node) string {
 		return "decrement statement"
 		return "decrement statement"
 	case *ast.IndexExpr:
 	case *ast.IndexExpr:
 		return "index expression"
 		return "index expression"
+	case *typeparams.IndexListExpr:
+		return "index list expression"
 	case *ast.InterfaceType:
 	case *ast.InterfaceType:
 		return "interface type"
 		return "interface type"
 	case *ast.KeyValueExpr:
 	case *ast.KeyValueExpr:

+ 5 - 7
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go

@@ -253,6 +253,10 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
 		a.apply(n, "X", nil, n.X)
 		a.apply(n, "X", nil, n.X)
 		a.apply(n, "Index", nil, n.Index)
 		a.apply(n, "Index", nil, n.Index)
 
 
+	case *typeparams.IndexListExpr:
+		a.apply(n, "X", nil, n.X)
+		a.applyList(n, "Indices")
+
 	case *ast.SliceExpr:
 	case *ast.SliceExpr:
 		a.apply(n, "X", nil, n.X)
 		a.apply(n, "X", nil, n.X)
 		a.apply(n, "Low", nil, n.Low)
 		a.apply(n, "Low", nil, n.Low)
@@ -439,13 +443,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
 		}
 
 	default:
-		if ix := typeparams.GetIndexExprData(n); ix != nil {
-			a.apply(n, "X", nil, ix.X)
-			// *ast.IndexExpr was handled above, so n must be an *ast.MultiIndexExpr.
-			a.applyList(n, "Indices")
-		} else {
-			panic(fmt.Sprintf("Apply: unexpected node type %T", n))
-		}
+		panic(fmt.Sprintf("Apply: unexpected node type %T", n))
 	}
 
 	if a.post != nil && !a.post(&a.cursor) {

+ 186 - 0
vendor/golang.org/x/tools/go/ast/inspector/inspector.go

@@ -0,0 +1,186 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package inspector provides helper functions for traversal over the
+// syntax trees of a package, including node filtering by type, and
+// materialization of the traversal stack.
+//
+// During construction, the inspector does a complete traversal and
+// builds a list of push/pop events and their node type. Subsequent
+// method calls that request a traversal scan this list, rather than walk
+// the AST, and perform type filtering using efficient bit sets.
+//
+// Experiments suggest the inspector's traversals are about 2.5x faster
+// than ast.Inspect, but it may take around 5 traversals for this
+// benefit to amortize the inspector's construction cost.
+// If efficiency is the primary concern, do not use Inspector for
+// one-off traversals.
+package inspector
+
+// There are four orthogonal features in a traversal:
+//  1 type filtering
+//  2 pruning
+//  3 postorder calls to f
+//  4 stack
+// Rather than offer all of them in the API,
+// only a few combinations are exposed:
+// - Preorder is the fastest and has fewest features,
+//   but is the most commonly needed traversal.
+// - Nodes and WithStack both provide pruning and postorder calls,
+//   even though few clients need it, because supporting two versions
+//   is not justified.
+// More combinations could be supported by expressing them as
+// wrappers around a more generic traversal, but this was measured
+// and found to degrade performance significantly (30%).
+
+import (
+	"go/ast"
+)
+
+// An Inspector provides methods for inspecting
+// (traversing) the syntax trees of a package.
+type Inspector struct {
+	events []event
+}
+
+// New returns an Inspector for the specified syntax trees.
+func New(files []*ast.File) *Inspector {
+	return &Inspector{traverse(files)}
+}
+
+// An event represents a push or a pop
+// of an ast.Node during a traversal.
+type event struct {
+	node  ast.Node
+	typ   uint64 // typeOf(node)
+	index int    // 1 + index of corresponding pop event, or 0 if this is a pop
+}
+
+// Preorder visits all the nodes of the files supplied to New in
+// depth-first order. It calls f(n) for each node n before it visits
+// n's children.
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
+	// Because it avoids postorder calls to f, and the pruning
+	// check, Preorder is almost twice as fast as Nodes. The two
+	// features seem to contribute similar slowdowns (~1.4x each).
+
+	mask := maskOf(types)
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.typ&mask != 0 {
+			if ev.index > 0 {
+				f(ev.node)
+			}
+		}
+		i++
+	}
+}
+
+// Nodes visits the nodes of the files supplied to New in depth-first
+// order. It calls f(n, true) for each node n before it visits n's
+// children. If f returns true, Nodes invokes f recursively for each
+// of the non-nil children of the node, followed by a call of
+// f(n, false).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
+	mask := maskOf(types)
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.typ&mask != 0 {
+			if ev.index > 0 {
+				// push
+				if !f(ev.node, true) {
+					i = ev.index // jump to corresponding pop + 1
+					continue
+				}
+			} else {
+				// pop
+				f(ev.node, false)
+			}
+		}
+		i++
+	}
+}
+
+// WithStack visits nodes in a similar manner to Nodes, but it
+// supplies each call to f an additional argument, the current
+// traversal stack. The stack's first element is the outermost node,
+// an *ast.File; its last is the innermost, n.
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
+	mask := maskOf(types)
+	var stack []ast.Node
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.index > 0 {
+			// push
+			stack = append(stack, ev.node)
+			if ev.typ&mask != 0 {
+				if !f(ev.node, true, stack) {
+					i = ev.index
+					stack = stack[:len(stack)-1]
+					continue
+				}
+			}
+		} else {
+			// pop
+			if ev.typ&mask != 0 {
+				f(ev.node, false, stack)
+			}
+			stack = stack[:len(stack)-1]
+		}
+		i++
+	}
+}
+
+// traverse builds the table of events representing a traversal.
+func traverse(files []*ast.File) []event {
+	// Preallocate approximate number of events
+	// based on source file extent.
+	// This makes traverse faster by 4x (!).
+	var extent int
+	for _, f := range files {
+		extent += int(f.End() - f.Pos())
+	}
+	// This estimate is based on the net/http package.
+	capacity := extent * 33 / 100
+	if capacity > 1e6 {
+		capacity = 1e6 // impose some reasonable maximum
+	}
+	events := make([]event, 0, capacity)
+
+	var stack []event
+	for _, f := range files {
+		ast.Inspect(f, func(n ast.Node) bool {
+			if n != nil {
+				// push
+				ev := event{
+					node:  n,
+					typ:   typeOf(n),
+					index: len(events), // push event temporarily holds own index
+				}
+				stack = append(stack, ev)
+				events = append(events, ev)
+			} else {
+				// pop
+				ev := stack[len(stack)-1]
+				stack = stack[:len(stack)-1]
+
+				events[ev.index].index = len(events) + 1 // make push refer to pop
+
+				ev.index = 0 // turn ev into a pop event
+				events = append(events, ev)
+			}
+			return true
+		})
+	}
+
+	return events
+}

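A minimal sketch of the intended usage, paying the construction cost once and reusing the event list for type-filtered traversals (source string invented):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\nfunc f() { g(); g() }\nfunc g() {}\n", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f}) // one-time full traversal
	// Each later traversal scans the event list with a bit-set filter.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		fmt.Println("call at", fset.Position(n.Pos()))
	})
}
```
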
+ 227 - 0
vendor/golang.org/x/tools/go/ast/inspector/typeof.go

@@ -0,0 +1,227 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file defines func typeOf(ast.Node) uint64.
+//
+// The initial map-based implementation was too slow;
+// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
+
+import (
+	"go/ast"
+
+	"golang.org/x/tools/internal/typeparams"
+)
+
+const (
+	nArrayType = iota
+	nAssignStmt
+	nBadDecl
+	nBadExpr
+	nBadStmt
+	nBasicLit
+	nBinaryExpr
+	nBlockStmt
+	nBranchStmt
+	nCallExpr
+	nCaseClause
+	nChanType
+	nCommClause
+	nComment
+	nCommentGroup
+	nCompositeLit
+	nDeclStmt
+	nDeferStmt
+	nEllipsis
+	nEmptyStmt
+	nExprStmt
+	nField
+	nFieldList
+	nFile
+	nForStmt
+	nFuncDecl
+	nFuncLit
+	nFuncType
+	nGenDecl
+	nGoStmt
+	nIdent
+	nIfStmt
+	nImportSpec
+	nIncDecStmt
+	nIndexExpr
+	nIndexListExpr
+	nInterfaceType
+	nKeyValueExpr
+	nLabeledStmt
+	nMapType
+	nPackage
+	nParenExpr
+	nRangeStmt
+	nReturnStmt
+	nSelectStmt
+	nSelectorExpr
+	nSendStmt
+	nSliceExpr
+	nStarExpr
+	nStructType
+	nSwitchStmt
+	nTypeAssertExpr
+	nTypeSpec
+	nTypeSwitchStmt
+	nUnaryExpr
+	nValueSpec
+)
+
+// typeOf returns a distinct single-bit value that represents the type of n.
+//
+// Various implementations were benchmarked with BenchmarkNewInspector:
+//								GOGC=off
+// - type switch				4.9-5.5ms	2.1ms
+// - binary search over a sorted list of types  5.5-5.9ms	2.5ms
+// - linear scan, frequency-ordered list 	5.9-6.1ms	2.7ms
+// - linear scan, unordered list		6.4ms		2.7ms
+// - hash table					6.5ms		3.1ms
+// A perfect hash seemed like overkill.
+//
+// The compiler's switch statement is the clear winner
+// as it produces a binary tree in code,
+// with constant conditions and good branch prediction.
+// (Sadly it is the most verbose in source code.)
+// Binary search suffered from poor branch prediction.
+//
+func typeOf(n ast.Node) uint64 {
+	// Fast path: nearly half of all nodes are identifiers.
+	if _, ok := n.(*ast.Ident); ok {
+		return 1 << nIdent
+	}
+
+	// These cases include all nodes encountered by ast.Inspect.
+	switch n.(type) {
+	case *ast.ArrayType:
+		return 1 << nArrayType
+	case *ast.AssignStmt:
+		return 1 << nAssignStmt
+	case *ast.BadDecl:
+		return 1 << nBadDecl
+	case *ast.BadExpr:
+		return 1 << nBadExpr
+	case *ast.BadStmt:
+		return 1 << nBadStmt
+	case *ast.BasicLit:
+		return 1 << nBasicLit
+	case *ast.BinaryExpr:
+		return 1 << nBinaryExpr
+	case *ast.BlockStmt:
+		return 1 << nBlockStmt
+	case *ast.BranchStmt:
+		return 1 << nBranchStmt
+	case *ast.CallExpr:
+		return 1 << nCallExpr
+	case *ast.CaseClause:
+		return 1 << nCaseClause
+	case *ast.ChanType:
+		return 1 << nChanType
+	case *ast.CommClause:
+		return 1 << nCommClause
+	case *ast.Comment:
+		return 1 << nComment
+	case *ast.CommentGroup:
+		return 1 << nCommentGroup
+	case *ast.CompositeLit:
+		return 1 << nCompositeLit
+	case *ast.DeclStmt:
+		return 1 << nDeclStmt
+	case *ast.DeferStmt:
+		return 1 << nDeferStmt
+	case *ast.Ellipsis:
+		return 1 << nEllipsis
+	case *ast.EmptyStmt:
+		return 1 << nEmptyStmt
+	case *ast.ExprStmt:
+		return 1 << nExprStmt
+	case *ast.Field:
+		return 1 << nField
+	case *ast.FieldList:
+		return 1 << nFieldList
+	case *ast.File:
+		return 1 << nFile
+	case *ast.ForStmt:
+		return 1 << nForStmt
+	case *ast.FuncDecl:
+		return 1 << nFuncDecl
+	case *ast.FuncLit:
+		return 1 << nFuncLit
+	case *ast.FuncType:
+		return 1 << nFuncType
+	case *ast.GenDecl:
+		return 1 << nGenDecl
+	case *ast.GoStmt:
+		return 1 << nGoStmt
+	case *ast.Ident:
+		return 1 << nIdent
+	case *ast.IfStmt:
+		return 1 << nIfStmt
+	case *ast.ImportSpec:
+		return 1 << nImportSpec
+	case *ast.IncDecStmt:
+		return 1 << nIncDecStmt
+	case *ast.IndexExpr:
+		return 1 << nIndexExpr
+	case *typeparams.IndexListExpr:
+		return 1 << nIndexListExpr
+	case *ast.InterfaceType:
+		return 1 << nInterfaceType
+	case *ast.KeyValueExpr:
+		return 1 << nKeyValueExpr
+	case *ast.LabeledStmt:
+		return 1 << nLabeledStmt
+	case *ast.MapType:
+		return 1 << nMapType
+	case *ast.Package:
+		return 1 << nPackage
+	case *ast.ParenExpr:
+		return 1 << nParenExpr
+	case *ast.RangeStmt:
+		return 1 << nRangeStmt
+	case *ast.ReturnStmt:
+		return 1 << nReturnStmt
+	case *ast.SelectStmt:
+		return 1 << nSelectStmt
+	case *ast.SelectorExpr:
+		return 1 << nSelectorExpr
+	case *ast.SendStmt:
+		return 1 << nSendStmt
+	case *ast.SliceExpr:
+		return 1 << nSliceExpr
+	case *ast.StarExpr:
+		return 1 << nStarExpr
+	case *ast.StructType:
+		return 1 << nStructType
+	case *ast.SwitchStmt:
+		return 1 << nSwitchStmt
+	case *ast.TypeAssertExpr:
+		return 1 << nTypeAssertExpr
+	case *ast.TypeSpec:
+		return 1 << nTypeSpec
+	case *ast.TypeSwitchStmt:
+		return 1 << nTypeSwitchStmt
+	case *ast.UnaryExpr:
+		return 1 << nUnaryExpr
+	case *ast.ValueSpec:
+		return 1 << nValueSpec
+	}
+	return 0
+}
+
+func maskOf(nodes []ast.Node) uint64 {
+	if nodes == nil {
+		return 1<<64 - 1 // match all node types
+	}
+	var mask uint64
+	for _, n := range nodes {
+		mask |= typeOf(n)
+	}
+	return mask
+}

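typeOf and maskOf are unexported, but the bit-set filtering they implement is easy to demonstrate in isolation; a self-contained toy version, with the constants invented for illustration:

```go
package main

import "fmt"

// Each node kind owns one bit, so a filter for several kinds is just
// the OR of their bits, and a match test is a single AND.
const (
	kindIdent uint64 = 1 << iota
	kindCallExpr
	kindFuncDecl
)

func main() {
	mask := kindCallExpr | kindFuncDecl
	for _, ev := range []struct {
		name string
		typ  uint64
	}{
		{"Ident", kindIdent},
		{"CallExpr", kindCallExpr},
		{"FuncDecl", kindFuncDecl},
	} {
		fmt.Printf("%s matches: %v\n", ev.name, ev.typ&mask != 0)
	}
}
```
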
+ 198 - 0
vendor/golang.org/x/tools/go/buildutil/allpackages.go

@@ -0,0 +1,198 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package buildutil provides utilities related to the go/build
+// package in the standard library.
+//
+// All I/O is done via the build.Context file system interface, which must
+// be concurrency-safe.
+package buildutil // import "golang.org/x/tools/go/buildutil"
+
+import (
+	"go/build"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// AllPackages returns the package path of each Go package in any source
+// directory of the specified build context (e.g. $GOROOT or an element
+// of $GOPATH).  Errors are ignored.  The results are sorted.
+// All package paths are canonical, and thus may contain "/vendor/".
+//
+// The result may include import paths for directories that contain no
+// *.go files, such as "archive" (in $GOROOT/src).
+//
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
+//
+func AllPackages(ctxt *build.Context) []string {
+	var list []string
+	ForEachPackage(ctxt, func(pkg string, _ error) {
+		list = append(list, pkg)
+	})
+	sort.Strings(list)
+	return list
+}
+
+// ForEachPackage calls the found function with the package path of
+// each Go package it finds in any source directory of the specified
+// build context (e.g. $GOROOT or an element of $GOPATH).
+// All package paths are canonical, and thus may contain "/vendor/".
+//
+// If the package directory exists but could not be read, the second
+// argument to the found function provides the error.
+//
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
+//
+func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
+	ch := make(chan item)
+
+	var wg sync.WaitGroup
+	for _, root := range ctxt.SrcDirs() {
+		root := root
+		wg.Add(1)
+		go func() {
+			allPackages(ctxt, root, ch)
+			wg.Done()
+		}()
+	}
+	go func() {
+		wg.Wait()
+		close(ch)
+	}()
+
+	// All calls to found occur in the caller's goroutine.
+	for i := range ch {
+		found(i.importPath, i.err)
+	}
+}
+
+type item struct {
+	importPath string
+	err        error // (optional)
+}
+
+// We use a process-wide counting semaphore to limit
+// the number of parallel calls to ReadDir.
+var ioLimit = make(chan bool, 20)
+
+func allPackages(ctxt *build.Context, root string, ch chan<- item) {
+	root = filepath.Clean(root) + string(os.PathSeparator)
+
+	var wg sync.WaitGroup
+
+	var walkDir func(dir string)
+	walkDir = func(dir string) {
+		// Avoid .foo, _foo, and testdata directory trees.
+		base := filepath.Base(dir)
+		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
+			return
+		}
+
+		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
+
+		// Prune search if we encounter any of these import paths.
+		switch pkg {
+		case "builtin":
+			return
+		}
+
+		ioLimit <- true
+		files, err := ReadDir(ctxt, dir)
+		<-ioLimit
+		if pkg != "" || err != nil {
+			ch <- item{pkg, err}
+		}
+		for _, fi := range files {
+			fi := fi
+			if fi.IsDir() {
+				wg.Add(1)
+				go func() {
+					walkDir(filepath.Join(dir, fi.Name()))
+					wg.Done()
+				}()
+			}
+		}
+	}
+
+	walkDir(root)
+	wg.Wait()
+}
+
+// ExpandPatterns returns the set of packages matched by patterns,
+// which may have the following forms:
+//
+//		golang.org/x/tools/cmd/guru     # a single package
+//		golang.org/x/tools/...          # all packages beneath dir
+//		...                             # the entire workspace.
+//
+// Order is significant: a pattern preceded by '-' removes matching
+// packages from the set.  For example, these patterns match all encoding
+// packages except encoding/xml:
+//
+// 	encoding/... -encoding/xml
+//
+// A trailing slash in a pattern is ignored.  (Path components of Go
+// package names are separated by slash, not the platform's path separator.)
+//
+func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
+	// TODO(adonovan): support other features of 'go list':
+	// - "std"/"cmd"/"all" meta-packages
+	// - "..." not at the end of a pattern
+	// - relative patterns using "./" or "../" prefix
+
+	pkgs := make(map[string]bool)
+	doPkg := func(pkg string, neg bool) {
+		if neg {
+			delete(pkgs, pkg)
+		} else {
+			pkgs[pkg] = true
+		}
+	}
+
+	// Scan entire workspace if wildcards are present.
+	// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
+	var all []string
+	for _, arg := range patterns {
+		if strings.HasSuffix(arg, "...") {
+			all = AllPackages(ctxt)
+			break
+		}
+	}
+
+	for _, arg := range patterns {
+		if arg == "" {
+			continue
+		}
+
+		neg := arg[0] == '-'
+		if neg {
+			arg = arg[1:]
+		}
+
+		if arg == "..." {
+			// ... matches all packages
+			for _, pkg := range all {
+				doPkg(pkg, neg)
+			}
+		} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
+			// dir/... matches all packages beneath dir
+			for _, pkg := range all {
+				if strings.HasPrefix(pkg, dir) &&
+					(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
+					doPkg(pkg, neg)
+				}
+			}
+		} else {
+			// single package
+			doPkg(strings.TrimSuffix(arg, "/"), neg)
+		}
+	}
+
+	return pkgs
+}

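A sketch of the two exported entry points; the results depend on the local GOROOT/GOPATH, and the pattern strings are illustrative:

```go
package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Enumerate every package visible to the default build context,
	// then narrow the set with ordered include/exclude patterns.
	ctxt := &build.Default
	fmt.Println(len(buildutil.AllPackages(ctxt)), "packages found")

	pkgs := buildutil.ExpandPatterns(ctxt, []string{"encoding/...", "-encoding/xml"})
	fmt.Println(pkgs["encoding/json"], pkgs["encoding/xml"]) // true false
}
```
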
+ 113 - 0
vendor/golang.org/x/tools/go/buildutil/fakecontext.go

@@ -0,0 +1,113 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+	"fmt"
+	"go/build"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+)
+
+// FakeContext returns a build.Context for the fake file tree specified
+// by pkgs, which maps package import paths to a mapping from file base
+// names to contents.
+//
+// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
+// the necessary file access methods to read from memory instead of the
+// real file system.
+//
+// Unlike a real file tree, the fake one has only two levels---packages
+// and files---so ReadDir("/go/src/") returns all packages under
+// /go/src/ including, for instance, "math" and "math/big".
+// ReadDir("/go/src/math/big") would return all the files in the
+// "math/big" package.
+//
+func FakeContext(pkgs map[string]map[string]string) *build.Context {
+	clean := func(filename string) string {
+		f := path.Clean(filepath.ToSlash(filename))
+		// Removing "/go/src" while respecting segment
+		// boundaries has this unfortunate corner case:
+		if f == "/go/src" {
+			return ""
+		}
+		return strings.TrimPrefix(f, "/go/src/")
+	}
+
+	ctxt := build.Default // copy
+	ctxt.GOROOT = "/go"
+	ctxt.GOPATH = ""
+	ctxt.Compiler = "gc"
+	ctxt.IsDir = func(dir string) bool {
+		dir = clean(dir)
+		if dir == "" {
+			return true // needed by (*build.Context).SrcDirs
+		}
+		return pkgs[dir] != nil
+	}
+	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
+		dir = clean(dir)
+		var fis []os.FileInfo
+		if dir == "" {
+			// enumerate packages
+			for importPath := range pkgs {
+				fis = append(fis, fakeDirInfo(importPath))
+			}
+		} else {
+			// enumerate files of package
+			for basename := range pkgs[dir] {
+				fis = append(fis, fakeFileInfo(basename))
+			}
+		}
+		sort.Sort(byName(fis))
+		return fis, nil
+	}
+	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
+		filename = clean(filename)
+		dir, base := path.Split(filename)
+		content, ok := pkgs[path.Clean(dir)][base]
+		if !ok {
+			return nil, fmt.Errorf("file not found: %s", filename)
+		}
+		return ioutil.NopCloser(strings.NewReader(content)), nil
+	}
+	ctxt.IsAbsPath = func(path string) bool {
+		path = filepath.ToSlash(path)
+		// Don't rely on the default (filepath.Path) since on
+		// Windows, it reports virtual paths as non-absolute.
+		return strings.HasPrefix(path, "/")
+	}
+	return &ctxt
+}
+
+type byName []os.FileInfo
+
+func (s byName) Len() int           { return len(s) }
+func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
+
+type fakeFileInfo string
+
+func (fi fakeFileInfo) Name() string    { return string(fi) }
+func (fakeFileInfo) Sys() interface{}   { return nil }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool        { return false }
+func (fakeFileInfo) Size() int64        { return 0 }
+func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
+
+type fakeDirInfo string
+
+func (fd fakeDirInfo) Name() string    { return string(fd) }
+func (fakeDirInfo) Sys() interface{}   { return nil }
+func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
+func (fakeDirInfo) IsDir() bool        { return true }
+func (fakeDirInfo) Size() int64        { return 0 }
+func (fakeDirInfo) Mode() os.FileMode  { return 0755 }

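A sketch of importing through the fake tree via the ordinary go/build path; the package contents are invented:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Two in-memory "packages" under the fake GOROOT of /go.
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"a":   {"a.go": "package a\n"},
		"a/b": {"b.go": "package b\n"},
	})
	pkg, err := ctxt.Import("a/b", "", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Name, pkg.GoFiles) // b [b.go]
}
```
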
+ 103 - 0
vendor/golang.org/x/tools/go/buildutil/overlay.go

@@ -0,0 +1,103 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"go/build"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// OverlayContext overlays a build.Context with additional files from
+// a map. Files in the map take precedence over other files.
+//
+// In addition to plain string comparison, two file names are
+// considered equal if their base names match and their directory
+// components point at the same directory on the file system. That is,
+// symbolic links are followed for directories, but not files.
+//
+// A common use case for OverlayContext is to allow editors to pass in
+// a set of unsaved, modified files.
+//
+// Currently, only the Context.OpenFile function will respect the
+// overlay. This may change in the future.
+func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
+	// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
+
+	rc := func(data []byte) (io.ReadCloser, error) {
+		return ioutil.NopCloser(bytes.NewBuffer(data)), nil
+	}
+
+	copy := *orig // make a copy
+	ctxt := &copy
+	ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
+		// Fast path: names match exactly.
+		if content, ok := overlay[path]; ok {
+			return rc(content)
+		}
+
+		// Slow path: check for same file under a different
+		// alias, perhaps due to a symbolic link.
+		for filename, content := range overlay {
+			if sameFile(path, filename) {
+				return rc(content)
+			}
+		}
+
+		return OpenFile(orig, path)
+	}
+	return ctxt
+}
+
+// ParseOverlayArchive parses an archive containing Go files and their
+// contents. The result is intended to be used with OverlayContext.
+//
+//
+// Archive format
+//
+// The archive consists of a series of files. Each file consists of a
+// name, a decimal file size and the file contents, separated by
+// newlines. No newline follows after the file contents.
+func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
+	overlay := make(map[string][]byte)
+	r := bufio.NewReader(archive)
+	for {
+		// Read file name.
+		filename, err := r.ReadString('\n')
+		if err != nil {
+			if err == io.EOF {
+				break // OK
+			}
+			return nil, fmt.Errorf("reading archive file name: %v", err)
+		}
+		filename = filepath.Clean(strings.TrimSpace(filename))
+
+		// Read file size.
+		sz, err := r.ReadString('\n')
+		if err != nil {
+			return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
+		}
+		sz = strings.TrimSpace(sz)
+		size, err := strconv.ParseUint(sz, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
+		}
+
+		// Read file content.
+		content := make([]byte, size)
+		if _, err := io.ReadFull(r, content); err != nil {
+			return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
+		}
+		overlay[filename] = content
+	}
+
+	return overlay, nil
+}

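The archive format is simple enough to construct by hand; a sketch, with the entry name and contents invented (note the size line must match the byte count exactly):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// One entry: a name line, a decimal size line, then exactly 13 bytes.
	archive := "hello.go\n13\npackage main\n"
	overlay, err := buildutil.ParseOverlayArchive(strings.NewReader(archive))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", overlay["hello.go"]) // "package main\n"
}
```
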
+ 79 - 0
vendor/golang.org/x/tools/go/buildutil/tags.go

@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
+
+import "fmt"
+
+const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
+	"For more information about build tags, see the description of " +
+	"build constraints in the documentation for the go/build package"
+
+// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
+// a flag value in the same manner as go build's -tags flag and
+// populates a []string slice.
+//
+// See $GOROOT/src/go/build/doc.go for description of build tags.
+// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
+//
+// Example:
+// 	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
+type TagsFlag []string
+
+func (v *TagsFlag) Set(s string) error {
+	var err error
+	*v, err = splitQuotedFields(s)
+	if *v == nil {
+		*v = []string{}
+	}
+	return err
+}
+
+func (v *TagsFlag) Get() interface{} { return *v }
+
+func splitQuotedFields(s string) ([]string, error) {
+	// Split fields allowing '' or "" around elements.
+	// Quotes further inside the string do not count.
+	var f []string
+	for len(s) > 0 {
+		for len(s) > 0 && isSpaceByte(s[0]) {
+			s = s[1:]
+		}
+		if len(s) == 0 {
+			break
+		}
+		// Accepted quoted string. No unescaping inside.
+		if s[0] == '"' || s[0] == '\'' {
+			quote := s[0]
+			s = s[1:]
+			i := 0
+			for i < len(s) && s[i] != quote {
+				i++
+			}
+			if i >= len(s) {
+				return nil, fmt.Errorf("unterminated %c string", quote)
+			}
+			f = append(f, s[:i])
+			s = s[i+1:]
+			continue
+		}
+		i := 0
+		for i < len(s) && !isSpaceByte(s[i]) {
+			i++
+		}
+		f = append(f, s[:i])
+		s = s[i:]
+	}
+	return f, nil
+}
+
+func (v *TagsFlag) String() string {
+	return "<tagsFlag>"
+}
+
+func isSpaceByte(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}

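Registering the flag is the one-liner from the doc comment; a runnable sketch:

```go
package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse() // e.g. -tags "integration 'quoted tag'"
	fmt.Println(build.Default.BuildTags)
}
```
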
+ 212 - 0
vendor/golang.org/x/tools/go/buildutil/util.go

@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// ParseFile behaves like parser.ParseFile,
+// but uses the build context's file system interface, if any.
+//
+// If file is not absolute (as defined by IsAbsPath), the (dir, file)
+// components are joined using JoinPath; dir must be absolute.
+//
+// The displayPath function, if provided, is used to transform the
+// filename that will be attached to the ASTs.
+//
+// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
+//
+func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
+	if !IsAbsPath(ctxt, file) {
+		file = JoinPath(ctxt, dir, file)
+	}
+	rd, err := OpenFile(ctxt, file)
+	if err != nil {
+		return nil, err
+	}
+	defer rd.Close() // ignore error
+	if displayPath != nil {
+		file = displayPath(file)
+	}
+	return parser.ParseFile(fset, file, rd, mode)
+}
+
+// ContainingPackage returns the package containing filename.
+//
+// If filename is not absolute, it is interpreted relative to working directory dir.
+// All I/O is via the build context's file system interface, if any.
+//
+// The '...Files []string' fields of the resulting build.Package are not
+// populated (build.FindOnly mode).
+//
+func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
+	if !IsAbsPath(ctxt, filename) {
+		filename = JoinPath(ctxt, dir, filename)
+	}
+
+	// We must not assume the file tree uses
+	// "/" always,
+	// `\` always,
+	// or os.PathSeparator (which varies by platform),
+	// but to make any progress, we are forced to assume that
+	// paths will not use `\` unless the PathSeparator
+	// is also `\`, thus we can rely on filepath.ToSlash for some sanity.
+
+	dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
+
+	// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
+	for _, srcdir := range ctxt.SrcDirs() {
+		srcdirSlash := filepath.ToSlash(srcdir) + "/"
+		if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
+			return ctxt.Import(importPath, dir, build.FindOnly)
+		}
+	}
+
+	return nil, fmt.Errorf("can't find package containing %s", filename)
+}
+
+// -- Effective methods of file system interface -------------------------
+
+// (go/build.Context defines these as methods, but does not export them.)
+
+// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
+	if f := ctxt.HasSubdir; f != nil {
+		return f(root, dir)
+	}
+
+	// Try using paths we received.
+	if rel, ok = hasSubdir(root, dir); ok {
+		return
+	}
+
+	// Try expanding symlinks and comparing
+	// expanded against unexpanded and
+	// expanded against expanded.
+	rootSym, _ := filepath.EvalSymlinks(root)
+	dirSym, _ := filepath.EvalSymlinks(dir)
+
+	if rel, ok = hasSubdir(rootSym, dir); ok {
+		return
+	}
+	if rel, ok = hasSubdir(root, dirSym); ok {
+		return
+	}
+	return hasSubdir(rootSym, dirSym)
+}
+
+func hasSubdir(root, dir string) (rel string, ok bool) {
+	const sep = string(filepath.Separator)
+	root = filepath.Clean(root)
+	if !strings.HasSuffix(root, sep) {
+		root += sep
+	}
+
+	dir = filepath.Clean(dir)
+	if !strings.HasPrefix(dir, root) {
+		return "", false
+	}
+
+	return filepath.ToSlash(dir[len(root):]), true
+}
+
+// FileExists returns true if the specified file exists,
+// using the build context's file system interface.
+func FileExists(ctxt *build.Context, path string) bool {
+	if ctxt.OpenFile != nil {
+		r, err := ctxt.OpenFile(path)
+		if err != nil {
+			return false
+		}
+		r.Close() // ignore error
+		return true
+	}
+	_, err := os.Stat(path)
+	return err == nil
+}
+
+// OpenFile behaves like os.Open,
+// but uses the build context's file system interface, if any.
+func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
+	if ctxt.OpenFile != nil {
+		return ctxt.OpenFile(path)
+	}
+	return os.Open(path)
+}
+
+// IsAbsPath behaves like filepath.IsAbs,
+// but uses the build context's file system interface, if any.
+func IsAbsPath(ctxt *build.Context, path string) bool {
+	if ctxt.IsAbsPath != nil {
+		return ctxt.IsAbsPath(path)
+	}
+	return filepath.IsAbs(path)
+}
+
+// JoinPath behaves like filepath.Join,
+// but uses the build context's file system interface, if any.
+func JoinPath(ctxt *build.Context, path ...string) string {
+	if ctxt.JoinPath != nil {
+		return ctxt.JoinPath(path...)
+	}
+	return filepath.Join(path...)
+}
+
+// IsDir behaves like os.Stat plus IsDir,
+// but uses the build context's file system interface, if any.
+func IsDir(ctxt *build.Context, path string) bool {
+	if ctxt.IsDir != nil {
+		return ctxt.IsDir(path)
+	}
+	fi, err := os.Stat(path)
+	return err == nil && fi.IsDir()
+}
+
+// ReadDir behaves like ioutil.ReadDir,
+// but uses the build context's file system interface, if any.
+func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
+	if ctxt.ReadDir != nil {
+		return ctxt.ReadDir(path)
+	}
+	return ioutil.ReadDir(path)
+}
+
+// SplitPathList behaves like filepath.SplitList,
+// but uses the build context's file system interface, if any.
+func SplitPathList(ctxt *build.Context, s string) []string {
+	if ctxt.SplitPathList != nil {
+		return ctxt.SplitPathList(s)
+	}
+	return filepath.SplitList(s)
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+	if path.Clean(x) == path.Clean(y) {
+		return true
+	}
+	if filepath.Base(x) == filepath.Base(y) { // (optimisation)
+		if xi, err := os.Stat(x); err == nil {
+			if yi, err := os.Stat(y); err == nil {
+				return os.SameFile(xi, yi)
+			}
+		}
+	}
+	return false
+}

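These wrappers keep tools agnostic to whether a real or virtual file system backs the build.Context; a sketch combining a few of them with the fake context from above (paths are illustrative):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"a": {"a.go": "package a\nfunc A() {}\n"},
	})
	// Both calls go through the context's overridden file methods.
	fmt.Println(buildutil.FileExists(ctxt, "/go/src/a/a.go")) // true

	fset := token.NewFileSet()
	f, err := buildutil.ParseFile(fset, ctxt, nil, "/go/src/a", "a.go", parser.PackageClauseOnly)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name.Name) // a
}
```
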
+ 18 - 5
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go

@@ -50,11 +50,24 @@ func Find(importPath, srcDir string) (filename, path string) {
 // additional trailing data beyond the end of the export data.
 func NewReader(r io.Reader) (io.Reader, error) {
 	buf := bufio.NewReader(r)
-	_, err := gcimporter.FindExportData(buf)
-	// If we ever switch to a zip-like archive format with the ToC
-	// at the end, we can return the correct portion of export data,
-	// but for now we must return the entire rest of the file.
-	return buf, err
+	_, size, err := gcimporter.FindExportData(buf)
+	if err != nil {
+		return nil, err
+	}
+
+	if size >= 0 {
+		// We were given an archive and found the __.PKGDEF in it.
+		// This tells us the size of the export data, and we don't
+		// need to return the entire file.
+		return &io.LimitedReader{
+			R: buf,
+			N: size,
+		}, nil
+	} else {
+		// We were given an object file. As such, we don't know how large
+		// the export data is and must return the entire file.
+		return buf, nil
+	}
 }
 
 // Read reads export data from in, decodes it, and returns type

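With the archive case now bounded by the recorded size, end-to-end use looks roughly like this; Find only succeeds on toolchains that still install .a files for the standard library, so the example guards for that:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		fmt.Println("no installed export data for fmt")
		return
	}
	f, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r, err := gcexportdata.NewReader(f) // size-limited for archives
	if err != nil {
		panic(err)
	}
	fset := token.NewFileSet()
	pkg, err := gcexportdata.Read(r, fset, make(map[string]*types.Package), path)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Path())
}
```
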
+ 222 - 0
vendor/golang.org/x/tools/go/internal/cgo/cgo.go

@@ -0,0 +1,222 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cgo handles cgo preprocessing of files containing `import "C"`.
+//
+// DESIGN
+//
+// The approach taken is to run the cgo processor on the package's
+// CgoFiles and parse the output, faking the filenames of the
+// resulting ASTs so that the synthetic file containing the C types is
+// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
+// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
+// not the names of the actual temporary files.
+//
+// The advantage of this approach is its fidelity to 'go build'.  The
+// downside is that the token.Position.Offset for each AST node is
+// incorrect, being an offset within the temporary file.  Line numbers
+// should still be correct because of the //line comments.
+//
+// The logic of this file is mostly plundered from the 'go build'
+// tool, which also invokes the cgo preprocessor.
+//
+//
+// REJECTED ALTERNATIVE
+//
+// An alternative approach that we explored is to extend go/types'
+// Importer mechanism to provide the identity of the importing package
+// so that each time `import "C"` appears it resolves to a different
+// synthetic package containing just the objects needed in that case.
+// The loader would invoke cgo but parse only the cgo_types.go file
+// defining the package-level objects, discarding the other files
+// resulting from preprocessing.
+//
+// The benefit of this approach would have been that source-level
+// syntax information would correspond exactly to the original cgo
+// file, with no preprocessing involved, making source tools like
+// godoc, guru, and eg happy.  However, the approach was rejected
+// due to the additional complexity it would impose on go/types.  (It
+// made for a beautiful demo, though.)
+//
+// cgo files, despite their *.go extension, are not legal Go source
+// files per the specification since they may refer to unexported
+// members of package "C" such as C.int.  Also, a function such as
+// C.getpwent has in effect two types, one matching its C type and one
+// which additionally returns (errno C.int).  The cgo preprocessor
+// uses name mangling to distinguish these two functions in the
+// processed code, but go/types would need to duplicate this logic in
+// its handling of function calls, analogous to the treatment of map
+// lookups in which y=m[k] and y,ok=m[k] are both legal.
+
+package cgo
+
+import (
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	exec "golang.org/x/sys/execabs"
+)
+
+// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
+// the output and returns the resulting ASTs.
+//
+func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
+	tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmpdir)
+
+	pkgdir := bp.Dir
+	if DisplayPath != nil {
+		pkgdir = DisplayPath(pkgdir)
+	}
+
+	cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
+	if err != nil {
+		return nil, err
+	}
+	var files []*ast.File
+	for i := range cgoFiles {
+		rd, err := os.Open(cgoFiles[i])
+		if err != nil {
+			return nil, err
+		}
+		display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
+		f, err := parser.ParseFile(fset, display, rd, mode)
+		rd.Close()
+		if err != nil {
+			return nil, err
+		}
+		files = append(files, f)
+	}
+	return files, nil
+}
+
+var cgoRe = regexp.MustCompile(`[/\\:]`)
+
+// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
+// lists of files: the resulting processed files (in temporary
+// directory tmpdir) and the corresponding names of the unprocessed files.
+//
+// Run is adapted from (*builder).cgo in
+// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
+// Objective C, CGOPKGPATH, CGO_FLAGS.
+//
+// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
+// to the cgo preprocessor. This in turn will set the // line comments
+// referring to those files to use absolute paths. This is needed for
+// go/packages using the legacy go list support so it is able to find
+// the original files.
+func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
+	cgoCPPFLAGS, _, _, _ := cflags(bp, true)
+	_, cgoexeCFLAGS, _, _ := cflags(bp, false)
+
+	if len(bp.CgoPkgConfig) > 0 {
+		pcCFLAGS, err := pkgConfigFlags(bp)
+		if err != nil {
+			return nil, nil, err
+		}
+		cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
+	}
+
+	// Allows including _cgo_export.h from .[ch] files in the package.
+	cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
+
+	// _cgo_gotypes.go (displayed "C") contains the type definitions.
+	files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
+	displayFiles = append(displayFiles, "C")
+	for _, fn := range bp.CgoFiles {
+		// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
+		f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
+		files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
+		displayFiles = append(displayFiles, fn)
+	}
+
+	var cgoflags []string
+	if bp.Goroot && bp.ImportPath == "runtime/cgo" {
+		cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+	}
+	if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
+		cgoflags = append(cgoflags, "-import_syscall=false")
+	}
+
+	var cgoFiles []string = bp.CgoFiles
+	if useabs {
+		cgoFiles = make([]string, len(bp.CgoFiles))
+		for i := range cgoFiles {
+			cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
+		}
+	}
+
+	args := stringList(
+		"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
+		cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
+	)
+	if false {
+		log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
+	}
+	cmd := exec.Command(args[0], args[1:]...)
+	cmd.Dir = pkgdir
+	cmd.Env = append(os.Environ(), "PWD="+pkgdir)
+	cmd.Stdout = os.Stderr
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
+	}
+
+	return files, displayFiles, nil
+}
+
+// -- unmodified from 'go build' ---------------------------------------
+
+// Return the flags to use when invoking the C or C++ compilers, or cgo.
+func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
+	var defaults string
+	if def {
+		defaults = "-g -O2"
+	}
+
+	cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
+	cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
+	cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
+	ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
+	return
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+func envList(key, def string) []string {
+	v := os.Getenv(key)
+	if v == "" {
+		v = def
+	}
+	return strings.Fields(v)
+}
+
+// stringList's arguments should be a sequence of string or []string values.
+// stringList flattens them into a single []string.
+func stringList(args ...interface{}) []string {
+	var x []string
+	for _, arg := range args {
+		switch arg := arg.(type) {
+		case []string:
+			x = append(x, arg...)
+		case string:
+			x = append(x, arg)
+		default:
+			panic("stringList: invalid argument")
+		}
+	}
+	return x
+}

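envList and stringList are unexported helpers; a self-contained copy of envList, reproduced here only to illustrate the CGO_* flag handling (the flag values are invented):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envList mirrors the helper above: read an environment variable,
// fall back to a default when it is empty, and split on whitespace.
func envList(key, def string) []string {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return strings.Fields(v)
}

func main() {
	os.Setenv("CGO_CFLAGS", "-I/opt/include -DDEBUG")
	fmt.Println(envList("CGO_CFLAGS", "-g -O2")) // [-I/opt/include -DDEBUG]
	os.Unsetenv("CGO_CFLAGS")
	fmt.Println(envList("CGO_CFLAGS", "-g -O2")) // [-g -O2]
}
```
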
+ 39 - 0
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go

@@ -0,0 +1,39 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgo
+
+import (
+	"errors"
+	"fmt"
+	"go/build"
+	exec "golang.org/x/sys/execabs"
+	"strings"
+)
+
+// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
+func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
+	cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
+		if len(out) > 0 {
+			s = fmt.Sprintf("%s: %s", s, out)
+		}
+		return nil, errors.New(s)
+	}
+	if len(out) > 0 {
+		flags = strings.Fields(string(out))
+	}
+	return
+}
+
+// pkgConfigFlags calls pkg-config if needed and returns the cflags
+// needed to build the package.
+func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
+	if len(p.CgoPkgConfig) == 0 {
+		return nil, nil
+	}
+	return pkgConfig("--cflags", p.CgoPkgConfig)
+}

+ 11 - 12
vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go

@@ -34,9 +34,6 @@ import (
 // (suspected) format errors, and whenever a change is made to the format.
 const debugFormat = false // default: false
 
-// If trace is set, debugging output is printed to std out.
-const trace = false // default: false
-
 // Current export format version. Increase with each format change.
 // Note: The latest binary (non-indexed) export format is at version 6.
 //       This exporter is still at level 4, but it doesn't matter since
@@ -92,16 +89,18 @@ func internalErrorf(format string, args ...interface{}) error {
 // BExportData returns binary export data for pkg.
 // If no file set is provided, position info will be missing.
 func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
-	defer func() {
-		if e := recover(); e != nil {
-			if ierr, ok := e.(internalError); ok {
-				err = ierr
-				return
+	if !debug {
+		defer func() {
+			if e := recover(); e != nil {
+				if ierr, ok := e.(internalError); ok {
+					err = ierr
+					return
+				}
+				// Not an internal error; panic again.
+				panic(e)
 			}
-			// Not an internal error; panic again.
-			panic(e)
-		}
-	}()
+		}()
+	}
 
 	p := exporter{
 		fset:          fset,

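The recover block is now skipped in debug builds, so internal panics surface with a full stack trace instead of being converted to errors; the pattern in isolation, distilled into a runnable sketch:

```go
package main

import "fmt"

const debug = false // flip to true while developing to keep stack traces

type internalError string

func (e internalError) Error() string { return string(e) }

func export() (err error) {
	if !debug {
		defer func() {
			if e := recover(); e != nil {
				if ierr, ok := e.(internalError); ok {
					err = ierr // expected failure: surface as an error
					return
				}
				panic(e) // unexpected: re-panic
			}
		}()
	}
	panic(internalError("format error"))
}

func main() {
	fmt.Println(export()) // format error
}
```
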
+ 33 - 19
vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go

@@ -74,9 +74,10 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
 		pathList:   []string{""}, // empty string is mapped to 0
 		fake: fakeFileSet{
 			fset:  fset,
-			files: make(map[string]*token.File),
+			files: make(map[string]*fileInfo),
 		},
 	}
+	defer p.fake.setLines() // set lines for files in fset
 
 	// read version info
 	var versionstr string
@@ -338,37 +339,49 @@ func (p *importer) pos() token.Pos {
 // Synthesize a token.Pos
 type fakeFileSet struct {
 	fset  *token.FileSet
-	files map[string]*token.File
+	files map[string]*fileInfo
 }
 
+type fileInfo struct {
+	file     *token.File
+	lastline int
+}
+
+const maxlines = 64 * 1024
+
 func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
 	// TODO(mdempsky): Make use of column.
 
-	// Since we don't know the set of needed file positions, we
-	// reserve maxlines positions per file.
-	const maxlines = 64 * 1024
+	// Since we don't know the set of needed file positions, we reserve maxlines
+	// positions per file. We delay calling token.File.SetLines until all
+	// positions have been calculated (by way of fakeFileSet.setLines), so that
+	// we can avoid setting unnecessary lines. See also golang/go#46586.
 	f := s.files[file]
 	if f == nil {
-		f = s.fset.AddFile(file, -1, maxlines)
+		f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
 		s.files[file] = f
-		// Allocate the fake linebreak indices on first use.
-		// TODO(adonovan): opt: save ~512KB using a more complex scheme?
-		fakeLinesOnce.Do(func() {
-			fakeLines = make([]int, maxlines)
-			for i := range fakeLines {
-				fakeLines[i] = i
-			}
-		})
-		f.SetLines(fakeLines)
 	}
-
 	if line > maxlines {
 		line = 1
 	}
+	if line > f.lastline {
+		f.lastline = line
+	}
 
-	// Treat the file as if it contained only newlines
-	// and column=1: use the line number as the offset.
-	return f.Pos(line - 1)
+	// Return a fake position assuming that f.file consists only of newlines.
+	return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+	fakeLinesOnce.Do(func() {
+		fakeLines = make([]int, maxlines)
+		for i := range fakeLines {
+			fakeLines[i] = i
+		}
+	})
+	for _, f := range s.files {
+		f.file.SetLines(fakeLines[:f.lastline])
+	}
 }
 }
 
 var (
 			// used internally by gc; never used by this package or in .a files
 			// used internally by gc; never used by this package or in .a files
 			anyType{},
 		}
 	})
 	})
 	return predecl
 }

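The delayed SetLines call relies on encoding positions as if each fake file consisted only of newlines; a standalone sketch of that encoding, with maxlines shrunk for the example:

```go
package main

import (
	"fmt"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	const maxlines = 8
	f := fset.AddFile("fake.go", -1, maxlines)

	// Pretend the file is nothing but newlines: line i+1 starts at offset i.
	lines := make([]int, maxlines)
	for i := range lines {
		lines[i] = i
	}
	f.SetLines(lines)

	// A position for "line 3" is then just base + line - 1.
	pos := token.Pos(f.Base() + 3 - 1)
	fmt.Println(fset.Position(pos)) // fake.go:3:1
}
```
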
+ 11 - 5
vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go

@@ -16,7 +16,7 @@ import (
 	"strings"
 	"strings"
 )
 )
 
 
-func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
 	// See $GOROOT/include/ar.h.
 	hdr := make([]byte, 16+12+6+6+8+10+2)
 	_, err = io.ReadFull(r, hdr)
@@ -28,7 +28,8 @@ func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
 		fmt.Printf("header: %s", hdr)
 		fmt.Printf("header: %s", hdr)
 	}
 	}
 	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
 	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
-	size, err = strconv.Atoi(s)
+	length, err := strconv.Atoi(s)
+	size = int64(length)
 	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
 		err = fmt.Errorf("invalid archive header")
 		return
@@ -42,8 +43,8 @@ func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
 // file by reading from it. The reader must be positioned at the
 // start of the file before calling this function. The hdr result
 // is the string before the export data, either "$$" or "$$B".
-//
-func FindExportData(r *bufio.Reader) (hdr string, err error) {
+// The size result is the length of the export data in bytes, or -1 if not known.
+func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
 	// Read first line to make sure this is an object file.
 	line, err := r.ReadSlice('\n')
 	if err != nil {
@@ -54,7 +55,7 @@ func FindExportData(r *bufio.Reader) (hdr string, err error) {
 	if string(line) == "!<arch>\n" {
 		// Archive file. Scan to __.PKGDEF.
 		var name string
-		if name, _, err = readGopackHeader(r); err != nil {
+		if name, size, err = readGopackHeader(r); err != nil {
 			return
 		}
 
@@ -70,6 +71,7 @@ func FindExportData(r *bufio.Reader) (hdr string, err error) {
 			err = fmt.Errorf("can't find export data (%v)", err)
 			return
 		}
+		size -= int64(len(line))
 	}
 
 	// Now at __.PKGDEF in archive or still at beginning of file.
@@ -86,8 +88,12 @@ func FindExportData(r *bufio.Reader) (hdr string, err error) {
 			err = fmt.Errorf("can't find export data (%v)", err)
 			return
 		}
+		size -= int64(len(line))
 	}
 	hdr = string(line)
+	if size < 0 {
+		size = -1
+	}
 
 	return
 }

+ 9 - 3
vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go

@@ -29,8 +29,14 @@ import (
 	"text/scanner"
 	"text/scanner"
 )
 )
 
 
-// debugging/development support
-const debug = false
+const (
+	// Enable debug during development: it adds some additional checks, and
+	// prevents errors from being recovered.
+	debug = false
+
+	// If trace is set, debugging output is printed to stdout.
+	trace = false
+)
 
 var pkgExts = [...]string{".a", ".o"}
 
@@ -179,7 +185,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
 
 	var hdr string
 	buf := bufio.NewReader(rc)
-	if hdr, err = FindExportData(buf); err != nil {
+	if hdr, _, err = FindExportData(buf); err != nil {
 		return
 	}
 

+ 274 - 48
vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go

@@ -11,6 +11,7 @@ package gcimporter
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"go/ast"
 	"go/constant"
 	"go/token"
@@ -19,11 +20,11 @@ import (
 	"math/big"
 	"reflect"
 	"sort"
-)
+	"strconv"
+	"strings"
 
-// Current indexed export format version. Increase with each format change.
-// 0: Go1.11 encoding
-const iexportVersion = 0
+	"golang.org/x/tools/internal/typeparams"
+)
 
 // Current bundled export format version. Increase with each format change.
 // 0: initial implementation
@@ -35,31 +36,35 @@ const bundleVersion = 0
 // The package path of the top-level package will not be recorded,
 // so that calls to IImportData can override with a provided package path.
 func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
-	return iexportCommon(out, fset, false, []*types.Package{pkg})
+	return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg})
 }
 
 // IExportBundle writes an indexed export bundle for pkgs to out.
 func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
-	return iexportCommon(out, fset, true, pkgs)
-}
-
-func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*types.Package) (err error) {
-	defer func() {
-		if e := recover(); e != nil {
-			if ierr, ok := e.(internalError); ok {
-				err = ierr
-				return
+	return iexportCommon(out, fset, true, iexportVersion, pkgs)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) {
+	if !debug {
+		defer func() {
+			if e := recover(); e != nil {
+				if ierr, ok := e.(internalError); ok {
+					err = ierr
+					return
+				}
+				// Not an internal error; panic again.
+				panic(e)
 			}
-			// Not an internal error; panic again.
-			panic(e)
-		}
-	}()
+		}()
+	}
 
 	p := iexporter{
 		fset:        fset,
+		version:     version,
 		allPkgs:     map[*types.Package]bool{},
 		stringIndex: map[string]uint64{},
 		declIndex:   map[types.Object]uint64{},
+		tparamNames: map[types.Object]string{},
 		typIndex:    map[types.Type]uint64{},
 	}
 	if !bundle {
@@ -119,7 +124,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*type
 	if bundle {
 		hdr.uint64(bundleVersion)
 	}
-	hdr.uint64(iexportVersion)
+	hdr.uint64(uint64(p.version))
 	hdr.uint64(uint64(p.strings.Len()))
 	hdr.uint64(dataLen)
 
@@ -136,8 +141,12 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*type
 // non-compiler tools and includes a complete package description
 // (i.e., name and height).
 func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+	type pkgObj struct {
+		obj  types.Object
+		name string // qualified name; differs from obj.Name for type params
+	}
 	// Build a map from packages to objects from that package.
-	pkgObjs := map[*types.Package][]types.Object{}
+	pkgObjs := map[*types.Package][]pkgObj{}
 
 	// For the main index, make sure to include every package that
 	// we reference, even if we're not exporting (or reexporting)
@@ -150,7 +159,8 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
 	}
 
 	for obj := range index {
-		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
+		name := w.p.exportName(obj)
+		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
 	}
 
 	var pkgs []*types.Package
@@ -158,7 +168,7 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
 		pkgs = append(pkgs, pkg)
 
 		sort.Slice(objs, func(i, j int) bool {
-			return objs[i].Name() < objs[j].Name()
+			return objs[i].name < objs[j].name
 		})
 	}
 
@@ -175,15 +185,25 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
 		objs := pkgObjs[pkg]
 		w.uint64(uint64(len(objs)))
 		for _, obj := range objs {
-			w.string(obj.Name())
-			w.uint64(index[obj])
+			w.string(obj.name)
+			w.uint64(index[obj.obj])
 		}
 	}
 }
 
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+	if name := p.tparamNames[obj]; name != "" {
+		return name
+	}
+	return obj.Name()
+}
+
 type iexporter struct {
-	fset *token.FileSet
-	out  *bytes.Buffer
+	fset    *token.FileSet
+	out     *bytes.Buffer
+	version int
 
 	localpkg *types.Package
 
@@ -197,9 +217,21 @@ type iexporter struct {
 	strings     intWriter
 	stringIndex map[string]uint64
 
-	data0     intWriter
-	declIndex map[types.Object]uint64
-	typIndex  map[types.Type]uint64
+	data0       intWriter
+	declIndex   map[types.Object]uint64
+	tparamNames map[types.Object]string // typeparam->exported name
+	typIndex    map[types.Type]uint64
+
+	indent int // for tracing support
+}
+
+func (p *iexporter) trace(format string, args ...interface{}) {
+	if !trace {
+		// Call sites should also be guarded, but having this check here allows
+		// easily enabling/disabling debug trace statements.
+		return
+	}
+	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
 }
 }
 
 
 // stringOff returns the offset of s within the string section.
 // stringOff returns the offset of s within the string section.
@@ -225,7 +257,7 @@ func (p *iexporter) pushDecl(obj types.Object) {
 		return
 	}
 
-	p.declIndex[obj] = ^uint64(0) // mark n present in work queue
+	p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
 	p.declTodo.pushTail(obj)
 }
 
@@ -233,10 +265,11 @@ func (p *iexporter) pushDecl(obj types.Object) {
 type exportWriter struct {
 	p *iexporter
 
-	data     intWriter
-	currPkg  *types.Package
-	prevFile string
-	prevLine int64
+	data       intWriter
+	currPkg    *types.Package
+	prevFile   string
+	prevLine   int64
+	prevColumn int64
 }
 
 func (w *exportWriter) exportPath(pkg *types.Package) string {
@@ -247,6 +280,14 @@ func (w *exportWriter) exportPath(pkg *types.Package) string {
 }
 
 func (p *iexporter) doDecl(obj types.Object) {
+	if trace {
+		p.trace("exporting decl %v (%T)", obj, obj)
+		p.indent++
+		defer func() {
+			p.indent--
+			p.trace("=> %s", obj)
+		}()
+	}
 	w := p.newWriter()
 	w.setPkg(obj.Pkg(), false)
 
@@ -261,8 +302,24 @@ func (p *iexporter) doDecl(obj types.Object) {
 		if sig.Recv() != nil {
 			panic(internalErrorf("unexpected method: %v", sig))
 		}
-		w.tag('F')
+
+		// Function.
+		if typeparams.ForSignature(sig).Len() == 0 {
+			w.tag('F')
+		} else {
+			w.tag('G')
+		}
 		w.pos(obj.Pos())
+		// The tparam list of the function type is the declaration of the type
+		// params. So, write out the type params right now. Then those type params
+		// will be referenced via their type offset (via typOff) in all other
+		// places in the signature and function where they are used.
+		//
+		// While importing the type parameters, tparamList computes and records
+		// their export name, so that it can be later used when writing the index.
+		if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 {
+			w.tparamList(obj.Name(), tparams, obj.Pkg())
+		}
 		w.signature(sig)
 
 	case *types.Const:
@@ -271,30 +328,56 @@ func (p *iexporter) doDecl(obj types.Object) {
 		w.value(obj.Type(), obj.Val())
 
 	case *types.TypeName:
+		t := obj.Type()
+
+		if tparam, ok := t.(*typeparams.TypeParam); ok {
+			w.tag('P')
+			w.pos(obj.Pos())
+			constraint := tparam.Constraint()
+			if p.version >= iexportVersionGo1_18 {
+				implicit := false
+				if iface, _ := constraint.(*types.Interface); iface != nil {
+					implicit = typeparams.IsImplicit(iface)
+				}
+				w.bool(implicit)
+			}
+			w.typ(constraint, obj.Pkg())
+			break
+		}
+
 		if obj.IsAlias() {
 			w.tag('A')
 			w.pos(obj.Pos())
-			w.typ(obj.Type(), obj.Pkg())
+			w.typ(t, obj.Pkg())
 			break
 		}
 
 		// Defined type.
-		w.tag('T')
+		named, ok := t.(*types.Named)
+		if !ok {
+			panic(internalErrorf("%s is not a defined type", t))
+		}
+
+		if typeparams.ForNamed(named).Len() == 0 {
+			w.tag('T')
+		} else {
+			w.tag('U')
+		}
 		w.pos(obj.Pos())
 
+		if typeparams.ForNamed(named).Len() > 0 {
+			// While importing the type parameters, tparamList computes and records
+			// their export name, so that it can be later used when writing the index.
+			w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg())
+		}
+
 		underlying := obj.Type().Underlying()
 		w.typ(underlying, obj.Pkg())
 
-		t := obj.Type()
 		if types.IsInterface(t) {
 			break
 		}
 
-		named, ok := t.(*types.Named)
-		if !ok {
-			panic(internalErrorf("%s is not a defined type", t))
-		}
-
 		n := named.NumMethods()
 		w.uint64(uint64(n))
 		for i := 0; i < n; i++ {
@@ -302,6 +385,17 @@ func (p *iexporter) doDecl(obj types.Object) {
 			w.pos(m.Pos())
 			w.string(m.Name())
 			sig, _ := m.Type().(*types.Signature)
+
+			// Receiver type parameters are type arguments of the receiver type, so
+			// their name must be qualified before exporting recv.
+			if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 {
+				prefix := obj.Name() + "." + m.Name()
+				for i := 0; i < rparams.Len(); i++ {
+					rparam := rparams.At(i)
+					name := tparamExportName(prefix, rparam)
+					w.p.tparamNames[rparam.Obj()] = name
+				}
+			}
 			w.param(sig.Recv())
 			w.signature(sig)
 		}
@@ -318,6 +412,48 @@ func (w *exportWriter) tag(tag byte) {
 }
 
 func (w *exportWriter) pos(pos token.Pos) {
+	if w.p.version >= iexportVersionPosCol {
+		w.posV1(pos)
+	} else {
+		w.posV0(pos)
+	}
+}
+
+func (w *exportWriter) posV1(pos token.Pos) {
+	if w.p.fset == nil {
+		w.int64(0)
+		return
+	}
+
+	p := w.p.fset.Position(pos)
+	file := p.Filename
+	line := int64(p.Line)
+	column := int64(p.Column)
+
+	deltaColumn := (column - w.prevColumn) << 1
+	deltaLine := (line - w.prevLine) << 1
+
+	if file != w.prevFile {
+		deltaLine |= 1
+	}
+	if deltaLine != 0 {
+		deltaColumn |= 1
+	}
+
+	w.int64(deltaColumn)
+	if deltaColumn&1 != 0 {
+		w.int64(deltaLine)
+		if deltaLine&1 != 0 {
+			w.string(file)
+		}
+	}
+
+	w.prevFile = file
+	w.prevLine = line
+	w.prevColumn = column
+}
+
+func (w *exportWriter) posV0(pos token.Pos) {
 	if w.p.fset == nil {
 		w.int64(0)
 		return
@@ -359,10 +495,11 @@ func (w *exportWriter) pkg(pkg *types.Package) {
 }
 
 func (w *exportWriter) qualifiedIdent(obj types.Object) {
+	name := w.p.exportName(obj)
+
 	// Ensure any referenced declarations are written out too.
 	w.p.pushDecl(obj)
-
-	w.string(obj.Name())
+	w.string(name)
 	w.pkg(obj.Pkg())
 }
 
@@ -396,11 +533,32 @@ func (w *exportWriter) startType(k itag) {
 }
 
 func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+	if trace {
+		w.p.trace("exporting type %s (%T)", t, t)
+		w.p.indent++
+		defer func() {
+			w.p.indent--
+			w.p.trace("=> %s", t)
+		}()
+	}
 	switch t := t.(type) {
 	case *types.Named:
+		if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 {
+			w.startType(instanceType)
+			// TODO(rfindley): investigate if this position is correct, and if it
+			// matters.
+			w.pos(t.Obj().Pos())
+			w.typeList(targs, pkg)
+			w.typ(typeparams.NamedTypeOrigin(t), pkg)
+			return
+		}
 		w.startType(definedType)
 		w.qualifiedIdent(t.Obj())
 
+	case *typeparams.TypeParam:
+		w.startType(typeParamType)
+		w.qualifiedIdent(t.Obj())
+
 	case *types.Pointer:
 		w.startType(pointerType)
 		w.typ(t.Elem(), pkg)
@@ -461,9 +619,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
 		n := t.NumEmbeddeds()
 		w.uint64(uint64(n))
 		for i := 0; i < n; i++ {
-			f := t.Embedded(i)
-			w.pos(f.Obj().Pos())
-			w.typ(f.Obj().Type(), f.Obj().Pkg())
+			ft := t.EmbeddedType(i)
+			tPkg := pkg
+			if named, _ := ft.(*types.Named); named != nil {
+				w.pos(named.Obj().Pos())
+			} else {
+				w.pos(token.NoPos)
+			}
+			w.typ(ft, tPkg)
 		}
 
 		n = t.NumExplicitMethods()
@@ -476,6 +639,16 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
 			w.signature(sig)
 		}
 
+	case *typeparams.Union:
+		w.startType(unionType)
+		nt := t.Len()
+		w.uint64(uint64(nt))
+		for i := 0; i < nt; i++ {
+			term := t.Term(i)
+			w.bool(term.Tilde())
+			w.typ(term.Type(), pkg)
+		}
+
 	default:
 		panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
 	}
@@ -497,6 +670,56 @@ func (w *exportWriter) signature(sig *types.Signature) {
 	}
 }
 
+func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) {
+	w.uint64(uint64(ts.Len()))
+	for i := 0; i < ts.Len(); i++ {
+		w.typ(ts.At(i), pkg)
+	}
+}
+
+func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) {
+	ll := uint64(list.Len())
+	w.uint64(ll)
+	for i := 0; i < list.Len(); i++ {
+		tparam := list.At(i)
+		// Set the type parameter exportName before exporting its type.
+		exportName := tparamExportName(prefix, tparam)
+		w.p.tparamNames[tparam.Obj()] = exportName
+		w.typ(list.At(i), pkg)
+	}
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
+func tparamExportName(prefix string, tparam *typeparams.TypeParam) string {
+	assert(prefix != "")
+	name := tparam.Obj().Name()
+	if name == "_" {
+		name = blankMarker + strconv.Itoa(tparam.Index())
+	}
+	return prefix + "." + name
+}
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+	// Remove the "path" from the type param name that makes it unique.
+	ix := strings.LastIndex(exportName, ".")
+	if ix < 0 {
+		errorf("malformed type parameter export name %s: missing prefix", exportName)
+	}
+	name := exportName[ix+1:]
+	if strings.HasPrefix(name, blankMarker) {
+		return "_"
+	}
+	return name
+}
+
 func (w *exportWriter) paramList(tup *types.Tuple) {
 	n := tup.Len()
 	w.uint64(uint64(n))
@@ -513,6 +736,9 @@ func (w *exportWriter) param(obj types.Object) {
 
 func (w *exportWriter) value(typ types.Type, v constant.Value) {
 	w.typ(typ, nil)
+	if w.p.version >= iexportVersionGo1_18 {
+		w.int64(int64(v.Kind()))
+	}
 
 	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
 	case types.IsBoolean:
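
Note (not part of the patch): the export-name scheme introduced above round-trips type parameter names through a qualified form. A minimal standalone sketch of the same encoding, using hypothetical helper names qualify/unqualify:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	const blankMarker = "$"

	// qualify mirrors tparamExportName above: prefix-qualify the name and
	// disambiguate blank ("_") names by their index in the list.
	func qualify(prefix, name string, index int) string {
		if name == "_" {
			name = blankMarker + strconv.Itoa(index)
		}
		return prefix + "." + name
	}

	// unqualify mirrors tparamName above: strip the prefix and restore "_".
	func unqualify(exportName string) string {
		name := exportName[strings.LastIndex(exportName, ".")+1:]
		if strings.HasPrefix(name, blankMarker) {
			return "_"
		}
		return name
	}

	func main() {
		fmt.Println(qualify("Pair", "K", 0)) // Pair.K
		fmt.Println(qualify("Pair", "_", 1)) // Pair.$1
		fmt.Println(unqualify("Pair.$1"))    // _
	}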

+ 250 - 30
vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go

@@ -18,6 +18,9 @@ import (
 	"go/types"
 	"io"
 	"sort"
+	"strings"
+
+	"golang.org/x/tools/internal/typeparams"
 )
 
 type intReader struct {
@@ -41,6 +44,19 @@ func (r *intReader) uint64() uint64 {
 	return i
 }
 
+// Keep this in sync with constants in iexport.go.
+const (
+	iexportVersionGo1_11   = 0
+	iexportVersionPosCol   = 1
+	iexportVersionGo1_18   = 2
+	iexportVersionGenerics = 2
+)
+
+type ident struct {
+	pkg  string
+	name string
+}
+
 const predeclReserved = 32
 
 type itag uint64
@@ -56,6 +72,9 @@ const (
 	signatureType
 	structType
 	interfaceType
+	typeParamType
+	instanceType
+	unionType
 )
 
 // IImportData imports a package from the serialized package data
@@ -78,15 +97,17 @@ func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data
 func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
 	const currentVersion = 1
 	version := int64(-1)
-	defer func() {
-		if e := recover(); e != nil {
-			if version > currentVersion {
-				err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
-			} else {
-				err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+	if !debug {
+		defer func() {
+			if e := recover(); e != nil {
+				if version > currentVersion {
+					err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+				} else {
+					err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+				}
 			}
-		}
-	}()
+		}()
+	}
 
 	r := &intReader{bytes.NewReader(data), path}
 
@@ -101,9 +122,13 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 
 	version = int64(r.uint64())
 	switch version {
-	case currentVersion, 0:
+	case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
 	default:
-		errorf("unknown iexport format version %d", version)
+		if version > iexportVersionGo1_18 {
+			errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+		} else {
+			errorf("unknown iexport format version %d", version)
+		}
 	}
 
 	sLen := int64(r.uint64())
@@ -115,8 +140,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 	r.Seek(sLen+dLen, io.SeekCurrent)
 
 	p := iimporter{
-		ipath:   path,
 		version: int(version),
+		ipath:   path,
 
 		stringData:  stringData,
 		stringCache: make(map[uint64]string),
@@ -125,12 +150,16 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 		declData: declData,
 		pkgIndex: make(map[*types.Package]map[string]uint64),
 		typCache: make(map[uint64]types.Type),
+		// Separate map for typeparams, keyed by their package and unique
+		// name.
+		tparamIndex: make(map[ident]types.Type),
 
 		fake: fakeFileSet{
 			fset:  fset,
-			files: make(map[string]*token.File),
+			files: make(map[string]*fileInfo),
 		},
 	}
+	defer p.fake.setLines() // set lines for files in fset
 
 	for i, pt := range predeclared() {
 		p.typCache[uint64(i)] = pt
@@ -208,6 +237,15 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 		pkg.MarkComplete()
 	}
 
+	// SetConstraint can't be called if the constraint type is not yet complete.
+	// When type params are created in the 'P' case of (*importReader).obj(),
+	// the associated constraint type may not be complete due to recursion.
+	// Therefore, we defer calling SetConstraint there, and call it here instead
+	// after all types are complete.
+	for _, d := range p.later {
+		typeparams.SetTypeParamConstraint(d.t, d.constraint)
+	}
+
 	for _, typ := range p.interfaceList {
 		typ.Complete()
 	}
@@ -215,23 +253,51 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 	return pkgs, nil
 }
 
+type setConstraintArgs struct {
+	t          *typeparams.TypeParam
+	constraint types.Type
+}
+
 type iimporter struct {
-	ipath   string
 	version int
+	ipath   string
 
 	stringData  []byte
 	stringCache map[uint64]string
 	pkgCache    map[uint64]*types.Package
 
-	declData []byte
-	pkgIndex map[*types.Package]map[string]uint64
-	typCache map[uint64]types.Type
+	declData    []byte
+	pkgIndex    map[*types.Package]map[string]uint64
+	typCache    map[uint64]types.Type
+	tparamIndex map[ident]types.Type
 
 	fake          fakeFileSet
 	interfaceList []*types.Interface
+
+	// Arguments for calls to SetConstraint that are deferred due to recursive types
+	later []setConstraintArgs
+
+	indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...interface{}) {
+	if !trace {
+		// Call sites should also be guarded, but having this check here allows
+		// easily enabling/disabling debug trace statements.
+		return
+	}
+	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
 }
 
 func (p *iimporter) doDecl(pkg *types.Package, name string) {
+	if debug {
+		p.trace("import decl %s", name)
+		p.indent++
+		defer func() {
+			p.indent--
+			p.trace("=> %s", name)
+		}()
+	}
 	// See if we've already imported this declaration.
 	if obj := pkg.Scope().Lookup(name); obj != nil {
 		return
@@ -273,7 +339,7 @@ func (p *iimporter) pkgAt(off uint64) *types.Package {
 }
 
 func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
-	if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
+	if t, ok := p.typCache[off]; ok && canReuse(base, t) {
 		return t
 	}
 
@@ -285,12 +351,30 @@ func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
 	r.declReader.Reset(p.declData[off-predeclReserved:])
 	t := r.doType(base)
 
-	if base == nil || !isInterface(t) {
+	if canReuse(base, t) {
 		p.typCache[off] = t
 	}
 	return t
 }
 
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+	if def == nil {
+		return true
+	}
+	iface, _ := rhs.(*types.Interface)
+	if iface == nil {
+		return true
+	}
+	// Don't use iface.Empty() here as iface may not be complete.
+	return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
 type importReader struct {
 	p          *iimporter
 	declReader bytes.Reader
@@ -315,17 +399,26 @@ func (r *importReader) obj(name string) {
 
 		r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
 
-	case 'F':
-		sig := r.signature(nil)
-
+	case 'F', 'G':
+		var tparams []*typeparams.TypeParam
+		if tag == 'G' {
+			tparams = r.tparamList()
+		}
+		sig := r.signature(nil, nil, tparams)
 		r.declare(types.NewFunc(pos, r.currPkg, name, sig))
 
-	case 'T':
+	case 'T', 'U':
 		// Types can be recursive. We need to setup a stub
 		// declaration before recursing.
 		obj := types.NewTypeName(pos, r.currPkg, name, nil)
 		named := types.NewNamed(obj, nil, nil)
+		// Declare obj before calling r.tparamList, so the new type name is recognized
+		// if used in the constraint of one of its own typeparams (see #48280).
 		r.declare(obj)
+		if tag == 'U' {
+			tparams := r.tparamList()
+			typeparams.SetForNamed(named, tparams)
+		}
 
 		underlying := r.p.typAt(r.uint64(), named).Underlying()
 		named.SetUnderlying(underlying)
@@ -335,12 +428,59 @@ func (r *importReader) obj(name string) {
 				mpos := r.pos()
 				mname := r.ident()
 				recv := r.param()
-				msig := r.signature(recv)
+
+				// If the receiver has any targs, set those as the
+				// rparams of the method (since those are the
+				// typeparams being used in the method sig/body).
+				base := baseType(recv.Type())
+				assert(base != nil)
+				targs := typeparams.NamedTypeArgs(base)
+				var rparams []*typeparams.TypeParam
+				if targs.Len() > 0 {
+					rparams = make([]*typeparams.TypeParam, targs.Len())
+					for i := range rparams {
+						rparams[i] = targs.At(i).(*typeparams.TypeParam)
+					}
+				}
+				msig := r.signature(recv, rparams, nil)
 
 				named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
 			}
 		}
 
+	case 'P':
+		// We need to "declare" a typeparam in order to have a name that
+		// can be referenced recursively (if needed) in the type param's
+		// bound.
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected type param type")
+		}
+		name0 := tparamName(name)
+		tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+		t := typeparams.NewTypeParam(tn, nil)
+
+		// To handle recursive references to the typeparam within its
+		// bound, save the partial type in tparamIndex before reading the bounds.
+		id := ident{r.currPkg.Name(), name}
+		r.p.tparamIndex[id] = t
+		var implicit bool
+		if r.p.version >= iexportVersionGo1_18 {
+			implicit = r.bool()
+		}
+		constraint := r.typ()
+		if implicit {
+			iface, _ := constraint.(*types.Interface)
+			if iface == nil {
+				errorf("non-interface constraint marked implicit")
+			}
+			typeparams.MarkImplicit(iface)
+		}
+		// The constraint type may not be complete, if we
+		// are in the middle of a type recursion involving type
+		// constraints. So, we defer SetConstraint until we have
+		// completely set up all types in ImportData.
+		r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
 	case 'V':
 		typ := r.typ()
 
@@ -357,6 +497,10 @@ func (r *importReader) declare(obj types.Object) {
 
 func (r *importReader) value() (typ types.Type, val constant.Value) {
 	typ = r.typ()
+	if r.p.version >= iexportVersionGo1_18 {
+		// TODO: add support for using the kind.
+		_ = constant.Kind(r.int64())
+	}
 
 	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
 	case types.IsBoolean:
@@ -499,7 +643,7 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
 }
 
 func (r *importReader) pos() token.Pos {
-	if r.p.version >= 1 {
+	if r.p.version >= iexportVersionPosCol {
 		r.posv1()
 	} else {
 		r.posv0()
@@ -547,8 +691,17 @@ func isInterface(t types.Type) bool {
 func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
 func (r *importReader) string() string      { return r.p.stringAt(r.uint64()) }
 
-func (r *importReader) doType(base *types.Named) types.Type {
-	switch k := r.kind(); k {
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+	k := r.kind()
+	if debug {
+		r.p.trace("importing type %d (base: %s)", k, base)
+		r.p.indent++
+		defer func() {
+			r.p.indent--
+			r.p.trace("=> %s", res)
+		}()
+	}
+	switch k {
 	default:
 		errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
 		return nil
@@ -571,7 +724,7 @@ func (r *importReader) doType(base *types.Named) types.Type {
 		return types.NewMap(r.typ(), r.typ())
 	case signatureType:
 		r.currPkg = r.pkg()
-		return r.signature(nil)
+		return r.signature(nil, nil, nil)
 
 	case structType:
 		r.currPkg = r.pkg()
@@ -611,13 +764,56 @@ func (r *importReader) doType(base *types.Named) types.Type {
 				recv = types.NewVar(token.NoPos, r.currPkg, "", base)
 			}
 
-			msig := r.signature(recv)
+			msig := r.signature(recv, nil, nil)
 			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
 		}
 
 		typ := newInterface(methods, embeddeds)
 		r.p.interfaceList = append(r.p.interfaceList, typ)
 		return typ
+
+	case typeParamType:
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected type param type")
+		}
+		pkg, name := r.qualifiedIdent()
+		id := ident{pkg.Name(), name}
+		if t, ok := r.p.tparamIndex[id]; ok {
+			// We're already in the process of importing this typeparam.
+			return t
+		}
+		// Otherwise, import the definition of the typeparam now.
+		r.p.doDecl(pkg, name)
+		return r.p.tparamIndex[id]
+
+	case instanceType:
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected instantiation type")
+		}
+		// pos does not matter for instances: they are positioned on the original
+		// type.
+		_ = r.pos()
+		len := r.uint64()
+		targs := make([]types.Type, len)
+		for i := range targs {
+			targs[i] = r.typ()
+		}
+		baseType := r.typ()
+		// The imported instantiated type doesn't include any methods, so
+		// we must always use the methods of the base (orig) type.
+		// TODO provide a non-nil *Environment
+		t, _ := typeparams.Instantiate(nil, baseType, targs, false)
+		return t
+
+	case unionType:
+		if r.p.version < iexportVersionGenerics {
+			errorf("unexpected instantiation type")
+		}
+		terms := make([]*typeparams.Term, r.uint64())
+		for i := range terms {
+			terms[i] = typeparams.NewTerm(r.bool(), r.typ())
+		}
+		return typeparams.NewUnion(terms)
 	}
 }
 
@@ -625,11 +821,25 @@ func (r *importReader) kind() itag {
 	return itag(r.uint64())
 }
 
-func (r *importReader) signature(recv *types.Var) *types.Signature {
+func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature {
 	params := r.paramList()
 	results := r.paramList()
 	variadic := params.Len() > 0 && r.bool()
-	return types.NewSignature(recv, params, results, variadic)
+	return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*typeparams.TypeParam {
+	n := r.uint64()
+	if n == 0 {
+		return nil
+	}
+	xs := make([]*typeparams.TypeParam, n)
+	for i := range xs {
+		// Note: the standard library importer is tolerant of nil types here,
+		// though would panic in SetTypeParams.
+		xs[i] = r.typ().(*typeparams.TypeParam)
+	}
+	return xs
 }
 
 func (r *importReader) paramList() *types.Tuple {
@@ -674,3 +884,13 @@ func (r *importReader) byte() byte {
 	}
 	return x
 }
+
+func baseType(typ types.Type) *types.Named {
+	// pointer receivers are never types.Named types
+	if p, _ := typ.(*types.Pointer); p != nil {
+		typ = p.Elem()
+	}
+	// receiver base types are always (possibly generic) types.Named types
+	n, _ := typ.(*types.Named)
+	return n
+}
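
Note (not part of the patch): the importer above handles recursive constraints in two phases: type parameters are created immediately, but their constraints are attached only after every declaration is complete. A minimal sketch of that pattern, with placeholder types standing in for *typeparams.TypeParam and types.Type:

	package main

	import "fmt"

	type tparam struct{ name string }

	type deferredConstraint struct {
		t          *tparam
		constraint string
	}

	type importer struct {
		later []deferredConstraint // mirrors iimporter.later above
	}

	// declare creates the type parameter but defers attaching its
	// constraint, since the constraint may mention the parameter itself.
	func (p *importer) declare(name, constraint string) *tparam {
		t := &tparam{name: name}
		p.later = append(p.later, deferredConstraint{t, constraint})
		return t
	}

	func main() {
		p := &importer{}
		p.declare("T", "interface{ M() T }") // self-referential constraint
		// Phase two: all declarations exist; constraints can be set safely.
		for _, d := range p.later {
			fmt.Printf("SetConstraint(%s, %s)\n", d.t.name, d.constraint)
		}
	}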

+ 16 - 0
vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go

@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGo1_11
+
+func additionalPredeclared() []types.Type {
+	return nil
+}

+ 23 - 0
vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go

@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGenerics
+
+// additionalPredeclared returns additional predeclared types in go.1.18.
+func additionalPredeclared() []types.Type {
+	return []types.Type{
+		// comparable
+		types.Universe.Lookup("comparable").Type(),
+
+		// any
+		types.Universe.Lookup("any").Type(),
+	}
+}
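
Note (not part of the patch): the two support files above select the export format version with mutually exclusive build constraints, so exactly one definition compiles under any toolchain. The same pattern in miniature, with hypothetical file, package, and constant names:

	// feature_go117.go
	//go:build !go1.18
	// +build !go1.18

	package feature

	const genericsSupported = false

	// feature_go118.go
	//go:build go1.18
	// +build go1.18

	package feature

	const genericsSupported = true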

+ 204 - 0
vendor/golang.org/x/tools/go/loader/doc.go

@@ -0,0 +1,204 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loader loads a complete Go program from source code, parsing
+// and type-checking the initial packages plus their transitive closure
+// of dependencies.  The ASTs and the derived facts are retained for
+// later use.
+//
+// Deprecated: This is an older API and does not have support
+// for modules. Use golang.org/x/tools/go/packages instead.
+//
+// The package defines two primary types: Config, which specifies a
+// set of initial packages to load and various other options; and
+// Program, which is the result of successfully loading the packages
+// specified by a configuration.
+//
+// The configuration can be set directly, but *Config provides various
+// convenience methods to simplify the common cases, each of which can
+// be called any number of times.  Finally, these are followed by a
+// call to Load() to actually load and type-check the program.
+//
+//      var conf loader.Config
+//
+//      // Use the command-line arguments to specify
+//      // a set of initial packages to load from source.
+//      // See FromArgsUsage for help.
+//      rest, err := conf.FromArgs(os.Args[1:], wantTests)
+//
+//      // Parse the specified files and create an ad hoc package with path "foo".
+//      // All files must have the same 'package' declaration.
+//      conf.CreateFromFilenames("foo", "foo.go", "bar.go")
+//
+//      // Create an ad hoc package with path "foo" from
+//      // the specified already-parsed files.
+//      // All ASTs must have the same 'package' declaration.
+//      conf.CreateFromFiles("foo", parsedFiles)
+//
+//      // Add "runtime" to the set of packages to be loaded.
+//      conf.Import("runtime")
+//
+//      // Adds "fmt" and "fmt_test" to the set of packages
+//      // to be loaded.  "fmt" will include *_test.go files.
+//      conf.ImportWithTests("fmt")
+//
+//      // Finally, load all the packages specified by the configuration.
+//      prog, err := conf.Load()
+//
+// See examples_test.go for examples of API usage.
+//
+//
+// CONCEPTS AND TERMINOLOGY
+//
+// The WORKSPACE is the set of packages accessible to the loader.  The
+// workspace is defined by Config.Build, a *build.Context.  The
+// default context treats subdirectories of $GOROOT and $GOPATH as
+// packages, but this behavior may be overridden.
+//
+// An AD HOC package is one specified as a set of source files on the
+// command line.  In the simplest case, it may consist of a single file
+// such as $GOROOT/src/net/http/triv.go.
+//
+// EXTERNAL TEST packages are those comprised of a set of *_test.go
+// files all with the same 'package foo_test' declaration, all in the
+// same directory.  (go/build.Package calls these files XTestFiles.)
+//
+// An IMPORTABLE package is one that can be referred to by some import
+// spec.  Every importable package is uniquely identified by its
+// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
+// or "cmd/vendor/golang.org/x/arch/x86/x86asm".  A package path
+// typically denotes a subdirectory of the workspace.
+//
+// An import declaration uses an IMPORT PATH to refer to a package.
+// Most import declarations use the package path as the import path.
+//
+// Due to VENDORING (https://golang.org/s/go15vendor), the
+// interpretation of an import path may depend on the directory in which
+// it appears.  To resolve an import path to a package path, go/build
+// must search the enclosing directories for a subdirectory named
+// "vendor".
+//
+// ad hoc packages and external test packages are NON-IMPORTABLE.  The
+// path of an ad hoc package is inferred from the package
+// declarations of its files and is therefore not a unique package key.
+// For example, Config.CreatePkgs may specify two initial ad hoc
+// packages, both with path "main".
+//
+// An AUGMENTED package is an importable package P plus all the
+// *_test.go files with same 'package foo' declaration as P.
+// (go/build.Package calls these files TestFiles.)
+//
+// The INITIAL packages are those specified in the configuration.  A
+// DEPENDENCY is a package loaded to satisfy an import in an initial
+// package or another dependency.
+//
+package loader
+
+// IMPLEMENTATION NOTES
+//
+// 'go test', in-package test files, and import cycles
+// ---------------------------------------------------
+//
+// An external test package may depend upon members of the augmented
+// package that are not in the unaugmented package, such as functions
+// that expose internals.  (See bufio/export_test.go for an example.)
+// So, the loader must ensure that for each external test package
+// it loads, it also augments the corresponding non-test package.
+//
+// The import graph over n unaugmented packages must be acyclic; the
+// import graph over n-1 unaugmented packages plus one augmented
+// package must also be acyclic.  ('go test' relies on this.)  But the
+// import graph over n augmented packages may contain cycles.
+//
+// First, all the (unaugmented) non-test packages and their
+// dependencies are imported in the usual way; the loader reports an
+// error if it detects an import cycle.
+//
+// Then, each package P for which testing is desired is augmented by
+// the list P' of its in-package test files, by calling
+// (*types.Checker).Files.  This arrangement ensures that P' may
+// reference definitions within P, but P may not reference definitions
+// within P'.  Furthermore, P' may import any other package, including
+// ones that depend upon P, without an import cycle error.
+//
+// Consider two packages A and B, both of which have lists of
+// in-package test files we'll call A' and B', and which have the
+// following import graph edges:
+//    B  imports A
+//    B' imports A
+//    A' imports B
+// This last edge would be expected to create an error were it not
+// for the special type-checking discipline above.
+// Cycles of size greater than two are possible.  For example:
+//   compress/bzip2/bzip2_test.go (package bzip2)  imports "io/ioutil"
+//   io/ioutil/tempfile_test.go   (package ioutil) imports "regexp"
+//   regexp/exec_test.go          (package regexp) imports "compress/bzip2"
+//
+//
+// Concurrency
+// -----------
+//
+// Let us define the import dependency graph as follows.  Each node is a
+// list of files passed to (Checker).Files at once.  Many of these lists
+// are the production code of an importable Go package, so those nodes
+// are labelled by the package's path.  The remaining nodes are
+// ad hoc packages and lists of in-package *_test.go files that augment
+// an importable package; those nodes have no label.
+//
+// The edges of the graph represent import statements appearing within a
+// file.  An edge connects a node (a list of files) to the node it
+// imports, which is importable and thus always labelled.
+//
+// Loading is controlled by this dependency graph.
+//
+// To reduce I/O latency, we start loading a package's dependencies
+// asynchronously as soon as we've parsed its files and enumerated its
+// imports (scanImports).  This performs a preorder traversal of the
+// import dependency graph.
+//
+// To exploit hardware parallelism, we type-check unrelated packages in
+// parallel, where "unrelated" means not ordered by the partial order of
+// the import dependency graph.
+//
+// We use a concurrency-safe non-blocking cache (importer.imported) to
+// record the results of type-checking, whether success or failure.  An
+// entry is created in this cache by startLoad the first time the
+// package is imported.  The first goroutine to request an entry becomes
+// responsible for completing the task and broadcasting completion to
+// subsequent requestors, which block until then.
+//
+// Type checking occurs in (parallel) postorder: we cannot type-check a
+// set of files until we have loaded and type-checked all of their
+// immediate dependencies (and thus all of their transitive
+// dependencies). If the input were guaranteed free of import cycles,
+// this would be trivial: we could simply wait for completion of the
+// dependencies and then invoke the typechecker.
+//
+// But as we saw in the 'go test' section above, some cycles in the
+// import graph over packages are actually legal, so long as the
+// cycle-forming edge originates in the in-package test files that
+// augment the package.  This explains why the nodes of the import
+// dependency graph are not packages, but lists of files: the unlabelled
+// nodes avoid the cycles.  Consider packages A and B where B imports A
+// and A's in-package tests AT import B.  The naively constructed import
+// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
+// the graph over lists of files is AT --> B --> A, where AT is an
+// unlabelled node.
+//
+// Awaiting completion of the dependencies in a cyclic graph would
+// deadlock, so we must materialize the import dependency graph (as
+// importer.graph) and check whether each import edge forms a cycle.  If
+// x imports y, and the graph already contains a path from y to x, then
+// there is an import cycle, in which case the processing of x must not
+// wait for the completion of processing of y.
+//
+// When the type-checker makes a callback (doImport) to the loader for a
+// given import edge, there are two possible cases.  In the normal case,
+// the dependency has already been completely type-checked; doImport
+// does a cache lookup and returns it.  In the cyclic case, the entry in
+// the cache is still necessarily incomplete, indicating a cycle.  We
+// perform the cycle check again to obtain the error message, and return
+// the error.
+//
+// The result of using concurrency is about a 2.5x speedup for stdlib_test.
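
Note (not part of the patch): a minimal, self-contained use of the (deprecated) loader API documented above; the choice of "fmt" as the initial package is arbitrary:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/loader"
	)

	func main() {
		var conf loader.Config
		conf.Import("fmt") // add one initial package to ImportPkgs
		prog, err := conf.Load()
		if err != nil {
			log.Fatal(err) // without AllowErrors, any package error fails Load
		}
		info := prog.Package("fmt")
		fmt.Printf("%s: %d files, %d initial packages\n",
			info.Pkg.Path(), len(info.Files), len(prog.InitialPackages()))
	}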

+ 1080 - 0
vendor/golang.org/x/tools/go/loader/loader.go

@@ -0,0 +1,1080 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/go/internal/cgo"
+	"golang.org/x/tools/internal/typeparams"
+)
+
+var ignoreVendor build.ImportMode
+
+const trace = false // show timing info for type-checking
+
+// Config specifies the configuration for loading a whole program from
+// Go source code.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+	// Fset is the file set for the parser to use when loading the
+	// program.  If nil, it may be lazily initialized by any
+	// method of Config.
+	Fset *token.FileSet
+
+	// ParserMode specifies the mode to be used by the parser when
+	// loading source packages.
+	ParserMode parser.Mode
+
+	// TypeChecker contains options relating to the type checker.
+	//
+	// The supplied IgnoreFuncBodies is not used; the effective
+	// value comes from the TypeCheckFuncBodies func below.
+	// The supplied Import function is not used either.
+	TypeChecker types.Config
+
+	// TypeCheckFuncBodies is a predicate over package paths.
+	// A package for which the predicate is false will
+	// have its package-level declarations type checked, but not
+	// its function bodies; this can be used to quickly load
+	// dependencies from source.  If nil, all func bodies are type
+	// checked.
+	TypeCheckFuncBodies func(path string) bool
+
+	// If Build is non-nil, it is used to locate source packages.
+	// Otherwise &build.Default is used.
+	//
+	// By default, cgo is invoked to preprocess Go files that
+	// import the fake package "C".  This behaviour can be
+	// disabled by setting CGO_ENABLED=0 in the environment prior
+	// to startup, or by setting Build.CgoEnabled=false.
+	Build *build.Context
+
+	// The current directory, used for resolving relative package
+	// references such as "./go/loader".  If empty, os.Getwd will be
+	// used instead.
+	Cwd string
+
+	// If DisplayPath is non-nil, it is used to transform each
+	// file name obtained from Build.Import().  This can be used
+	// to prevent a virtualized build.Config's file names from
+	// leaking into the user interface.
+	DisplayPath func(path string) string
+
+	// If AllowErrors is true, Load will return a Program even
+	// if some of the its packages contained I/O, parser or type
+	// errors; such errors are accessible via PackageInfo.Errors.  If
+	// false, Load will fail if any package had an error.
+	AllowErrors bool
+
+	// CreatePkgs specifies a list of non-importable initial
+	// packages to create.  The resulting packages will appear in
+	// the corresponding elements of the Program.Created slice.
+	CreatePkgs []PkgSpec
+
+	// ImportPkgs specifies a set of initial packages to load.
+	// The map keys are package paths.
+	//
+	// The map value indicates whether to load tests.  If true, Load
+	// will add and type-check two lists of files to the package:
+	// non-test files followed by in-package *_test.go files.  In
+	// addition, it will append the external test package (if any)
+	// to Program.Created.
+	ImportPkgs map[string]bool
+
+	// FindPackage is called during Load to create the build.Package
+	// for a given import path from a given directory.
+	// If FindPackage is nil, (*build.Context).Import is used.
+	// A client may use this hook to adapt to a proprietary build
+	// system that does not follow the "go build" layout
+	// conventions, for example.
+	//
+	// It must be safe to call concurrently from multiple goroutines.
+	FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error)
+
+	// AfterTypeCheck is called immediately after a list of files
+	// has been type-checked and appended to info.Files.
+	//
+	// This optional hook function is the earliest opportunity for
+	// the client to observe the output of the type checker,
+	// which may be useful to reduce analysis latency when loading
+	// a large program.
+	//
+	// The function is permitted to modify info.Info, for instance
+	// to clear data structures that are no longer needed, which can
+	// dramatically reduce peak memory consumption.
+	//
+	// The function may be called twice for the same PackageInfo:
+	// once for the files of the package and again for the
+	// in-package test files.
+	//
+	// It must be safe to call concurrently from multiple goroutines.
+	AfterTypeCheck func(info *PackageInfo, files []*ast.File)
+}
+
+// A PkgSpec specifies a non-importable package to be created by Load.
+// Files are processed first, but typically only one of Files and
+// Filenames is provided.  The path needn't be globally unique.
+//
+// For vendoring purposes, the package's directory is the one that
+// contains the first file.
+type PkgSpec struct {
+	Path      string      // package path ("" => use package declaration)
+	Files     []*ast.File // ASTs of already-parsed files
+	Filenames []string    // names of files to be parsed
+}
+
+// A Program is a Go program loaded from source as specified by a Config.
+type Program struct {
+	Fset *token.FileSet // the file set for this program
+
+	// Created[i] contains the initial package whose ASTs or
+	// filenames were supplied by Config.CreatePkgs[i], followed by
+	// the external test package, if any, of each package in
+	// Config.ImportPkgs ordered by ImportPath.
+	//
+	// NOTE: these files must not import "C".  Cgo preprocessing is
+	// only performed on imported packages, not ad hoc packages.
+	//
+	// TODO(adonovan): we need to copy and adapt the logic of
+	// goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make
+	// Config.Import and Config.Create methods return the same kind
+	// of entity, essentially a build.Package.
+	// Perhaps we can even reuse that type directly.
+	Created []*PackageInfo
+
+	// Imported contains the initially imported packages,
+	// as specified by Config.ImportPkgs.
+	Imported map[string]*PackageInfo
+
+	// AllPackages contains the PackageInfo of every package
+	// encountered by Load: all initial packages and all
+	// dependencies, including incomplete ones.
+	AllPackages map[*types.Package]*PackageInfo
+
+	// importMap is the canonical mapping of package paths to
+	// packages.  It contains all Imported initial packages, but not
+	// Created ones, and all imported dependencies.
+	importMap map[string]*types.Package
+}
+
+// PackageInfo holds the ASTs and facts derived by the type-checker
+// for a single package.
+//
+// Not mutated once exposed via the API.
+//
+type PackageInfo struct {
+	Pkg                   *types.Package
+	Importable            bool        // true if 'import "Pkg.Path()"' would resolve to this
+	TransitivelyErrorFree bool        // true if Pkg and all its dependencies are free of errors
+	Files                 []*ast.File // syntax trees for the package's files
+	Errors                []error     // non-nil if the package had errors
+	types.Info                        // type-checker deductions.
+	dir                   string      // package directory
+
+	checker   *types.Checker // transient type-checker state
+	errorFunc func(error)
+}
+
+func (info *PackageInfo) String() string { return info.Pkg.Path() }
+
+func (info *PackageInfo) appendError(err error) {
+	if info.errorFunc != nil {
+		info.errorFunc(err)
+	} else {
+		fmt.Fprintln(os.Stderr, err)
+	}
+	info.Errors = append(info.Errors, err)
+}
+
+func (conf *Config) fset() *token.FileSet {
+	if conf.Fset == nil {
+		conf.Fset = token.NewFileSet()
+	}
+	return conf.Fset
+}
+
+// ParseFile is a convenience function (intended for testing) that invokes
+// the parser using the Config's FileSet, which is initialized if nil.
+//
+// src specifies the parser input as a string, []byte, or io.Reader, and
+// filename is its apparent name.  If src is nil, the contents of
+// filename are read from the file system.
+//
+func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
+	// TODO(adonovan): use conf.build() etc like parseFiles does.
+	return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
+}
+
+// FromArgsUsage is a partial usage message that applications calling
+// FromArgs may wish to include in their -help output.
+const FromArgsUsage = `
+<args> is a list of arguments denoting a set of initial packages.
+It may take one of two forms:
+
+1. A list of *.go source files.
+
+   All of the specified files are loaded, parsed and type-checked
+   as a single package.  All the files must belong to the same directory.
+
+2. A list of import paths, each denoting a package.
+
+   The package's directory is found relative to the $GOROOT and
+   $GOPATH using similar logic to 'go build', and the *.go files in
+   that directory are loaded, parsed and type-checked as a single
+   package.
+
+   In addition, all *_test.go files in the directory are then loaded
+   and parsed.  Those files whose package declaration equals that of
+   the non-*_test.go files are included in the primary package.  Test
+   files whose package declaration ends with "_test" are type-checked
+   as another package, the 'external' test package, so that a single
+   import path may denote two packages.  (Whether this behaviour is
+   enabled is tool-specific, and may depend on additional flags.)
+
+A '--' argument terminates the list of packages.
+`
+
+// FromArgs interprets args as a set of initial packages to load from
+// source and updates the configuration.  It returns the list of
+// unconsumed arguments.
+//
+// It is intended for use in command-line interfaces that require a
+// set of initial packages to be specified; see FromArgsUsage message
+// for details.
+//
+// Only superficial errors are reported at this stage; errors dependent
+// on I/O are detected during Load.
+//
+func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
+	var rest []string
+	for i, arg := range args {
+		if arg == "--" {
+			rest = args[i+1:]
+			args = args[:i]
+			break // consume "--" and return the remaining args
+		}
+	}
+
+	if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
+		// Assume args is a list of *.go files
+		// denoting a single ad hoc package.
+		for _, arg := range args {
+			if !strings.HasSuffix(arg, ".go") {
+				return nil, fmt.Errorf("named files must be .go files: %s", arg)
+			}
+		}
+		conf.CreateFromFilenames("", args...)
+	} else {
+		// Assume args are directories each denoting a
+		// package and (perhaps) an external test, iff xtest.
+		for _, arg := range args {
+			if xtest {
+				conf.ImportWithTests(arg)
+			} else {
+				conf.Import(arg)
+			}
+		}
+	}
+
+	return rest, nil
+}
+
+// CreateFromFilenames is a convenience function that adds
+// a conf.CreatePkgs entry to create a package of the specified *.go
+// files.
+//
+func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
+	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
+}
+
+// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
+// entry to create a package of the specified path and parsed files.
+//
+func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
+	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
+}
+
+// ImportWithTests is a convenience function that adds path to
+// ImportPkgs, the set of initial source packages located relative to
+// $GOPATH.  The package will be augmented by any *_test.go files in
+// its directory that contain a "package x" (not "package x_test")
+// declaration.
+//
+// In addition, if any *_test.go files contain a "package x_test"
+// declaration, an additional package comprising just those files will
+// be added to CreatePkgs.
+//
+func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
+
+// Import is a convenience function that adds path to ImportPkgs, the
+// set of initial packages that will be imported from source.
+//
+func (conf *Config) Import(path string) { conf.addImport(path, false) }
+
+func (conf *Config) addImport(path string, tests bool) {
+	if path == "C" {
+		return // ignore; not a real package
+	}
+	if conf.ImportPkgs == nil {
+		conf.ImportPkgs = make(map[string]bool)
+	}
+	conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
+}
+
+// PathEnclosingInterval returns the PackageInfo and ast.Node that
+// contain source interval [start, end), and all the node's ancestors
+// up to the AST root.  It searches all ast.Files of all packages in prog.
+// exact is defined as for astutil.PathEnclosingInterval.
+//
+// The zero value is returned if not found.
+//
+func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
+	for _, info := range prog.AllPackages {
+		for _, f := range info.Files {
+			if f.Pos() == token.NoPos {
+				// This can happen if the parser saw
+				// too many errors and bailed out.
+				// (Use parser.AllErrors to prevent that.)
+				continue
+			}
+			if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
+				continue
+			}
+			if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
+				return info, path, exact
+			}
+		}
+	}
+	return nil, nil, false
+}
+
+// InitialPackages returns a new slice containing the set of initial
+// packages (Created + Imported) in unspecified order.
+//
+func (prog *Program) InitialPackages() []*PackageInfo {
+	infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
+	infos = append(infos, prog.Created...)
+	for _, info := range prog.Imported {
+		infos = append(infos, info)
+	}
+	return infos
+}
+
+// Package returns the ASTs and results of type checking for the
+// specified package.
+func (prog *Program) Package(path string) *PackageInfo {
+	if info, ok := prog.AllPackages[prog.importMap[path]]; ok {
+		return info
+	}
+	for _, info := range prog.Created {
+		if path == info.Pkg.Path() {
+			return info
+		}
+	}
+	return nil
+}
+
+// ---------- Implementation ----------
+
+// importer holds the working state of the algorithm.
+type importer struct {
+	conf  *Config   // the client configuration
+	start time.Time // for logging
+
+	progMu sync.Mutex // guards prog
+	prog   *Program   // the resulting program
+
+	// findpkg is a memoization of FindPackage.
+	findpkgMu sync.Mutex // guards findpkg
+	findpkg   map[findpkgKey]*findpkgValue
+
+	importedMu sync.Mutex             // guards imported
+	imported   map[string]*importInfo // all imported packages (incl. failures) by import path
+
+	// import dependency graph: graph[x][y] => x imports y
+	//
+	// Since non-importable packages cannot be cyclic, we ignore
+	// their imports, thus we only need the subgraph over importable
+	// packages.  Nodes are identified by their import paths.
+	graphMu sync.Mutex
+	graph   map[string]map[string]bool
+}
+
+type findpkgKey struct {
+	importPath string
+	fromDir    string
+	mode       build.ImportMode
+}
+
+type findpkgValue struct {
+	ready chan struct{} // closed to broadcast readiness
+	bp    *build.Package
+	err   error
+}
+
+// importInfo tracks the success or failure of a single import.
+//
+// Upon completion, exactly one of info and err is non-nil:
+// info on successful creation of a package, err otherwise.
+// A successful package may still contain type errors.
+//
+type importInfo struct {
+	path     string        // import path
+	info     *PackageInfo  // results of typechecking (including errors)
+	complete chan struct{} // closed to broadcast that info is set.
+}
+
+// awaitCompletion blocks until ii is complete,
+// i.e. the info field is safe to inspect.
+func (ii *importInfo) awaitCompletion() {
+	<-ii.complete // wait for close
+}
+
+// Complete marks ii as complete.
+// Its info and err fields will not be subsequently updated.
+func (ii *importInfo) Complete(info *PackageInfo) {
+	if info == nil {
+		panic("info == nil")
+	}
+	ii.info = info
+	close(ii.complete)
+}
+
+type importError struct {
+	path string // import path
+	err  error  // reason for failure to create a package
+}
+
+// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
+// loading their dependency packages as needed.
+//
+// On success, Load returns a Program containing a PackageInfo for
+// each package.  On failure, it returns an error.
+//
+// If AllowErrors is true, Load will return a Program even if some
+// packages contained I/O, parser or type errors, or if dependencies
+// were missing.  (Such errors are accessible via PackageInfo.Errors.)
+// If false, Load will fail if any package had an error.
+//
+// It is an error if no packages were loaded.
+//
+func (conf *Config) Load() (*Program, error) {
+	// Create a simple default error handler for parse/type errors.
+	if conf.TypeChecker.Error == nil {
+		conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
+	}
+
+	// Set default working directory for relative package references.
+	if conf.Cwd == "" {
+		var err error
+		conf.Cwd, err = os.Getwd()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Install default FindPackage hook using go/build logic.
+	if conf.FindPackage == nil {
+		conf.FindPackage = (*build.Context).Import
+	}
+
+	prog := &Program{
+		Fset:        conf.fset(),
+		Imported:    make(map[string]*PackageInfo),
+		importMap:   make(map[string]*types.Package),
+		AllPackages: make(map[*types.Package]*PackageInfo),
+	}
+
+	imp := importer{
+		conf:     conf,
+		prog:     prog,
+		findpkg:  make(map[findpkgKey]*findpkgValue),
+		imported: make(map[string]*importInfo),
+		start:    time.Now(),
+		graph:    make(map[string]map[string]bool),
+	}
+
+	// -- loading proper (concurrent phase) --------------------------------
+
+	var errpkgs []string // packages that contained errors
+
+	// Load the initially imported packages and their dependencies,
+	// in parallel.
+	// No vendor check on packages imported from the command line.
+	infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
+	for _, ie := range importErrors {
+		conf.TypeChecker.Error(ie.err) // failed to create package
+		errpkgs = append(errpkgs, ie.path)
+	}
+	for _, info := range infos {
+		prog.Imported[info.Pkg.Path()] = info
+	}
+
+	// Augment the designated initial packages by their tests.
+	// Dependencies are loaded in parallel.
+	var xtestPkgs []*build.Package
+	for importPath, augment := range conf.ImportPkgs {
+		if !augment {
+			continue
+		}
+
+		// No vendor check on packages imported from command line.
+		bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
+		if err != nil {
+			// Package not found, or can't even parse package declaration.
+			// Already reported by previous loop; ignore it.
+			continue
+		}
+
+		// Needs external test package?
+		if len(bp.XTestGoFiles) > 0 {
+			xtestPkgs = append(xtestPkgs, bp)
+		}
+
+		// Consult the cache using the canonical package path.
+		path := bp.ImportPath
+		imp.importedMu.Lock() // (unnecessary, we're sequential here)
+		ii, ok := imp.imported[path]
+		// Paranoid checks added due to issue #11012.
+		if !ok {
+			// Unreachable.
+			// The previous loop called importAll and thus
+			// startLoad for each path in ImportPkgs, which
+			// populates imp.imported[path] with a non-zero value.
+			panic(fmt.Sprintf("imported[%q] not found", path))
+		}
+		if ii == nil {
+			// Unreachable.
+			// The ii values in this loop are the same as in
+			// the previous loop, which populated imp.imported
+			// only with non-nil *importInfo values.
+			panic(fmt.Sprintf("imported[%q] == nil", path))
+		}
+		if ii.info == nil {
+			// Unreachable.
+			// awaitCompletion has the postcondition
+			// ii.info != nil.
+			panic(fmt.Sprintf("imported[%q].info = nil", path))
+		}
+		info := ii.info
+		imp.importedMu.Unlock()
+
+		// Parse the in-package test files.
+		files, errs := imp.conf.parsePackageFiles(bp, 't')
+		for _, err := range errs {
+			info.appendError(err)
+		}
+
+		// The test files augmenting package P cannot be imported,
+		// but may import packages that import P,
+		// so we must disable the cycle check.
+		imp.addFiles(info, files, false)
+	}
+
+	createPkg := func(path, dir string, files []*ast.File, errs []error) {
+		info := imp.newPackageInfo(path, dir)
+		for _, err := range errs {
+			info.appendError(err)
+		}
+
+		// Ad hoc packages are non-importable,
+		// so no cycle check is needed.
+		// addFiles loads dependencies in parallel.
+		imp.addFiles(info, files, false)
+		prog.Created = append(prog.Created, info)
+	}
+
+	// Create packages specified by conf.CreatePkgs.
+	for _, cp := range conf.CreatePkgs {
+		files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode)
+		files = append(files, cp.Files...)
+
+		path := cp.Path
+		if path == "" {
+			if len(files) > 0 {
+				path = files[0].Name.Name
+			} else {
+				path = "(unnamed)"
+			}
+		}
+
+		dir := conf.Cwd
+		if len(files) > 0 && files[0].Pos().IsValid() {
+			dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name())
+		}
+		createPkg(path, dir, files, errs)
+	}
+
+	// Create external test packages.
+	sort.Sort(byImportPath(xtestPkgs))
+	for _, bp := range xtestPkgs {
+		files, errs := imp.conf.parsePackageFiles(bp, 'x')
+		createPkg(bp.ImportPath+"_test", bp.Dir, files, errs)
+	}
+
+	// -- finishing up (sequential) ----------------------------------------
+
+	if len(prog.Imported)+len(prog.Created) == 0 {
+		return nil, errors.New("no initial packages were loaded")
+	}
+
+	// Create infos for indirectly imported packages.
+	// e.g. incomplete packages without syntax, loaded from export data.
+	for _, obj := range prog.importMap {
+		info := prog.AllPackages[obj]
+		if info == nil {
+			prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
+		} else {
+			// finished
+			info.checker = nil
+			info.errorFunc = nil
+		}
+	}
+
+	if !conf.AllowErrors {
+		// Report errors in indirectly imported packages.
+		for _, info := range prog.AllPackages {
+			if len(info.Errors) > 0 {
+				errpkgs = append(errpkgs, info.Pkg.Path())
+			}
+		}
+		if errpkgs != nil {
+			var more string
+			if len(errpkgs) > 3 {
+				more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
+				errpkgs = errpkgs[:3]
+			}
+			return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
+				strings.Join(errpkgs, ", "), more)
+		}
+	}
+
+	markErrorFreePackages(prog.AllPackages)
+
+	return prog, nil
+}
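+
+// Example (editor's sketch, not part of the original source): a minimal
+// use of the loader, assuming the usual imports ("log", the loader
+// package itself, etc.):
+//
+//	var conf loader.Config
+//	conf.Import("fmt")
+//	prog, err := conf.Load()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, info := range prog.InitialPackages() {
+//		fmt.Println(info.Pkg.Path())
+//	}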
+
+type byImportPath []*build.Package
+
+func (b byImportPath) Len() int           { return len(b) }
+func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
+func (b byImportPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+
+// markErrorFreePackages sets the TransitivelyErrorFree flag on all
+// applicable packages.
+func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
+	// Build the transpose of the import graph.
+	importedBy := make(map[*types.Package]map[*types.Package]bool)
+	for P := range allPackages {
+		for _, Q := range P.Imports() {
+			clients, ok := importedBy[Q]
+			if !ok {
+				clients = make(map[*types.Package]bool)
+				importedBy[Q] = clients
+			}
+			clients[P] = true
+		}
+	}
+
+	// Find all packages reachable from some error package.
+	reachable := make(map[*types.Package]bool)
+	var visit func(*types.Package)
+	visit = func(p *types.Package) {
+		if !reachable[p] {
+			reachable[p] = true
+			for q := range importedBy[p] {
+				visit(q)
+			}
+		}
+	}
+	for _, info := range allPackages {
+		if len(info.Errors) > 0 {
+			visit(info.Pkg)
+		}
+	}
+
+	// Mark the others as "transitively error-free".
+	for _, info := range allPackages {
+		if !reachable[info.Pkg] {
+			info.TransitivelyErrorFree = true
+		}
+	}
+}
+
+// build returns the effective build context.
+func (conf *Config) build() *build.Context {
+	if conf.Build != nil {
+		return conf.Build
+	}
+	return &build.Default
+}
+
+// parsePackageFiles enumerates the files belonging to package path,
+// then loads, parses and returns them, plus a list of I/O or parse
+// errors that were encountered.
+//
+// 'which' indicates which files to include:
+//    'g': include non-test *.go source files (GoFiles + processed CgoFiles)
+//    't': include in-package *_test.go source files (TestGoFiles)
+//    'x': include external *_test.go source files. (XTestGoFiles)
+//
+func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
+	if bp.ImportPath == "unsafe" {
+		return nil, nil
+	}
+	var filenames []string
+	switch which {
+	case 'g':
+		filenames = bp.GoFiles
+	case 't':
+		filenames = bp.TestGoFiles
+	case 'x':
+		filenames = bp.XTestGoFiles
+	default:
+		panic(which)
+	}
+
+	files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
+
+	// Preprocess CgoFiles and parse the outputs (sequentially).
+	if which == 'g' && bp.CgoFiles != nil {
+		cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			files = append(files, cgofiles...)
+		}
+	}
+
+	return files, errs
+}
+
+// doImport imports the package denoted by path.
+// It implements the types.Importer signature.
+//
+// It returns an error if a package could not be created
+// (e.g. go/build or parse error), but type errors are reported via
+// the types.Config.Error callback (the first of which is also saved
+// in the package's PackageInfo).
+//
+// Idempotent.
+//
+func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
+	if to == "C" {
+		// This should be unreachable, but ad hoc packages are
+		// not currently subject to cgo preprocessing.
+		// See https://golang.org/issue/11627.
+		return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
+			from.Pkg.Path())
+	}
+
+	bp, err := imp.findPackage(to, from.dir, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// The standard unsafe package is handled specially,
+	// and has no PackageInfo.
+	if bp.ImportPath == "unsafe" {
+		return types.Unsafe, nil
+	}
+
+	// Look for the package in the cache using its canonical path.
+	path := bp.ImportPath
+	imp.importedMu.Lock()
+	ii := imp.imported[path]
+	imp.importedMu.Unlock()
+	if ii == nil {
+		panic("internal error: unexpected import: " + path)
+	}
+	if ii.info != nil {
+		return ii.info.Pkg, nil
+	}
+
+	// Import of incomplete package: this indicates a cycle.
+	fromPath := from.Pkg.Path()
+	if cycle := imp.findPath(path, fromPath); cycle != nil {
+		// Normalize cycle: start from alphabetically largest node.
+		pos, start := -1, ""
+		for i, s := range cycle {
+			if pos < 0 || s > start {
+				pos, start = i, s
+			}
+		}
+		cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest
+		cycle = append(cycle, cycle[0])             // add the start node to the end to close the cycle
+		return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
+	}
+
+	panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
+}
+
+// findPackage locates the package denoted by the importPath in the
+// specified directory.
+func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
+	// We use a non-blocking duplicate-suppressing cache (gopl.io §9.7)
+	// to avoid holding the lock around FindPackage.
+	key := findpkgKey{importPath, fromDir, mode}
+	imp.findpkgMu.Lock()
+	v, ok := imp.findpkg[key]
+	if ok {
+		// cache hit
+		imp.findpkgMu.Unlock()
+
+		<-v.ready // wait for entry to become ready
+	} else {
+		// Cache miss: this goroutine becomes responsible for
+		// populating the map entry and broadcasting its readiness.
+		v = &findpkgValue{ready: make(chan struct{})}
+		imp.findpkg[key] = v
+		imp.findpkgMu.Unlock()
+
+		ioLimit <- true
+		v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode)
+		<-ioLimit
+
+		if _, ok := v.err.(*build.NoGoError); ok {
+			v.err = nil // empty directory is not an error
+		}
+
+		close(v.ready) // broadcast ready condition
+	}
+	return v.bp, v.err
+}
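+
+// Editor's note (not part of the original source): this is the classic
+// non-blocking duplicate-suppressing cache.  Concurrent callers with the
+// same key cooperate roughly like this:
+//
+//	go imp.findPackage("fmt", dir, 0) // first caller creates the entry and does the I/O
+//	go imp.findPackage("fmt", dir, 0) // later callers block on <-v.ready,
+//	                                  // then reuse the cached v.bp and v.err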
+
+// importAll loads, parses, and type-checks the specified packages in
+// parallel and returns their completed importInfos in unspecified order.
+//
+// fromPath is the package path of the importing package, if it is
+// importable, "" otherwise.  It is used for cycle detection.
+//
+// fromDir is the directory containing the import declaration that
+// caused these imports.
+//
+func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
+	if fromPath != "" {
+		// We're loading a set of imports.
+		//
+		// We must record graph edges from the importing package
+		// to its dependencies, and check for cycles.
+		imp.graphMu.Lock()
+		deps, ok := imp.graph[fromPath]
+		if !ok {
+			deps = make(map[string]bool)
+			imp.graph[fromPath] = deps
+		}
+		for importPath := range imports {
+			deps[importPath] = true
+		}
+		imp.graphMu.Unlock()
+	}
+
+	var pending []*importInfo
+	for importPath := range imports {
+		if fromPath != "" {
+			if cycle := imp.findPath(importPath, fromPath); cycle != nil {
+				// Cycle-forming import: we must not await its
+				// completion, since that would deadlock.
+				if trace {
+					fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
+				}
+				continue
+			}
+		}
+		bp, err := imp.findPackage(importPath, fromDir, mode)
+		if err != nil {
+			errors = append(errors, importError{
+				path: importPath,
+				err:  err,
+			})
+			continue
+		}
+		pending = append(pending, imp.startLoad(bp))
+	}
+
+	for _, ii := range pending {
+		ii.awaitCompletion()
+		infos = append(infos, ii.info)
+	}
+
+	return infos, errors
+}
+
+// findPath returns an arbitrary path from 'from' to 'to' in the import
+// graph, or nil if there was none.
+func (imp *importer) findPath(from, to string) []string {
+	imp.graphMu.Lock()
+	defer imp.graphMu.Unlock()
+
+	seen := make(map[string]bool)
+	var search func(stack []string, importPath string) []string
+	search = func(stack []string, importPath string) []string {
+		if !seen[importPath] {
+			seen[importPath] = true
+			stack = append(stack, importPath)
+			if importPath == to {
+				return stack
+			}
+			for x := range imp.graph[importPath] {
+				if p := search(stack, x); p != nil {
+					return p
+				}
+			}
+		}
+		return nil
+	}
+	return search(make([]string, 0, 20), from)
+}
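+
+// Editor's example (not part of the original source): with edges
+// a->b and b->c recorded in imp.graph, findPath("a", "c") returns
+// []string{"a", "b", "c"}, while findPath("c", "a") returns nil.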
+
+// startLoad initiates the loading, parsing and type-checking of the
+// specified package and its dependencies, if it has not already begun.
+//
+// It returns an importInfo, not necessarily in a completed state.  The
+// caller must call awaitCompletion() before accessing its info field.
+//
+// startLoad is concurrency-safe and idempotent.
+//
+func (imp *importer) startLoad(bp *build.Package) *importInfo {
+	path := bp.ImportPath
+	imp.importedMu.Lock()
+	ii, ok := imp.imported[path]
+	if !ok {
+		ii = &importInfo{path: path, complete: make(chan struct{})}
+		imp.imported[path] = ii
+		go func() {
+			info := imp.load(bp)
+			ii.Complete(info)
+		}()
+	}
+	imp.importedMu.Unlock()
+
+	return ii
+}
+
+// load implements package loading by parsing Go source files
+// located by go/build.
+func (imp *importer) load(bp *build.Package) *PackageInfo {
+	info := imp.newPackageInfo(bp.ImportPath, bp.Dir)
+	info.Importable = true
+	files, errs := imp.conf.parsePackageFiles(bp, 'g')
+	for _, err := range errs {
+		info.appendError(err)
+	}
+
+	imp.addFiles(info, files, true)
+
+	imp.progMu.Lock()
+	imp.prog.importMap[bp.ImportPath] = info.Pkg
+	imp.progMu.Unlock()
+
+	return info
+}
+
+// addFiles adds and type-checks the specified files to info, loading
+// their dependencies if needed.  The order of files determines the
+// package initialization order.  It may be called multiple times on the
+// same package.  Errors are appended to the info.Errors field.
+//
+// cycleCheck determines whether the imports within files create
+// dependency edges that should be checked for potential cycles.
+//
+func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
+	// Ensure the dependencies are loaded, in parallel.
+	var fromPath string
+	if cycleCheck {
+		fromPath = info.Pkg.Path()
+	}
+	// TODO(adonovan): opt: make the caller do scanImports.
+	// Callers with a build.Package can skip it.
+	imp.importAll(fromPath, info.dir, scanImports(files), 0)
+
+	if trace {
+		fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
+			time.Since(imp.start), info.Pkg.Path(), len(files))
+	}
+
+	// Don't call checker.Files on Unsafe, even with zero files,
+	// because it would mutate the package, which is a global.
+	if info.Pkg == types.Unsafe {
+		if len(files) > 0 {
+			panic(`"unsafe" package contains unexpected files`)
+		}
+	} else {
+		// Ignore the returned (first) error since we
+		// already collect them all in the PackageInfo.
+		info.checker.Files(files)
+		info.Files = append(info.Files, files...)
+	}
+
+	if imp.conf.AfterTypeCheck != nil {
+		imp.conf.AfterTypeCheck(info, files)
+	}
+
+	if trace {
+		fmt.Fprintf(os.Stderr, "%s: stop %q\n",
+			time.Since(imp.start), info.Pkg.Path())
+	}
+}
+
+func (imp *importer) newPackageInfo(path, dir string) *PackageInfo {
+	var pkg *types.Package
+	if path == "unsafe" {
+		pkg = types.Unsafe
+	} else {
+		pkg = types.NewPackage(path, "")
+	}
+	info := &PackageInfo{
+		Pkg: pkg,
+		Info: types.Info{
+			Types:      make(map[ast.Expr]types.TypeAndValue),
+			Defs:       make(map[*ast.Ident]types.Object),
+			Uses:       make(map[*ast.Ident]types.Object),
+			Implicits:  make(map[ast.Node]types.Object),
+			Scopes:     make(map[ast.Node]*types.Scope),
+			Selections: make(map[*ast.SelectorExpr]*types.Selection),
+		},
+		errorFunc: imp.conf.TypeChecker.Error,
+		dir:       dir,
+	}
+	typeparams.InitInstanceInfo(&info.Info)
+
+	// Copy the types.Config so we can vary it across PackageInfos.
+	tc := imp.conf.TypeChecker
+	tc.IgnoreFuncBodies = false
+	if f := imp.conf.TypeCheckFuncBodies; f != nil {
+		tc.IgnoreFuncBodies = !f(path)
+	}
+	tc.Importer = closure{imp, info}
+	tc.Error = info.appendError // appendError wraps the user's Error function
+
+	info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
+	imp.progMu.Lock()
+	imp.prog.AllPackages[pkg] = info
+	imp.progMu.Unlock()
+	return info
+}
+
+type closure struct {
+	imp  *importer
+	info *PackageInfo
+}
+
+func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) }

+ 124 - 0
vendor/golang.org/x/tools/go/loader/util.go

@@ -0,0 +1,124 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/token"
+	"io"
+	"os"
+	"strconv"
+	"sync"
+
+	"golang.org/x/tools/go/buildutil"
+)
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 10)
+
+// parseFiles parses the Go source files within directory dir and
+// returns the ASTs of the ones that could be at least partially parsed,
+// along with a list of I/O and parse errors encountered.
+//
+// I/O is done via ctxt, which may specify a virtual file system.
+// displayPath is used to transform the filenames attached to the ASTs.
+//
+func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
+	if displayPath == nil {
+		displayPath = func(path string) string { return path }
+	}
+	var wg sync.WaitGroup
+	n := len(files)
+	parsed := make([]*ast.File, n)
+	errors := make([]error, n)
+	for i, file := range files {
+		if !buildutil.IsAbsPath(ctxt, file) {
+			file = buildutil.JoinPath(ctxt, dir, file)
+		}
+		wg.Add(1)
+		go func(i int, file string) {
+			ioLimit <- true // wait
+			defer func() {
+				wg.Done()
+				<-ioLimit // signal
+			}()
+			var rd io.ReadCloser
+			var err error
+			if ctxt.OpenFile != nil {
+				rd, err = ctxt.OpenFile(file)
+			} else {
+				rd, err = os.Open(file)
+			}
+			if err != nil {
+				errors[i] = err // open failed
+				return
+			}
+
+			// ParseFile may return both an AST and an error.
+			parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
+			rd.Close()
+		}(i, file)
+	}
+	wg.Wait()
+
+	// Eliminate nils, preserving order.
+	var o int
+	for _, f := range parsed {
+		if f != nil {
+			parsed[o] = f
+			o++
+		}
+	}
+	parsed = parsed[:o]
+
+	o = 0
+	for _, err := range errors {
+		if err != nil {
+			errors[o] = err
+			o++
+		}
+	}
+	errors = errors[:o]
+
+	return parsed, errors
+}
+
+// scanImports returns the set of all import paths from all
+// import specs in the specified files.
+func scanImports(files []*ast.File) map[string]bool {
+	imports := make(map[string]bool)
+	for _, f := range files {
+		for _, decl := range f.Decls {
+			if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
+				for _, spec := range decl.Specs {
+					spec := spec.(*ast.ImportSpec)
+
+					// NB: do not assume the program is well-formed!
+					path, err := strconv.Unquote(spec.Path.Value)
+					if err != nil {
+						continue // quietly ignore the error
+					}
+					if path == "C" {
+						continue // skip pseudopackage
+					}
+					imports[path] = true
+				}
+			}
+		}
+	}
+	return imports
+}
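+
+// Editor's example (not part of the original source): for a file containing
+//
+//	import (
+//		"fmt"
+//		m "math"
+//	)
+//
+// scanImports returns map[string]bool{"fmt": true, "math": true};
+// renamed imports contribute their path, and `import "C"` is skipped.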
+
+// ---------- Internal helpers ----------
+
+// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
+func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
+	p := int(pos)
+	base := f.Base()
+	return base <= p && p < base+f.Size()
+}

+ 5 - 0
vendor/golang.org/x/tools/go/packages/packages.go

@@ -26,6 +26,7 @@ import (
 	"golang.org/x/tools/go/gcexportdata"
 	"golang.org/x/tools/go/gcexportdata"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/packagesinternal"
+	"golang.org/x/tools/internal/typeparams"
 	"golang.org/x/tools/internal/typesinternal"
 	"golang.org/x/tools/internal/typesinternal"
 )
 )
 
 
@@ -327,6 +328,9 @@ type Package struct {
 	// The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
 	// If NeedDeps and NeedImports are also set, this field will also be populated
 	// for dependencies.
+	//
+	// Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are
+	// removed.  If parsing returned nil, Syntax may be shorter than CompiledGoFiles.
 	Syntax []*ast.File
 
 	// TypesInfo provides type information about the package's syntax trees.
@@ -910,6 +914,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		Scopes:     make(map[ast.Node]*types.Scope),
 		Selections: make(map[*ast.SelectorExpr]*types.Selection),
 	}
+	typeparams.InitInstanceInfo(lpkg.TypesInfo)
 	lpkg.TypesSizes = ld.sizes
 
 	importer := importerFunc(func(path string) (*types.Package, error) {

+ 617 - 0
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go

@@ -0,0 +1,617 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package objectpath defines a naming scheme for types.Objects
+// (that is, named entities in Go programs) relative to their enclosing
+// package.
+//
+// Type-checker objects are canonical, so they are usually identified by
+// their address in memory (a pointer), but a pointer has meaning only
+// within one address space. By contrast, objectpath names allow the
+// identity of an object to be sent from one program to another,
+// establishing a correspondence between types.Object variables that are
+// distinct but logically equivalent.
+//
+// A single object may have multiple paths. In this example,
+//     type A struct{ X int }
+//     type B A
+// the field X has two paths due to its membership of both A and B.
+// The For(obj) function always returns one of these paths, arbitrarily
+// but consistently.
+package objectpath
+
+import (
+	"fmt"
+	"go/types"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+//   PO package->object	Package.Scope.Lookup
+//   OT  object->type 	Object.Type
+//   TT    type->type 	Type.{Elem,Key,Params,Results,Underlying,TypeParams.At,Constraint} [EKPRUTC]
+//   TO   type->object	Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+//   objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+//   which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTC];
+//   one of these (TypeParam) requires an integer operand,
+//   which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+//   three of these (At,Field,Method) require an integer operand,
+//   which is encoded as a string of decimal digits.
+//   These indices are stable across different representations
+//   of the same package, even source and export data.
+//   The indices used are implementation specific and may not correspond to
+//   the argument to the go/types function.
+//
+// In the example below,
+//
+//	package p
+//
+//	type T interface {
+//		f() (a string, b struct{ X int })
+//	}
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+//    p.Lookup("T")					T
+//    .Type().Underlying().Method(0).			f
+//    .Type().Results().At(1)				b
+//    .Type().Field(0)					X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+//
+const (
+	// object->type operators
+	opType = '.' // .Type()		  (Object)
+
+	// type->type operators
+	opElem       = 'E' // .Elem()		        (Pointer, Slice, Array, Chan, Map)
+	opKey        = 'K' // .Key()		        (Map)
+	opParams     = 'P' // .Params()		      (Signature)
+	opResults    = 'R' // .Results()	      (Signature)
+	opUnderlying = 'U' // .Underlying()	    (Named)
+	opTypeParam  = 'T' // .TypeParams.At(i) (Named, Signature)
+	opConstraint = 'C' // .Constraint()     (TypeParam)
+
+	// type->object operators
+	opAt     = 'A' // .At(i)		 (Tuple)
+	opField  = 'F' // .Field(i)	 (Struct)
+	opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+	opObj    = 'O' // .Obj()		 (Named, TypeParam)
+)
+
+// The For function returns the path to an object relative to its package,
+// or an error if the object is not accessible from the package's Scope.
+//
+// The For function guarantees to return a path only for the following objects:
+// - package-level types
+// - exported package-level non-types
+// - methods
+// - parameter and result variables
+// - struct fields
+// These objects are sufficient to define the API of their package.
+// The objects described by a package's export data are drawn from this set.
+//
+// For does not return a path for predeclared names, imported package
+// names, local names, and unexported package-level names (except
+// types).
+//
+// Example: given this definition,
+//
+//	package p
+//
+//	type T interface {
+//		f() (a string, b struct{ X int })
+//	}
+//
+// For(X) would return a path that denotes the following sequence of operations:
+//
+//    p.Scope().Lookup("T")				(TypeName T)
+//    .Type().Underlying().Method(0).			(method Func f)
+//    .Type().Results().At(1)				(field Var b)
+//    .Type().Field(0)					(field Var X)
+//
+// where p is the package (*types.Package) to which X belongs.
+func For(obj types.Object) (Path, error) {
+	pkg := obj.Pkg()
+
+	// This table lists the cases of interest.
+	//
+	// Object				Action
+	// ------                               ------
+	// nil					reject
+	// builtin				reject
+	// pkgname				reject
+	// label				reject
+	// var
+	//    package-level			accept
+	//    func param/result			accept
+	//    local				reject
+	//    struct field			accept
+	// const
+	//    package-level			accept
+	//    local				reject
+	// func
+	//    package-level			accept
+	//    init functions			reject
+	//    concrete method			accept
+	//    interface method			accept
+	// type
+	//    package-level			accept
+	//    local				reject
+	//
+	// The only accessible package-level objects are members of pkg itself.
+	//
+	// The cases are handled in four steps:
+	//
+	// 1. reject nil and builtin
+	// 2. accept package-level objects
+	// 3. reject obviously invalid objects
+	// 4. search the API for the path to the param/result/field/method.
+
+	// 1. reference to nil or builtin?
+	if pkg == nil {
+		return "", fmt.Errorf("predeclared %s has no path", obj)
+	}
+	scope := pkg.Scope()
+
+	// 2. package-level object?
+	if scope.Lookup(obj.Name()) == obj {
+		// Only exported objects (and non-exported types) have a path.
+		// Non-exported types may be referenced by other objects.
+		if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
+			return "", fmt.Errorf("no path for non-exported %v", obj)
+		}
+		return Path(obj.Name()), nil
+	}
+
+	// 3. Not a package-level object.
+	//    Reject obviously non-viable cases.
+	switch obj := obj.(type) {
+	case *types.TypeName:
+		if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
+			// With the exception of type parameters, only package-level type names
+			// have a path.
+			return "", fmt.Errorf("no path for %v", obj)
+		}
+	case *types.Const, // Only package-level constants have a path.
+		*types.Label,   // Labels are function-local.
+		*types.PkgName: // PkgNames are file-local.
+		return "", fmt.Errorf("no path for %v", obj)
+
+	case *types.Var:
+		// Could be:
+		// - a field (obj.IsField())
+		// - a func parameter or result
+		// - a local var.
+		// Sadly there is no way to distinguish
+		// a param/result from a local variable,
+		// so we must proceed to the find.
+
+	case *types.Func:
+		// A func, if not package-level, must be a method.
+		if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
+			return "", fmt.Errorf("func is not a method: %v", obj)
+		}
+		// TODO(adonovan): opt: if the method is concrete,
+		// do a specialized version of the rest of this function so
+		// that it's O(1) not O(|scope|).  Basically 'find' is needed
+		// only for struct fields and interface methods.
+
+	default:
+		panic(obj)
+	}
+
+	// 4. Search the API for the path to the var (field/param/result) or method.
+
+	// First inspect package-level named types.
+	// In the presence of path aliases, these give
+	// the best paths because non-types may
+	// refer to types, but not the reverse.
+	empty := make([]byte, 0, 48) // initial space
+	names := scope.Names()
+	for _, name := range names {
+		o := scope.Lookup(name)
+		tname, ok := o.(*types.TypeName)
+		if !ok {
+			continue // handle non-types in second pass
+		}
+
+		path := append(empty, name...)
+		path = append(path, opType)
+
+		T := o.Type()
+
+		if tname.IsAlias() {
+			// type alias
+			if r := find(obj, T, path); r != nil {
+				return Path(r), nil
+			}
+		} else {
+			if named, _ := T.(*types.Named); named != nil {
+				if r := findTypeParam(obj, typeparams.ForNamed(named), path); r != nil {
+					// generic named type
+					return Path(r), nil
+				}
+			}
+			// defined (named) type
+			if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
+				return Path(r), nil
+			}
+		}
+	}
+
+	// Then inspect everything else:
+	// non-types, and declared methods of defined types.
+	for _, name := range names {
+		o := scope.Lookup(name)
+		path := append(empty, name...)
+		if _, ok := o.(*types.TypeName); !ok {
+			if o.Exported() {
+				// exported non-type (const, var, func)
+				if r := find(obj, o.Type(), append(path, opType)); r != nil {
+					return Path(r), nil
+				}
+			}
+			continue
+		}
+
+		// Inspect declared methods of defined types.
+		if T, ok := o.Type().(*types.Named); ok {
+			path = append(path, opType)
+			// Note that method index here is always with respect
+			// to canonical ordering of methods, regardless of how
+			// they appear in the underlying type.
+			canonical := canonicalize(T)
+			for i := 0; i < len(canonical); i++ {
+				m := canonical[i]
+				path2 := appendOpArg(path, opMethod, i)
+				if m == obj {
+					return Path(path2), nil // found declared method
+				}
+				if r := find(obj, m.Type(), append(path2, opType)); r != nil {
+					return Path(r), nil
+				}
+			}
+		}
+	}
+
+	return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
+}
+
+func appendOpArg(path []byte, op byte, arg int) []byte {
+	path = append(path, op)
+	path = strconv.AppendInt(path, int64(arg), 10)
+	return path
+}
+
+// find finds obj within type T, returning the path to it, or nil if not found.
+func find(obj types.Object, T types.Type, path []byte) []byte {
+	switch T := T.(type) {
+	case *types.Basic, *types.Named:
+		// Named types belonging to pkg were handled already,
+		// so T must belong to another package. No path.
+		return nil
+	case *types.Pointer:
+		return find(obj, T.Elem(), append(path, opElem))
+	case *types.Slice:
+		return find(obj, T.Elem(), append(path, opElem))
+	case *types.Array:
+		return find(obj, T.Elem(), append(path, opElem))
+	case *types.Chan:
+		return find(obj, T.Elem(), append(path, opElem))
+	case *types.Map:
+		if r := find(obj, T.Key(), append(path, opKey)); r != nil {
+			return r
+		}
+		return find(obj, T.Elem(), append(path, opElem))
+	case *types.Signature:
+		if r := findTypeParam(obj, typeparams.ForSignature(T), path); r != nil {
+			return r
+		}
+		if r := find(obj, T.Params(), append(path, opParams)); r != nil {
+			return r
+		}
+		return find(obj, T.Results(), append(path, opResults))
+	case *types.Struct:
+		for i := 0; i < T.NumFields(); i++ {
+			f := T.Field(i)
+			path2 := appendOpArg(path, opField, i)
+			if f == obj {
+				return path2 // found field var
+			}
+			if r := find(obj, f.Type(), append(path2, opType)); r != nil {
+				return r
+			}
+		}
+		return nil
+	case *types.Tuple:
+		for i := 0; i < T.Len(); i++ {
+			v := T.At(i)
+			path2 := appendOpArg(path, opAt, i)
+			if v == obj {
+				return path2 // found param/result var
+			}
+			if r := find(obj, v.Type(), append(path2, opType)); r != nil {
+				return r
+			}
+		}
+		return nil
+	case *types.Interface:
+		for i := 0; i < T.NumMethods(); i++ {
+			m := T.Method(i)
+			path2 := appendOpArg(path, opMethod, i)
+			if m == obj {
+				return path2 // found interface method
+			}
+			if r := find(obj, m.Type(), append(path2, opType)); r != nil {
+				return r
+			}
+		}
+		return nil
+	case *typeparams.TypeParam:
+		name := T.Obj()
+		if name == obj {
+			return append(path, opObj)
+		}
+		if r := find(obj, T.Constraint(), append(path, opConstraint)); r != nil {
+			return r
+		}
+		return nil
+	}
+	panic(T)
+}
+
+func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte) []byte {
+	for i := 0; i < list.Len(); i++ {
+		tparam := list.At(i)
+		path2 := appendOpArg(path, opTypeParam, i)
+		if r := find(obj, tparam, path2); r != nil {
+			return r
+		}
+	}
+	return nil
+}
+
+// Object returns the object denoted by path p within the package pkg.
+func Object(pkg *types.Package, p Path) (types.Object, error) {
+	if p == "" {
+		return nil, fmt.Errorf("empty path")
+	}
+
+	pathstr := string(p)
+	var pkgobj, suffix string
+	if dot := strings.IndexByte(pathstr, opType); dot < 0 {
+		pkgobj = pathstr
+	} else {
+		pkgobj = pathstr[:dot]
+		suffix = pathstr[dot:] // suffix starts with "."
+	}
+
+	obj := pkg.Scope().Lookup(pkgobj)
+	if obj == nil {
+		return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
+	}
+
+	// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
+	type hasElem interface {
+		Elem() types.Type
+	}
+	// abstraction of *types.{Named,Signature}
+	type hasTypeParams interface {
+		TypeParams() *typeparams.TypeParamList
+	}
+	// abstraction of *types.{Named,TypeParam}
+	type hasObj interface {
+		Obj() *types.TypeName
+	}
+
+	// The loop state is the pair (t, obj),
+	// exactly one of which is non-nil, initially obj.
+	// All suffixes start with '.' (the only object->type operation),
+	// followed by optional type->type operations,
+	// then a type->object operation.
+	// The cycle then repeats.
+	var t types.Type
+	for suffix != "" {
+		code := suffix[0]
+		suffix = suffix[1:]
+
+		// Codes [AFMT] have an integer operand.
+		var index int
+		switch code {
+		case opAt, opField, opMethod, opTypeParam:
+			rest := strings.TrimLeft(suffix, "0123456789")
+			numerals := suffix[:len(suffix)-len(rest)]
+			suffix = rest
+			i, err := strconv.Atoi(numerals)
+			if err != nil {
+				return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+			}
+			index = int(i)
+		case opObj:
+			// no operand
+		default:
+			// The suffix must end with a type->object operation.
+			if suffix == "" {
+				return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+			}
+		}
+
+		if code == opType {
+			if t != nil {
+				return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+			}
+			t = obj.Type()
+			obj = nil
+			continue
+		}
+
+		if t == nil {
+			return nil, fmt.Errorf("invalid path: code %q in object context", code)
+		}
+
+		// Inv: t != nil, obj == nil
+
+		switch code {
+		case opElem:
+			hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+			}
+			t = hasElem.Elem()
+
+		case opKey:
+			mapType, ok := t.(*types.Map)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+			}
+			t = mapType.Key()
+
+		case opParams:
+			sig, ok := t.(*types.Signature)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+			}
+			t = sig.Params()
+
+		case opResults:
+			sig, ok := t.(*types.Signature)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+			}
+			t = sig.Results()
+
+		case opUnderlying:
+			named, ok := t.(*types.Named)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+			}
+			t = named.Underlying()
+
+		case opTypeParam:
+			hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+			}
+			tparams := hasTypeParams.TypeParams()
+			if n := tparams.Len(); index >= n {
+				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+			}
+			t = tparams.At(index)
+
+		case opConstraint:
+			tparam, ok := t.(*typeparams.TypeParam)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+			}
+			t = tparam.Constraint()
+
+		case opAt:
+			tuple, ok := t.(*types.Tuple)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+			}
+			if n := tuple.Len(); index >= n {
+				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+			}
+			obj = tuple.At(index)
+			t = nil
+
+		case opField:
+			structType, ok := t.(*types.Struct)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+			}
+			if n := structType.NumFields(); index >= n {
+				return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+			}
+			obj = structType.Field(index)
+			t = nil
+
+		case opMethod:
+			hasMethods, ok := t.(hasMethods) // Interface or Named
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+			}
+			canonical := canonicalize(hasMethods)
+			if n := len(canonical); index >= n {
+				return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
+			}
+			obj = canonical[index]
+			t = nil
+
+		case opObj:
+			hasObj, ok := t.(hasObj)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+			}
+			obj = hasObj.Obj()
+			t = nil
+
+		default:
+			return nil, fmt.Errorf("invalid path: unknown code %q", code)
+		}
+	}
+
+	if obj.Pkg() != pkg {
+		return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+	}
+
+	return obj, nil // success
+}
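+
+// Example (editor's sketch, not part of the original source): For and
+// Object are designed to round-trip within a single type-checked program:
+//
+//	path, err := For(obj)
+//	if err == nil {
+//		obj2, _ := Object(obj.Pkg(), path)
+//		fmt.Println(obj2 == obj) // true: objects are canonical per package
+//	}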
+
+// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up
+// because it is used by methodOrdering, which is in turn used by both encoding
+// and decoding.
+type hasMethods interface {
+	Method(int) *types.Func
+	NumMethods() int
+}
+
+// canonicalize returns a canonical order for the methods in a hasMethod.
+func canonicalize(hm hasMethods) []*types.Func {
+	count := hm.NumMethods()
+	if count <= 0 {
+		return nil
+	}
+	canon := make([]*types.Func, count)
+	for i := 0; i < count; i++ {
+		canon[i] = hm.Method(i)
+	}
+	less := func(i, j int) bool {
+		return canon[i].Id() < canon[j].Id()
+	}
+	sort.Slice(canon, less)
+	return canon
+}

+ 69 - 0
vendor/golang.org/x/tools/go/types/typeutil/callee.go

@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+	"go/ast"
+	"go/types"
+
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+	fun := astutil.Unparen(call.Fun)
+
+	// Look through type instantiation if necessary.
+	isInstance := false
+	switch fun.(type) {
+	case *ast.IndexExpr, *typeparams.IndexListExpr:
+		// When extracting the callee from an *IndexExpr, we need to check that
+		// it is a *types.Func and not a *types.Var.
+		// Example: Don't match a slice m within the expression `m[0]()`.
+		isInstance = true
+		fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
+	}
+
+	var obj types.Object
+	switch fun := fun.(type) {
+	case *ast.Ident:
+		obj = info.Uses[fun] // type, var, builtin, or declared func
+	case *ast.SelectorExpr:
+		if sel, ok := info.Selections[fun]; ok {
+			obj = sel.Obj() // method or field
+		} else {
+			obj = info.Uses[fun.Sel] // qualified identifier?
+		}
+	}
+	if _, ok := obj.(*types.TypeName); ok {
+		return nil // T(x) is a conversion, not a call
+	}
+	// A Func is required to match instantiations.
+	if _, ok := obj.(*types.Func); isInstance && !ok {
+		return nil // Was not a Func.
+	}
+	return obj
+}
+
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+		return f
+	}
+	return nil
+}
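+
+// Example (editor's sketch, not part of the original source): assuming
+// file is a type-checked *ast.File and info its *types.Info, static call
+// targets can be enumerated with:
+//
+//	ast.Inspect(file, func(n ast.Node) bool {
+//		if call, ok := n.(*ast.CallExpr); ok {
+//			if fn := StaticCallee(info, call); fn != nil {
+//				fmt.Println("static call to", fn.FullName())
+//			}
+//		}
+//		return true
+//	})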
+
+func interfaceMethod(f *types.Func) bool {
+	recv := f.Type().(*types.Signature).Recv()
+	return recv != nil && types.IsInterface(recv.Type())
+}

+ 31 - 0
vendor/golang.org/x/tools/go/types/typeutil/imports.go

@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+//
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+	var result []*types.Package
+	seen := make(map[*types.Package]bool)
+	var visit func(pkgs []*types.Package)
+	visit = func(pkgs []*types.Package) {
+		for _, p := range pkgs {
+			if !seen[p] {
+				seen[p] = true
+				visit(p.Imports())
+				result = append(result, p)
+			}
+		}
+	}
+	visit(pkgs)
+	return result
+}
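+
+// Editor's example (not part of the original source): if package p
+// imports q, and q imports r, then Dependencies(p) returns
+// []*types.Package{r, q, p}: dependencies first, p last.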

+ 443 - 0
vendor/golang.org/x/tools/go/types/typeutil/map.go

@@ -0,0 +1,443 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to interface{} values.
+package typeutil // import "golang.org/x/tools/go/types/typeutil"
+
+import (
+	"bytes"
+	"fmt"
+	"go/types"
+	"reflect"
+
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary interface{} values.  The concrete types that implement
+// the Type interface are pointers.  Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+//
+type Map struct {
+	hasher Hasher             // shared by many Maps
+	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+	length int                // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+	key   types.Type
+	value interface{}
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps.  This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen.  Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and even read-only operations such as
+// Map.At require updates to the hasher, so a full Mutex lock (not a
+// read-lock) is required around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Insert.
+//
+func (m *Map) SetHasher(hasher Hasher) {
+	m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+//
+func (m *Map) Delete(key types.Type) bool {
+	if m != nil && m.table != nil {
+		hash := m.hasher.Hash(key)
+		bucket := m.table[hash]
+		for i, e := range bucket {
+			if e.key != nil && types.Identical(key, e.key) {
+				// We can't compact the bucket as it
+				// would disturb iterators.
+				bucket[i] = entry{}
+				m.length--
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+//
+func (m *Map) At(key types.Type) interface{} {
+	if m != nil && m.table != nil {
+		for _, e := range m.table[m.hasher.Hash(key)] {
+			if e.key != nil && types.Identical(key, e.key) {
+				return e.value
+			}
+		}
+	}
+	return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
+	if m.table != nil {
+		hash := m.hasher.Hash(key)
+		bucket := m.table[hash]
+		var hole *entry
+		for i, e := range bucket {
+			if e.key == nil {
+				hole = &bucket[i]
+			} else if types.Identical(key, e.key) {
+				prev = e.value
+				bucket[i].value = value
+				return
+			}
+		}
+
+		if hole != nil {
+			*hole = entry{key, value} // overwrite deleted entry
+		} else {
+			m.table[hash] = append(bucket, entry{key, value})
+		}
+	} else {
+		if m.hasher.memo == nil {
+			m.hasher = MakeHasher()
+		}
+		hash := m.hasher.Hash(key)
+		m.table = map[uint32][]entry{hash: {entry{key, value}}}
+	}
+
+	m.length++
+	return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+	if m != nil {
+		return m.length
+	}
+	return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+//
+func (m *Map) Iterate(f func(key types.Type, value interface{})) {
+	if m != nil {
+		for _, bucket := range m.table {
+			for _, e := range bucket {
+				if e.key != nil {
+					f(e.key, e.value)
+				}
+			}
+		}
+	}
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+	keys := make([]types.Type, 0, m.Len())
+	m.Iterate(func(key types.Type, _ interface{}) {
+		keys = append(keys, key)
+	})
+	return keys
+}
+
+func (m *Map) toString(values bool) string {
+	if m == nil {
+		return "{}"
+	}
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "{")
+	sep := ""
+	m.Iterate(func(key types.Type, value interface{}) {
+		fmt.Fprint(&buf, sep)
+		sep = ", "
+		fmt.Fprint(&buf, key)
+		if values {
+			fmt.Fprintf(&buf, ": %q", value)
+		}
+	})
+	fmt.Fprint(&buf, "}")
+	return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+//
+func (m *Map) String() string {
+	return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+//
+func (m *Map) KeysString() string {
+	return m.toString(false)
+}
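+
+// Example (editor's sketch, not part of the original source): the zero
+// value is ready to use, and keys are compared with types.Identical:
+//
+//	var m Map
+//	m.Set(types.Typ[types.Int], "an int")
+//	m.Set(types.NewSlice(types.Typ[types.Int]), "a slice of int")
+//	fmt.Println(m.Len())                    // 2
+//	fmt.Println(m.At(types.Typ[types.Int])) // an int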
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
+type Hasher struct {
+	memo map[types.Type]uint32
+
+	// ptrMap records pointer identity.
+	ptrMap map[interface{}]uint32
+
+	// sigTParams holds type parameters from the signature being hashed.
+	// Signatures are considered identical modulo renaming of type parameters, so
+	// within the scope of a signature type the identity of the signature's type
+	// parameters is just their index.
+	//
+	// Since the language does not currently support referring to uninstantiated
+	// generic types or functions, and instantiated signatures do not have type
+	// parameter lists, we should never encounter a second non-empty type
+	// parameter list when hashing a generic signature.
+	sigTParams *typeparams.TypeParamList
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+	return Hasher{
+		memo:       make(map[types.Type]uint32),
+		ptrMap:     make(map[interface{}]uint32),
+		sigTParams: nil,
+	}
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+	hash, ok := h.memo[t]
+	if !ok {
+		hash = h.hashFor(t)
+		h.memo[t] = hash
+	}
+	return hash
+}
+
+// hashString computes the Fowler–Noll–Vo hash of s.
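+// (Editor's note: 16777619 is the 32-bit FNV prime; this variant starts
+// from a zero offset basis rather than the standard FNV-1a basis.)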
+func hashString(s string) uint32 {
+	var h uint32
+	for i := 0; i < len(s); i++ {
+		h ^= uint32(s[i])
+		h *= 16777619
+	}
+	return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+	// See Identical for rationale.
+	switch t := t.(type) {
+	case *types.Basic:
+		return uint32(t.Kind())
+
+	case *types.Array:
+		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+	case *types.Slice:
+		return 9049 + 2*h.Hash(t.Elem())
+
+	case *types.Struct:
+		var hash uint32 = 9059
+		for i, n := 0, t.NumFields(); i < n; i++ {
+			f := t.Field(i)
+			if f.Anonymous() {
+				hash += 8861
+			}
+			hash += hashString(t.Tag(i))
+			hash += hashString(f.Name()) // (ignore f.Pkg)
+			hash += h.Hash(f.Type())
+		}
+		return hash
+
+	case *types.Pointer:
+		return 9067 + 2*h.Hash(t.Elem())
+
+	case *types.Signature:
+		var hash uint32 = 9091
+		if t.Variadic() {
+			hash *= 8863
+		}
+
+		// Use a separate hasher for types inside of the signature, where type
+		// parameter identity is modified to be (index, constraint). We must use a
+		// new memo for this hasher as type identity may be affected by this
+		// masking. For example, in func[T any](*T), the identity of *T depends on
+		// whether we are mapping the argument in isolation, or recursively as part
+		// of hashing the signature.
+		//
+		// We should never encounter a generic signature while hashing another
+		// generic signature, but defensively set sigTParams only if it is
+		// not already set.
+		tparams := typeparams.ForSignature(t)
+		if h.sigTParams == nil && tparams.Len() != 0 {
+			h = Hasher{
+				// There may be something more efficient than discarding the existing
+				// memo, but it would require detecting whether types are 'tainted' by
+				// references to type parameters.
+				memo: make(map[types.Type]uint32),
+				// Re-using ptrMap ensures that pointer identity is preserved in this
+				// hasher.
+				ptrMap:     h.ptrMap,
+				sigTParams: tparams,
+			}
+		}
+
+		for i := 0; i < tparams.Len(); i++ {
+			tparam := tparams.At(i)
+			hash += 7 * h.Hash(tparam.Constraint())
+		}
+
+		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+	case *typeparams.Union:
+		return h.hashUnion(t)
+
+	case *types.Interface:
+		// Interfaces are identical if they have the same set of methods, with
+		// identical names and types, and they have the same set of type
+		// restrictions. See go/types.identical for more details.
+		var hash uint32 = 9103
+
+		// Hash methods.
+		for i, n := 0, t.NumMethods(); i < n; i++ {
+			// Method order is not significant.
+			// Ignore m.Pkg().
+			m := t.Method(i)
+			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+		}
+
+		// Hash type restrictions.
+		terms, err := typeparams.InterfaceTermSet(t)
+		// If err != nil, t has invalid type restrictions, which are ignored.
+		if err == nil {
+			hash += h.hashTermSet(terms)
+		}
+
+		return hash
+
+	case *types.Map:
+		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+	case *types.Chan:
+		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+	case *types.Named:
+		hash := h.hashPtr(t.Obj())
+		targs := typeparams.NamedTypeArgs(t)
+		for i := 0; i < targs.Len(); i++ {
+			targ := targs.At(i)
+			hash += 2 * h.Hash(targ)
+		}
+		return hash
+
+	case *typeparams.TypeParam:
+		return h.hashTypeParam(t)
+
+	case *types.Tuple:
+		return h.hashTuple(t)
+	}
+
+	panic(fmt.Sprintf("%T: %v", t, t))
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+	// See go/types.identicalTypes for rationale.
+	n := tuple.Len()
+	hash := 9137 + 2*uint32(n)
+	for i := 0; i < n; i++ {
+		hash += 3 * h.Hash(tuple.At(i).Type())
+	}
+	return hash
+}
+
+func (h Hasher) hashUnion(t *typeparams.Union) uint32 {
+	// Hash type restrictions.
+	terms, err := typeparams.UnionTermSet(t)
+	// If err != nil, t has invalid type restrictions; fall back on a
+	// non-zero hash.
+	if err != nil {
+		return 9151
+	}
+	return h.hashTermSet(terms)
+}
+
+func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 {
+	hash := 9157 + 2*uint32(len(terms))
+	for _, term := range terms {
+		// term order is not significant.
+		termHash := h.Hash(term.Type())
+		if term.Tilde() {
+			termHash *= 9161
+		}
+		hash += 3 * termHash
+	}
+	return hash
+}
+
+// hashTypeParam returns a hash of the type parameter t, with a hash value
+// depending on whether t is contained in h.sigTParams.
+//
+// If h.sigTParams is set and contains t, then we are in the process of hashing
+// a signature, and the hash value of t must depend only on t's index and
+// constraint: signatures are considered identical modulo type parameter
+// renaming. To avoid infinite recursion, we only hash the type parameter
+// index, and rely on types.Identical to handle signatures where constraints
+// are not identical.
+//
+// Otherwise the hash of t depends only on t's pointer identity.
+func (h Hasher) hashTypeParam(t *typeparams.TypeParam) uint32 {
+	if h.sigTParams != nil {
+		i := t.Index()
+		if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
+			return 9173 + 3*uint32(i)
+		}
+	}
+	return h.hashPtr(t.Obj())
+}
+
+// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
+// pointer values are not dependent on the GC.
+func (h Hasher) hashPtr(ptr interface{}) uint32 {
+	if hash, ok := h.ptrMap[ptr]; ok {
+		return hash
+	}
+	hash := uint32(reflect.ValueOf(ptr).Pointer())
+	h.ptrMap[ptr] = hash
+	return hash
+}

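Aside (not part of this commit): the Hasher above backs the public typeutil API. A minimal, runnable sketch of the guarantee it provides, using only real x/tools and go/types calls; the example types are invented:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	h := typeutil.MakeHasher()
	s1 := types.NewSlice(types.Typ[types.Int])
	s2 := types.NewSlice(types.Typ[types.Int])
	// Structurally identical types hash to the same value, even though
	// s1 and s2 are distinct pointers.
	fmt.Println(h.Hash(s1) == h.Hash(s2)) // true

	// That property is what lets typeutil.Map key entries by type identity.
	var m typeutil.Map
	m.Set(s1, "slice of int")
	fmt.Println(m.At(s2)) // slice of int
}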
+ 72 - 0
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go

@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+	"go/types"
+	"sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+	mu     sync.Mutex
+	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+	others map[types.Type]*types.MethodSet                            // all other types
+}
+
+// MethodSet returns the method set of type T.  It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+//
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+	if cache == nil {
+		return types.NewMethodSet(T)
+	}
+	cache.mu.Lock()
+	defer cache.mu.Unlock()
+
+	switch T := T.(type) {
+	case *types.Named:
+		return cache.lookupNamed(T).value
+
+	case *types.Pointer:
+		if N, ok := T.Elem().(*types.Named); ok {
+			return cache.lookupNamed(N).pointer
+		}
+	}
+
+	// all other types
+	// (The map uses pointer equivalence, not type identity.)
+	mset := cache.others[T]
+	if mset == nil {
+		mset = types.NewMethodSet(T)
+		if cache.others == nil {
+			cache.others = make(map[types.Type]*types.MethodSet)
+		}
+		cache.others[T] = mset
+	}
+	return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+	if cache.named == nil {
+		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+	}
+	// Avoid recomputing mset(*T) for each distinct Pointer
+	// instance whose underlying type is a named type.
+	msets, ok := cache.named[named]
+	if !ok {
+		msets.value = types.NewMethodSet(named)
+		msets.pointer = types.NewMethodSet(types.NewPointer(named))
+		cache.named[named] = msets
+	}
+	return msets
+}

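Aside (not part of this commit): a runnable sketch of the cache's observable behavior, built around a hand-constructed named type. Two distinct *types.Pointer values resolve to the same cached method set, which is exactly what lookupNamed is for:

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Build a named type T (underlying int) with one pointer-receiver method M.
	pkg := types.NewPackage("example.com/p", "p")
	named := types.NewNamed(types.NewTypeName(token.NoPos, pkg, "T", nil), types.Typ[types.Int], nil)
	recv := types.NewVar(token.NoPos, pkg, "t", types.NewPointer(named))
	named.AddMethod(types.NewFunc(token.NoPos, pkg, "M", types.NewSignature(recv, nil, nil, false)))

	var cache typeutil.MethodSetCache // the zero value is ready to use
	ms1 := cache.MethodSet(types.NewPointer(named))
	ms2 := cache.MethodSet(types.NewPointer(named))
	fmt.Println(ms1.Len())  // 1: *T has M
	fmt.Println(ms1 == ms2) // true: distinct Pointer values hit the same cached entry
}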
+ 52 - 0
vendor/golang.org/x/tools/go/types/typeutil/ui.go

@@ -0,0 +1,52 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import "go/types"
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+//
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+	isPointerToConcrete := func(T types.Type) bool {
+		ptr, ok := T.(*types.Pointer)
+		return ok && !types.IsInterface(ptr.Elem())
+	}
+
+	var result []*types.Selection
+	mset := msets.MethodSet(T)
+	if types.IsInterface(T) || isPointerToConcrete(T) {
+		for i, n := 0, mset.Len(); i < n; i++ {
+			result = append(result, mset.At(i))
+		}
+	} else {
+		// T is some other concrete type.
+		// Report methods of T and *T, preferring those of T.
+		pmset := msets.MethodSet(types.NewPointer(T))
+		for i, n := 0, pmset.Len(); i < n; i++ {
+			meth := pmset.At(i)
+			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+				meth = m
+			}
+			result = append(result, meth)
+		}
+
+	}
+	return result
+}

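Aside (not part of this commit): a runnable sketch contrasting the strict and intuitive method sets for a hypothetical type T with a single pointer-receiver method:

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// T again has one pointer-receiver method M.
	pkg := types.NewPackage("example.com/p", "p")
	named := types.NewNamed(types.NewTypeName(token.NoPos, pkg, "T", nil), types.Typ[types.Int], nil)
	recv := types.NewVar(token.NoPos, pkg, "t", types.NewPointer(named))
	named.AddMethod(types.NewFunc(token.NoPos, pkg, "M", types.NewSignature(recv, nil, nil, false)))

	// The strict method set of T is empty, but the intuitive one includes M,
	// because an addressable value of type T can call pointer-receiver methods.
	fmt.Println(types.NewMethodSet(named).Len())              // 0
	fmt.Println(len(typeutil.IntuitiveMethodSet(named, nil))) // 1
}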
+ 428 - 0
vendor/golang.org/x/tools/internal/analysisinternal/analysis.go

@@ -0,0 +1,428 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package analysisinternal exposes internal-only fields from go/analysis.
+package analysisinternal
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/lsp/fuzzy"
+)
+
+// Flag to gate diagnostics for fuzz tests in 1.18.
+var DiagnoseFuzzTests bool = false
+
+var (
+	GetTypeErrors func(p interface{}) []types.Error
+	SetTypeErrors func(p interface{}, errors []types.Error)
+)
+
+func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
+	// Get the end position for the type error.
+	offset, end := fset.PositionFor(start, false).Offset, start
+	if offset >= len(src) {
+		return end
+	}
+	if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 {
+		end = start + token.Pos(width)
+	}
+	return end
+}
+
+func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	under := typ
+	if n, ok := typ.(*types.Named); ok {
+		under = n.Underlying()
+	}
+	switch u := under.(type) {
+	case *types.Basic:
+		switch {
+		case u.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}
+		case u.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}
+		case u.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+		default:
+			panic("unknown basic type")
+		}
+	case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array:
+		return ast.NewIdent("nil")
+	case *types.Struct:
+		texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here.
+		if texpr == nil {
+			return nil
+		}
+		return &ast.CompositeLit{
+			Type: texpr,
+		}
+	}
+	return nil
+}
+
+// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of
+// analysisinternal.ZeroValue)
+func IsZeroValue(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+	case *types.Pointer:
+		x := TypeExpr(fset, f, pkg, t.Elem())
+		if x == nil {
+			return nil
+		}
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  x,
+		}
+	case *types.Array:
+		elt := TypeExpr(fset, f, pkg, t.Elem())
+		if elt == nil {
+			return nil
+		}
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: elt,
+		}
+	case *types.Slice:
+		elt := TypeExpr(fset, f, pkg, t.Elem())
+		if elt == nil {
+			return nil
+		}
+		return &ast.ArrayType{
+			Elt: elt,
+		}
+	case *types.Map:
+		key := TypeExpr(fset, f, pkg, t.Key())
+		value := TypeExpr(fset, f, pkg, t.Elem())
+		if key == nil || value == nil {
+			return nil
+		}
+		return &ast.MapType{
+			Key:   key,
+			Value: value,
+		}
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		value := TypeExpr(fset, f, pkg, t.Elem())
+		if value == nil {
+			return nil
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: value,
+		}
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			p := TypeExpr(fset, f, pkg, t.Params().At(i).Type())
+			if p == nil {
+				return nil
+			}
+			params = append(params, &ast.Field{
+				Type: p,
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			r := TypeExpr(fset, f, pkg, t.Results().At(i).Type())
+			if r == nil {
+				return nil
+			}
+			returns = append(returns, &ast.Field{
+				Type: r,
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+	case *types.Named:
+		if t.Obj().Pkg() == nil {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		if t.Obj().Pkg() == pkg {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		pkgName := t.Obj().Pkg().Name()
+		// If the file already imports the package under another name, use that.
+		for _, group := range astutil.Imports(fset, f) {
+			for _, cand := range group {
+				if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() {
+					if cand.Name != nil && cand.Name.Name != "" {
+						pkgName = cand.Name.Name
+					}
+				}
+			}
+		}
+		if pkgName == "." {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+	default:
+		return nil
+	}
+}
+
+type TypeErrorPass string
+
+const (
+	NoNewVars      TypeErrorPass = "nonewvars"
+	NoResultValues TypeErrorPass = "noresultvalues"
+	UndeclaredName TypeErrorPass = "undeclaredname"
+)
+
+// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
+// Some examples:
+//
+// Basic Example:
+// z := 1
+// y := z + x
+// If x is undeclared, then this function would return `y := z + x`, so that we
+// can insert `x := ` on the line before `y := z + x`.
+//
+// If stmt example:
+// if z == 1 {
+// } else if z == y {}
+// If y is undeclared, then this function would return `if z == 1 {`, because we cannot
+// insert a statement between an if and an else if statement. As a result, we need to find
+// the top of the if chain to insert `y := ` before.
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
+	enclosingIndex := -1
+	for i, p := range path {
+		if _, ok := p.(ast.Stmt); ok {
+			enclosingIndex = i
+			break
+		}
+	}
+	if enclosingIndex == -1 {
+		return nil
+	}
+	enclosingStmt := path[enclosingIndex]
+	switch enclosingStmt.(type) {
+	case *ast.IfStmt:
+		// The enclosingStmt is inside of the if declaration,
+		// We need to check if we are in an else-if stmt and
+		// get the base if statement.
+		return baseIfStmt(path, enclosingIndex)
+	case *ast.CaseClause:
+		// Get the enclosing switch stmt if the enclosingStmt is
+		// inside of the case statement.
+		for i := enclosingIndex + 1; i < len(path); i++ {
+			if node, ok := path[i].(*ast.SwitchStmt); ok {
+				return node
+			} else if node, ok := path[i].(*ast.TypeSwitchStmt); ok {
+				return node
+			}
+		}
+	}
+	if len(path) <= enclosingIndex+1 {
+		return enclosingStmt.(ast.Stmt)
+	}
+	// Check if the enclosing statement is inside another node.
+	switch expr := path[enclosingIndex+1].(type) {
+	case *ast.IfStmt:
+		// Get the base if statement.
+		return baseIfStmt(path, enclosingIndex+1)
+	case *ast.ForStmt:
+		if expr.Init == enclosingStmt || expr.Post == enclosingStmt {
+			return expr
+		}
+	}
+	return enclosingStmt.(ast.Stmt)
+}
+
+// baseIfStmt walks up the if/else-if chain until we get to
+// the top of the current if chain.
+func baseIfStmt(path []ast.Node, index int) ast.Stmt {
+	stmt := path[index]
+	for i := index + 1; i < len(path); i++ {
+		if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt {
+			stmt = node
+			continue
+		}
+		break
+	}
+	return stmt.(ast.Stmt)
+}
+
+// WalkASTWithParent walks the AST rooted at n. The semantics are
+// similar to ast.Inspect except it does not call f(nil).
+func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
+	var ancestors []ast.Node
+	ast.Inspect(n, func(n ast.Node) (recurse bool) {
+		if n == nil {
+			ancestors = ancestors[:len(ancestors)-1]
+			return false
+		}
+
+		var parent ast.Node
+		if len(ancestors) > 0 {
+			parent = ancestors[len(ancestors)-1]
+		}
+		ancestors = append(ancestors, n)
+		return f(n, parent)
+	})
+}
+
+// FindMatchingIdents finds all identifiers in 'node' that match any of the given types.
+// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within
+// the scope of each identifier we select. Otherwise, we will insert a variable at 'pos' that
+// is unrecognized.
+func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident {
+	matches := map[types.Type][]*ast.Ident{}
+	// Initialize matches to contain the variable types we are searching for.
+	for _, typ := range typs {
+		if typ == nil {
+			continue
+		}
+		matches[typ] = []*ast.Ident{}
+	}
+	seen := map[types.Object]struct{}{}
+	ast.Inspect(node, func(n ast.Node) bool {
+		if n == nil {
+			return false
+		}
+		// Prevent circular definitions. If 'pos' is within an assignment statement, do not
+		// allow any identifiers in that assignment statement to be selected. Otherwise,
+		// we could do the following, where 'x' satisfies the type of 'f0':
+		//
+		// x := fakeStruct{f0: x}
+		//
+		assignment, ok := n.(*ast.AssignStmt)
+		if ok && pos > assignment.Pos() && pos <= assignment.End() {
+			return false
+		}
+		if n.End() > pos {
+			return n.Pos() <= pos
+		}
+		ident, ok := n.(*ast.Ident)
+		if !ok || ident.Name == "_" {
+			return true
+		}
+		obj := info.Defs[ident]
+		if obj == nil || obj.Type() == nil {
+			return true
+		}
+		if _, ok := obj.(*types.TypeName); ok {
+			return true
+		}
+		// Prevent duplicates in matches' values.
+		if _, ok = seen[obj]; ok {
+			return true
+		}
+		seen[obj] = struct{}{}
+		// Find the scope for the given position. Then, check whether the object
+		// exists within the scope.
+		innerScope := pkg.Scope().Innermost(pos)
+		if innerScope == nil {
+			return true
+		}
+		_, foundObj := innerScope.LookupParent(ident.Name, pos)
+		if foundObj != obj {
+			return true
+		}
+		// The object must match one of the types that we are searching for.
+		if idents, ok := matches[obj.Type()]; ok {
+			matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name))
+		}
+		// If the object type does not exactly match any of the target types, greedily
+		// find the first target type that the object type can satisfy.
+		for typ := range matches {
+			if obj.Type() == typ {
+				continue
+			}
+			if equivalentTypes(obj.Type(), typ) {
+				matches[typ] = append(matches[typ], ast.NewIdent(ident.Name))
+			}
+		}
+		return true
+	})
+	return matches
+}
+
+func equivalentTypes(want, got types.Type) bool {
+	if want == got || types.Identical(want, got) {
+		return true
+	}
+	// Code segment to help check for untyped equality from (golang/go#32146).
+	if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
+		if lhs, ok := got.Underlying().(*types.Basic); ok {
+			return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
+		}
+	}
+	return types.AssignableTo(want, got)
+}
+
+// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the
+// given pattern. We return the identifier whose name is most similar to the pattern.
+func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr {
+	fuzz := fuzzy.NewMatcher(pattern)
+	var bestFuzz ast.Expr
+	highScore := float32(0) // minimum score is 0 (no match)
+	for _, ident := range idents {
+		// TODO: Improve scoring algorithm.
+		score := fuzz.Score(ident.Name)
+		if score > highScore {
+			highScore = score
+			bestFuzz = ident
+		} else if score == 0 {
+			// Order matters in the fuzzy matching algorithm. If we find no match
+			// when matching the target to the identifier, try matching the identifier
+			// to the target.
+			revFuzz := fuzzy.NewMatcher(ident.Name)
+			revScore := revFuzz.Score(pattern)
+			if revScore > highScore {
+				highScore = revScore
+				bestFuzz = ident
+			}
+		}
+	}
+	return bestFuzz
+}

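Aside (not part of this commit): analysisinternal is not importable from outside x/tools, so this is a standalone re-implementation of the same parent-tracking walk, runnable as-is:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// walkWithParent mirrors WalkASTWithParent above: ast.Inspect calls f(nil)
// after visiting a node's children, which is when we pop the ancestor stack.
func walkWithParent(root ast.Node, f func(n, parent ast.Node) bool) {
	var ancestors []ast.Node
	ast.Inspect(root, func(n ast.Node) bool {
		if n == nil {
			ancestors = ancestors[:len(ancestors)-1]
			return false
		}
		var parent ast.Node
		if len(ancestors) > 0 {
			parent = ancestors[len(ancestors)-1]
		}
		ancestors = append(ancestors, n)
		return f(n, parent)
	})
}

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "x.go", "package p\nfunc f() { _ = 1 + 2 }", 0)
	if err != nil {
		panic(err)
	}
	walkWithParent(file, func(n, parent ast.Node) bool {
		if lit, ok := n.(*ast.BasicLit); ok {
			fmt.Printf("%s inside %T\n", lit.Value, parent) // 1 and 2 inside *ast.BinaryExpr
		}
		return true
	})
}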
+ 12 - 4
vendor/golang.org/x/tools/internal/gocommand/invoke.go

@@ -9,7 +9,6 @@ import (
 	"bytes"
 	"bytes"
 	"context"
 	"context"
 	"fmt"
 	"fmt"
-	exec "golang.org/x/sys/execabs"
 	"io"
 	"io"
 	"os"
 	"os"
 	"regexp"
 	"regexp"
@@ -18,6 +17,8 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
+	exec "golang.org/x/sys/execabs"
+
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event"
 )
 )
 
 
@@ -131,9 +132,16 @@ type Invocation struct {
 	Verb       string
 	Verb       string
 	Args       []string
 	Args       []string
 	BuildFlags []string
 	BuildFlags []string
-	ModFlag    string
-	ModFile    string
-	Overlay    string
+
+	// If ModFlag is set, the go command is invoked with -mod=ModFlag.
+	ModFlag string
+
+	// If ModFile is set, the go command is invoked with -modfile=ModFile.
+	ModFile string
+
+	// If Overlay is set, the go command is invoked with -overlay=Overlay.
+	Overlay string
+
 	// If CleanEnv is set, the invocation will run only with the environment
 	// If CleanEnv is set, the invocation will run only with the environment
 	// in Env, not starting with os.Environ.
 	// in Env, not starting with os.Environ.
 	CleanEnv   bool
 	CleanEnv   bool

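Aside (not part of this commit): gocommand is internal, but the new field comments pin down the flag spellings. A hedged sketch of the roughly equivalent command line; go.tools.mod is a hypothetical file and the go tool is assumed to be on PATH:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// An Invocation{Verb: "list", ModFlag: "readonly", ModFile: "go.tools.mod"}
	// would run roughly this command:
	cmd := exec.Command("go", "list", "-mod=readonly", "-modfile=go.tools.mod", "./...")
	out, err := cmd.CombinedOutput()
	fmt.Println(err)
	fmt.Println(string(out))
}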
+ 12 - 10
vendor/golang.org/x/tools/internal/gocommand/vendor.go

@@ -38,10 +38,10 @@ var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
 // with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
 // of which only Verb and Args are modified to run the appropriate Go command.
 // Inspired by setDefaultBuildMod in modload/init.go
-func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
+func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
 	mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
 	if err != nil {
-		return nil, false, err
+		return false, nil, err
 	}
 
 	// We check the GOFLAGS to see if there is anything overridden or not.
@@ -49,7 +49,7 @@ func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON,
 	inv.Args = []string{"GOFLAGS"}
 	stdout, err := r.Run(ctx, inv)
 	if err != nil {
-		return nil, false, err
+		return false, nil, err
 	}
 	goflags := string(bytes.TrimSpace(stdout.Bytes()))
 	matches := modFlagRegexp.FindStringSubmatch(goflags)
@@ -57,25 +57,27 @@ func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON,
 	if len(matches) != 0 {
 		modFlag = matches[1]
 	}
-	if modFlag != "" {
-		// Don't override an explicit '-mod=' argument.
-		return mainMod, modFlag == "vendor", nil
+	// Don't override an explicit '-mod=' argument.
+	if modFlag == "vendor" {
+		return true, mainMod, nil
+	} else if modFlag != "" {
+		return false, nil, nil
 	}
 	if mainMod == nil || !go114 {
-		return mainMod, false, nil
+		return false, nil, nil
 	}
 	// Check 1.14's automatic vendor mode.
 	if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
 		if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
 			// The Go version is at least 1.14, and a vendor directory exists.
 			// Set -mod=vendor by default.
-			return mainMod, true, nil
+			return true, mainMod, nil
 		}
 	}
-	return mainMod, false, nil
+	return false, nil, nil
 }
 
-// getMainModuleAnd114 gets the main module's information and whether the
+// getMainModuleAnd114 gets one of the main modules' information and whether the
 // go command in use is 1.14+. This is the information needed to figure out
 // if vendoring should be enabled.
 func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {

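Aside (not part of this commit): the Go 1.14 default-vendoring rule checked above, restated as a small runnable helper; the directory and version are example inputs, and golang.org/x/mod/semver is the same public module used by the vendored code:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/mod/semver"
)

// vendorByDefault restates the rule from the hunk above: with no explicit
// -mod flag, vendoring is on when the main module declares go >= 1.14 and a
// vendor directory exists.
func vendorByDefault(mainModDir, goVersion string) bool {
	fi, err := os.Stat(filepath.Join(mainModDir, "vendor"))
	if err != nil || !fi.IsDir() {
		return false
	}
	return goVersion != "" && semver.Compare("v"+goVersion, "v1.14") >= 0
}

func main() {
	fmt.Println(vendorByDefault(".", "1.17"))
}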
+ 1 - 1
vendor/golang.org/x/tools/internal/imports/imports.go

@@ -306,7 +306,7 @@ func matchSpace(orig []byte, src []byte) []byte {
 	return b.Bytes()
 }
 
-var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
+var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`)
 
 func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
 	var out bytes.Buffer

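Aside (not part of this commit): the point of the non-greedy "(.+?)" shows up on an import line that contains a second quoted string later on. A runnable comparison of the old and new patterns:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	greedy := regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
	lazy := regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`)
	// An import spec followed by a comment that itself contains quotes.
	line := "\t\"fmt\" // formerly \"format\""
	fmt.Printf("%q\n", greedy.FindStringSubmatch(line)[1]) // captures up to the last quote
	fmt.Printf("%q\n", lazy.FindStringSubmatch(line)[1])   // "fmt": stops at the first closing quote
}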
+ 14 - 11
vendor/golang.org/x/tools/internal/imports/mod.go

@@ -34,7 +34,8 @@ type ModuleResolver struct {
 	scannedRoots   map[gopathwalk.Root]bool
 
 	initialized   bool
-	main          *gocommand.ModuleJSON
+	mains         []*gocommand.ModuleJSON
+	mainByDir     map[string]*gocommand.ModuleJSON
 	modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
 	modsByDir     []*gocommand.ModuleJSON // ...or Dir.
 
@@ -69,21 +70,21 @@ func (r *ModuleResolver) init() error {
 		Logf:       r.env.Logf,
 		WorkingDir: r.env.WorkingDir,
 	}
-	mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
+	vendorEnabled, mainModVendor, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
 	if err != nil {
 		return err
 	}
 
-	if mainMod != nil && vendorEnabled {
+	if mainModVendor != nil && vendorEnabled {
 		// Vendor mode is on, so all the non-Main modules are irrelevant,
 		// and we need to search /vendor for everything.
-		r.main = mainMod
+		r.mains = []*gocommand.ModuleJSON{mainModVendor}
 		r.dummyVendorMod = &gocommand.ModuleJSON{
 			Path: "",
-			Dir:  filepath.Join(mainMod.Dir, "vendor"),
+			Dir:  filepath.Join(mainModVendor.Dir, "vendor"),
 		}
-		r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
-		r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
+		r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
+		r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
 	} else {
 		// Vendor mode is off, so run go list -m ... to find everything.
 		err := r.initAllMods()
@@ -122,8 +123,10 @@ func (r *ModuleResolver) init() error {
 	r.roots = []gopathwalk.Root{
 		{filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT},
 	}
-	if r.main != nil {
-		r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
+	r.mainByDir = make(map[string]*gocommand.ModuleJSON)
+	for _, main := range r.mains {
+		r.roots = append(r.roots, gopathwalk.Root{main.Dir, gopathwalk.RootCurrentModule})
+		r.mainByDir[main.Dir] = main
 	}
 	if vendorEnabled {
 		r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
@@ -189,7 +192,7 @@ func (r *ModuleResolver) initAllMods() error {
 		r.modsByModPath = append(r.modsByModPath, mod)
 		r.modsByDir = append(r.modsByDir, mod)
 		if mod.Main {
-			r.main = mod
+			r.mains = append(r.mains, mod)
 		}
 	}
 	return nil
@@ -609,7 +612,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
 	}
 	switch root.Type {
 	case gopathwalk.RootCurrentModule:
-		importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
+		importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir))
 	case gopathwalk.RootModuleCache:
 		matches := modCacheRegexp.FindStringSubmatch(subdir)
 		if len(matches) == 0 {

+ 13 - 2
vendor/golang.org/x/tools/internal/imports/sortimports.go

@@ -9,6 +9,7 @@ package imports
 import (
 	"go/ast"
 	"go/token"
+	"log"
 	"sort"
 	"strconv"
 )
@@ -60,6 +61,7 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
 
 // mergeImports merges all the import declarations into the first one.
 // Taken from golang.org/x/tools/ast/astutil.
+// This does not adjust line numbers properly
 func mergeImports(fset *token.FileSet, f *ast.File) {
 	if len(f.Decls) <= 1 {
 		return
@@ -237,8 +239,17 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
 		p := s.Pos()
 		line := fset.File(p).Line(p)
 		for previousLine := line - 1; previousLine >= firstSpecLine; {
-			fset.File(p).MergeLine(previousLine)
-			previousLine--
+			// MergeLine can panic. Avoid the panic at the cost of not removing the blank line
+			// golang/go#50329
+			if previousLine > 0 && previousLine < fset.File(p).LineCount() {
+				fset.File(p).MergeLine(previousLine)
+				previousLine--
+			} else {
+				// try to gather some data to diagnose how this could happen
+				req := "Please report what the imports section of your go file looked like."
+				log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s",
+					firstSpecLine, line, previousLine, fset.File(p).LineCount(), req)
+			}
 		}
 	}
 	return specs

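Aside (not part of this commit): a runnable sketch of the bounds that token.File.MergeLine enforces, which the new guard mirrors; MergeLine panics unless 0 < line < LineCount():

package main

import (
	"fmt"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nimport (\n\t\"fmt\"\n)\n"
	f := fset.AddFile("x.go", -1, len(src))
	f.SetLinesForContent([]byte(src))

	fmt.Println(f.LineCount()) // 5
	f.MergeLine(2)             // fold the blank line 2 into line 3
	fmt.Println(f.LineCount()) // 4
}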
+ 23 - 0
vendor/golang.org/x/tools/internal/imports/zstdlib.go

@@ -180,6 +180,8 @@ var stdlib = map[string][]string{
 		"NewReader",
 		"NewWriter",
 		"Order",
+		"Reader",
+		"Writer",
 	},
 	"compress/zlib": []string{
 		"BestCompression",
@@ -641,7 +643,9 @@ var stdlib = map[string][]string{
 		"Named",
 		"NamedArg",
 		"NullBool",
+		"NullByte",
 		"NullFloat64",
+		"NullInt16",
 		"NullInt32",
 		"NullInt64",
 		"NullString",
@@ -2248,6 +2252,7 @@ var stdlib = map[string][]string{
 		"SHT_LOOS",
 		"SHT_LOPROC",
 		"SHT_LOUSER",
+		"SHT_MIPS_ABIFLAGS",
 		"SHT_NOBITS",
 		"SHT_NOTE",
 		"SHT_NULL",
@@ -3061,6 +3066,7 @@ var stdlib = map[string][]string{
 		"ParseExpr",
 		"ParseExprFrom",
 		"ParseFile",
+		"SkipObjectResolution",
 		"SpuriousErrors",
 		"Trace",
 	},
@@ -3441,6 +3447,7 @@ var stdlib = map[string][]string{
 		"Pt",
 		"RGBA",
 		"RGBA64",
+		"RGBA64Image",
 		"Rect",
 		"Rectangle",
 		"RegisterFormat",
@@ -3507,6 +3514,7 @@ var stdlib = map[string][]string{
 		"Op",
 		"Over",
 		"Quantizer",
+		"RGBA64Image",
 		"Src",
 	},
 	"image/gif": []string{
@@ -3612,6 +3620,7 @@ var stdlib = map[string][]string{
 		"FS",
 		"File",
 		"FileInfo",
+		"FileInfoToDirEntry",
 		"FileMode",
 		"Glob",
 		"GlobFS",
@@ -3772,15 +3781,18 @@ var stdlib = map[string][]string{
 		"Max",
 		"MaxFloat32",
 		"MaxFloat64",
+		"MaxInt",
 		"MaxInt16",
 		"MaxInt32",
 		"MaxInt64",
 		"MaxInt8",
+		"MaxUint",
 		"MaxUint16",
 		"MaxUint32",
 		"MaxUint64",
 		"MaxUint8",
 		"Min",
+		"MinInt",
 		"MinInt16",
 		"MinInt32",
 		"MinInt64",
@@ -4078,6 +4090,7 @@ var stdlib = map[string][]string{
 		"UnknownNetworkError",
 	},
 	"net/http": []string{
+		"AllowQuerySemicolons",
 		"CanonicalHeaderKey",
 		"Client",
 		"CloseNotifier",
@@ -4660,6 +4673,7 @@ var stdlib = map[string][]string{
 		"Value",
 		"ValueError",
 		"ValueOf",
+		"VisibleFields",
 		"Zero",
 	},
 	"regexp": []string{
@@ -4799,6 +4813,10 @@ var stdlib = map[string][]string{
 		"UnlockOSThread",
 		"Version",
 	},
+	"runtime/cgo": []string{
+		"Handle",
+		"NewHandle",
+	},
 	"runtime/debug": []string{
 		"BuildInfo",
 		"FreeOSMemory",
@@ -4915,6 +4933,7 @@ var stdlib = map[string][]string{
 		"QuoteRuneToGraphic",
 		"QuoteToASCII",
 		"QuoteToGraphic",
+		"QuotedPrefix",
 		"Unquote",
 		"UnquoteChar",
 	},
@@ -10334,6 +10353,7 @@ var stdlib = map[string][]string{
 		"PipeNode",
 		"Pos",
 		"RangeNode",
+		"SkipFuncCheck",
 		"StringNode",
 		"TemplateNode",
 		"TextNode",
@@ -10358,6 +10378,7 @@ var stdlib = map[string][]string{
 		"July",
 		"June",
 		"Kitchen",
+		"Layout",
 		"LoadLocation",
 		"LoadLocationFromTZData",
 		"Local",
@@ -10406,6 +10427,8 @@ var stdlib = map[string][]string{
 		"UTC",
 		"Unix",
 		"UnixDate",
+		"UnixMicro",
+		"UnixMilli",
 		"Until",
 		"Wednesday",
 		"Weekday",

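Aside (not part of this commit): a few of the Go 1.17 standard-library identifiers this table now knows about, in a runnable snippet (requires Go 1.17+):

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	fmt.Println(math.MinInt, math.MaxInt)
	fmt.Println(time.UnixMilli(0).UTC()) // 1970-01-01 00:00:00 +0000 UTC
}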
+ 183 - 0
vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go

@@ -0,0 +1,183 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+	"unicode"
+)
+
+// RuneRole specifies the role of a rune in the context of an input.
+type RuneRole byte
+
+const (
+	// RNone specifies a rune without any role in the input (i.e., whitespace/non-ASCII).
+	RNone RuneRole = iota
+	// RSep specifies a rune with the role of segment separator.
+	RSep
+	// RTail specifies a rune which is a lower-case tail in a word in the input.
+	RTail
+	// RUCTail specifies a rune which is an upper-case tail in a word in the input.
+	RUCTail
+	// RHead specifies a rune which is the first character in a word in the input.
+	RHead
+)
+
+// RuneRoles detects the roles of each byte rune in an input string and stores it in the output
+// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string
+// or when it filled the output. If output is nil, then it gets created.
+func RuneRoles(candidate []byte, reuse []RuneRole) []RuneRole {
+	var output []RuneRole
+	if cap(reuse) < len(candidate) {
+		output = make([]RuneRole, 0, len(candidate))
+	} else {
+		output = reuse[:0]
+	}
+
+	prev, prev2 := rtNone, rtNone
+	for i := 0; i < len(candidate); i++ {
+		r := rune(candidate[i])
+
+		role := RNone
+
+		curr := rtLower
+		if candidate[i] <= unicode.MaxASCII {
+			curr = runeType(rt[candidate[i]] - '0')
+		}
+
+		if curr == rtLower {
+			if prev == rtNone || prev == rtPunct {
+				role = RHead
+			} else {
+				role = RTail
+			}
+		} else if curr == rtUpper {
+			role = RHead
+
+			if prev == rtUpper {
+				// This and previous characters are both upper case.
+
+				if i+1 == len(candidate) {
+					// This is last character, previous was also uppercase -> this is UCTail
+					// i.e., (current char is C): aBC / BC / ABC
+					role = RUCTail
+				}
+			}
+		} else if curr == rtPunct {
+			switch r {
+			case '.', ':':
+				role = RSep
+			}
+		}
+		if curr != rtLower {
+			if i > 1 && output[i-1] == RHead && prev2 == rtUpper && (output[i-2] == RHead || output[i-2] == RUCTail) {
+				// The previous two characters were uppercase. The current one is not a lower case, so the
+				// previous one can't be a HEAD. Make it a UCTail.
+				// i.e., (last char is current char - B must be a UCTail): ABC / ZABC / AB.
+				output[i-1] = RUCTail
+			}
+		}
+
+		output = append(output, role)
+		prev2 = prev
+		prev = curr
+	}
+	return output
+}
+
+type runeType byte
+
+const (
+	rtNone runeType = iota
+	rtPunct
+	rtLower
+	rtUpper
+)
+
+const rt = "00000000000000000000000000000000000000000000001122222222221000000333333333333333333333333330000002222222222222222222222222200000"
+
+// LastSegment returns the substring representing the last segment from the input, where each
+// byte has an associated RuneRole in the roles slice. This makes sense only for inputs of Symbol
+// or Filename type.
+func LastSegment(input string, roles []RuneRole) string {
+	// Exclude ending separators.
+	end := len(input) - 1
+	for end >= 0 && roles[end] == RSep {
+		end--
+	}
+	if end < 0 {
+		return ""
+	}
+
+	start := end - 1
+	for start >= 0 && roles[start] != RSep {
+		start--
+	}
+
+	return input[start+1 : end+1]
+}
+
+// fromChunks copies string chunks into the given buffer.
+func fromChunks(chunks []string, buffer []byte) []byte {
+	ii := 0
+	for _, chunk := range chunks {
+		for i := 0; i < len(chunk); i++ {
+			if ii >= cap(buffer) {
+				break
+			}
+			buffer[ii] = chunk[i]
+			ii++
+		}
+	}
+	return buffer[:ii]
+}
+
+// toLower transforms the input string to lower case, which is stored in the output byte slice.
+// The lower casing considers only ASCII values - non ASCII values are left unmodified.
+// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets
+// created.
+func toLower(input []byte, reuse []byte) []byte {
+	output := reuse
+	if cap(reuse) < len(input) {
+		output = make([]byte, len(input))
+	}
+
+	for i := 0; i < len(input); i++ {
+		r := rune(input[i])
+		if input[i] <= unicode.MaxASCII {
+			if 'A' <= r && r <= 'Z' {
+				r += 'a' - 'A'
+			}
+		}
+		output[i] = byte(r)
+	}
+	return output[:len(input)]
+}
+
+// WordConsumer defines a consumer for a word delimited by the [start,end) byte offsets in an input
+// (start is inclusive, end is exclusive).
+type WordConsumer func(start, end int)
+
+// Words find word delimiters in an input based on its bytes' mappings to rune roles. The offset
+// delimiters for each word are fed to the provided consumer function.
+func Words(roles []RuneRole, consume WordConsumer) {
+	var wordStart int
+	for i, r := range roles {
+		switch r {
+		case RUCTail, RTail:
+		case RHead, RNone, RSep:
+			if i != wordStart {
+				consume(wordStart, i)
+			}
+			wordStart = i
+			if r != RHead {
+				// Skip this character.
+				wordStart = i + 1
+			}
+		}
+	}
+	if wordStart != len(roles) {
+		consume(wordStart, len(roles))
+	}
+}

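Aside (not part of this commit): the fuzzy package is internal, so here is a deliberately simplified, ASCII-only, runnable restatement of the role classification; the real RuneRoles additionally distinguishes upper-case tails:

package main

import "fmt"

// roleOf mirrors, in simplified form, the classification above: '.' and '/'
// are separators, uppercase letters start words, anything after a separator
// (or at the start) is a head, and everything else is a tail.
func roleOf(prev, r byte) string {
	switch {
	case r == '.' || r == '/':
		return "Sep"
	case r >= 'A' && r <= 'Z':
		return "Head"
	case prev == 0 || prev == '.' || prev == '/':
		return "Head"
	default:
		return "Tail"
	}
}

func main() {
	s := "fooBar.Baz"
	var prev byte
	for i := 0; i < len(s); i++ {
		fmt.Printf("%c:%s ", s[i], roleOf(prev, s[i]))
		prev = s[i]
	}
	fmt.Println() // f:Head o:Tail o:Tail B:Head a:Tail r:Tail .:Sep B:Head a:Tail z:Tail
}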
+ 407 - 0
vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go

@@ -0,0 +1,407 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fuzzy implements a fuzzy matching algorithm.
+package fuzzy
+
+import (
+	"bytes"
+	"fmt"
+)
+
+const (
+	// MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs
+	// will be truncated to this size.
+	MaxInputSize = 127
+	// MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer
+	// inputs are truncated to this size.
+	MaxPatternSize = 63
+)
+
+type scoreVal int
+
+func (s scoreVal) val() int {
+	return int(s) >> 1
+}
+
+func (s scoreVal) prevK() int {
+	return int(s) & 1
+}
+
+func score(val int, prevK int /*0 or 1*/) scoreVal {
+	return scoreVal(val<<1 + prevK)
+}
+
+// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern.
+// The matcher does not support parallel usage.
+type Matcher struct {
+	pattern       string
+	patternLower  []byte // lower-case version of the pattern
+	patternShort  []byte // first characters of the pattern
+	caseSensitive bool   // set if the pattern is mix-cased
+
+	patternRoles []RuneRole // the role of each character in the pattern
+	roles        []RuneRole // the role of each character in the tested string
+
+	scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal
+
+	scoreScale float32
+
+	lastCandidateLen     int // in bytes
+	lastCandidateMatched bool
+
+	// Reusable buffers to avoid allocating for every candidate.
+	//  - inputBuf stores the concatenated input chunks
+	//  - lowerBuf stores the last candidate in lower-case
+	//  - rolesBuf stores the calculated roles for each rune in the last
+	//    candidate.
+	inputBuf [MaxInputSize]byte
+	lowerBuf [MaxInputSize]byte
+	rolesBuf [MaxInputSize]RuneRole
+}
+
+func (m *Matcher) bestK(i, j int) int {
+	if m.scores[i][j][0].val() < m.scores[i][j][1].val() {
+		return 1
+	}
+	return 0
+}
+
+// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern.
+func NewMatcher(pattern string) *Matcher {
+	if len(pattern) > MaxPatternSize {
+		pattern = pattern[:MaxPatternSize]
+	}
+
+	m := &Matcher{
+		pattern:      pattern,
+		patternLower: toLower([]byte(pattern), nil),
+	}
+
+	for i, c := range m.patternLower {
+		if pattern[i] != c {
+			m.caseSensitive = true
+			break
+		}
+	}
+
+	if len(pattern) > 3 {
+		m.patternShort = m.patternLower[:3]
+	} else {
+		m.patternShort = m.patternLower
+	}
+
+	m.patternRoles = RuneRoles([]byte(pattern), nil)
+
+	if len(pattern) > 0 {
+		maxCharScore := 4
+		m.scoreScale = 1 / float32(maxCharScore*len(pattern))
+	}
+
+	return m
+}
+
+// Score returns the score returned by matching the candidate to the pattern.
+// This is not designed for parallel use. Multiple candidates must be scored sequentially.
+// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
+func (m *Matcher) Score(candidate string) float32 {
+	return m.ScoreChunks([]string{candidate})
+}
+
+func (m *Matcher) ScoreChunks(chunks []string) float32 {
+	candidate := fromChunks(chunks, m.inputBuf[:])
+	if len(candidate) > MaxInputSize {
+		candidate = candidate[:MaxInputSize]
+	}
+	lower := toLower(candidate, m.lowerBuf[:])
+	m.lastCandidateLen = len(candidate)
+
+	if len(m.pattern) == 0 {
+		// Empty patterns perfectly match candidates.
+		return 1
+	}
+
+	if m.match(candidate, lower) {
+		sc := m.computeScore(candidate, lower)
+		if sc > minScore/2 && !m.poorMatch() {
+			m.lastCandidateMatched = true
+			if len(m.pattern) == len(candidate) {
+				// Perfect match.
+				return 1
+			}
+
+			if sc < 0 {
+				sc = 0
+			}
+			normalizedScore := float32(sc) * m.scoreScale
+			if normalizedScore > 1 {
+				normalizedScore = 1
+			}
+
+			return normalizedScore
+		}
+	}
+
+	m.lastCandidateMatched = false
+	return 0
+}
+
+const minScore = -10000
+
+// MatchedRanges returns matches ranges for the last scored string as a flattened array of
+// [begin, end) byte offset pairs.
+func (m *Matcher) MatchedRanges() []int {
+	if len(m.pattern) == 0 || !m.lastCandidateMatched {
+		return nil
+	}
+	i, j := m.lastCandidateLen, len(m.pattern)
+	if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 {
+		return nil
+	}
+
+	var ret []int
+	k := m.bestK(i, j)
+	for i > 0 {
+		take := (k == 1)
+		k = m.scores[i][j][k].prevK()
+		if take {
+			if len(ret) == 0 || ret[len(ret)-1] != i {
+				ret = append(ret, i)
+				ret = append(ret, i-1)
+			} else {
+				ret[len(ret)-1] = i - 1
+			}
+			j--
+		}
+		i--
+	}
+	// Reverse slice.
+	for i := 0; i < len(ret)/2; i++ {
+		ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i]
+	}
+	return ret
+}
+
+func (m *Matcher) match(candidate []byte, candidateLower []byte) bool {
+	i, j := 0, 0
+	for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
+		if candidateLower[i] == m.patternLower[j] {
+			j++
+		}
+	}
+	if j != len(m.patternLower) {
+		return false
+	}
+
+	// The input passes the simple test against pattern, so it is time to classify its characters.
+	// Character roles are used below to find the last segment.
+	m.roles = RuneRoles(candidate, m.rolesBuf[:])
+
+	return true
+}
+
+func (m *Matcher) computeScore(candidate []byte, candidateLower []byte) int {
+	pattLen, candLen := len(m.pattern), len(candidate)
+
+	for j := 0; j <= len(m.pattern); j++ {
+		m.scores[0][j][0] = minScore << 1
+		m.scores[0][j][1] = minScore << 1
+	}
+	m.scores[0][0][0] = score(0, 0) // Start with 0.
+
+	segmentsLeft, lastSegStart := 1, 0
+	for i := 0; i < candLen; i++ {
+		if m.roles[i] == RSep {
+			segmentsLeft++
+			lastSegStart = i + 1
+		}
+	}
+
+	// A per-character bonus for a consecutive match.
+	consecutiveBonus := 2
+	wordIdx := 0 // Word count within segment.
+	for i := 1; i <= candLen; i++ {
+
+		role := m.roles[i-1]
+		isHead := role == RHead
+
+		if isHead {
+			wordIdx++
+		} else if role == RSep && segmentsLeft > 1 {
+			wordIdx = 0
+			segmentsLeft--
+		}
+
+		var skipPenalty int
+		if i == 1 || (i-1) == lastSegStart {
+			// Skipping the start of first or last segment.
+			skipPenalty++
+		}
+
+		for j := 0; j <= pattLen; j++ {
+			// By default, we don't have a match. Fill in the skip data.
+			m.scores[i][j][1] = minScore << 1
+
+			// Compute the skip score.
+			k := 0
+			if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() {
+				k = 1
+			}
+
+			skipScore := m.scores[i-1][j][k].val()
+			// Do not penalize missing characters after the last matched segment.
+			if j != pattLen {
+				skipScore -= skipPenalty
+			}
+			m.scores[i][j][0] = score(skipScore, k)
+
+			if j == 0 || candidateLower[i-1] != m.patternLower[j-1] {
+				// Not a match.
+				continue
+			}
+			pRole := m.patternRoles[j-1]
+
+			if role == RTail && pRole == RHead {
+				if j > 1 {
+					// Not a match: a head in the pattern matches a tail character in the candidate.
+					continue
+				}
+				// Special treatment for the first character of the pattern. We allow
+				// matches in the middle of a word if they are long enough, at least
+				// min(3, pattern.length) characters.
+				if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) {
+					continue
+				}
+			}
+
+			// Compute the char score.
+			var charScore int
+			// Bonus 1: the char is in the candidate's last segment.
+			if segmentsLeft <= 1 {
+				charScore++
+			}
+			// Bonus 2: Case match or a Head in the pattern aligns with one in the word.
+			// Single-case patterns lack segmentation signals and we assume any character
+			// can be a head of a segment.
+			if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) {
+				charScore++
+			}
+
+			// Penalty 1: pattern char is Head, candidate char is Tail.
+			if role == RTail && pRole == RHead {
+				charScore--
+			}
+			// Penalty 2: first pattern character matched in the middle of a word.
+			if j == 1 && role == RTail {
+				charScore -= 4
+			}
+
+			// Third dimension encodes whether there is a gap between the previous match and the current
+			// one.
+			for k := 0; k < 2; k++ {
+				sc := m.scores[i-1][j-1][k].val() + charScore
+
+				isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart
+				if isConsecutive {
+					// Bonus 3: a consecutive match. First character match also gets a bonus to
+					// ensure prefix final match score normalizes to 1.0.
+					// Logically, this is a part of charScore, but we have to compute it here because it
+					// only applies for consecutive matches (k == 1).
+					sc += consecutiveBonus
+				}
+				if k == 0 {
+					// Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack
+					// of alignment.
+					if role == RTail || role == RUCTail {
+						sc -= 3
+					}
+				}
+
+				if sc > m.scores[i][j][1].val() {
+					m.scores[i][j][1] = score(sc, k)
+				}
+			}
+		}
+	}
+
+	result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val()
+
+	return result
+}
+
+// ScoreTable returns the score table computed for the provided candidate. Used only for debugging.
+func (m *Matcher) ScoreTable(candidate string) string {
+	var buf bytes.Buffer
+
+	var line1, line2, separator bytes.Buffer
+	line1.WriteString("\t")
+	line2.WriteString("\t")
+	for j := 0; j < len(m.pattern); j++ {
+		line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j]))
+		separator.WriteString("----------------")
+	}
+
+	buf.WriteString(line1.String())
+	buf.WriteString("\n")
+	buf.WriteString(separator.String())
+	buf.WriteString("\n")
+
+	for i := 1; i <= len(candidate); i++ {
+		line1.Reset()
+		line2.Reset()
+
+		line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1]))
+		line2.WriteString("\t")
+
+		for j := 1; j <= len(m.pattern); j++ {
+			line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK())))
+			line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK())))
+		}
+		buf.WriteString(line1.String())
+		buf.WriteString("\n")
+		buf.WriteString(line2.String())
+		buf.WriteString("\n")
+		buf.WriteString(separator.String())
+		buf.WriteString("\n")
+	}
+
+	return buf.String()
+}
+
+func dir(prevK int) rune {
+	if prevK == 0 {
+		return 'M'
+	}
+	return 'H'
+}
+
+func (m *Matcher) poorMatch() bool {
+	if len(m.pattern) < 2 {
+		return false
+	}
+
+	i, j := m.lastCandidateLen, len(m.pattern)
+	k := m.bestK(i, j)
+
+	var counter, len int
+	for i > 0 {
+		take := (k == 1)
+		k = m.scores[i][j][k].prevK()
+		if take {
+			len++
+			if k == 0 && len < 3 && m.roles[i-1] == RTail {
+				// Short match in the middle of a word
+				counter++
+				if counter > 1 {
+					return true
+				}
+			}
+			j--
+		} else {
+			len = 0
+		}
+		i--
+	}
+	return false
+}

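Aside (not part of this commit): a standalone re-implementation of the cheap first phase of Matcher.match above, an in-order subsequence test, runnable as-is:

package main

import "fmt"

// isSubsequence reports whether every pattern byte appears, in order, in the
// candidate; only candidates passing this filter are scored by the matrix.
func isSubsequence(pattern, candidate string) bool {
	j := 0
	for i := 0; i < len(candidate) && j < len(pattern); i++ {
		if candidate[i] == pattern[j] {
			j++
		}
	}
	return j == len(pattern)
}

func main() {
	fmt.Println(isSubsequence("fmt", "formatter")) // true
	fmt.Println(isSubsequence("fmt", "transform")) // false: no 't' after the matched 'm'
}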
+ 236 - 0
vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go

@@ -0,0 +1,236 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+	"unicode"
+)
+
+// SymbolMatcher implements a fuzzy matching algorithm optimized for Go symbols
+// of the form:
+//  example.com/path/to/package.object.field
+//
+// Knowing that we are matching symbols like this allows us to make the
+// following optimizations:
+//  - We can incorporate right-to-left relevance directly into the score
+//    calculation.
+//  - We can match from right to left, discarding leading bytes if the input is
+//    too long.
+//  - We just take the right-most match without losing too much precision. This
+//    allows us to use an O(n) algorithm.
+//  - We can operate directly on chunked strings; in many cases we will
+//    be storing the package path and/or package name separately from the
+//    symbol or identifiers, so doing this avoids allocating strings.
+//  - We can return the index of the right-most match, allowing us to trim
+//    irrelevant qualification.
+//
+// This implementation is experimental, serving as a reference fast algorithm
+// to compare to the fuzzy algorithm implemented by Matcher.
+type SymbolMatcher struct {
+	// Using buffers of length 256 is both a reasonable size for most qualified
+	// symbols, and makes it easy to avoid bounds checks by using uint8 indexes.
+	pattern     [256]rune
+	patternLen  uint8
+	inputBuffer [256]rune   // avoid allocating when considering chunks
+	roles       [256]uint32 // which roles does a rune play (word start, etc.)
+	segments    [256]uint8  // how many segments from the right is each rune
+}
+
+const (
+	segmentStart uint32 = 1 << iota
+	wordStart
+	separator
+)
+
+// NewSymbolMatcher creates a SymbolMatcher that may be used to match the given
+// search pattern.
+//
+// Currently this matcher only accepts case-insensitive fuzzy patterns.
+//
+// An empty pattern matches no input.
+func NewSymbolMatcher(pattern string) *SymbolMatcher {
+	m := &SymbolMatcher{}
+	for _, p := range pattern {
+		m.pattern[m.patternLen] = unicode.ToLower(p)
+		m.patternLen++
+		if m.patternLen == 255 || int(m.patternLen) == len(pattern) {
+			// break at 255 so that we can represent patternLen with a uint8.
+			break
+		}
+	}
+	return m
+}
+
+// Match looks for the right-most match of the search pattern within the symbol
+// represented by concatenating the given chunks, returning its offset and
+// score.
+//
+// If a match is found, the first return value will hold the absolute byte
+// offset within all chunks for the start of the symbol. In other words, the
+// index of the match within strings.Join(chunks, ""). If no match is found,
+// the first return value will be -1.
+//
+// The second return value will be the score of the match, which is always
+// between 0 and 1, inclusive. A score of 0 indicates no match.
+func (m *SymbolMatcher) Match(chunks []string) (int, float64) {
+	// Explicit behavior for an empty pattern.
+	//
+	// As a minor optimization, this also avoids nilness checks later on, since
+	// the compiler can prove that m != nil.
+	if m.patternLen == 0 {
+		return -1, 0
+	}
+
+	// First phase: populate the input buffer with lower-cased runes.
+	//
+	// We could also check for a forward match here, but since we'd have to write
+	// the entire input anyway this has negligible impact on performance.
+
+	var (
+		inputLen  = uint8(0)
+		modifiers = wordStart | segmentStart
+	)
+
+input:
+	for _, chunk := range chunks {
+		for _, r := range chunk {
+			if r == '.' || r == '/' {
+				modifiers |= separator
+			}
+			// optimization: avoid calls to unicode.ToLower, which can't be inlined.
+			l := r
+			if r <= unicode.MaxASCII {
+				if 'A' <= r && r <= 'Z' {
+					l = r + 'a' - 'A'
+				}
+			} else {
+				l = unicode.ToLower(r)
+			}
+			if l != r {
+				modifiers |= wordStart
+			}
+			m.inputBuffer[inputLen] = l
+			m.roles[inputLen] = modifiers
+			inputLen++
+			if m.roles[inputLen-1]&separator != 0 {
+				modifiers = wordStart | segmentStart
+			} else {
+				modifiers = 0
+			}
+			// TODO: we should prefer the right-most input if it overflows, rather
+			//       than the left-most as we're doing here.
+			if inputLen == 255 {
+				break input
+			}
+		}
+	}
+
+	// Second phase: find the right-most match, and count segments from the
+	// right.
+
+	var (
+		pi    = uint8(m.patternLen - 1) // pattern index
+		p     = m.pattern[pi]           // pattern rune
+		start = -1                      // start offset of match
+		rseg  = uint8(0)
+	)
+	const maxSeg = 3 // maximum number of segments from the right to count, for scoring purposes.
+
+	for ii := inputLen - 1; ; ii-- {
+		r := m.inputBuffer[ii]
+		if rseg < maxSeg && m.roles[ii]&separator != 0 {
+			rseg++
+		}
+		m.segments[ii] = rseg
+		if p == r {
+			if pi == 0 {
+				start = int(ii)
+				break
+			}
+			pi--
+			p = m.pattern[pi]
+		}
+		// Don't check ii >= 0 in the loop condition: ii is a uint8.
+		if ii == 0 {
+			break
+		}
+	}
+
+	if start < 0 {
+		// no match: skip scoring
+		return -1, 0
+	}
+
+	// Third phase: find the shortest match, and compute the score.
+
+	// Score is the average score for each character.
+	//
+	// A character score is the multiple of:
+	//   1. 1.0 if the character starts a segment, .8 if the character starts a
+	//      mid-segment word, otherwise 0.6. This carries over to immediately
+	//      following characters.
+	//   2. For the final character match, the multiplier from (1) is reduced to
+	//     .8 if the next character in the input is a mid-segment word, or 0.6 if
+	//      the next character in the input is not a word or segment start. This
+	//      ensures that we favor whole-word or whole-segment matches over prefix
+	//      matches.
+	//   3. 1.0 if the character is part of the last segment, otherwise
+	//      1.0-.2*<segments from the right>, with a max segment count of 3.
+	//
+	// This is a very naive algorithm, but it is fast. There's lots of prior art
+	// here, and we should leverage it. For example, we could explicitly consider
+	// character distance, and exact matches of words or segments.
+	//
+	// Also note that this might not actually find the highest scoring match, as
+	// doing so could require a non-linear algorithm, depending on how the score
+	// is calculated.
+
+	pi = 0
+	p = m.pattern[pi]
+
+	const (
+		segStreak  = 1.0
+		wordStreak = 0.8
+		noStreak   = 0.6
+		perSegment = 0.2 // we count at most 3 segments above
+	)
+
+	streakBonus := noStreak
+	totScore := 0.0
+	for ii := uint8(start); ii < inputLen; ii++ {
+		r := m.inputBuffer[ii]
+		if r == p {
+			pi++
+			p = m.pattern[pi]
+			// Note: this could be optimized with some bit operations.
+			switch {
+			case m.roles[ii]&segmentStart != 0 && segStreak > streakBonus:
+				streakBonus = segStreak
+			case m.roles[ii]&wordStart != 0 && wordStreak > streakBonus:
+				streakBonus = wordStreak
+			}
+			finalChar := pi >= m.patternLen
+			// finalCost := 1.0
+			if finalChar && streakBonus > noStreak {
+				switch {
+				case ii == inputLen-1 || m.roles[ii+1]&segmentStart != 0:
+					// Full segment: no reduction
+				case m.roles[ii+1]&wordStart != 0:
+					streakBonus = wordStreak
+				default:
+					streakBonus = noStreak
+				}
+			}
+			totScore += streakBonus * (1.0 - float64(m.segments[ii])*perSegment)
+			if finalChar {
+				break
+			}
+		} else {
+			streakBonus = noStreak
+		}
+	}
+
+	return start, totScore / float64(m.patternLen)
+}

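To make the scoring rules above concrete, here is a small hypothetical walk-through, separate from the vendored code: matching the pattern "fb" against the input "fooBar", where 'f' starts the only segment and 'B' starts a mid-segment word. The weights mirror the constants in the matcher; the role assignments are filled in by hand.

package main

import "fmt"

func main() {
	const (
		segStreak  = 1.0
		wordStreak = 0.8
		noStreak   = 0.6
	)
	// 'f' starts the segment, so it earns segStreak; both matched runes sit
	// in the right-most segment, so the per-segment factor stays 1.0.
	fScore := segStreak * 1.0
	// 'B' starts a mid-segment word (wordStreak), but it is the final
	// pattern rune and the next input rune 'a' starts nothing, so the bonus
	// is demoted to noStreak: a bare prefix match is penalized.
	bScore := noStreak * 1.0
	// The reported score is the average over the pattern length.
	fmt.Printf("score(fooBar, fb) = %.2f\n", (fScore+bScore)/2) // 0.80
}
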
+ 167 - 12
vendor/golang.org/x/tools/internal/typeparams/common.go

@@ -2,24 +2,179 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package typeparams provides functions to work indirectly with type parameter
-// data stored in go/ast and go/types objects, while these API are guarded by a
-// build constraint.
+// Package typeparams contains common utilities for writing tools that interact
+// with generic Go code, as introduced with Go 1.18.
 //
-// This package exists to make it easier for tools to work with generic code,
-// while also compiling against older Go versions.
+// Many of the types and functions in this package are proxies for the new APIs
+// introduced in the standard library with Go 1.18. For example, the
+// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
+// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
+// versions older than 1.18 these helpers are implemented as stubs, allowing
+// users of this package to write code that handles generic constructs inline,
+// even if the Go version being used to compile does not support generics.
+//
+// Additionally, this package contains common utilities for working with the
+// new generic constructs, to supplement the standard library APIs. Notably,
+// the StructuralTerms API computes a minimal representation of the structural
+// restrictions on a type parameter. In the future, this API may be available
+// from go/types.
+//
+// See the example/README.md for a more detailed guide on how to update tools
+// to support generics.
 package typeparams
 
 import (
 	"go/ast"
 	"go/token"
+	"go/types"
 )
 
-// A IndexExprData holds data from both ast.IndexExpr and the new
-// ast.MultiIndexExpr, which was introduced in Go 1.18.
-type IndexExprData struct {
-	X       ast.Expr   // expression
-	Lbrack  token.Pos  // position of "["
-	Indices []ast.Expr // index expressions
-	Rbrack  token.Pos  // position of "]"
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+	case *IndexListExpr:
+		return e.X, e.Lbrack, e.Indices, e.Rbrack
+	}
+	return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(indices) {
+	case 0:
+		panic("empty indices")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  indices[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: indices,
+			Rbrack:  rbrack,
+		}
+	}
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+	_, ok := t.(*TypeParam)
+	return ok
+}
+
+// OriginMethod returns the origin method associated with the method fn.
+// For methods on a non-generic receiver base type, this is just
+// fn. However, for methods with a generic receiver, OriginMethod returns the
+// corresponding method in the method set of the origin type.
+//
+// As a special case, if fn is not a method (has no receiver), OriginMethod
+// returns fn.
+func OriginMethod(fn *types.Func) *types.Func {
+	recv := fn.Type().(*types.Signature).Recv()
+	if recv == nil {
+		return fn
+	}
+	base := recv.Type()
+	p, isPtr := base.(*types.Pointer)
+	if isPtr {
+		base = p.Elem()
+	}
+	named, isNamed := base.(*types.Named)
+	if !isNamed {
+		// Receiver is a *types.Interface.
+		return fn
+	}
+	if ForNamed(named).Len() == 0 {
+		// Receiver base has no type parameters, so we can avoid the lookup below.
+		return fn
+	}
+	orig := NamedTypeOrigin(named)
+	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+	return gfn.(*types.Func)
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+//  type Interface[T any] interface {
+//  	Accept(T)
+//  }
+//
+//  type Container[T any] struct {
+//  	Element T
+//  }
+//
+//  func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
+	// If V and T are not both named, or do not have matching non-empty type
+	// parameter lists, fall back on types.AssignableTo.
+
+	VN, Vnamed := V.(*types.Named)
+	TN, Tnamed := T.(*types.Named)
+	if !Vnamed || !Tnamed {
+		return types.AssignableTo(V, T)
+	}
+
+	vtparams := ForNamed(VN)
+	ttparams := ForNamed(TN)
+	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
+		return types.AssignableTo(V, T)
+	}
+
+	// V and T have the same (non-zero) number of type params. Instantiate both
+	// with the type parameters of V. This must always succeed for V, and will
+	// succeed for T if and only if the type set of each type parameter of V is a
+	// subset of the type set of the corresponding type parameter of T, meaning
+	// that every instantiation of V corresponds to a valid instantiation of T.
+
+	// Minor optimization: ensure we share a context across the two
+	// instantiations below.
+	if ctxt == nil {
+		ctxt = NewContext()
+	}
+
+	var targs []types.Type
+	for i := 0; i < vtparams.Len(); i++ {
+		targs = append(targs, vtparams.At(i))
+	}
+
+	vinst, err := Instantiate(ctxt, V, targs, true)
+	if err != nil {
+		panic("type parameters should satisfy their own constraints")
+	}
+
+	tinst, err := Instantiate(ctxt, T, targs, true)
+	if err != nil {
+		return false
+	}
+
+	return types.AssignableTo(vinst, tinst)
 }

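Since internal packages cannot be imported from outside x/tools, the sketch below reproduces the go1.18 arm of UnpackIndexExpr just to show the intended usage pattern; on a go1.18+ toolchain, "m[K, V]" parses as an *ast.IndexListExpr.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// unpack mirrors UnpackIndexExpr for the go1.18+ case, reproduced here only
// so the example is self-contained.
func unpack(n ast.Node) (x ast.Expr, indices []ast.Expr) {
	switch e := n.(type) {
	case *ast.IndexExpr:
		return e.X, []ast.Expr{e.Index}
	case *ast.IndexListExpr: // go1.18+
		return e.X, e.Indices
	}
	return nil, nil
}

func main() {
	expr, err := parser.ParseExpr("m[K, V]")
	if err != nil {
		panic(err)
	}
	x, indices := unpack(expr)
	fmt.Println(x, len(indices)) // m 2
}
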
+ 12 - 0
vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go

@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = false

+ 15 - 0
vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go

@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+// Note: this constant is in a separate file as this is the only acceptable
+// diff between the <1.18 API of this package and the 1.18 API.
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = true

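A consuming tool typically branches on this constant so that the generics-aware work compiles away on older toolchains; a minimal sketch with a local stand-in for the internal constant:

package main

import "fmt"

// enabled stands in for typeparams.Enabled. Because the real constant is
// resolved at compile time by the build-tagged files above, the dead branch
// is eliminated entirely.
const enabled = true

func main() {
	if enabled {
		fmt.Println("inspect type parameter lists, instantiations, ...")
	} else {
		fmt.Println("generic syntax cannot occur; skip the extra work")
	}
}
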
+ 216 - 0
vendor/golang.org/x/tools/internal/typeparams/normalize.go

@@ -0,0 +1,216 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"errors"
+	"fmt"
+	"go/types"
+	"os"
+	"strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//  type T[P interface{~int; m()}] int
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//  type A interface{ ~string|~[]byte }
+//
+//  type B interface{ int|string }
+//
+//  type C interface { ~string|~int }
+//
+//  type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *TypeParam) ([]*Term, error) {
+	constraint := tparam.Constraint()
+	if constraint == nil {
+		return nil, fmt.Errorf("%s has nil constraint", tparam)
+	}
+	iface, _ := constraint.Underlying().(*types.Interface)
+	if iface == nil {
+		return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+	}
+	return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*Term, error) {
+	return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *Union) ([]*Term, error) {
+	return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*Term, error) {
+	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+	if err != nil {
+		return nil, err
+	}
+	if tset.terms.isEmpty() {
+		return nil, ErrEmptyTypeSet
+	}
+	if tset.terms.isAll() {
+		return nil, nil
+	}
+	var terms []*Term
+	for _, term := range tset.terms {
+		terms = append(terms, NewTerm(term.tilde, term.typ))
+	}
+	return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+	complete bool
+	terms    termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+	if t == nil {
+		panic("nil type")
+	}
+
+	if debug {
+		indentf(depth, "%s", t.String())
+		defer func() {
+			if err != nil {
+				indentf(depth, "=> %s", err)
+			} else {
+				indentf(depth, "=> %s", res.terms.String())
+			}
+		}()
+	}
+
+	const maxTermCount = 100
+	if tset, ok := seen[t]; ok {
+		if !tset.complete {
+			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+		}
+		return tset, nil
+	}
+
+	// Mark the current type as seen to avoid infinite recursion.
+	tset := new(termSet)
+	defer func() {
+		tset.complete = true
+	}()
+	seen[t] = tset
+
+	switch u := t.Underlying().(type) {
+	case *types.Interface:
+		// The term set of an interface is the intersection of the term sets of its
+		// embedded types.
+		tset.terms = allTermlist
+		for i := 0; i < u.NumEmbeddeds(); i++ {
+			embedded := u.EmbeddedType(i)
+			if _, ok := embedded.Underlying().(*TypeParam); ok {
+				return nil, fmt.Errorf("invalid embedded type %T", embedded)
+			}
+			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+			if err != nil {
+				return nil, err
+			}
+			tset.terms = tset.terms.intersect(tset2.terms)
+		}
+	case *Union:
+		// The term set of a union is the union of term sets of its terms.
+		tset.terms = nil
+		for i := 0; i < u.Len(); i++ {
+			t := u.Term(i)
+			var terms termlist
+			switch t.Type().Underlying().(type) {
+			case *types.Interface:
+				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+				if err != nil {
+					return nil, err
+				}
+				terms = tset2.terms
+			case *TypeParam, *Union:
+				// A stand-alone type parameter or union is not permitted as union
+				// term.
+				return nil, fmt.Errorf("invalid union term %T", t)
+			default:
+				if t.Type() == types.Typ[types.Invalid] {
+					continue
+				}
+				terms = termlist{{t.Tilde(), t.Type()}}
+			}
+			tset.terms = tset.terms.union(terms)
+			if len(tset.terms) > maxTermCount {
+				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+			}
+		}
+	case *TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}

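The doc comment's A|B;C example can be checked end to end. The internal package above cannot be imported, but this same commit vendors the importable golang.org/x/exp/typeparams with the same file layout; assuming it exposes the same StructuralTerms signature, a sketch for a go1.18+ toolchain:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/exp/typeparams"
)

const src = `package p

type A interface{ ~string | ~[]byte }
type B interface{ int | string }
type C interface{ ~string | ~int }

type T[P interface{ A | B; C }] int
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	// Fetch P from T's type parameter list and normalize its constraint.
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	tparam := typeparams.ForNamed(named).At(0)
	terms, err := typeparams.StructuralTerms(tparam)
	if err != nil {
		panic(err)
	}
	fmt.Println(terms) // expected (modulo order): [~string int]
}
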
+ 0 - 93
vendor/golang.org/x/tools/internal/typeparams/notypeparams.go

@@ -1,93 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams || !go1.18
-// +build !typeparams !go1.18
-
-package typeparams
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// NOTE: doc comments must be kept in sync with typeparams.go.
-
-// Enabled reports whether type parameters are enabled in the current build
-// environment.
-const Enabled = false
-
-// GetIndexExprData extracts data from AST nodes that represent index
-// expressions.
-//
-// For an ast.IndexExpr, the resulting IndexExprData will have exactly one
-// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a
-// variable number of index expressions.
-//
-// For nodes that don't represent index expressions, GetIndexExprData returns
-// nil.
-func GetIndexExprData(n ast.Node) *IndexExprData {
-	if e, _ := n.(*ast.IndexExpr); e != nil {
-		return &IndexExprData{
-			X:       e.X,
-			Lbrack:  e.Lbrack,
-			Indices: []ast.Expr{e.Index},
-			Rbrack:  e.Rbrack,
-		}
-	}
-	return nil
-}
-
-// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
-func ForTypeDecl(*ast.TypeSpec) *ast.FieldList {
-	return nil
-}
-
-// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
-func ForFuncDecl(*ast.FuncDecl) *ast.FieldList {
-	return nil
-}
-
-// ForSignature extracts the (possibly empty) type parameter object list from
-// sig.
-func ForSignature(*types.Signature) []*types.TypeName {
-	return nil
-}
-
-// IsComparable reports if iface is the comparable interface.
-func IsComparable(*types.Interface) bool {
-	return false
-}
-
-// IsConstraint reports whether iface may only be used as a type parameter
-// constraint (i.e. has a type set or is the comparable interface).
-func IsConstraint(*types.Interface) bool {
-	return false
-}
-
-// ForNamed extracts the (possibly empty) type parameter object list from
-// named.
-func ForNamed(*types.Named) []*types.TypeName {
-	return nil
-}
-
-// NamedTArgs extracts the (possibly empty) type argument list from named.
-func NamedTArgs(*types.Named) []types.Type {
-	return nil
-}
-
-// InitInferred initializes info to record inferred type information.
-func InitInferred(*types.Info) {
-}
-
-// GetInferred extracts inferred type information from info for e.
-//
-// The expression e may have an inferred type if it is an *ast.IndexExpr
-// representing partial instantiation of a generic function type for which type
-// arguments have been inferred using constraint type inference, or if it is an
-// *ast.CallExpr for which type type arguments have be inferred using both
-// constraint type inference and function argument inference.
-func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) {
-	return nil, nil
-}

+ 172 - 0
vendor/golang.org/x/tools/internal/typeparams/termlist.go

@@ -0,0 +1,172 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" ∪ ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// If the type set represented by xl is specified by a single (non-𝓤) term,
+// structuralType returns that type. Otherwise it returns nil.
+func (xl termlist) structuralType() types.Type {
+	if nl := xl.norm(); len(nl) == 1 {
+		return nl[0].typ // if nl.isAll() then typ is nil, which is ok
+	}
+	return nil
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}

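A toy trace of the merging loop in norm(), with strings standing in for terms: starting from {int, ~int, string}, the exact term int is absorbed into ~int, leaving the normal form {~int, string}.

package main

import "fmt"

// union merges an exact term into its tilde form, mirroring the
// x.typ == y.typ case of term.union above; terms here are just strings.
func union(x, y string) (string, bool) {
	if x == "~"+y {
		return x, true // ~t ∪ t == ~t
	}
	if y == "~"+x {
		return y, true
	}
	return "", false // disjoint: keep both terms
}

func main() {
	terms := []string{"int", "~int", "string"}
	used := make([]bool, len(terms))
	var norm []string
	for i, x := range terms {
		if used[i] {
			continue
		}
		for j := i + 1; j < len(terms); j++ {
			if used[j] {
				continue
			}
			if u, ok := union(x, terms[j]); ok {
				x = u
				used[j] = true // merged into x; skip in later iterations
			}
		}
		norm = append(norm, x)
	}
	fmt.Println(norm) // [~int string]
}
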
+ 0 - 125
vendor/golang.org/x/tools/internal/typeparams/typeparams.go

@@ -1,125 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams && go1.18
-// +build typeparams,go1.18
-
-package typeparams
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// NOTE: doc comments must be kept in sync with notypeparams.go.
-
-// Enabled reports whether type parameters are enabled in the current build
-// environment.
-const Enabled = true
-
-// GetIndexExprData extracts data from AST nodes that represent index
-// expressions.
-//
-// For an ast.IndexExpr, the resulting IndexExprData will have exactly one
-// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a
-// variable number of index expressions.
-//
-// For nodes that don't represent index expressions, GetIndexExprData returns
-// nil.
-func GetIndexExprData(n ast.Node) *IndexExprData {
-	switch e := n.(type) {
-	case *ast.IndexExpr:
-		return &IndexExprData{
-			X:       e.X,
-			Lbrack:  e.Lbrack,
-			Indices: []ast.Expr{e.Index},
-			Rbrack:  e.Rbrack,
-		}
-	case *ast.MultiIndexExpr:
-		return (*IndexExprData)(e)
-	}
-	return nil
-}
-
-// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
-func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList {
-	return n.TParams
-}
-
-// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
-func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList {
-	if n.Type != nil {
-		return n.Type.TParams
-	}
-	return nil
-}
-
-// ForSignature extracts the (possibly empty) type parameter object list from
-// sig.
-func ForSignature(sig *types.Signature) []*types.TypeName {
-	return tparamsSlice(sig.TParams())
-}
-
-// IsComparable reports if iface is the comparable interface.
-func IsComparable(iface *types.Interface) bool {
-	return iface.IsComparable()
-}
-
-// IsConstraint reports whether iface may only be used as a type parameter
-// constraint (i.e. has a type set or is the comparable interface).
-func IsConstraint(iface *types.Interface) bool {
-	return iface.IsConstraint()
-}
-
-// ForNamed extracts the (possibly empty) type parameter object list from
-// named.
-func ForNamed(named *types.Named) []*types.TypeName {
-	return tparamsSlice(named.TParams())
-}
-
-func tparamsSlice(tparams *types.TypeParams) []*types.TypeName {
-	if tparams.Len() == 0 {
-		return nil
-	}
-	result := make([]*types.TypeName, tparams.Len())
-	for i := 0; i < tparams.Len(); i++ {
-		result[i] = tparams.At(i)
-	}
-	return result
-}
-
-// NamedTArgs extracts the (possibly empty) type argument list from named.
-func NamedTArgs(named *types.Named) []types.Type {
-	ntargs := named.NumTArgs()
-	if ntargs == 0 {
-		return nil
-	}
-
-	targs := make([]types.Type, ntargs)
-	for i := 0; i < ntargs; i++ {
-		targs[i] = named.TArg(i)
-	}
-
-	return targs
-}
-
-// InitInferred initializes info to record inferred type information.
-func InitInferred(info *types.Info) {
-	info.Inferred = make(map[ast.Expr]types.Inferred)
-}
-
-// GetInferred extracts inferred type information from info for e.
-//
-// The expression e may have an inferred type if it is an *ast.IndexExpr
-// representing partial instantiation of a generic function type for which type
-// arguments have been inferred using constraint type inference, or if it is an
-// *ast.CallExpr for which type type arguments have be inferred using both
-// constraint type inference and function argument inference.
-func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) {
-	if info.Inferred == nil {
-		return nil, nil
-	}
-	inf := info.Inferred[e]
-	return inf.TArgs, inf.Sig
-}

+ 197 - 0
vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go

@@ -0,0 +1,197 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+func unsupported() {
+	panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+	ast.Expr
+	X       ast.Expr   // expression
+	Lbrack  token.Pos  // position of "["
+	Indices []ast.Expr // index expressions
+	Rbrack  token.Pos  // position of "]"
+}
+
+// ForTypeSpec returns an empty field list, as type parameters are not
+// supported at this Go version.
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
+	return nil
+}
+
+// ForFuncType returns an empty field list, as type parameters are not
+// supported at this Go version.
+func ForFuncType(*ast.FuncType) *ast.FieldList {
+	return nil
+}
+
+// TypeParam is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type TypeParam struct{ types.Type }
+
+func (*TypeParam) Index() int             { unsupported(); return 0 }
+func (*TypeParam) Constraint() types.Type { unsupported(); return nil }
+func (*TypeParam) Obj() *types.TypeName   { unsupported(); return nil }
+
+// TypeParamList is a placeholder for an empty type parameter list.
+type TypeParamList struct{}
+
+func (*TypeParamList) Len() int          { return 0 }
+func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
+
+// TypeList is a placeholder for an empty type list.
+type TypeList struct{}
+
+func (*TypeList) Len() int          { return 0 }
+func (*TypeList) At(int) types.Type { unsupported(); return nil }
+
+// NewTypeParam is unsupported at this Go version, and panics.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+	unsupported()
+	return nil
+}
+
+// SetTypeParamConstraint is unsupported at this Go version, and panics.
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+	unsupported()
+}
+
+// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
+// typeParams is non-empty.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+	if len(recvTypeParams) != 0 || len(typeParams) != 0 {
+		panic("signatures cannot have type parameters at this Go version")
+	}
+	return types.NewSignature(recv, params, results, variadic)
+}
+
+// ForSignature returns an empty slice.
+func ForSignature(*types.Signature) *TypeParamList {
+	return nil
+}
+
+// RecvTypeParams returns a nil slice.
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+	return nil
+}
+
+// IsComparable returns false, as no interfaces are type-restricted at this Go
+// version.
+func IsComparable(*types.Interface) bool {
+	return false
+}
+
+// IsMethodSet returns true, as no interfaces are type-restricted at this Go
+// version.
+func IsMethodSet(*types.Interface) bool {
+	return true
+}
+
+// IsImplicit returns false, as no interfaces are implicit at this Go version.
+func IsImplicit(*types.Interface) bool {
+	return false
+}
+
+// MarkImplicit does nothing, because this Go version does not have implicit
+// interfaces.
+func MarkImplicit(*types.Interface) {}
+
+// ForNamed returns an empty type parameter list, as type parameters are not
+// supported at this Go version.
+func ForNamed(*types.Named) *TypeParamList {
+	return nil
+}
+
+// SetForNamed panics if tparams is non-empty.
+func SetForNamed(_ *types.Named, tparams []*TypeParam) {
+	if len(tparams) > 0 {
+		unsupported()
+	}
+}
+
+// NamedTypeArgs returns nil.
+func NamedTypeArgs(*types.Named) *TypeList {
+	return nil
+}
+
+// NamedTypeOrigin is the identity method at this Go version.
+func NamedTypeOrigin(named *types.Named) types.Type {
+	return named
+}
+
+// Term holds information about a structural type restriction.
+type Term struct {
+	tilde bool
+	typ   types.Type
+}
+
+func (m *Term) Tilde() bool      { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+	pre := ""
+	if m.tilde {
+		pre = "~"
+	}
+	return pre + m.typ.String()
+}
+
+// NewTerm returns a new union term.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) Len() int         { return 0 }
+func (*Union) Term(i int) *Term { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+	unsupported()
+	return nil
+}
+
+// InitInstanceInfo is a noop at this Go version.
+func InitInstanceInfo(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+	TypeArgs *TypeList
+	Type     types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// NewContext returns a placeholder Context instance.
+func NewContext() *Context {
+	return &Context{}
+}
+
+// Instantiate is unsupported on this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	unsupported()
+	return nil, nil
+}

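The placeholder types above all rely on one trick: embedding the interface (ast.Expr, types.Type) makes the stub satisfy it, so code that merely mentions generic constructs still compiles pre-1.18, while any real use panics. A minimal sketch of the same pattern, with hypothetical names:

package main

import "go/types"

// stubTypeParam mimics the placeholder pattern: embedding types.Type lets
// the stub satisfy the interface, so code that mentions the type still
// compiles on old toolchains, while any real use panics loudly.
type stubTypeParam struct{ types.Type }

func (*stubTypeParam) Index() int {
	panic("type parameters are unsupported at this go version")
}

func main() {
	var t types.Type = &stubTypeParam{} // usable wherever a types.Type is expected
	_ = t
	// (&stubTypeParam{}).Index() would panic, by design.
}
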
+ 151 - 0
vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go

@@ -0,0 +1,151 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+	return types.NewTypeParam(name, constraint)
+}
+
+// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+	tparam.SetConstraint(constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+	return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams()
+func ForSignature(sig *types.Signature) *TypeParamList {
+	return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+	return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+	return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+	return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+	return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+	iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+	return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+	n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+	return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) types.Type {
+	return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+	return types.NewUnion(terms)
+}
+
+// InitInstanceInfo initializes info to record information about type and
+// function instances.
+func InitInstanceInfo(info *types.Info) {
+	info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+	return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// NewContext calls types.NewContext.
+func NewContext() *Context {
+	return types.NewContext()
+}
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	return types.Instantiate(ctxt, typ, targs, validate)
+}

+ 170 - 0
vendor/golang.org/x/tools/internal/typeparams/typeterm.go

@@ -0,0 +1,170 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//   ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//   𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//   T:  &term{false, T}  == {T}                    // set of type T
+//  ~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+//
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆ T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+	if debug && (x.typ == nil || y.typ == nil) {
+		panic("invalid argument(s)")
+	}
+	ux := x.typ
+	if y.tilde {
+		ux = under(ux)
+	}
+	uy := y.typ
+	if x.tilde {
+		uy = under(uy)
+	}
+	return !types.Identical(ux, uy)
+}

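To make the union and intersection tables concrete, the sketch below copies the (unexported) term shape and the non-disjoint cases of both operations; the names and helpers are local to the example.

package main

import (
	"fmt"
	"go/types"
)

// term is a tiny local copy of the type above, so the tables can be
// exercised directly.
type term struct {
	tilde bool
	typ   types.Type
}

func (x term) String() string {
	if x.tilde {
		return "~" + x.typ.String()
	}
	return x.typ.String()
}

// unionSameType mirrors the non-disjoint case of union: over the same
// underlying type, the tilde term absorbs the exact term.
func unionSameType(x, y term) term {
	if x.tilde || !y.tilde {
		return x
	}
	return y
}

// intersectSameType mirrors the non-disjoint case of intersect: the exact
// term wins, since {T} ⊆ {t' | under(t') == T}.
func intersectSameType(x, y term) term {
	if !x.tilde || y.tilde {
		return x
	}
	return y
}

func main() {
	intT := types.Typ[types.Int]
	tilde := term{true, intT}  // ~int
	exact := term{false, intT} // int
	fmt.Println(unionSameType(tilde, exact))     // ~int
	fmt.Println(intersectSameType(tilde, exact)) // int
}
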
+ 158 - 0
vendor/golang.org/x/tools/internal/typesinternal/errorcode.go

@@ -1365,4 +1365,162 @@ const (
 	//  	return i
 	//  }
 	InvalidGo
+
+	// All codes below were added in Go 1.17.
+
+	/* decl */
+
+	// BadDecl occurs when a declaration has invalid syntax.
+	BadDecl
+
+	// RepeatedDecl occurs when an identifier occurs more than once on the left
+	// hand side of a short variable declaration.
+	//
+	// Example:
+	//  func _() {
+	//  	x, y, y := 1, 2, 3
+	//  }
+	RepeatedDecl
+
+	/* unsafe */
+
+	// InvalidUnsafeAdd occurs when unsafe.Add is called with a
+	// length argument that is not of integer type.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var p unsafe.Pointer
+	//  var _ = unsafe.Add(p, float64(1))
+	InvalidUnsafeAdd
+
+	// InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+	// pointer argument that is not of pointer type or a length argument
+	// that is not of integer type, negative, or out of bounds.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(x, 1)
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, float64(1))
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, -1)
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Slice(&x, uint64(1) << 63)
+	InvalidUnsafeSlice
+
+	// All codes below were added in Go 1.18.
+
+	/* features */
+
+	// UnsupportedFeature occurs when a language feature is used that is not
+	// supported at this Go version.
+	UnsupportedFeature
+
+	/* type params */
+
+	// NotAGenericType occurs when a non-generic type is used where a generic
+	// type is expected: in type or function instantiation.
+	//
+	// Example:
+	//  type T int
+	//
+	//  var _ T[int]
+	NotAGenericType
+
+	// WrongTypeArgCount occurs when a type or function is instantiated with an
+	// incorrect number of type arguments, including when a generic type or
+	// function is used without instantiation.
+	//
+	// Errors involving failed type inference are assigned other error codes.
+	//
+	// Example:
+	//  type T[p any] int
+	//
+	//  var _ T[int, string]
+	//
+	// Example:
+	//  func f[T any]() {}
+	//
+	//  var x = f
+	WrongTypeArgCount
+
+	// CannotInferTypeArgs occurs when type or function type argument inference
+	// fails to infer all type arguments.
+	//
+	// Example:
+	//  func f[T any]() {}
+	//
+	//  func _() {
+	//  	f()
+	//  }
+	//
+	// Example:
+	//   type N[P, Q any] struct{}
+	//
+	//   var _ N[int]
+	CannotInferTypeArgs
+
+	// InvalidTypeArg occurs when a type argument does not satisfy its
+	// corresponding type parameter constraints.
+	//
+	// Example:
+	//  type T[P ~int] struct{}
+	//
+	//  var _ T[string]
+	InvalidTypeArg // arguments? InferenceFailed
+
+	// InvalidInstanceCycle occurs when an invalid cycle is detected
+	// within the instantiation graph.
+	//
+	// Example:
+	//  func f[T any]() { f[*T]() }
+	InvalidInstanceCycle
+
+	// InvalidUnion occurs when an embedded union or approximation element is
+	// not valid.
+	//
+	// Example:
+	//  type _ interface {
+	//   	~int | interface{ m() }
+	//  }
+	InvalidUnion
+
+	// MisplacedConstraintIface occurs when a constraint-type interface is used
+	// outside of constraint position.
+	//
+	// Example:
+	//   type I interface { ~int }
+	//
+	//   var _ I
+	MisplacedConstraintIface
+
+	// InvalidMethodTypeParams occurs when methods have type parameters.
+	//
+	// It cannot be encountered with an AST parsed using go/parser.
+	InvalidMethodTypeParams
+
+	// MisplacedTypeParam occurs when a type parameter is used in a place where
+	// it is not permitted.
+	//
+	// Example:
+	//  type T[P any] P
+	//
+	// Example:
+	//  type T[P any] struct{ *P }
+	MisplacedTypeParam
 )

File diff suppressed because it is too large
+ 14 - 1
vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go


+ 9 - 2
vendor/golang.org/x/tools/internal/typesinternal/types.go

@@ -30,10 +30,15 @@ func SetUsesCgo(conf *types.Config) bool {
 	return true
 }
 
-func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) {
+// ReadGo116ErrorData extracts additional information from types.Error values
+// generated by Go version 1.16 and later: the error code, start position, and
+// end position. If all positions are valid, start <= err.Pos <= end.
+//
+// If the data could not be read, the final result parameter will be false.
+func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
 	var data [3]int
 	// By coincidence all of these fields are ints, which simplifies things.
-	v := reflect.ValueOf(terr)
+	v := reflect.ValueOf(err)
 	for i, name := range []string{"go116code", "go116start", "go116end"} {
 		f := v.FieldByName(name)
 		if !f.IsValid() {
@@ -43,3 +48,5 @@ func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool
 	}
 	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
 }
+
+var SetGoVersion = func(conf *types.Config, version string) bool { return false }

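The reflection trick above relies on types.Error carrying unexported go116* int fields, which reflect can read (though not set or Interface()) despite their being unexported. A self-contained sketch of the same extraction; the field name is internal and version-dependent:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"reflect"
)

// readErrorCode mirrors the field extraction above, for the code alone.
func readErrorCode(err types.Error) (int, bool) {
	f := reflect.ValueOf(err).FieldByName("go116code")
	if !f.IsValid() {
		return 0, false
	}
	return int(f.Int()), true
}

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", "package p; var x string = 1", 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		Error: func(err error) {
			if terr, ok := err.(types.Error); ok {
				if code, ok := readErrorCode(terr); ok {
					fmt.Println("error code:", code)
				}
			}
		},
	}
	// The deliberate type error above triggers the Error callback.
	_, _ = conf.Check("p", fset, []*ast.File{file}, nil)
}
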
+ 19 - 0
vendor/golang.org/x/tools/internal/typesinternal/types_118.go

@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typesinternal
+
+import (
+	"go/types"
+)
+
+func init() {
+	SetGoVersion = func(conf *types.Config, version string) bool {
+		conf.GoVersion = version
+		return true
+	}
+}

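SetGoVersion is the usual hook pattern: a package-level function variable that defaults to "unsupported" and is overridden by an init in a build-tagged file, as above. A self-contained sketch of the mechanism, with hypothetical names:

package main

import "fmt"

// setGoVersion defaults to a no-op that reports failure, like the
// SetGoVersion variable above on pre-1.18 toolchains.
var setGoVersion = func(version string) bool { return false }

// A build-tagged file (//go:build go1.18) would override it in an init:
//
//	func init() {
//		setGoVersion = func(v string) bool { conf.GoVersion = v; return true }
//	}

func main() {
	if !setGoVersion("go1.17") {
		fmt.Println("language version pinning unsupported on this toolchain")
	}
}
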
Some files were not shown because too many files changed in this diff