Просмотр исходного кода

Merge pull request #410 from squat/update_ci

feat(CI): overhaul and update build-system
Lucas Servén Marín 2 месяца назад
Родитель
Commit
ed5d8a96bf
100 измененных файлов с 2825 добавлено и 1776 удалено
  1. 7 1
      .dockerignore
  2. 1 0
      .envrc
  3. 4 0
      .github/actionlint.yaml
  4. 19 0
      .github/dependabot.yml
  5. 136 109
      .github/workflows/ci.yml
  6. 14 13
      .github/workflows/release.yaml
  7. 27 0
      .github/workflows/update.yaml
  8. 5 6
      .gitignore
  9. 26 9
      Dockerfile
  10. 21 275
      Makefile
  11. 9 9
      cmd/docs-gen/main.go
  12. 11 2
      cmd/kg/handlers.go
  13. 3 3
      cmd/kg/main.go
  14. 24 24
      cmd/kg/webhook.go
  15. 23 31
      cmd/kgctl/connect_linux.go
  16. 21 8
      cmd/kgctl/main.go
  17. 29 1
      cmd/kgctl/showconf.go
  18. 2 3
      docs/kg.md
  19. 0 1
      e2e/full-mesh.sh
  20. 2 2
      e2e/handlers.sh
  21. 8 7
      e2e/kilo-kind-userspace.yaml
  22. 4 4
      e2e/lib.sh
  23. 0 1
      e2e/location-mesh.sh
  24. 3 0
      e2e/multi-cluster.sh
  25. 3 0
      e2e/setup.sh
  26. 121 0
      flake.lock
  27. 295 0
      flake.nix
  28. 64 46
      go.mod
  29. 103 708
      go.sum
  30. 22 24
      manifests/crds.yaml
  31. 3 3
      manifests/kilo-bootkube-flannel.yaml
  32. 5 4
      manifests/kilo-bootkube.yaml
  33. 3 3
      manifests/kilo-k3s-cilium.yaml
  34. 3 3
      manifests/kilo-k3s-flannel.yaml
  35. 10 8
      manifests/kilo-k3s-userspace-heterogeneous.yaml
  36. 9 7
      manifests/kilo-k3s-userspace.yaml
  37. 5 4
      manifests/kilo-k3s.yaml
  38. 16 20
      manifests/kilo-kubeadm-cilium.yaml
  39. 11 11
      manifests/kilo-kubeadm-flannel-userspace.yaml
  40. 3 3
      manifests/kilo-kubeadm-flannel.yaml
  41. 13 12
      manifests/kilo-kubeadm-userspace.yaml
  42. 5 4
      manifests/kilo-kubeadm.yaml
  43. 3 3
      manifests/kilo-typhoon-flannel.yaml
  44. 5 4
      manifests/kilo-typhoon.yaml
  45. 3 3
      manifests/kube-router.yaml
  46. 12 12
      manifests/peer-validation.yaml
  47. 7 7
      pkg/iptables/iptables.go
  48. 1 1
      pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go
  49. 30 30
      pkg/k8s/backend.go
  50. 2 2
      pkg/k8s/backend_test.go
  51. 2 3
      pkg/k8s/clientset/versioned/clientset.go
  52. 1 1
      pkg/k8s/clientset/versioned/fake/clientset_generated.go
  53. 1 1
      pkg/k8s/clientset/versioned/fake/doc.go
  54. 1 1
      pkg/k8s/clientset/versioned/fake/register.go
  55. 1 1
      pkg/k8s/clientset/versioned/scheme/doc.go
  56. 1 1
      pkg/k8s/clientset/versioned/scheme/register.go
  57. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/doc.go
  58. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/doc.go
  59. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/fake_kilo_client.go
  60. 3 4
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/fake_peer.go
  61. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/generated_expansion.go
  62. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/kilo_client.go
  63. 1 1
      pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go
  64. 87 6
      pkg/k8s/informers/factory.go
  65. 1 1
      pkg/k8s/informers/generic.go
  66. 1 1
      pkg/k8s/informers/internalinterfaces/factory_interfaces.go
  67. 1 1
      pkg/k8s/informers/kilo/interface.go
  68. 1 1
      pkg/k8s/informers/kilo/v1alpha1/interface.go
  69. 1 1
      pkg/k8s/informers/kilo/v1alpha1/peer.go
  70. 1 1
      pkg/k8s/listers/kilo/v1alpha1/expansion_generated.go
  71. 1 1
      pkg/k8s/listers/kilo/v1alpha1/peer.go
  72. 6 6
      pkg/mesh/cni.go
  73. 45 45
      pkg/mesh/mesh.go
  74. 6 6
      pkg/mesh/topology.go
  75. 2 2
      pkg/route/route.go
  76. 0 8
      vendor/github.com/cespare/xxhash/v2/.travis.yml
  77. 21 16
      vendor/github.com/cespare/xxhash/v2/README.md
  78. 10 0
      vendor/github.com/cespare/xxhash/v2/testall.sh
  79. 20 28
      vendor/github.com/cespare/xxhash/v2/xxhash.go
  80. 165 171
      vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
  81. 183 0
      vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
  82. 2 0
      vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
  83. 11 11
      vendor/github.com/cespare/xxhash/v2/xxhash_other.go
  84. 1 0
      vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
  85. 33 21
      vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
  86. 71 0
      vendor/github.com/emicklei/go-restful/v3/.gitignore
  87. 1 0
      vendor/github.com/emicklei/go-restful/v3/.goconvey
  88. 13 0
      vendor/github.com/emicklei/go-restful/v3/.travis.yml
  89. 396 0
      vendor/github.com/emicklei/go-restful/v3/CHANGES.md
  90. 22 0
      vendor/github.com/emicklei/go-restful/v3/LICENSE
  91. 8 0
      vendor/github.com/emicklei/go-restful/v3/Makefile
  92. 112 0
      vendor/github.com/emicklei/go-restful/v3/README.md
  93. 13 0
      vendor/github.com/emicklei/go-restful/v3/SECURITY.md
  94. 1 0
      vendor/github.com/emicklei/go-restful/v3/Srcfile
  95. 10 0
      vendor/github.com/emicklei/go-restful/v3/bench_test.sh
  96. 127 0
      vendor/github.com/emicklei/go-restful/v3/compress.go
  97. 103 0
      vendor/github.com/emicklei/go-restful/v3/compressor_cache.go
  98. 91 0
      vendor/github.com/emicklei/go-restful/v3/compressor_pools.go
  99. 54 0
      vendor/github.com/emicklei/go-restful/v3/compressors.go
  100. 32 0
      vendor/github.com/emicklei/go-restful/v3/constants.go

+ 7 - 1
.dockerignore

@@ -1,3 +1,9 @@
 **
 
-!/bin/linux
+!/flake.lock
+!/flake.nix
+!/cmd
+!/pkg
+!/go.mod
+!/go.sum
+!/vendor

+ 1 - 0
.envrc

@@ -0,0 +1 @@
+use flake

+ 4 - 0
.github/actionlint.yaml

@@ -0,0 +1,4 @@
+self-hosted-runner:
+  labels:
+    - nscloud-*
+    - namespace-*

+ 19 - 0
.github/dependabot.yml

@@ -0,0 +1,19 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"

+ 136 - 109
.github/workflows/ci.yml

@@ -2,163 +2,190 @@ name: CI
 
 on:
   push:
-    branches: [ main ]
+    branches: [main]
     tags:
       - "*"
   pull_request:
   schedule:
-  - cron:  '0 0 * * *'
+    - cron: "0 0 * * *"
   workflow_dispatch:
 
-jobs:
+env:
+  IMAGE_NAME: ${{ github.repository }}
 
+jobs:
   vendor:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Vendor
-      run: |
-        make vendor
-        git diff --exit-code
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: |
+          go mod tidy
+          go mod vendor
+          git diff --exit-code
 
   build:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build
-      run: make
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix build
 
   docs:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build docs
-      run: |
-        make gen-docs
-        git diff --exit-code
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - name: Build docs
+        run: |
+          nix develop . --command make gen-docs
+          git diff --exit-code
 
   linux:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build kg and kgctl for all Linux Architectures
-      run: make all-build
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix build .#kilo-cross-linux-amd64 .#kilo-cross-linux-arm64 .#kilo-cross-linux-arm
 
   darwin:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build kgctl for Darwin amd64
-      run: make OS=darwin ARCH=amd64
-    - name: Build kgctl for Darwin arm64
-      run: make OS=darwin ARCH=arm64
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix build .#kgctl-cross-darwin-amd64 .#kgctl-cross-darwin-arm64
 
   windows:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build kgctl for Windows
-      run: make OS=windows
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix build .#kgctl-cross-windows-amd64
 
   unit:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Run Unit Tests
-      run: make unit
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix develop . --command go test -mod=vendor --race ./...
 
   e2e:
-    runs-on: ubuntu-latest
+    runs-on:
+      - nscloud-ubuntu-22.04-amd64-8x16-with-features
+      - namespace-features:kernel.release-channel=bleeding-edge
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Run e2e Tests
-      run: make e2e
+      - uses: actions/checkout@v6
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: linux/amd64
+          tags: squat/kilo:test
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          load: "true"
+          build-args: |
+            VERSION=${{ github.sha }}
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - env:
+          E2E_SKIP_TEARDOWN_ON_FAILURE: "true"
+        run: nix develop . --command make e2e
+      - name: Breakpoint if tests failed
+        if: failure()
+        uses: namespacelabs/breakpoint-action@v0
+        with:
+          duration: 15m
+          authorized-users: squat, leonnicolas
 
   lint:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Lint Code
-      run: make lint
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: nix flake check -L --show-trace
 
   container:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Container
-      run: make container
+      - uses: actions/checkout@v6
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ghcr.io/${{ env.IMAGE_NAME }},docker.io/${{ env.IMAGE_NAME }}
+          tags: type=sha,prefix=
+          flavor: latest=true
+      - name: Build
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: linux/amd64, linux/arm64, linux/arm
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          build-args: |
+            VERSION=${{ github.sha }}
 
   push:
     if: github.event_name != 'pull_request'
     needs:
-    - vendor
-    - build
-    - linux
-    - darwin
-    - windows
-    - unit
-    - lint
-    - container
+      - vendor
+      - build
+      - linux
+      - darwin
+      - windows
+      - unit
+      - lint
+      - container
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v2
-      with:
-        platforms: all
-    - name: Login to DockerHub
-      if: github.event_name != 'pull_request'
-      uses: docker/login-action@v2
-      with:
-        username: ${{ secrets.DOCKER_USERNAME }}
-        password: ${{ secrets.DOCKER_PASSWORD }}
-    - name: Build and push
-      if: github.event_name != 'pull_request'
-      run: make manifest
-    - name: Build and push latest
-      if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
-      run: make manifest-latest
+      - uses: actions/checkout@v6
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ghcr.io/${{ env.IMAGE_NAME }},docker.io/${{ env.IMAGE_NAME }}
+          tags: type=sha,prefix=
+          flavor: latest=true
+      - name: Build and push
+        id: push
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: ${{ github.event_name != 'pull_request' && github.ref == 'refs/heads/main' }}
+          platforms: linux/amd64, linux/arm64, linux/arm
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          build-args: |
+            VERSION=${{ github.sha }}
+      - name: Determine digest
+        run: echo ${{ steps.push.outputs.digest }}

+ 14 - 13
.github/workflows/release.yaml

@@ -6,16 +6,17 @@ jobs:
   kgctl:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Set up Go
-      uses: actions/setup-go@v4
-      with:
-        go-version: 1.19
-    - name: Build kgctl Binaries to Be Released
-      run: make release
-    - name: Publish Release
-      uses: skx/github-action-publish-binaries@master
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        args: 'bin/release/kgctl-*'
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/magic-nix-cache-action@v13
+      - run: |
+          nix build .#kgctl-cross-linux-amd64 .#kgctl-cross-linux-arm64 .#kgctl-cross-linux-arm .#kgctl-cross-darwin-amd64 .#kgctl-cross-darwin-arm64 .#kgctl-cross-windows-amd64
+          for result in $(find -L . -name 'kgctl*' | grep result); do
+            cp "$result" "$(echo "$result" | sed 's|.*bin/\(.\+\)_\(.\+\)/kgctl\(.*\)|kgctl-\1-\2\3|g' | sed 's|.*bin/kgctl|kgctl-amd64|g')"
+          done
+      - name: Publish Release
+        uses: skx/github-action-publish-binaries@master
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          args: "kgctl-*"

+ 27 - 0
.github/workflows/update.yaml

@@ -0,0 +1,27 @@
+name: "Flake.lock: update Nix dependencies"
+
+on:
+  workflow_dispatch: # allows manual triggering
+  schedule:
+    - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00
+
+jobs:
+  nix-flake-update:
+    permissions:
+      contents: write
+      id-token: write
+      issues: write
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+      - uses: DeterminateSystems/determinate-nix-action@v3.15.1
+      - uses: DeterminateSystems/update-flake-lock@v28
+        with:
+          pr-title: Update Nix flake inputs
+          pr-labels: |
+            dependencies
+            automated
+          sign-commits: true
+          gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
+          token: ${{ secrets.GH_TOKEN_FOR_FLAKE_LOCK_UPDATES }}

+ 5 - 6
.gitignore

@@ -1,7 +1,6 @@
-.cache/
-.container*
-.manifest*
-.push*
-bin/
-tmp/
 e2e/kind.yaml*
+.direnv/
+/.pre-commit-config.yaml
+/result*
+/e2e-*/
+/help.txt

+ 26 - 9
Dockerfile

@@ -1,13 +1,30 @@
-ARG FROM=alpine
-FROM $FROM AS cni
-ARG GOARCH=amd64
-ARG CNI_PLUGINS_VERSION=v1.1.1
+FROM --platform=$BUILDPLATFORM docker.io/nixos/nix:2.33.0 AS builder
+
+COPY . /tmp/build
+WORKDIR /tmp/build
+
+ARG BUILDOS
+ARG BUILDARCH
+ARG TARGETOS
+ARG TARGETARCH
+ARG VERSION
+
+RUN VERSION="$VERSION" nix \
+    --extra-experimental-features "nix-command flakes" \
+    --option filter-syscalls false \
+    build --impure ".#kilo-cross-$TARGETOS-$TARGETARCH"
+RUN ln -s ../bin result/bin/"$BUILDOS"_"$BUILDARCH"
+
+FROM alpine:3.20 AS cni
+ARG TARGETARCH
+ARG CNI_PLUGINS_VERSION=v1.9.0
 RUN apk add --no-cache curl && \
-    curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-$GOARCH-$CNI_PLUGINS_VERSION.tgz && \
+    curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-$TARGETARCH-$CNI_PLUGINS_VERSION.tgz && \
     tar -xf cni.tar.gz
 
-FROM $FROM
-ARG GOARCH
+FROM alpine:3.20
+ARG TARGETOS
+ARG TARGETARCH
 ARG ALPINE_VERSION=v3.20
 LABEL maintainer="squat <lserven@gmail.com>"
 RUN echo -e "https://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/main\nhttps://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/community" > /etc/apk/repositories && \
@@ -15,6 +32,6 @@ RUN echo -e "https://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/main\nh
 COPY --from=cni bridge host-local loopback portmap /opt/cni/bin/
 ADD https://raw.githubusercontent.com/kubernetes-sigs/iptables-wrappers/e139a115350974aac8a82ec4b815d2845f86997e/iptables-wrapper-installer.sh /
 RUN chmod 700 /iptables-wrapper-installer.sh && /iptables-wrapper-installer.sh --no-sanity-check
-COPY bin/linux/$GOARCH/kg /opt/bin/
-COPY bin/linux/$GOARCH/kgctl /opt/bin/
+COPY --from=builder /tmp/build/result/bin/"$TARGETOS"_"$TARGETARCH"/kg /opt/bin/kg
+COPY --from=builder /tmp/build/result/bin/"$TARGETOS"_"$TARGETARCH"/kgctl /opt/bin/kgctl
 ENTRYPOINT ["/opt/bin/kg"]

+ 21 - 275
Makefile

@@ -1,91 +1,22 @@
-export GO111MODULE=on
-.PHONY: push container clean container-name container-latest push-latest fmt lint test unit vendor header generate crd client deepcopy informer lister manifest manfest-latest manifest-annotate manifest manfest-latest manifest-annotate release gen-docs e2e
+.PHONY: fmt lint test unit generate crd client deepcopy informer lister gen-docs e2e
 
-OS ?= $(shell go env GOOS)
-ARCH ?= $(shell go env GOARCH)
-ALL_ARCH := amd64 arm arm64
-DOCKER_ARCH := "amd64" "arm v7" "arm64 v8"
-ifeq ($(OS),linux)
-    BINS := bin/$(OS)/$(ARCH)/kg bin/$(OS)/$(ARCH)/kgctl
-else
-    BINS := bin/$(OS)/$(ARCH)/kgctl
-endif
-RELEASE_BINS := $(addprefix bin/release/kgctl-, $(addprefix linux-, $(ALL_ARCH)) darwin-amd64 darwin-arm64 windows-amd64)
 PROJECT := kilo
 PKG := github.com/squat/$(PROJECT)
-REGISTRY ?= index.docker.io
-IMAGE ?= squat/$(PROJECT)
-FULLY_QUALIFIED_IMAGE := $(REGISTRY)/$(IMAGE)
-
-TAG := $(shell git describe --abbrev=0 --tags HEAD 2>/dev/null)
-COMMIT := $(shell git rev-parse HEAD)
-VERSION := $(COMMIT)
-ifneq ($(TAG),)
-    ifeq ($(COMMIT), $(shell git rev-list -n1 $(TAG)))
-        VERSION := $(TAG)
-    endif
-endif
-DIRTY := $(shell test -z "$$(git diff --shortstat 2>/dev/null)" || echo -dirty)
-VERSION := $(VERSION)$(DIRTY)
-LD_FLAGS := -ldflags '-X $(PKG)/pkg/version.Version=$(VERSION)'
-SRC := $(shell find . -type f -name '*.go' -not -path "./vendor/*")
 GO_FILES ?= $$(find . -name '*.go' -not -path './vendor/*')
 GO_PKGS ?= $$(go list ./... | grep -v "$(PKG)/vendor")
 
-CONTROLLER_GEN_BINARY := bin/controller-gen
-CLIENT_GEN_BINARY := bin/client-gen
-DOCS_GEN_BINARY := bin/docs-gen
-DEEPCOPY_GEN_BINARY := bin/deepcopy-gen
-INFORMER_GEN_BINARY := bin/informer-gen
-LISTER_GEN_BINARY := bin/lister-gen
-STATICCHECK_BINARY := bin/staticcheck
-EMBEDMD_BINARY := bin/embedmd
-KIND_BINARY := $(shell pwd)/bin/kind
-KUBECTL_BINARY := $(shell pwd)/bin/kubectl
-BASH_UNIT := $(shell pwd)/bin/bash_unit
-BASH_UNIT_FLAGS :=
-
-BUILD_IMAGE ?= golang:1.19.0
-BASE_IMAGE ?= alpine:3.20
-
-build: $(BINS)
-
-build-%:
-	@$(MAKE) --no-print-directory OS=$(word 1,$(subst -, ,$*)) ARCH=$(word 2,$(subst -, ,$*)) build
-
-container-latest-%:
-	@$(MAKE) --no-print-directory ARCH=$* container-latest
-
-container-%:
-	@$(MAKE) --no-print-directory ARCH=$* container
-
-push-latest-%:
-	@$(MAKE) --no-print-directory ARCH=$* push-latest
-
-push-%:
-	@$(MAKE) --no-print-directory ARCH=$* push
-
-all-build: $(addprefix build-$(OS)-, $(ALL_ARCH))
-
-all-container: $(addprefix container-, $(ALL_ARCH))
-
-all-push: $(addprefix push-, $(ALL_ARCH))
-
-all-container-latest: $(addprefix container-latest-, $(ALL_ARCH))
-
-all-push-latest: $(addprefix push-latest-, $(ALL_ARCH))
-
 generate: client deepcopy informer lister crd
 
 crd: manifests/crds.yaml
-manifests/crds.yaml: pkg/k8s/apis/kilo/v1alpha1/types.go $(CONTROLLER_GEN_BINARY)
-	$(CONTROLLER_GEN_BINARY) crd \
+manifests/crds.yaml: pkg/k8s/apis/kilo/v1alpha1/types.go
+	go tool controller-gen crd \
 	paths=./pkg/k8s/apis/kilo/... \
 	output:crd:stdout > $@
+	yamlfmt --formatter indentless_arrays=true manifests/crds.yaml
 
 client: pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go
-pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(CLIENT_GEN_BINARY)
-	$(CLIENT_GEN_BINARY) \
+pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go
+	go tool client-gen \
 	--clientset-name versioned \
 	--input-base "" \
 	--input $(PKG)/pkg/k8s/apis/kilo/v1alpha1 \
@@ -99,8 +30,8 @@ pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/ki
 	go fmt ./pkg/k8s/clientset/...
 
 deepcopy: pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go
-pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(DEEPCOPY_GEN_BINARY)
-	$(DEEPCOPY_GEN_BINARY) \
+pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go
+	go tool deepcopy-gen \
 	--input-dirs ./$(@D) \
 	--go-header-file=.header \
 	--logtostderr \
@@ -111,8 +42,8 @@ pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go: .header pkg/k8s/apis/kilo/v
 	go fmt $@
 
 informer: pkg/k8s/informers/kilo/v1alpha1/peer.go
-pkg/k8s/informers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(INFORMER_GEN_BINARY)
-	$(INFORMER_GEN_BINARY) \
+pkg/k8s/informers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go
+	go tool informer-gen \
 	--input-dirs $(PKG)/pkg/k8s/apis/kilo/v1alpha1 \
 	--go-header-file=.header \
 	--logtostderr \
@@ -127,8 +58,8 @@ pkg/k8s/informers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/type
 	go fmt ./pkg/k8s/informers/...
 
 lister: pkg/k8s/listers/kilo/v1alpha1/peer.go
-pkg/k8s/listers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(LISTER_GEN_BINARY)
-	$(LISTER_GEN_BINARY) \
+pkg/k8s/listers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go
+	go tool lister-gen \
 	--input-dirs $(PKG)/pkg/k8s/apis/kilo/v1alpha1 \
 	--go-header-file=.header \
 	--logtostderr \
@@ -140,99 +71,27 @@ pkg/k8s/listers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.
 	go fmt ./pkg/k8s/listers/...
 
 gen-docs: generate docs/api.md docs/kg.md
-docs/api.md: pkg/k8s/apis/kilo/v1alpha1/types.go $(DOCS_GEN_BINARY)
-	$(DOCS_GEN_BINARY) $< > $@
-
-$(BINS): $(SRC) go.mod
-	@mkdir -p bin/$(word 2,$(subst /, ,$@))/$(word 3,$(subst /, ,$@))
-	@echo "building: $@"
-	@docker run --rm \
-	    -u $$(id -u):$$(id -g) \
-	    -v $$(pwd):/$(PROJECT) \
-	    -w /$(PROJECT) \
-	    $(BUILD_IMAGE) \
-	    /bin/sh -c " \
-	        GOARCH=$(word 3,$(subst /, ,$@)) \
-	        GOOS=$(word 2,$(subst /, ,$@)) \
-	        GOCACHE=/$(PROJECT)/.cache \
-		CGO_ENABLED=0 \
-		go build -mod=vendor -o $@ \
-		    $(LD_FLAGS) \
-		    ./cmd/$(@F)/... \
-	    "
+docs/api.md: pkg/k8s/apis/kilo/v1alpha1/types.go
+	go run ./cmd/docs-gen/... $< > $@
 
 fmt:
 	@echo $(GO_PKGS)
 	gofmt -w -s $(GO_FILES)
 
-lint: header $(STATICCHECK_BINARY)
-	@echo 'go vet $(GO_PKGS)'
-	@vet_res=$$(GO111MODULE=on go vet -mod=vendor $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \
-		echo ""; \
-		echo "Go vet found issues. Please check the reported issues"; \
-		echo "and fix them if necessary before submitting the code for review:"; \
-		echo "$$vet_res"; \
-		exit 1; \
-	fi
-	@echo '$(STATICCHECK_BINARY) $(GO_PKGS)'
-	@lint_res=$$($(STATICCHECK_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
-		echo ""; \
-		echo "Staticcheck found style issues. Please check the reported issues"; \
-		echo "and fix them if necessary before submitting the code for review:"; \
-		echo "$$lint_res"; \
-		exit 1; \
-	fi
-	@echo 'gofmt -d -s $(GO_FILES)'
-	@fmt_res=$$(gofmt -d -s $(GO_FILES)); if [ -n "$$fmt_res" ]; then \
-		echo ""; \
-		echo "Gofmt found style issues. Please check the reported issues"; \
-		echo "and fix them if necessary before submitting the code for review:"; \
-		echo "$$fmt_res"; \
-		exit 1; \
-	fi
+lint:
+	pre-commit run --all
 
 unit:
 	go test -mod=vendor --race ./...
 
 test: lint unit e2e
 
-$(KIND_BINARY):
-	curl -Lo $@ https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-$(ARCH)
-	chmod +x $@
-
-$(KUBECTL_BINARY):
-	curl -Lo $@ https://dl.k8s.io/release/v1.21.0/bin/linux/$(ARCH)/kubectl
-	chmod +x $@
-
-$(BASH_UNIT):
-	curl -Lo $@ https://raw.githubusercontent.com/pgrange/bash_unit/v1.7.2/bash_unit
-	chmod +x $@
+e2e:
+	KILO_IMAGE=squat/kilo:test bash_unit $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/kgctl.sh ./e2e/teardown.sh
 
-e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl
-	KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/kgctl.sh ./e2e/teardown.sh
-
-header: .header
-	@HEADER=$$(cat .header); \
-	HEADER_LEN=$$(wc -l .header | awk '{print $$1}'); \
-	FILES=; \
-	for f in $(GO_FILES); do \
-		for i in 0 1 2 3 4 5; do \
-			FILE=$$(t=$$(mktemp) && tail -n +$$i $$f > $$t && head -n $$HEADER_LEN $$t | sed "s/[0-9]\{4\}/YEAR/"); \
-			[ "$$FILE" = "$$HEADER" ] && continue 2; \
-		done; \
-		FILES="$$FILES$$f "; \
-	done; \
-	if [ -n "$$FILES" ]; then \
-		printf 'the following files are missing the license header: %s\n' "$$FILES"; \
-		exit 1; \
-	fi
-
-tmp/help.txt: bin/$(OS)/$(ARCH)/kg
-	mkdir -p tmp
-	bin//$(OS)/$(ARCH)/kg --help 2>&1 | head -n -1 > $@
-
-docs/kg.md: $(EMBEDMD_BINARY) tmp/help.txt
-	$(EMBEDMD_BINARY) -w $@
+docs/kg.md:
+	go run ./cmd/kg/... --help | head -n -2 > help.txt
+	go tool embedmd -w docs/kg.md
 
 website/docs/README.md: README.md
 	rm -rf website/static/img/graphs
@@ -250,116 +109,3 @@ website/docs/README.md: README.md
 website/build/index.html: website/docs/README.md docs/api.md
 	yarn --cwd website install
 	yarn --cwd website build
-
-container: .container-$(ARCH)-$(VERSION) container-name
-.container-$(ARCH)-$(VERSION): bin/linux/$(ARCH)/kg bin/linux/$(ARCH)/kgctl Dockerfile
-	@i=0; for a in $(ALL_ARCH); do [ "$$a" = $(ARCH) ] && break; i=$$((i+1)); done; \
-	ia=""; iv=""; \
-	j=0; for a in $(DOCKER_ARCH); do \
-	    [ "$$i" -eq "$$j" ] && ia=$$(echo "$$a" | awk '{print $$1}') && iv=$$(echo "$$a" | awk '{print $$2}') && break; j=$$((j+1)); \
-	done; \
-	SHA=$$(docker manifest inspect $(BASE_IMAGE) | jq '.manifests[] | select(.platform.architecture == "'$$ia'") | if .platform | has("variant") then select(.platform.variant == "'$$iv'") else . end | .digest' -r); \
-	docker build -t $(IMAGE):$(ARCH)-$(VERSION) --build-arg FROM=$(BASE_IMAGE)@$$SHA --build-arg GOARCH=$(ARCH) .
-	@docker images -q $(IMAGE):$(ARCH)-$(VERSION) > $@
-
-container-latest: .container-$(ARCH)-$(VERSION)
-	@docker tag $(IMAGE):$(ARCH)-$(VERSION) $(FULLY_QUALIFIED_IMAGE):$(ARCH)-latest
-	@echo "container: $(IMAGE):$(ARCH)-latest"
-
-container-name:
-	@echo "container: $(IMAGE):$(ARCH)-$(VERSION)"
-
-manifest: .manifest-$(VERSION) manifest-name
-.manifest-$(VERSION): Dockerfile $(addprefix push-, $(ALL_ARCH))
-	@docker manifest create --amend $(FULLY_QUALIFIED_IMAGE):$(VERSION) $(addsuffix -$(VERSION), $(addprefix $(FULLY_QUALIFIED_IMAGE):, $(ALL_ARCH)))
-	@$(MAKE) --no-print-directory manifest-annotate-$(VERSION)
-	@docker manifest push $(FULLY_QUALIFIED_IMAGE):$(VERSION) > $@
-
-manifest-latest: Dockerfile $(addprefix push-latest-, $(ALL_ARCH))
-	@docker manifest rm $(FULLY_QUALIFIED_IMAGE):latest || echo no old manifest
-	@docker manifest create --amend $(FULLY_QUALIFIED_IMAGE):latest $(addsuffix -latest, $(addprefix $(FULLY_QUALIFIED_IMAGE):, $(ALL_ARCH)))
-	@$(MAKE) --no-print-directory manifest-annotate-latest
-	@docker manifest push $(FULLY_QUALIFIED_IMAGE):latest
-	@echo "manifest: $(IMAGE):latest"
-
-manifest-annotate: manifest-annotate-$(VERSION)
-
-manifest-annotate-%:
-	@i=0; \
-	for a in $(ALL_ARCH); do \
-	    annotate=; \
-	    j=0; for da in $(DOCKER_ARCH); do \
-		if [ "$$j" -eq "$$i" ] && [ -n "$$da" ]; then \
-		    annotate="docker manifest annotate $(FULLY_QUALIFIED_IMAGE):$* $(FULLY_QUALIFIED_IMAGE):$$a-$* --os linux --arch"; \
-		    k=0; for ea in $$da; do \
-			[ "$$k" = 0 ] && annotate="$$annotate $$ea"; \
-			[ "$$k" != 0 ] && annotate="$$annotate --variant $$ea"; \
-			k=$$((k+1)); \
-		    done; \
-		    $$annotate; \
-		fi; \
-		j=$$((j+1)); \
-	    done; \
-	    i=$$((i+1)); \
-	done
-
-manifest-name:
-	@echo "manifest: $(IMAGE):$(VERSION)"
-
-push: .push-$(ARCH)-$(VERSION) push-name
-.push-$(ARCH)-$(VERSION): .container-$(ARCH)-$(VERSION)
-ifneq ($(REGISTRY),index.docker.io)
-	@docker tag $(IMAGE):$(ARCH)-$(VERSION) $(FULLY_QUALIFIED_IMAGE):$(ARCH)-$(VERSION)
-endif
-	@docker push $(FULLY_QUALIFIED_IMAGE):$(ARCH)-$(VERSION)
-	@docker images -q $(IMAGE):$(ARCH)-$(VERSION) > $@
-
-push-latest: container-latest
-	@docker push $(FULLY_QUALIFIED_IMAGE):$(ARCH)-latest
-	@echo "pushed: $(IMAGE):$(ARCH)-latest"
-
-push-name:
-	@echo "pushed: $(IMAGE):$(ARCH)-$(VERSION)"
-
-release: $(RELEASE_BINS)
-$(RELEASE_BINS):
-	@make OS=$(word 2,$(subst -, ,$(@F))) ARCH=$(word 3,$(subst -, ,$(@F)))
-	mkdir -p $(@D)
-	cp bin/$(word 2,$(subst -, ,$(@F)))/$(word 3,$(subst -, ,$(@F)))/kgctl $@
-
-clean: container-clean bin-clean
-	rm -rf .cache
-
-container-clean:
-	rm -rf .container-* .manifest-* .push-*
-
-bin-clean:
-	rm -rf bin
-
-vendor:
-	go mod tidy
-	go mod vendor
-
-$(CONTROLLER_GEN_BINARY):
-	go build -mod=vendor -o $@ sigs.k8s.io/controller-tools/cmd/controller-gen
-
-$(CLIENT_GEN_BINARY):
-	go build -mod=vendor -o $@ k8s.io/code-generator/cmd/client-gen
-
-$(DEEPCOPY_GEN_BINARY):
-	go build -mod=vendor -o $@ k8s.io/code-generator/cmd/deepcopy-gen
-
-$(INFORMER_GEN_BINARY):
-	go build -mod=vendor -o $@ k8s.io/code-generator/cmd/informer-gen
-
-$(LISTER_GEN_BINARY):
-	go build -mod=vendor -o $@ k8s.io/code-generator/cmd/lister-gen
-
-$(DOCS_GEN_BINARY): cmd/docs-gen/main.go
-	go build -mod=vendor -o $@ ./cmd/docs-gen
-
-$(STATICCHECK_BINARY):
-	go build -mod=vendor -o $@ honnef.co/go/tools/cmd/staticcheck
-
-$(EMBEDMD_BINARY):
-	go build -mod=vendor -o $@ github.com/campoy/embedmd

+ 9 - 9
cmd/docs-gen/main.go

@@ -47,7 +47,7 @@ var (
 
 func toSectionLink(name string) string {
 	name = strings.ToLower(name)
-	name = strings.Replace(name, " ", "-", -1)
+	name = strings.ReplaceAll(name, " ", "-")
 	return name
 }
 
@@ -120,7 +120,7 @@ func parseDocumentationFrom(srcs []string) []KubeTypes {
 				for _, field := range structType.Fields.List {
 					// Skip fields that are not tagged.
 					if field.Tag == nil {
-						os.Stderr.WriteString(fmt.Sprintf("Tag is nil, skipping field: %v of type %v\n", field, field.Type))
+						_, _ = fmt.Fprintf(os.Stderr, "Tag is nil, skipping field: %v of type %v\n", field, field.Type)
 						continue
 					}
 					// Treat inlined fields separately as we don't want the original types to appear in the doc.
@@ -158,7 +158,7 @@ func astFrom(filePath string) *doc.Package {
 	}
 
 	m[filePath] = f
-	apkg, _ := ast.NewPackage(fset, m, nil, nil)
+	apkg, _ := ast.NewPackage(fset, m, nil, nil) //nolint:all
 
 	return doc.New(apkg, "", 0)
 }
@@ -174,7 +174,7 @@ func fmtRawDoc(rawDoc string) string {
 	// Ignore all lines after ---
 	rawDoc = strings.Split(rawDoc, "---")[0]
 
-	for _, line := range strings.Split(rawDoc, "\n") {
+	for line := range strings.SplitSeq(rawDoc, "\n") {
 		line = strings.TrimRight(line, " ")
 		leading := strings.TrimLeft(line, " ")
 		switch {
@@ -195,11 +195,11 @@ func fmtRawDoc(rawDoc string) string {
 	}
 
 	postDoc := strings.TrimRight(buffer.String(), "\n")
-	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
-	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
-	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
-	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
-	postDoc = strings.Replace(postDoc, "|", "\\|", -1)
+	postDoc = strings.ReplaceAll(postDoc, "\\\"", "\"") // replace user's \" to "
+	postDoc = strings.ReplaceAll(postDoc, "\"", "\\\"") // Escape "
+	postDoc = strings.ReplaceAll(postDoc, "\n", "\\n")
+	postDoc = strings.ReplaceAll(postDoc, "\t", "\\t")
+	postDoc = strings.ReplaceAll(postDoc, "|", "\\|")
 
 	return postDoc
 }

+ 11 - 2
cmd/kg/handlers.go

@@ -74,6 +74,7 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	dot, err := topo.Dot()
 	if err != nil {
 		http.Error(w, fmt.Sprintf("failed to generate graph: %v", err), http.StatusInternalServerError)
+		return
 	}
 
 	buf := bytes.NewBufferString(dot)
@@ -85,7 +86,11 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	case "dot", "gv":
 		// If the raw dot data is requested, return it as string.
 		// This allows client-side rendering rather than server-side.
-		w.Write(buf.Bytes())
+		_, err = w.Write(buf.Bytes())
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to generate graph: %v", err), http.StatusInternalServerError)
+			return
+		}
 		return
 
 	case "svg", "png", "bmp", "fig", "gif", "json", "ps":
@@ -140,7 +145,11 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	w.Header().Add("content-type", mimeType)
-	w.Write(output)
+	_, err = w.Write(output)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
 }
 
 func healthHandler(w http.ResponseWriter, _ *http.Request) {

+ 3 - 3
cmd/kg/main.go

@@ -292,7 +292,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 			}
 			return nil
 		}, func(error) {
-			l.Close()
+			_ = l.Close()
 		})
 	}
 
@@ -300,7 +300,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 		ctx, cancel := context.WithCancel(context.Background())
 		// Start the mesh.
 		g.Add(func() error {
-			logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
+			_ = logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
 			if err := m.Run(ctx); err != nil {
 				return fmt.Errorf("error: Kilo exited unexpectedly: %v", err)
 			}
@@ -319,7 +319,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 			for {
 				select {
 				case <-term:
-					logger.Log("msg", "caught interrupt; gracefully cleaning up; see you next time!")
+					_ = logger.Log("msg", "caught interrupt; gracefully cleaning up; see you next time!")
 					return nil
 				case <-cancel:
 					return nil

+ 24 - 24
cmd/kg/webhook.go

@@ -91,11 +91,11 @@ var (
 )
 
 func validationHandler(w http.ResponseWriter, r *http.Request) {
-	level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
+	_ = level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
 		errorCounter.Inc()
-		level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
+		_ = level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
 		http.Error(w, err.Error(), http.StatusBadRequest)
 		return
 	}
@@ -106,7 +106,7 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if contentType != "application/json" {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("received Content-Type=%s, expected application/json", contentType)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
@@ -117,14 +117,14 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("Request could not be decoded: %v", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
 	if *gvk != v1.SchemeGroupVersion.WithKind("AdmissionReview") {
 		errorCounter.Inc()
 		msg := "only API v1 is supported"
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
@@ -139,17 +139,17 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err := json.Unmarshal(rawExtension.Raw, &peer); err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("could not unmarshal extension to peer spec: %v:", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
 
 	if err := peer.Validate(); err == nil {
-		level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
+		_ = level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.Name)
 		validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "allowed"}).Inc()
 		response.Response.Allowed = true
 	} else {
-		level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
+		_ = level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.Name)
 		validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "denied"}).Inc()
 		response.Response.Result = &metav1.Status{
 			Message: err.Error(),
@@ -160,14 +160,14 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("failed to marshal response: %v", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusInternalServerError)
 		return
 	}
 
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write(res); err != nil {
-		level.Error(logger).Log("err", err, "msg", "failed to write response")
+		_ = level.Error(logger).Log("err", err, "msg", "failed to write response")
 	}
 }
 
@@ -204,27 +204,27 @@ func webhook(_ *cobra.Command, _ []string) error {
 
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
+				_ = level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
 				err := msrv.ListenAndServe()
-				level.Info(logger).Log("msg", "metrics server exited", "err", err)
+				_ = level.Info(logger).Log("msg", "metrics server exited", "err", err)
 				return err
 
 			},
 			func(err error) {
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
-				level.Info(logger).Log("msg", "shutting down metrics server gracefully")
+				_ = level.Info(logger).Log("msg", "shutting down metrics server gracefully")
 				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 				defer func() {
 					cancel()
 				}()
 				if err := msrv.Shutdown(ctx); err != nil {
-					level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
-					msrv.Close()
+					_ = level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
+					_ = msrv.Close()
 				}
 			},
 		)
@@ -239,26 +239,26 @@ func webhook(_ *cobra.Command, _ []string) error {
 		}
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
+				_ = level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
 				err := srv.ListenAndServeTLS(certPath, keyPath)
-				level.Info(logger).Log("msg", "webhook server exited", "err", err)
+				_ = level.Info(logger).Log("msg", "webhook server exited", "err", err)
 				return err
 			},
 			func(err error) {
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
-				level.Info(logger).Log("msg", "shutting down webhook server gracefully")
+				_ = level.Info(logger).Log("msg", "shutting down webhook server gracefully")
 				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 				defer func() {
 					cancel()
 				}()
 				if err := srv.Shutdown(ctx); err != nil {
-					level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
-					srv.Close()
+					_ = level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
+					_ = srv.Close()
 				}
 			},
 		)

+ 23 - 31
cmd/kgctl/connect_linux.go

@@ -75,7 +75,8 @@ func connect() *cobra.Command {
 	}
 	cmd.Flags().IPNetVarP(&connectOpts.allowedIP, "allowed-ip", "a", *takeIPNet(net.ParseCIDR("10.10.10.10/32")), "Allowed IP of the peer.")
 	cmd.Flags().StringSliceVar(&allowedIPs, "allowed-ips", []string{}, "Additional allowed IPs of the cluster, e.g. the service CIDR.")
-	cmd.Flags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
+	cmd.Flags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", strings.Join(availableLogLevels, ", ")))
+	_ = cmd.RegisterFlagCompletionFunc("log-level", cobra.FixedCompletions(availableLogLevels, cobra.ShellCompDirectiveNoFileComp))
 	cmd.Flags().StringVar(&connectOpts.privateKey, "private-key", "", "Path to an existing WireGuard private key file.")
 	cmd.Flags().BoolVar(&connectOpts.cleanUp, "clean-up", true, "Should Kilo clean up the routes and interface when it shuts down?")
 	cmd.Flags().UintVar(&connectOpts.mtu, "mtu", uint(1420), "The MTU for the WireGuard interface.")
@@ -83,15 +84,6 @@ func connect() *cobra.Command {
 	cmd.Flags().StringVarP(&connectOpts.interfaceName, "interface", "i", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
 	cmd.Flags().IntVar(&connectOpts.persistentKeepalive, "persistent-keepalive", 10, "How often should WireGuard send keepalives? Setting to 0 will disable sending keepalives.")
 
-	availableLogLevels = strings.Join([]string{
-		logLevelAll,
-		logLevelDebug,
-		logLevelInfo,
-		logLevelWarn,
-		logLevelError,
-		logLevelNone,
-	}, ", ")
-
 	return cmd
 }
 
@@ -114,7 +106,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	case logLevelNone:
 		logger = level.NewFilter(logger, level.AllowNone())
 	default:
-		return fmt.Errorf("log level %s unknown; possible values are: %s", logLevel, availableLogLevels)
+		return fmt.Errorf("log level %s unknown; possible values are: %s", logLevel, strings.Join(availableLogLevels, ", "))
 	}
 	logger = log.With(logger, "ts", log.DefaultTimestampUTC)
 	logger = log.With(logger, "caller", log.DefaultCaller)
@@ -123,7 +115,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if len(args) > 0 {
 		peerName = args[0]
 	} else {
-		level.Debug(logger).Log("msg", "no peer name provided; using hostname")
+		_ = level.Debug(logger).Log("msg", "no peer name provided; using hostname")
 		if peerName, err = os.Hostname(); err != nil {
 			return fmt.Errorf("could not determine hostname: %w", err)
 		}
@@ -154,7 +146,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 		}
 	}
 	publicKey := privateKey.PublicKey()
-	level.Info(logger).Log("msg", "generated public key", "key", publicKey)
+	_ = level.Info(logger).Log("msg", "generated public key", "key", publicKey)
 
 	if _, err := opts.kc.KiloV1alpha1().Peers().Get(ctx, peerName, metav1.GetOptions{}); apierrors.IsNotFound(err) {
 		peer := &v1alpha1.Peer{
@@ -170,15 +162,15 @@ func runConnect(cmd *cobra.Command, args []string) error {
 		if _, err := opts.kc.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{}); err != nil {
 			return fmt.Errorf("failed to create peer: %w", err)
 		}
-		level.Info(logger).Log("msg", "created peer", "peer", peerName)
+		_ = level.Info(logger).Log("msg", "created peer", "peer", peerName)
 		if connectOpts.cleanUp {
 			defer func() {
 				ctxWithTimeout, cancelWithTimeout := context.WithTimeout(context.Background(), 10*time.Second)
 				defer cancelWithTimeout()
 				if err := opts.kc.KiloV1alpha1().Peers().Delete(ctxWithTimeout, peerName, metav1.DeleteOptions{}); err != nil {
-					level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
+					_ = level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
 				} else {
-					level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
+					_ = level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
 				}
 			}()
 		}
@@ -191,7 +183,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return fmt.Errorf("failed to create wg interface: %w", err)
 	}
-	level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
+	_ = level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
 
 	table := route.NewTable()
 	if connectOpts.cleanUp {
@@ -201,7 +193,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if err := iproute.SetAddress(iface, &connectOpts.allowedIP); err != nil {
 		return err
 	}
-	level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
+	_ = level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
 
 	if err := iproute.Set(iface, true); err != nil {
 		return err
@@ -221,7 +213,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 					select {
 					case err, ok := <-errCh:
 						if ok {
-							level.Error(logger).Log("err", err.Error())
+							_ = level.Error(logger).Log("err", err.Error())
 						} else {
 							return nil
 						}
@@ -234,9 +226,9 @@ func runConnect(cmd *cobra.Command, args []string) error {
 				cancel()
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
 			},
 		)
@@ -244,10 +236,10 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	{
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting syncer")
+				_ = level.Info(logger).Log("msg", "starting syncer")
 				for {
 					if err := sync(table, peerName, privateKey, iface, logger); err != nil {
-						level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
+						_ = level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
 					}
 					select {
 					case <-time.After(connectOpts.resyncPeriod):
@@ -259,9 +251,9 @@ func runConnect(cmd *cobra.Command, args []string) error {
 				cancel()
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
 			})
 	}
@@ -276,13 +268,13 @@ func runConnect(cmd *cobra.Command, args []string) error {
 
 func cleanUp(iface int, t *route.Table, logger log.Logger) {
 	if err := iproute.Set(iface, false); err != nil {
-		level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
+		_ = level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
 	}
 	if err := iproute.RemoveInterface(iface); err != nil {
-		level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
+		_ = level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
 	}
 	if err := t.CleanUp(); err != nil {
-		level.Error(logger).Log("failed to clean up routes: %v", err)
+		_ = level.Error(logger).Log("failed to clean up routes: %v", err)
 	}
 }
 
@@ -350,7 +342,7 @@ func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int
 	if err != nil {
 		return err
 	}
-	defer wgClient.Close()
+	defer func() { _ = wgClient.Close() }()
 
 	current, err := wgClient.Device(connectOpts.interfaceName)
 	if err != nil {
@@ -364,9 +356,9 @@ func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int
 		// If the key is empty, then it's the first time we are running
 		// so don't bother printing a diff.
 		if current.PrivateKey != [wgtypes.KeyLen]byte{} {
-			level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
+			_ = level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
 		}
-		level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
+		_ = level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
 		if err := wgClient.ConfigureDevice(connectOpts.interfaceName, conf.WGConfig()); err != nil {
 			return err
 		}

+ 21 - 8
cmd/kgctl/main.go

@@ -43,22 +43,22 @@ const (
 )
 
 var (
-	availableBackends = strings.Join([]string{
+	availableBackends = []string{
 		k8s.Backend,
-	}, ", ")
-	availableGranularities = strings.Join([]string{
+	}
+	availableGranularities = []string{
 		string(mesh.LogicalGranularity),
 		string(mesh.FullGranularity),
 		string(mesh.AutoGranularity),
-	}, ", ")
-	availableLogLevels = strings.Join([]string{
+	}
+	availableLogLevels = []string{
 		logLevelAll,
 		logLevelDebug,
 		logLevelInfo,
 		logLevelWarn,
 		logLevelError,
 		logLevelNone,
-	}, ", ")
+	}
 	opts struct {
 		backend     mesh.Backend
 		granularity mesh.Granularity
@@ -72,6 +72,17 @@ var (
 )
 
 func runRoot(c *cobra.Command, _ []string) error {
+	p := c
+	for {
+		if p.Name() == "completion" || strings.HasPrefix(p.Name(), cobra.ShellCompRequestCmd) {
+			return nil
+		}
+		if !p.HasParent() {
+			break
+		}
+		p = p.Parent()
+	}
+
 	if opts.port < 1 || opts.port > 1<<16-1 {
 		return fmt.Errorf("invalid port: port mus be in range [%d:%d], but got %d", 1, 1<<16-1, opts.port)
 	}
@@ -118,8 +129,10 @@ func main() {
 		Version:           version.Version,
 		SilenceErrors:     true,
 	}
-	cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
-	cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
+	cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", strings.Join(availableBackends, ", ")))
+	_ = cmd.RegisterFlagCompletionFunc("backend", cobra.FixedCompletions(availableBackends, cobra.ShellCompDirectiveNoFileComp))
+	cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", strings.Join(availableGranularities, ", ")))
+	_ = cmd.RegisterFlagCompletionFunc("mesh-granularity", cobra.FixedCompletions(availableGranularities, cobra.ShellCompDirectiveNoFileComp))
 	defaultKubeconfig := os.Getenv("KUBECONFIG")
 	if _, err := os.Stat(defaultKubeconfig); os.IsNotExist(err) {
 		defaultKubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config")

+ 29 - 1
cmd/kgctl/showconf.go

@@ -101,6 +101,20 @@ func showConfNode() *cobra.Command {
 		Short: "Show the WireGuard configuration for a node in the Kilo network",
 		RunE:  runShowConfNode,
 		Args:  cobra.ExactArgs(1),
+		ValidArgsFunction: func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+			ns, err := opts.backend.Nodes().List()
+			if err != nil {
+				cobra.CompError(err.Error())
+				return nil, cobra.ShellCompDirectiveNoFileComp
+			}
+			completions := make([]string, 0, len(ns))
+			for _, n := range ns {
+				if n.Ready() {
+					completions = append(completions, n.Name)
+				}
+			}
+			return completions, cobra.ShellCompDirectiveNoFileComp
+		},
 	}
 }
 
@@ -110,6 +124,20 @@ func showConfPeer() *cobra.Command {
 		Short: "Show the WireGuard configuration for a peer in the Kilo network",
 		RunE:  runShowConfPeer,
 		Args:  cobra.ExactArgs(1),
+		ValidArgsFunction: func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+			ps, err := opts.backend.Peers().List()
+			if err != nil {
+				cobra.CompError(err.Error())
+				return nil, cobra.ShellCompDirectiveNoFileComp
+			}
+			completions := make([]string, 0, len(ps))
+			for _, p := range ps {
+				if p.Ready() {
+					completions = append(completions, p.Name)
+				}
+			}
+			return completions, cobra.ShellCompDirectiveNoFileComp
+		},
 	}
 }
 
@@ -165,7 +193,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 		}
 	}
 	if !found {
-		_, err := os.Stderr.WriteString(fmt.Sprintf("Node %q is not a leader node\n", hostname))
+		_, err := fmt.Fprintf(os.Stderr, "Node %q is not a leader node\n", hostname)
 		return err
 	}
 

+ 2 - 3
docs/kg.md

@@ -14,7 +14,7 @@ Example manifests can be found [in the manifests directory](https://github.com/s
 
 The behavior of `kg` can be configured using the command line flags listed below.
 
-[embedmd]:# (../tmp/help.txt)
+[embedmd]:# (../help.txt)
 ```txt
 kg is the Kilo agent.
 		It runs on every node of a cluster,
@@ -26,7 +26,7 @@ Usage:
   kg [command]
 
 Available Commands:
-  completion  generate the autocompletion script for the specified shell
+  completion  Generate the autocompletion script for the specified shell
   help        Help about any command
   version     Print the version and exit.
   webhook     webhook starts a HTTPS server to validate updates and creations of Kilo peers.
@@ -58,5 +58,4 @@ Flags:
       --subnet string                  CIDR from which to allocate addresses for WireGuard interfaces. (default "10.4.0.0/16")
       --topology-label string          Kubernetes node label used to group nodes into logical locations. (default "topology.kubernetes.io/region")
       --version                        Print version and exit.
-
 ```

+ 0 - 1
e2e/full-mesh.sh

@@ -6,7 +6,6 @@ setup_suite() {
 	# shellcheck disable=SC2016
 	_kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
 	block_until_ready_by_name kube-system kilo-userspace 
-	_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }
 
 test_full_mesh_connectivity() {

+ 2 - 2
e2e/handlers.sh

@@ -11,13 +11,13 @@ setup_suite() {
 test_connect() {
 	local PEER=test
 	local ALLOWED_IP=10.5.0.1/32
-        docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$KGCTL_BINARY":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect "$PEER" --allowed-ip "$ALLOWED_IP"
+        docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$(which "$KGCTL_BINARY")":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect "$PEER" --allowed-ip "$ALLOWED_IP"
 	assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
         docker stop "$PEER"
 
 	local PEER=test-hostname
 	local ALLOWED_IP=10.5.0.1/32
-        docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$KGCTL_BINARY":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect --allowed-ip "$ALLOWED_IP"
+        docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$(which "$KGCTL_BINARY")":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect --allowed-ip "$ALLOWED_IP"
 	assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host using auto-discovered name"
         docker stop "$PEER"
 }

+ 8 - 7
e2e/kilo-kind-userspace.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -135,10 +135,10 @@ spec:
         - name: kubeconfig
           mountPath: /etc/kubernetes
           readOnly: true
-      - name: boringtun
-        image: leonnicolas/boringtun
+      - name: wireguard
+        image: masipcat/wireguard-go:0.0.20230223
         args:
-        - --disable-drop-privileges
+        - wireguard-go
         - --foreground
         - kilo0
         securityContext:
@@ -154,7 +154,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 4 - 4
e2e/lib.sh

@@ -65,7 +65,7 @@ build_kind_config() {
 }
 
 create_interface() {
-	docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges "$1"
+	docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug masipcat/wireguard-go:0.0.20230223 wireguard-go --foreground "$1"
 }
 
 delete_interface() {
@@ -126,14 +126,14 @@ create_cluster() {
 	# Apply Kilo the the cluster.
 	_kubectl apply -f ../manifests/crds.yaml
 	_kubectl apply -f kilo-kind-userspace.yaml
-	block_until_ready_by_name kube-system kilo-userspace
+	if ! block_until_ready_by_name kube-system kilo-userspace; then return 1; fi
 	_kubectl wait nodes --all --for=condition=Ready
 	# Wait for CoreDNS.
 	block_until_ready kube_system k8s-app=kube-dns
 	# Ensure the curl helper is not scheduled on a control-plane node.
 	_kubectl apply -f helper-curl.yaml
-	block_until_ready_by_name default curl
-	_kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
+	block_until_ready_by_name default curl || return 1
+	_kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/control-plane:NoSchedule-
 	_kubectl apply -f https://raw.githubusercontent.com/kilo-io/adjacency/main/example.yaml
 	block_until_ready_by_name default adjacency
 }

+ 0 - 1
e2e/location-mesh.sh

@@ -6,7 +6,6 @@ setup_suite() {
 	# shellcheck disable=SC2016
 	_kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
 	block_until_ready_by_name kube-system kilo-userspace 
-	_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }
 
 test_location_mesh_connectivity() {

+ 3 - 0
e2e/multi-cluster.sh

@@ -58,6 +58,9 @@ EOF
 }
 
 teardown_suite () {
+	if [ -n "$E2E_SKIP_TEARDOWN_ON_FAILURE" ]; then
+		return
+	fi
 	# Remove the nodes in cluster2 as peers of cluster1.
 	for n in $(KUBECONFIG=$KUBECONFIG2 _kubectl get no -o name | cut -d'/' -f2); do
 		_kubectl delete peer "$n"

+ 3 - 0
e2e/setup.sh

@@ -3,5 +3,8 @@
 . lib.sh
 
 teardown_suite () {
+	if [ -n "$E2E_SKIP_TEARDOWN_ON_FAILURE" ]; then
+		return
+	fi
 	delete_cluster
 }

+ 121 - 0
flake.lock

@@ -0,0 +1,121 @@
+{
+  "nodes": {
+    "flake-compat": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1761588595,
+        "narHash": "sha256-XKUZz9zewJNUj46b4AJdiRZJAvSZ0Dqj2BNfXvFlJC4=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "f387cd2afec9419c8ee37694406ca490c3f34ee5",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
+    "flake-parts": {
+      "inputs": {
+        "nixpkgs-lib": "nixpkgs-lib"
+      },
+      "locked": {
+        "lastModified": 1765495779,
+        "narHash": "sha256-MhA7wmo/7uogLxiewwRRmIax70g6q1U/YemqTGoFHlM=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "5635c32d666a59ec9a55cab87e898889869f7b71",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "type": "github"
+      }
+    },
+    "git-hooks-nix": {
+      "inputs": {
+        "flake-compat": "flake-compat",
+        "gitignore": "gitignore",
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1765464257,
+        "narHash": "sha256-dixPWKiHzh80PtD0aLuxYNQ0xP+843dfXG/yM3OzaYQ=",
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
+        "rev": "09e45f2598e1a8499c3594fe11ec2943f34fe509",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
+        "type": "github"
+      }
+    },
+    "gitignore": {
+      "inputs": {
+        "nixpkgs": [
+          "git-hooks-nix",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1709087332,
+        "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1765472234,
+        "narHash": "sha256-9VvC20PJPsleGMewwcWYKGzDIyjckEz8uWmT0vCDYK0=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "2fbfb1d73d239d2402a8fe03963e37aab15abe8b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs-lib": {
+      "locked": {
+        "lastModified": 1761765539,
+        "narHash": "sha256-b0yj6kfvO8ApcSE+QmA6mUfu8IYG6/uU28OFn4PaC8M=",
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "rev": "719359f4562934ae99f5443f20aa06c2ffff91fc",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "flake-parts": "flake-parts",
+        "git-hooks-nix": "git-hooks-nix",
+        "nixpkgs": "nixpkgs"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}

+ 295 - 0
flake.nix

@@ -0,0 +1,295 @@
+{
+  description = "Kilo is a multi-cloud network overlay built on WireGuard and designed for Kubernetes (k8s + wg = kg)";
+
+  inputs = {
+    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
+    flake-parts.url = "github:hercules-ci/flake-parts";
+    git-hooks-nix = {
+      url = "github:cachix/git-hooks.nix";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
+  };
+
+  outputs =
+    { self, ... }@inputs:
+    inputs.flake-parts.lib.mkFlake { inherit inputs; } {
+      imports = [
+        inputs.git-hooks-nix.flakeModule
+      ];
+      systems = [
+        "x86_64-linux"
+        "aarch64-linux"
+        "aarch64-darwin"
+      ];
+      perSystem =
+        {
+          pkgs,
+          system,
+          config,
+          ...
+        }:
+        {
+          packages =
+            let
+              _version = builtins.getEnv "VERSION";
+              homepage = "https://github.com/squat/kilo";
+              base = pkgs.buildGoModule rec {
+                pname = "kilo";
+                version = if _version != "" then _version else toString (self.rev or self.dirtyRev or "unknown");
+                src = ./.;
+                vendorHash = null;
+                env.CGO_ENABLED = 0;
+                ldflags = [
+                  "-X github.com/squat/kilo/pkg/version.Version=${version}"
+                ];
+                nativeBuildInputs = [ pkgs.installShellFiles ];
+                meta = {
+                  inherit homepage;
+                };
+              };
+              kg = base.overrideAttrs {
+                pname = "kg";
+                subPackages = [
+                  "cmd/kg"
+                ];
+                postInstall = ''
+                  installShellCompletion --cmd kg \
+                    --bash <($out/bin/kg completion bash) \
+                    --fish <($out/bin/kg completion fish) \
+                    --zsh <($out/bin/kg completion zsh)
+                '';
+                meta.mainProgram = "kg";
+                meta.description = "kg is the Kilo agent that runs on every Kubernetes node in a Kilo mesh";
+              };
+
+              kgctl = base.overrideAttrs {
+                pname = "kgctl";
+                subPackages = [
+                  "cmd/kgctl"
+                ];
+                postInstall = ''
+                  installShellCompletion --cmd kgctl \
+                    --bash <($out/bin/kgctl completion bash) \
+                    --fish <($out/bin/kgctl completion fish) \
+                    --zsh <($out/bin/kgctl completion zsh)
+                '';
+                meta.mainProgram = "kgctl";
+                meta.description = "kgctl is Kilo's command line tool for inspecting and interacting with clusters: kgctl. It can be used to understand a mesh's topology, get the WireGuard configuration for a peer, or graph a cluster";
+
+              };
+
+              kilo = pkgs.symlinkJoin {
+                name = "kilo";
+                paths = [
+                  kg
+                  kgctl
+                ];
+                meta = {
+                  inherit homepage;
+                  description = "Kilo is a multi-cloud network overlay built on WireGuard and designed for Kubernetes (k8s + wg = kg)";
+                };
+              };
+
+            in
+            {
+              inherit kg kgctl kilo;
+              default = kilo;
+            }
+            // (builtins.listToAttrs (
+              map
+                (target: {
+                  name = "kg-cross-${target.os}-${target.arch}";
+                  value = kg.overrideAttrs {
+                    env.GOOS = target.os;
+                    env.GOARCH = target.arch;
+                    env.CGO_ENABLED = 0;
+                    doCheck = false;
+                    postInstall = "";
+                  };
+                })
+                [
+                  {
+                    os = "linux";
+                    arch = "amd64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm";
+                  }
+                ]
+            ))
+            // (builtins.listToAttrs (
+              map
+                (target: {
+                  name = "kgctl-cross-${target.os}-${target.arch}";
+                  value = kgctl.overrideAttrs {
+                    env.GOOS = target.os;
+                    env.GOARCH = target.arch;
+                    env.CGO_ENABLED = 0;
+                    doCheck = false;
+                    postInstall = "";
+                  };
+                })
+                [
+                  {
+                    os = "linux";
+                    arch = "amd64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm";
+                  }
+                  {
+                    os = "darwin";
+                    arch = "amd64";
+                  }
+                  {
+                    os = "darwin";
+                    arch = "arm64";
+                  }
+                  {
+                    os = "windows";
+                    arch = "amd64";
+                  }
+                ]
+            ))
+            // (builtins.listToAttrs (
+              map
+                (target: {
+                  name = "kilo-cross-${target.os}-${target.arch}";
+                  value = kilo.overrideAttrs {
+                    paths = [
+                      config.packages."kg-cross-${target.os}-${target.arch}"
+                      config.packages."kgctl-cross-${target.os}-${target.arch}"
+                    ];
+                  };
+                })
+                [
+                  {
+                    os = "linux";
+                    arch = "amd64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm64";
+                  }
+                  {
+                    os = "linux";
+                    arch = "arm";
+                  }
+                ]
+            ));
+
+          pre-commit = {
+            check.enable = true;
+            settings = {
+              src = ./.;
+              hooks = {
+                actionlint.enable = true;
+                nixfmt.enable = true;
+                nixfmt.excludes = [ "vendor" ];
+                gofmt.enable = true;
+                gofmt.excludes = [ "vendor" ];
+                golangci-lint.enable = true;
+                golangci-lint.excludes = [ "vendor" ];
+                golangci-lint.extraPackages = [ pkgs.go ];
+                govet.enable = true;
+                govet.excludes = [ "vendor" ];
+                shellcheck.enable = true;
+                shellcheck.excludes = [
+                  ".envrc"
+                  "vendor"
+                ];
+                yamlfmt.enable = true;
+                yamlfmt.args = [
+                  "--formatter"
+                  "indentless_arrays=true"
+                ];
+                yamlfmt.excludes = [
+                  ".github"
+                  "vendor"
+                ];
+                header = {
+                  enable = true;
+                  name = "Header";
+                  entry =
+                    let
+                      headerCheck = pkgs.writeShellApplication {
+                        name = "header-check";
+                        text = ''
+                          HEADER=$(cat ${./.header})
+                          HEADER_LEN=$(wc -l ${./.header} | awk '{print $1}')
+                          FILES=
+                          for f in "$@"; do 
+                              for i in 0 1 2 3 4 5; do 
+                                  FILE=$(tail -n +$i "$f" | ( head -n "$HEADER_LEN"; cat > /dev/null ) | sed "s/[0-9]\{4\}/YEAR/")
+                                  [ "$FILE" = "$HEADER" ] && continue 2
+                              done
+                              FILES="$FILES$f "
+                          done
+                          if [ -n "$FILES" ]; then \
+                              printf 'the following files are missing the license header: %s\n' "$FILES"; \
+                              exit 1
+                          fi
+                        '';
+                      };
+                    in
+                    pkgs.lib.getExe headerCheck;
+                  files = "\\.(go)$";
+                  excludes = [ "vendor" ];
+                };
+                kgMDGen = {
+                  enable = true;
+                  name = "kg.md";
+                  entry =
+                    let
+                      kgMDGen = pkgs.writeShellApplication {
+                        name = "kgmdgen";
+                        text = ''
+                          go run ./cmd/kg/... --help | head -n -2 > help.txt
+                          go tool embedmd -d docs/kg.md
+                        '';
+                      };
+                    in
+                    pkgs.lib.getExe kgMDGen;
+                  files = "^README\\.md$";
+                  extraPackages = [ pkgs.go ];
+                };
+              };
+            };
+          };
+
+          devShells = {
+            default = pkgs.mkShell {
+              inherit (config.pre-commit.devShell) shellHook;
+              packages =
+                with pkgs;
+                [
+                  bash_unit
+                  (config.packages.kgctl.overrideAttrs rec {
+                    version = "dev";
+                    __intentionallyOverridingVersion = true;
+                    ldflags = [
+                      "-X github.com/squat/kilo/pkg/version.Version=${version}"
+                    ];
+                  })
+                  gettext # provides envsubst
+                  go
+                  kind
+                  kubectl
+                  yarn
+                ]
+                ++ config.pre-commit.settings.enabledPackages;
+            };
+          };
+        };
+    };
+}

+ 64 - 46
go.mod

@@ -1,10 +1,9 @@
 module github.com/squat/kilo
 
-go 1.18
+go 1.25
 
 require (
 	github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310
-	github.com/campoy/embedmd v1.0.0
 	github.com/containernetworking/cni v1.0.1
 	github.com/containernetworking/plugins v1.1.1
 	github.com/coreos/go-iptables v0.6.1-0.20220901214115-d2b8608923d1
@@ -12,76 +11,95 @@ require (
 	github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348
 	github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a
 	github.com/oklog/run v1.1.0
-	github.com/prometheus/client_golang v1.11.1
-	github.com/spf13/cobra v1.2.1
+	github.com/prometheus/client_golang v1.16.0
+	github.com/spf13/cobra v1.8.0
 	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
-	golang.org/x/sys v0.5.0
+	golang.org/x/sys v0.26.0
 	golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211124212657-dd7407c86d22
-	honnef.co/go/tools v0.3.1
-	k8s.io/api v0.23.6
-	k8s.io/apiextensions-apiserver v0.23.6
-	k8s.io/apimachinery v0.23.6
-	k8s.io/client-go v0.23.6
-	k8s.io/code-generator v0.23.6
-	sigs.k8s.io/controller-tools v0.8.0
+	k8s.io/api v0.29.15
+	k8s.io/apiextensions-apiserver v0.29.15
+	k8s.io/apimachinery v0.29.15
+	k8s.io/client-go v0.29.15
 )
 
 require (
 	github.com/BurntSushi/toml v0.4.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/campoy/embedmd v1.0.0 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/fatih/color v1.12.0 // indirect
-	github.com/go-logfmt/logfmt v0.5.0 // indirect
-	github.com/go-logr/logr v1.2.0 // indirect
-	github.com/gobuffalo/flect v0.2.3 // indirect
+	github.com/fatih/color v1.16.0 // indirect
+	github.com/go-logfmt/logfmt v0.5.1 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/gobuffalo/flect v1.0.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.6 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/google/uuid v1.2.0 // indirect
-	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/imdario/mergo v0.3.11 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
 	github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/mattn/go-colorable v0.1.8 // indirect
-	github.com/mattn/go-isatty v0.0.12 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mdlayher/genetlink v1.0.0 // indirect
 	github.com/mdlayher/netlink v1.4.1 // indirect
 	github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.28.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.10.1 // indirect
 	github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
-	golang.org/x/crypto v0.1.0 // indirect
+	golang.org/x/crypto v0.28.0 // indirect
 	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
-	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/net v0.7.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
-	golang.org/x/term v0.5.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	golang.org/x/tools v0.1.12 // indirect
+	golang.org/x/mod v0.21.0 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/oauth2 v0.10.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
+	golang.org/x/term v0.25.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/tools v0.26.0 // indirect
 	golang.zx2c4.com/wireguard v0.0.0-20211123210315-387f7c461a16 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
-	k8s.io/klog/v2 v2.30.0 // indirect
-	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
-	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
-	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	honnef.co/go/tools v0.3.1 // indirect
+	k8s.io/code-generator v0.29.15 // indirect
+	k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
+	k8s.io/klog/v2 v2.110.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	sigs.k8s.io/controller-tools v0.14.0 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
+)
+
+tool (
+	github.com/campoy/embedmd
+	honnef.co/go/tools/cmd/staticcheck
+	k8s.io/code-generator/cmd/client-gen
+	k8s.io/code-generator/cmd/deepcopy-gen
+	k8s.io/code-generator/cmd/informer-gen
+	k8s.io/code-generator/cmd/lister-gen
+	sigs.k8s.io/controller-tools/cmd/controller-gen
 )

Разница между файлами не показана из-за своего большого размера
+ 103 - 708
go.sum


+ 22 - 24
manifests/crds.yaml

@@ -1,10 +1,8 @@
----
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.8.0
-  creationTimestamp: null
+    controller-gen.kubebuilder.io/version: v0.14.0
   name: peers.kilo.squat.ai
 spec:
   group: kilo.squat.ai
@@ -21,30 +19,36 @@ spec:
         description: Peer is a WireGuard peer that should have access to the VPN.
         properties:
           apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
             type: string
           kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
             type: string
           metadata:
             type: object
           spec:
-            description: 'Specification of the desired behavior of the Kilo Peer.
-              More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
+            description: |-
+              Specification of the desired behavior of the Kilo Peer. More info:
+              https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status
             properties:
               allowedIPs:
-                description: AllowedIPs is the list of IP addresses that are allowed
+                description: |-
+                  AllowedIPs is the list of IP addresses that are allowed
                   for the given peer's tunnel.
                 items:
                   type: string
                 type: array
               endpoint:
-                description: Endpoint is the initial endpoint for connections to the
-                  peer.
+                description: Endpoint is the initial endpoint for connections to the peer.
                 properties:
                   dnsOrIP:
                     description: DNSOrIP is a DNS name or an IP address.
@@ -65,13 +69,13 @@ spec:
                 - port
                 type: object
               persistentKeepalive:
-                description: PersistentKeepalive is the interval in seconds of the
-                  emission of keepalive packets by the peer. This defaults to 0, which
+                description: |-
+                  PersistentKeepalive is the interval in seconds of the emission
+                  of keepalive packets by the peer. This defaults to 0, which
                   disables the feature.
                 type: integer
               presharedKey:
-                description: PresharedKey is the optional symmetric encryption key
-                  for the peer.
+                description: PresharedKey is the optional symmetric encryption key for the peer.
                 type: string
               publicKey:
                 description: PublicKey is the WireGuard public key for the peer.
@@ -85,9 +89,3 @@ spec:
         type: object
     served: true
     storage: true
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []

+ 3 - 3
manifests/kilo-bootkube-flannel.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet

+ 5 - 4
manifests/kilo-bootkube.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -135,7 +135,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 3 - 3
manifests/kilo-k3s-cilium.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: v1
 kind: ConfigMap

+ 3 - 3
manifests/kilo-k3s-flannel.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: v1
 kind: ConfigMap

+ 10 - 8
manifests/kilo-k3s-userspace-heterogeneous.yaml

@@ -75,9 +75,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: v1
 kind: ConfigMap
@@ -189,7 +189,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
@@ -297,10 +298,10 @@ spec:
         - name: wireguard
           mountPath: /var/run/wireguard
           readOnly: false
-      - name: boringtun
-        image: leonnicolas/boringtun:cc19859
+      - name: wireguard
+        image: masipcat/wireguard-go:0.0.20230223
         args:
-        - --disable-drop-privileges
+        - wireguard-go
         - --foreground
         - kilo0
         securityContext:
@@ -336,7 +337,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 9 - 7
manifests/kilo-k3s-userspace.yaml

@@ -74,10 +74,11 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
+
 ---
 apiVersion: v1
 kind: ConfigMap
@@ -164,10 +165,10 @@ spec:
         - name: wireguard
           mountPath: /var/run/wireguard
           readOnly: false
-      - name: boringtun
-        image: leonnicolas/boringtun:cc19859
+      - name: wireguard
+        image: masipcat/wireguard-go:0.0.20230223
         args:
-        - --disable-drop-privileges
+        - wireguard-go
         - --foreground
         - kilo0
         securityContext:
@@ -203,7 +204,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 5 - 4
manifests/kilo-k3s.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: v1
 kind: ConfigMap
@@ -185,7 +185,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 16 - 20
manifests/kilo-kubeadm-cilium.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -92,18 +92,16 @@ spec:
         volumeMounts:
         - name: kilo-dir
           mountPath: /var/lib/kilo
-
-        # with kube-proxy configmap
-        # - name: kubeconfig
-        #   mountPath: /etc/kubernetes
-        #   readOnly: true
-
+        # with kube-proxy configmap
+        # - name: kubeconfig
+        #   mountPath: /etc/kubernetes
+        #   readOnly: true
         # without kube-proxy host kubeconfig binding
         - name: kubeconfig
           mountPath: /etc/kubernetes/kubeconfig
           subPath: admin.conf
           readOnly: true
-
         - name: lib-modules
           mountPath: /lib/modules
           readOnly: true
@@ -119,20 +117,18 @@ spec:
       - name: kilo-dir
         hostPath:
           path: /var/lib/kilo
-
-      # with kube-proxy configmap
-      # - name: kubeconfig
-      #   configMap:
-      #     name: kube-proxy
-      #     items:
-      #       - key: kubeconfig.conf
-      #         path: kubeconfig
-
+      # with kube-proxy configmap
+      # - name: kubeconfig
+      #   configMap:
+      #     name: kube-proxy
+      #     items:
+      #       - key: kubeconfig.conf
+      #         path: kubeconfig
       # without kube-proxy host kubeconfig binding
       - name: kubeconfig
+
         hostPath:
           path: /etc/kubernetes
-
       - name: lib-modules
         hostPath:
           path: /lib/modules

+ 11 - 11
manifests/kilo-kubeadm-flannel-userspace.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -66,18 +66,18 @@ spec:
       serviceAccountName: kilo
       hostNetwork: true
       containers:
-      - name: boringtun
-        image: leonnicolas/boringtun:cc19859
+      - name: wireguard
+        image: masipcat/wireguard-go:0.0.20230223
         args:
-          - --disable-drop-privileges=true
-          - --foreground
-          - kilo0
+        - wireguard-go
+        - --foreground
+        - kilo0
         securityContext:
           privileged: true
         volumeMounts:
-          - name: wireguard
-            mountPath: /var/run/wireguard
-            readOnly: false
+        - name: wireguard
+          mountPath: /var/run/wireguard
+          readOnly: false
       - name: kilo
         image: squat/kilo:0.6.0
         args:

+ 3 - 3
manifests/kilo-kubeadm-flannel.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet

+ 13 - 12
manifests/kilo-kubeadm-userspace.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -100,19 +100,19 @@ spec:
       serviceAccountName: kilo
       hostNetwork: true
       containers:
-      - name: boringtun
-        image: leonnicolas/boringtun:cc19859
+      - name: wireguard
+        image: masipcat/wireguard-go:0.0.20230223
         imagePullPolicy: IfNotPresent
         args:
-          - --disable-drop-privileges
-          - --foreground
-          - kilo0
+        - wireguard-go
+        - --foreground
+        - kilo0
         securityContext:
           privileged: true
         volumeMounts:
-          - name: wireguard
-            mountPath: /var/run/wireguard
-            readOnly: false
+        - name: wireguard
+          mountPath: /var/run/wireguard
+          readOnly: false
       - name: kilo
         image: squat/kilo:0.6.0
         imagePullPolicy: IfNotPresent
@@ -155,7 +155,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 5 - 4
manifests/kilo-kubeadm.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -135,7 +135,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 3 - 3
manifests/kilo-typhoon-flannel.yaml

@@ -40,9 +40,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet

+ 5 - 4
manifests/kilo-typhoon.yaml

@@ -74,9 +74,9 @@ roleRef:
   kind: ClusterRole
   name: kilo
 subjects:
-  - kind: ServiceAccount
-    name: kilo
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kilo
+  namespace: kube-system
 ---
 apiVersion: apps/v1
 kind: DaemonSet
@@ -135,7 +135,8 @@ spec:
         command:
         - /bin/sh
         - -c
-        - set -e -x;
+        - |
+          set -e -x;
           cp /opt/cni/bin/* /host/opt/cni/bin/;
           TMP_CONF="$CNI_CONF_NAME".tmp;
           echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;

+ 3 - 3
manifests/kube-router.yaml

@@ -107,6 +107,6 @@ roleRef:
   kind: ClusterRole
   name: kube-router
 subjects:
-  - kind: ServiceAccount
-    name: kube-router
-    namespace: kube-system
+- kind: ServiceAccount
+  name: kube-router
+  namespace: kube-system

+ 12 - 12
manifests/peer-validation.yaml

@@ -10,11 +10,11 @@ metadata:
 webhooks:
 - name: "peers.kilo.squat.ai"
   rules:
-  - apiGroups:   ["kilo.squat.ai"]
+  - apiGroups: ["kilo.squat.ai"]
     apiVersions: ["v1alpha1"]
-    operations:  ["CREATE","UPDATE"]
-    resources:   ["peers"]
-    scope:       "Cluster"
+    operations: ["CREATE", "UPDATE"]
+    resources: ["peers"]
+    scope: "Cluster"
   clientConfig:
     service:
       namespace: "kilo"
@@ -76,8 +76,8 @@ spec:
   selector:
     app.kubernetes.io/name: peer-validation-server
   ports:
-    - port: 443
-      targetPort: webhook
+  - port: 443
+    targetPort: webhook
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -109,9 +109,9 @@ roleRef:
   kind: ClusterRole
   name: kilo-peer-validation
 subjects:
-  - kind: ServiceAccount
-    namespace: kilo
-    name: kilo-peer-validation
+- kind: ServiceAccount
+  namespace: kilo
+  name: kilo-peer-validation
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
@@ -137,9 +137,9 @@ roleRef:
   kind: Role
   name: kilo-peer-validation
 subjects:
-  - kind: ServiceAccount
-    namespace: kilo
-    name: kilo-peer-validation
+- kind: ServiceAccount
+  namespace: kilo
+  name: kilo-peer-validation
 ---
 apiVersion: batch/v1
 kind: Job

+ 7 - 7
pkg/iptables/iptables.go

@@ -35,7 +35,7 @@ func ipv6Disabled() (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	defer f.Close()
+	defer func() { _ = f.Close() }()
 	disabled := make([]byte, 1)
 	if _, err = io.ReadFull(f, disabled); err != nil {
 		return false, err
@@ -145,7 +145,7 @@ func (r *rule) Append(client Client) error {
 func (r *rule) Delete(client Client) error {
 	// Ignore the returned error as an error likely means
 	// that the rule doesn't exist, which is fine.
-	client.Delete(r.table, r.chain, r.spec...)
+	_ = client.Delete(r.table, r.chain, r.spec...)
 	return nil
 }
 
@@ -210,7 +210,7 @@ func (c *chain) Delete(client Client) error {
 	}
 	// Ignore the returned error as an error likely means
 	// that the chain doesn't exist, which is fine.
-	client.DeleteChain(c.table, c.chain)
+	_ = client.DeleteChain(c.table, c.chain)
 	return nil
 }
 
@@ -223,7 +223,7 @@ func (c *chain) Exists(client Client) (bool, error) {
 	case err == nil:
 		// If there was no error adding a new chain, then it did not exist.
 		// Delete it and return false.
-		client.DeleteChain(c.table, c.chain)
+		_ = client.DeleteChain(c.table, c.chain)
 		return false, nil
 	case ok && se.ExitStatus() == existsErr:
 		return true, nil
@@ -317,7 +317,7 @@ func New(opts ...ControllerOption) (*Controller, error) {
 			return nil, fmt.Errorf("failed to check IPv6 status: %v", err)
 		}
 		if disabled {
-			level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
+			_ = level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
 			c.v6 = &fakeClient{}
 		} else {
 			v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
@@ -380,7 +380,7 @@ func (c *Controller) reconcileAppendRules(rc ruleCache) error {
 			return fmt.Errorf("failed to check if rule exists: %v", err)
 		}
 		if !ok {
-			level.Info(c.logger).Log("msg", fmt.Sprintf("applying %d iptables rules", len(c.appendRules)-i))
+			_ = level.Info(c.logger).Log("msg", fmt.Sprintf("applying %d iptables rules", len(c.appendRules)-i))
 			if err := c.resetFromIndex(i, c.appendRules); err != nil {
 				return fmt.Errorf("failed to add rule: %v", err)
 			}
@@ -397,7 +397,7 @@ func (c *Controller) reconcilePrependRules(rc ruleCache) error {
 			return fmt.Errorf("failed to check if rule exists: %v", err)
 		}
 		if !ok {
-			level.Info(c.logger).Log("msg", "prepending iptables rule")
+			_ = level.Info(c.logger).Log("msg", "prepending iptables rule")
 			if err := r.Prepend(c.client(r.Proto())); err != nil {
 				return fmt.Errorf("failed to prepend rule: %v", err)
 			}

+ 1 - 1
pkg/k8s/apis/kilo/v1alpha1/zz_generated.deepcopy.go

@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 30 - 30
pkg/k8s/backend.go

@@ -162,7 +162,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error {
 	}); !ok {
 		return errors.New("failed to sync node cache")
 	}
-	nb.informer.AddEventHandler(
+	_, err := nb.informer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				n, ok := obj.(*v1.Node)
@@ -195,7 +195,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error {
 			},
 		},
 	)
-	return nil
+	return err
 }
 
 // List gets all the Nodes in the cluster.
@@ -218,29 +218,29 @@ func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) er
 		return fmt.Errorf("failed to find node: %v", err)
 	}
 	n := old.DeepCopy()
-	n.ObjectMeta.Annotations[endpointAnnotationKey] = node.Endpoint.String()
+	n.Annotations[endpointAnnotationKey] = node.Endpoint.String()
 	if node.InternalIP == nil {
-		n.ObjectMeta.Annotations[internalIPAnnotationKey] = ""
+		n.Annotations[internalIPAnnotationKey] = ""
 	} else {
-		n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
+		n.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
 	}
-	n.ObjectMeta.Annotations[keyAnnotationKey] = node.Key.String()
-	n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
+	n.Annotations[keyAnnotationKey] = node.Key.String()
+	n.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
 	if node.WireGuardIP == nil {
-		n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = ""
+		n.Annotations[wireGuardIPAnnotationKey] = ""
 	} else {
-		n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = node.WireGuardIP.String()
+		n.Annotations[wireGuardIPAnnotationKey] = node.WireGuardIP.String()
 	}
 	if node.DiscoveredEndpoints == nil {
-		n.ObjectMeta.Annotations[discoveredEndpointsKey] = ""
+		n.Annotations[discoveredEndpointsKey] = ""
 	} else {
 		discoveredEndpoints, err := json.Marshal(node.DiscoveredEndpoints)
 		if err != nil {
 			return err
 		}
-		n.ObjectMeta.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
+		n.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
 	}
-	n.ObjectMeta.Annotations[granularityKey] = string(node.Granularity)
+	n.Annotations[granularityKey] = string(node.Granularity)
 	oldData, err := json.Marshal(old)
 	if err != nil {
 		return err
@@ -275,37 +275,37 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	if err != nil {
 		subnet = nil
 	}
-	_, leader := node.ObjectMeta.Annotations[leaderAnnotationKey]
+	_, leader := node.Annotations[leaderAnnotationKey]
 	// Allow the region to be overridden by an explicit location.
-	location, ok := node.ObjectMeta.Annotations[locationAnnotationKey]
+	location, ok := node.Annotations[locationAnnotationKey]
 	if !ok {
-		location = node.ObjectMeta.Labels[topologyLabel]
+		location = node.Labels[topologyLabel]
 	}
 	// Allow the endpoint to be overridden.
-	endpoint := wireguard.ParseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
+	endpoint := wireguard.ParseEndpoint(node.Annotations[forceEndpointAnnotationKey])
 	if endpoint == nil {
-		endpoint = wireguard.ParseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
+		endpoint = wireguard.ParseEndpoint(node.Annotations[endpointAnnotationKey])
 	}
 	// Allow the internal IP to be overridden.
-	internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey])
+	internalIP := normalizeIP(node.Annotations[forceInternalIPAnnotationKey])
 	if internalIP == nil {
-		internalIP = normalizeIP(node.ObjectMeta.Annotations[internalIPAnnotationKey])
+		internalIP = normalizeIP(node.Annotations[internalIPAnnotationKey])
 	}
 	// Set the ForceInternalIP flag, if force-internal-ip annotation was set to "".
 	noInternalIP := false
-	if s, ok := node.ObjectMeta.Annotations[forceInternalIPAnnotationKey]; ok && (s == "" || s == "-") {
+	if s, ok := node.Annotations[forceInternalIPAnnotationKey]; ok && (s == "" || s == "-") {
 		noInternalIP = true
 		internalIP = nil
 	}
 	// Set Wireguard PersistentKeepalive setting for the node.
 	var persistentKeepalive time.Duration
-	if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; ok {
+	if keepAlive, ok := node.Annotations[persistentKeepaliveKey]; ok {
 		// We can ignore the error, because p will be set to 0 if an error occures.
 		p, _ := strconv.ParseInt(keepAlive, 10, 64)
 		persistentKeepalive = time.Duration(p) * time.Second
 	}
 	var lastSeen int64
-	if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok {
+	if ls, ok := node.Annotations[lastSeenAnnotationKey]; !ok {
 		lastSeen = 0
 	} else {
 		if lastSeen, err = strconv.ParseInt(ls, 10, 64); err != nil {
@@ -313,7 +313,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		}
 	}
 	var discoveredEndpoints map[string]*net.UDPAddr
-	if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok {
+	if de, ok := node.Annotations[discoveredEndpointsKey]; ok {
 		err := json.Unmarshal([]byte(de), &discoveredEndpoints)
 		if err != nil {
 			discoveredEndpoints = nil
@@ -321,7 +321,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	}
 	// Set allowed IPs for a location.
 	var allowedLocationIPs []net.IPNet
-	if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok {
+	if str, ok := node.Annotations[allowedLocationIPsKey]; ok {
 		for _, ip := range strings.Split(str, ",") {
 			if ipnet := normalizeIP(ip); ipnet != nil {
 				allowedLocationIPs = append(allowedLocationIPs, *ipnet)
@@ -329,7 +329,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		}
 	}
 	var meshGranularity mesh.Granularity
-	if gr, ok := node.ObjectMeta.Annotations[granularityKey]; ok {
+	if gr, ok := node.Annotations[granularityKey]; ok {
 		meshGranularity = mesh.Granularity(gr)
 		switch meshGranularity {
 		case mesh.LogicalGranularity:
@@ -340,7 +340,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	}
 
 	// TODO log some error or warning.
-	key, _ := wgtypes.ParseKey(node.ObjectMeta.Annotations[keyAnnotationKey])
+	key, _ := wgtypes.ParseKey(node.Annotations[keyAnnotationKey])
 
 	return &mesh.Node{
 		// Endpoint and InternalIP should only ever fail to parse if the
@@ -362,7 +362,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		// WireGuardIP can fail to parse if the node is not a leader or if
 		// the node's agent has not yet reconciled. In either case, the IP
 		// will parse as nil.
-		WireGuardIP:         normalizeIP(node.ObjectMeta.Annotations[wireGuardIPAnnotationKey]),
+		WireGuardIP:         normalizeIP(node.Annotations[wireGuardIPAnnotationKey]),
 		DiscoveredEndpoints: discoveredEndpoints,
 		AllowedLocationIPs:  allowedLocationIPs,
 		Granularity:         meshGranularity,
@@ -403,7 +403,7 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
 
 	key, err := wgtypes.ParseKey(peer.Spec.PublicKey)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
+		_ = level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
 	}
 	var psk *wgtypes.Key
 	if k, err := wgtypes.ParseKey(peer.Spec.PresharedKey); err != nil {
@@ -458,7 +458,7 @@ func (pb *peerBackend) Init(ctx context.Context) error {
 	}); !ok {
 		return errors.New("failed to sync peer cache")
 	}
-	pb.informer.AddEventHandler(
+	_, err := pb.informer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1alpha1.Peer)
@@ -491,7 +491,7 @@ func (pb *peerBackend) Init(ctx context.Context) error {
 			},
 		},
 	)
-	return nil
+	return err
 }
 
 // List gets all the Peers in the cluster.

+ 2 - 2
pkg/k8s/backend_test.go

@@ -316,8 +316,8 @@ func TestTranslateNode(t *testing.T) {
 		},
 	} {
 		n := &v1.Node{}
-		n.ObjectMeta.Annotations = tc.annotations
-		n.ObjectMeta.Labels = tc.labels
+		n.Annotations = tc.annotations
+		n.Labels = tc.labels
 		n.Spec.PodCIDR = tc.subnet
 		node := translateNode(n, RegionLabelKey)
 		if diff := pretty.Compare(node, tc.out); diff != "" {

+ 2 - 3
pkg/k8s/clientset/versioned/clientset.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -31,8 +31,7 @@ type Interface interface {
 	KiloV1alpha1() kilov1alpha1.KiloV1alpha1Interface
 }
 
-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
+// Clientset contains the clients for groups.
 type Clientset struct {
 	*discovery.DiscoveryClient
 	kiloV1alpha1 *kilov1alpha1.KiloV1alpha1Client

+ 1 - 1
pkg/k8s/clientset/versioned/fake/clientset_generated.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/fake/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/fake/register.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/scheme/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/scheme/register.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/fake_kilo_client.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 3 - 4
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake/fake_peer.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@ import (
 	v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
 	testing "k8s.io/client-go/testing"
@@ -33,9 +32,9 @@ type FakePeers struct {
 	Fake *FakeKiloV1alpha1
 }
 
-var peersResource = schema.GroupVersionResource{Group: "kilo.squat.ai", Version: "v1alpha1", Resource: "peers"}
+var peersResource = v1alpha1.SchemeGroupVersion.WithResource("peers")
 
-var peersKind = schema.GroupVersionKind{Group: "kilo.squat.ai", Version: "v1alpha1", Kind: "Peer"}
+var peersKind = v1alpha1.SchemeGroupVersion.WithKind("Peer")
 
 // Get takes name of the peer, and returns the corresponding peer object, and an error if there is any.
 func (c *FakePeers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Peer, err error) {

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/generated_expansion.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/kilo_client.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 87 - 6
pkg/k8s/informers/factory.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -40,11 +40,17 @@ type sharedInformerFactory struct {
 	lock             sync.Mutex
 	defaultResync    time.Duration
 	customResync     map[reflect.Type]time.Duration
+	transform        cache.TransformFunc
 
 	informers map[reflect.Type]cache.SharedIndexInformer
 	// startedInformers is used for tracking which informers have been started.
 	// This allows Start() to be called multiple times safely.
 	startedInformers map[reflect.Type]bool
+	// wg tracks how many goroutines were started.
+	wg sync.WaitGroup
+	// shuttingDown is true when Shutdown has been called. It may still be running
+	// because it needs to wait for goroutines.
+	shuttingDown bool
 }
 
 // WithCustomResyncConfig sets a custom resync period for the specified informer types.
@@ -73,6 +79,14 @@ func WithNamespace(namespace string) SharedInformerOption {
 	}
 }
 
+// WithTransform sets a transform on all informers.
+func WithTransform(transform cache.TransformFunc) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		factory.transform = transform
+		return factory
+	}
+}
+
 // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
 func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
 	return NewSharedInformerFactoryWithOptions(client, defaultResync)
@@ -105,20 +119,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
 	return factory
 }
 
-// Start initializes all requested informers.
 func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 
+	if f.shuttingDown {
+		return
+	}
+
 	for informerType, informer := range f.informers {
 		if !f.startedInformers[informerType] {
-			go informer.Run(stopCh)
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
 			f.startedInformers[informerType] = true
 		}
 	}
 }
 
-// WaitForCacheSync waits for all started informers' cache were synced.
+func (f *sharedInformerFactory) Shutdown() {
+	f.lock.Lock()
+	f.shuttingDown = true
+	f.lock.Unlock()
+
+	// Will return immediately if there is nothing to wait for.
+	f.wg.Wait()
+}
+
 func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
 	informers := func() map[reflect.Type]cache.SharedIndexInformer {
 		f.lock.Lock()
@@ -140,7 +173,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref
 	return res
 }
 
-// InternalInformerFor returns the SharedIndexInformer for obj using an internal
+// InformerFor returns the SharedIndexInformer for obj using an internal
 // client.
 func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
 	f.lock.Lock()
@@ -158,6 +191,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
 	}
 
 	informer = newFunc(f.client, resyncPeriod)
+	informer.SetTransform(f.transform)
 	f.informers[informerType] = informer
 
 	return informer
@@ -165,11 +199,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
 
 // SharedInformerFactory provides shared informers for resources in all known
 // API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.Background()
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.WaitForStop()    // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done())          // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Creating informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
 type SharedInformerFactory interface {
 	internalinterfaces.SharedInformerFactory
-	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+	Start(stopCh <-chan struct{})
+
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
+	Shutdown()
+
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
 	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
 
+	// ForResource gives generic access to a shared informer of the matching type.
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+	// InformerFor returns the SharedIndexInformer for obj using an internal
+	// client.
+	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
 	Kilo() kilo.Interface
 }
 

+ 1 - 1
pkg/k8s/informers/generic.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/informers/internalinterfaces/factory_interfaces.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/informers/kilo/interface.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/informers/kilo/v1alpha1/interface.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/informers/kilo/v1alpha1/peer.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/listers/kilo/v1alpha1/expansion_generated.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 1 - 1
pkg/k8s/listers/kilo/v1alpha1/peer.go

@@ -1,4 +1,4 @@
-// Copyright 2024 the Kilo authors
+// Copyright 2026 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

+ 6 - 6
pkg/mesh/cni.go

@@ -52,13 +52,13 @@ func (m *Mesh) updateCNIConfig() {
 	n := m.nodes[m.hostname]
 	m.mu.Unlock()
 	if n == nil || n.Subnet == nil {
-		level.Debug(m.logger).Log("msg", "local node does not have a valid subnet assigned")
+		_ = level.Debug(m.logger).Log("msg", "local node does not have a valid subnet assigned")
 		return
 	}
 
 	cidr, err := getCIDRFromCNI(m.cniPath)
 	if err != nil {
-		level.Warn(m.logger).Log("msg", "failed to get CIDR from CNI file; overwriting it", "err", err.Error())
+		_ = level.Warn(m.logger).Log("msg", "failed to get CIDR from CNI file; overwriting it", "err", err.Error())
 	}
 
 	if ipNetsEqual(cidr, n.Subnet) {
@@ -66,14 +66,14 @@ func (m *Mesh) updateCNIConfig() {
 	}
 
 	if cidr == nil {
-		level.Info(m.logger).Log("msg", "CIDR in CNI file is empty")
+		_ = level.Info(m.logger).Log("msg", "CIDR in CNI file is empty")
 	} else {
-		level.Info(m.logger).Log("msg", "CIDR in CNI file is not empty; overwriting", "old", cidr.String(), "new", n.Subnet.String())
+		_ = level.Info(m.logger).Log("msg", "CIDR in CNI file is not empty; overwriting", "old", cidr.String(), "new", n.Subnet.String())
 	}
 
-	level.Info(m.logger).Log("msg", "setting CIDR in CNI file", "CIDR", n.Subnet.String())
+	_ = level.Info(m.logger).Log("msg", "setting CIDR in CNI file", "CIDR", n.Subnet.String())
 	if err := setCIDRInCNI(m.cniPath, n.Subnet); err != nil {
-		level.Warn(m.logger).Log("msg", "failed to set CIDR in CNI file", "err", err.Error())
+		_ = level.Warn(m.logger).Log("msg", "failed to set CIDR in CNI file", "err", err.Error())
 	}
 }
 

+ 45 - 45
pkg/mesh/mesh.go

@@ -100,7 +100,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 	privateB = bytes.Trim(privateB, "\n")
 	private, err := wgtypes.ParseKey(string(privateB))
 	if err != nil {
-		level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
+		_ = level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
 		if private, err = wgtypes.GeneratePrivateKey(); err != nil {
 			return nil, err
 		}
@@ -150,10 +150,10 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 				return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
 			}
 		}
-		level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
+		_ = level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
 	} else {
 		enc = encapsulation.Noop(enc.Strategy())
-		level.Debug(logger).Log("msg", "running without a private IP address")
+		_ = level.Debug(logger).Log("msg", "running without a private IP address")
 	}
 	var externalIP *net.IPNet
 	if prioritisePrivateAddr && privateIP != nil {
@@ -161,7 +161,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 	} else {
 		externalIP = publicIP
 	}
-	level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
+	_ = level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
 	ipTables, err := iptables.New(iptables.WithRegisterer(registerer), iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
 	if err != nil {
 		return nil, fmt.Errorf("failed to IP tables controller: %v", err)
@@ -235,7 +235,7 @@ func (m *Mesh) Run(ctx context.Context) error {
 			m.nodes[m.hostname] = n
 			m.updateCNIConfig()
 		} else {
-			level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
+			_ = level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
 		}
 	}
 	if err := m.Peers().Init(ctx); err != nil {
@@ -259,7 +259,7 @@ func (m *Mesh) Run(ctx context.Context) error {
 				return
 			}
 			if err != nil {
-				level.Error(m.logger).Log("error", err)
+				_ = level.Error(m.logger).Log("error", err)
 				m.errorCounter.WithLabelValues("run").Inc()
 			}
 		}
@@ -296,9 +296,9 @@ func (m *Mesh) Run(ctx context.Context) error {
 
 func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	logger := log.With(m.logger, "event", e.Type)
-	level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
+	_ = level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
 	if isSelf(m.hostname, e.Node) {
-		level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
+		_ = level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
 		m.handleLocal(ctx, e.Node)
 		return
 	}
@@ -307,7 +307,7 @@ func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	if !e.Node.Ready() {
 		// Trace non ready nodes with their presence in the mesh.
 		_, ok := m.nodes[e.Node.Name]
-		level.Debug(logger).Log("msg", "received non ready node", "node", e.Node, "in-mesh", ok)
+		_ = level.Debug(logger).Log("msg", "received non ready node", "node", e.Node, "in-mesh", ok)
 	}
 	switch e.Type {
 	case AddEvent:
@@ -325,14 +325,14 @@ func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	}
 	m.mu.Unlock()
 	if diff {
-		level.Info(logger).Log("node", e.Node)
+		_ = level.Info(logger).Log("node", e.Node)
 		m.applyTopology()
 	}
 }
 
 func (m *Mesh) syncPeers(e *PeerEvent) {
 	logger := log.With(m.logger, "event", e.Type)
-	level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
+	_ = level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
 	var diff bool
 	m.mu.Lock()
 	// Peers are indexed by public key.
@@ -340,7 +340,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
 	if !e.Peer.Ready() {
 		// Trace non ready peer with their presence in the mesh.
 		_, ok := m.peers[key]
-		level.Debug(logger).Log("msg", "received non ready peer", "peer", e.Peer, "in-mesh", ok)
+		_ = level.Debug(logger).Log("msg", "received non ready peer", "peer", e.Peer, "in-mesh", ok)
 	}
 	switch e.Type {
 	case AddEvent:
@@ -360,7 +360,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
 	}
 	m.mu.Unlock()
 	if diff {
-		level.Info(logger).Log("peer", e.Peer)
+		_ = level.Info(logger).Log("peer", e.Peer)
 		m.applyTopology()
 	}
 }
@@ -372,26 +372,26 @@ func (m *Mesh) checkIn(ctx context.Context) {
 	defer m.mu.Unlock()
 	n := m.nodes[m.hostname]
 	if n == nil {
-		level.Debug(m.logger).Log("msg", "no local node found in backend")
+		_ = level.Debug(m.logger).Log("msg", "no local node found in backend")
 		return
 	}
 	oldTime := n.LastSeen
 	n.LastSeen = time.Now().Unix()
 	if err := m.Nodes().Set(ctx, m.hostname, n); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
 		m.errorCounter.WithLabelValues("checkin").Inc()
 		// Revert time.
 		n.LastSeen = oldTime
 		return
 	}
-	level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
+	_ = level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
 }
 
 func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
 	// Allow the IPs to be overridden.
 	if !n.Endpoint.Ready() {
 		e := wireguard.NewEndpoint(m.externalIP.IP, m.port)
-		level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
+		_ = level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
 		n.Endpoint = e
 	}
 	if n.InternalIP == nil && !n.NoInternalIP {
@@ -417,13 +417,13 @@ func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
 		Granularity:         m.granularity,
 	}
 	if !nodesAreEqual(n, local) {
-		level.Debug(m.logger).Log("msg", "local node differs from backend")
+		_ = level.Debug(m.logger).Log("msg", "local node differs from backend")
 		if err := m.Nodes().Set(ctx, m.hostname, local); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
 			m.errorCounter.WithLabelValues("local").Inc()
 			return
 		}
-		level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
+		_ = level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
 	}
 	m.mu.Lock()
 
@@ -446,7 +446,7 @@ func (m *Mesh) applyTopology() {
 	defer m.mu.Unlock()
 	// If we can't resolve an endpoint, then fail and retry later.
 	if err := m.resolveEndpoints(); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -482,23 +482,23 @@ func (m *Mesh) applyTopology() {
 	// Find the Kilo interface name.
 	link, err := linkByIndex(m.kiloIface)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
 
 	wgClient, err := wgctrl.New()
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
-	defer wgClient.Close()
+	defer func() { _ = wgClient.Close() }()
 
 	// wgDevice is the current configuration of the wg interface.
 	wgDevice, err := wgClient.Device(m.kiloIfaceName)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -507,7 +507,7 @@ func (m *Mesh) applyTopology() {
 	nodes[m.hostname].DiscoveredEndpoints = natEndpoints
 	t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port(), m.priv, m.subnet, m.serviceCIDRs, nodes[m.hostname].PersistentKeepalive, m.logger)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -541,20 +541,20 @@ func (m *Mesh) applyTopology() {
 		// If we are handling local routes, ensure the local
 		// tunnel has an IP address.
 		if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
 	}
 	if err := m.ipTables.Set(ipRules); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
 	if t.leader {
 		m.leaderGuage.Set(1)
 		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
@@ -563,24 +563,24 @@ func (m *Mesh) applyTopology() {
 		conf := t.Conf()
 		equal, diff := conf.Equal(wgDevice)
 		if !equal {
-			level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff)
-			level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig())
+			_ = level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff)
+			_ = level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig())
 			if err := wgClient.ConfigureDevice(m.kiloIfaceName, conf.WGConfig()); err != nil {
-				level.Error(m.logger).Log("error", err)
+				_ = level.Error(m.logger).Log("error", err)
 				m.errorCounter.WithLabelValues("apply").Inc()
 				return
 			}
 		}
 		if err := iproute.Set(m.kiloIface, true); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
 	} else {
 		m.leaderGuage.Set(0)
-		level.Debug(m.logger).Log("msg", "local node is not the leader")
+		_ = level.Debug(m.logger).Log("msg", "local node is not the leader")
 		if err := iproute.Set(m.kiloIface, false); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
@@ -589,23 +589,23 @@ func (m *Mesh) applyTopology() {
 	// on the WireGuard interface.
 	routes, rules := t.Routes(link.Attrs().Name, m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
 	if err := m.table.Set(routes, rules); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 	}
 }
 
 func (m *Mesh) cleanUp() {
 	if err := m.ipTables.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 	if err := m.table.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 	if m.cleanUpIface {
 		if err := iproute.RemoveInterface(m.kiloIface); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
@@ -613,7 +613,7 @@ func (m *Mesh) cleanUp() {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
 		if err := m.Nodes().CleanUp(ctx, m.hostname); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
@@ -621,12 +621,12 @@ func (m *Mesh) cleanUp() {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
 		if err := m.Peers().CleanUp(ctx, m.hostname); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
 	if err := m.enc.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 }
@@ -694,7 +694,7 @@ func nodesAreEqual(a, b *Node) bool {
 }
 
 func peersAreEqual(a, b *Peer) bool {
-	if !(a != nil) == (b != nil) {
+	if (a != nil) != (b != nil) {
 		return false
 	}
 	if a == b {
@@ -813,7 +813,7 @@ func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *
 	}
 	for _, n := range nodes {
 		if peer, ok := keys[n.Key.String()]; ok && n.PersistentKeepalive != time.Duration(0) {
-			level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime)
+			_ = level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime)
 			// Don't update the endpoint, if there was never any handshake.
 			if !peer.LastHandshakeTime.Equal(time.Time{}) {
 				natEndpoints[n.Key.String()] = peer.Endpoint
@@ -827,6 +827,6 @@ func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *
 			}
 		}
 	}
-	level.Debug(logger).Log("msg", "Discovered WireGuard NAT Endpoints", "DiscoveredEndpoints", natEndpoints)
+	_ = level.Debug(logger).Log("msg", "Discovered WireGuard NAT Endpoints", "DiscoveredEndpoints", natEndpoints)
 	return natEndpoints
 }

+ 6 - 6
pkg/mesh/topology.go

@@ -193,7 +193,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
 			privateIPs:          privateIPs,
 			allowedLocationIPs:  allowedLocationIPs,
 		})
-		level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
+		_ = level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
 
 	}
 	// Sort the Topology segments so the result is stable.
@@ -241,7 +241,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
 		segment.allowedLocationIPs = t.filterAllowedLocationIPs(segment.allowedLocationIPs, segment.location)
 	}
 
-	level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
+	_ = level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
 	return &t, nil
 }
 
@@ -257,7 +257,7 @@ CheckIPs:
 			if location != s.location {
 				for _, i := range s.allowedLocationIPs {
 					if intersect(ip, i) {
-						level.Warn(t.logger).Log("msg", "overlapping allowed location IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+						_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 						continue CheckIPs
 					}
 				}
@@ -265,14 +265,14 @@ CheckIPs:
 			// Check if allowed location IPs intersect with the allowed IPs.
 			for _, i := range s.allowedIPs {
 				if intersect(ip, i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with allowed IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with allowed IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 					continue CheckIPs
 				}
 			}
 			// Check if allowed location IPs intersect with the private IPs of the segment.
 			for _, i := range s.privateIPs {
 				if ip.Contains(i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with privateIP", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with privateIP", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 					continue CheckIPs
 				}
 			}
@@ -281,7 +281,7 @@ CheckIPs:
 		for _, p := range t.peers {
 			for _, i := range p.AllowedIPs {
 				if intersect(ip, i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with peer IPnet", "IP", ip.String(), "IP2", i.String(), "peer", p.Name)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with peer IPnet", "IP", ip.String(), "IP2", i.String(), "peer", p.Name)
 					continue CheckIPs
 				}
 			}

+ 2 - 2
pkg/route/route.go

@@ -109,7 +109,7 @@ func (t *Table) Run(stop <-chan struct{}) (<-chan error, error) {
 			// Watch for deleted routes to reconcile this table's routes.
 			case unix.RTM_DELROUTE:
 				// Filter out invalid routes.
-				if e.Route.Dst == nil {
+				if e.Dst == nil {
 					continue
 				}
 				t.mu.Lock()
@@ -118,7 +118,7 @@ func (t *Table) Run(stop <-chan struct{}) (<-chan error, error) {
 					case *netlink.Route:
 						// If any deleted route's destination matches a destination
 						// in the table, reset the corresponding route just in case.
-						if r.Dst.IP.Equal(e.Route.Dst.IP) && r.Dst.Mask.String() == e.Route.Dst.Mask.String() {
+						if r.Dst.IP.Equal(e.Dst.IP) && r.Dst.Mask.String() == e.Dst.Mask.String() {
 							if err := t.addRoute(r); err != nil {
 								nonBlockingSend(t.errors, fmt.Errorf("failed add route: %v", err))
 							}

+ 0 - 8
vendor/github.com/cespare/xxhash/v2/.travis.yml

@@ -1,8 +0,0 @@
-language: go
-go:
-  - "1.x"
-  - master
-env:
-  - TAGS=""
-  - TAGS="-tags purego"
-script: go test $TAGS -v ./...

+ 21 - 16
vendor/github.com/cespare/xxhash/v2/README.md

@@ -1,10 +1,9 @@
 # xxhash
 
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
 
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
 high-quality hashing algorithm that is much faster than anything in the Go
 standard library.
 
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
 func (*Digest) Sum64() uint64
 ```
 
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
 
 ## Compatibility
 
@@ -45,23 +47,26 @@ I recommend using the latest release of Go.
 Here are some quick benchmarks comparing the pure-Go and assembly
 implementations of Sum64.
 
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
-| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego    | asm       |
+| ---------- | --------- | --------- |
+| 4 B        |  1.3 GB/s |  1.2 GB/s |
+| 16 B       |  2.9 GB/s |  3.5 GB/s |
+| 100 B      |  6.9 GB/s |  8.1 GB/s |
+| 4 KB       | 11.7 GB/s | 16.7 GB/s |
+| 10 MB      | 12.0 GB/s | 17.3 GB/s |
 
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
 
 ```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 ```
 
 ## Projects using this package
 
 - [InfluxDB](https://github.com/influxdata/influxdb)
 - [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)

+ 10 - 0
vendor/github.com/cespare/xxhash/v2/testall.sh

@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego

+ 20 - 28
vendor/github.com/cespare/xxhash/v2/xxhash.go

@@ -16,19 +16,11 @@ const (
 	prime5 uint64 = 2870177450012600261
 )
 
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
 type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
 
 // Reset clears the Digest's state so that it can be reused.
 func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
+	d.v1 = primes[0] + prime2
 	d.v2 = prime2
 	d.v3 = 0
-	d.v4 = -prime1v
+	d.v4 = -primes[0]
 	d.total = 0
 	d.n = 0
 }
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
 	n = len(b)
 	d.total += uint64(n)
 
+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
 	if d.n+n < 32 {
 		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
+		copy(memleft, b)
 		d.n += n
 		return
 	}
 
 	if d.n > 0 {
 		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
+		c := copy(memleft, b)
 		d.v1 = round(d.v1, u64(d.mem[0:8]))
 		d.v2 = round(d.v2, u64(d.mem[8:16]))
 		d.v3 = round(d.v3, u64(d.mem[16:24]))
 		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
+		b = b[c:]
 		d.n = 0
 	}
 
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
 
 	h += d.total
 
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
+	b := d.mem[:d.n&(len(d.mem)-1)]
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
-		i++
 	}
 
 	h ^= h >> 33
@@ -193,7 +186,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
 	b, d.v4 = consumeUint64(b)
 	b, d.total = consumeUint64(b)
 	copy(d.mem[:], b)
-	b = b[len(d.mem):]
 	d.n = int(d.total % uint64(len(d.mem)))
 	return nil
 }

+ 165 - 171
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s

@@ -1,215 +1,209 @@
+//go:build !appengine && gc && !purego
 // +build !appengine
 // +build gc
 // +build !purego
 
 #include "textflag.h"
 
-// Register allocation:
-// AX	h
-// CX	pointer to advance through b
-// DX	n
-// BX	loop end
-// R8	v1, k1
-// R9	v2
-// R10	v3
-// R11	v4
-// R12	tmp
-// R13	prime1v
-// R14	prime2v
-// R15	prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
-	MOVQ  (CX), R12 \
-	ADDQ  $8, CX    \
-	IMULQ R14, R12  \
-	ADDQ  R12, r    \
-	ROLQ  $31, r    \
-	IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
-	IMULQ R14, val \
-	ROLQ  $31, val \
-	IMULQ R13, val \
-	XORQ  val, acc \
-	IMULQ R13, acc \
-	ADDQ  R15, acc
+// Registers:
+#define h      AX
+#define d      AX
+#define p      SI // pointer to advance through b
+#define n      DX
+#define end    BX // loop end
+#define v1     R8
+#define v2     R9
+#define v3     R10
+#define v4     R11
+#define x      R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+	IMULQ prime2, x   \
+	ADDQ  x, acc      \
+	ROLQ  $31, acc    \
+	IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	IMULQ prime2, x \
+	ROLQ  $31, x    \
+	IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+	round0(x)         \
+	XORQ  x, acc      \
+	IMULQ prime1, acc \
+	ADDQ  prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop:  \
+	MOVQ +0(p), x  \
+	round(v1, x)   \
+	MOVQ +8(p), x  \
+	round(v2, x)   \
+	MOVQ +16(p), x \
+	round(v3, x)   \
+	MOVQ +24(p), x \
+	round(v4, x)   \
+	ADDQ $32, p    \
+	CMPQ p, end    \
+	JLE  loop
 
 // func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
 	// Load fixed primes.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), R15
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
+	MOVQ ·primes+24(SB), prime4
 
 	// Load slice.
-	MOVQ b_base+0(FP), CX
-	MOVQ b_len+8(FP), DX
-	LEAQ (CX)(DX*1), BX
+	MOVQ b_base+0(FP), p
+	MOVQ b_len+8(FP), n
+	LEAQ (p)(n*1), end
 
 	// The first loop limit will be len(b)-32.
-	SUBQ $32, BX
+	SUBQ $32, end
 
 	// Check whether we have at least one block.
-	CMPQ DX, $32
+	CMPQ n, $32
 	JLT  noBlocks
 
 	// Set up initial state (v1, v2, v3, v4).
-	MOVQ R13, R8
-	ADDQ R14, R8
-	MOVQ R14, R9
-	XORQ R10, R10
-	XORQ R11, R11
-	SUBQ R13, R11
-
-	// Loop until CX > BX.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ CX, BX
-	JLE  blockLoop
-
-	MOVQ R8, AX
-	ROLQ $1, AX
-	MOVQ R9, R12
-	ROLQ $7, R12
-	ADDQ R12, AX
-	MOVQ R10, R12
-	ROLQ $12, R12
-	ADDQ R12, AX
-	MOVQ R11, R12
-	ROLQ $18, R12
-	ADDQ R12, AX
-
-	mergeRound(AX, R8)
-	mergeRound(AX, R9)
-	mergeRound(AX, R10)
-	mergeRound(AX, R11)
+	MOVQ prime1, v1
+	ADDQ prime2, v1
+	MOVQ prime2, v2
+	XORQ v3, v3
+	XORQ v4, v4
+	SUBQ prime1, v4
+
+	blockLoop()
+
+	MOVQ v1, h
+	ROLQ $1, h
+	MOVQ v2, x
+	ROLQ $7, x
+	ADDQ x, h
+	MOVQ v3, x
+	ROLQ $12, x
+	ADDQ x, h
+	MOVQ v4, x
+	ROLQ $18, x
+	ADDQ x, h
+
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
 
 	JMP afterBlocks
 
 noBlocks:
-	MOVQ ·prime5v(SB), AX
+	MOVQ ·primes+32(SB), h
 
 afterBlocks:
-	ADDQ DX, AX
-
-	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
-	ADDQ $24, BX
-
-	CMPQ CX, BX
-	JG   fourByte
-
-wordLoop:
-	// Calculate k1.
-	MOVQ  (CX), R8
-	ADDQ  $8, CX
-	IMULQ R14, R8
-	ROLQ  $31, R8
-	IMULQ R13, R8
-
-	XORQ  R8, AX
-	ROLQ  $27, AX
-	IMULQ R13, AX
-	ADDQ  R15, AX
-
-	CMPQ CX, BX
-	JLE  wordLoop
-
-fourByte:
-	ADDQ $4, BX
-	CMPQ CX, BX
-	JG   singles
-
-	MOVL  (CX), R8
-	ADDQ  $4, CX
-	IMULQ R13, R8
-	XORQ  R8, AX
-
-	ROLQ  $23, AX
-	IMULQ R14, AX
-	ADDQ  ·prime3v(SB), AX
-
-singles:
-	ADDQ $4, BX
-	CMPQ CX, BX
+	ADDQ n, h
+
+	ADDQ $24, end
+	CMPQ p, end
+	JG   try4
+
+loop8:
+	MOVQ  (p), x
+	ADDQ  $8, p
+	round0(x)
+	XORQ  x, h
+	ROLQ  $27, h
+	IMULQ prime1, h
+	ADDQ  prime4, h
+
+	CMPQ p, end
+	JLE  loop8
+
+try4:
+	ADDQ $4, end
+	CMPQ p, end
+	JG   try1
+
+	MOVL  (p), x
+	ADDQ  $4, p
+	IMULQ prime1, x
+	XORQ  x, h
+
+	ROLQ  $23, h
+	IMULQ prime2, h
+	ADDQ  ·primes+16(SB), h
+
+try1:
+	ADDQ $4, end
+	CMPQ p, end
 	JGE  finalize
 
-singlesLoop:
-	MOVBQZX (CX), R12
-	ADDQ    $1, CX
-	IMULQ   ·prime5v(SB), R12
-	XORQ    R12, AX
+loop1:
+	MOVBQZX (p), x
+	ADDQ    $1, p
+	IMULQ   ·primes+32(SB), x
+	XORQ    x, h
+	ROLQ    $11, h
+	IMULQ   prime1, h
 
-	ROLQ  $11, AX
-	IMULQ R13, AX
-
-	CMPQ CX, BX
-	JL   singlesLoop
+	CMPQ p, end
+	JL   loop1
 
 finalize:
-	MOVQ  AX, R12
-	SHRQ  $33, R12
-	XORQ  R12, AX
-	IMULQ R14, AX
-	MOVQ  AX, R12
-	SHRQ  $29, R12
-	XORQ  R12, AX
-	IMULQ ·prime3v(SB), AX
-	MOVQ  AX, R12
-	SHRQ  $32, R12
-	XORQ  R12, AX
-
-	MOVQ AX, ret+24(FP)
+	MOVQ  h, x
+	SHRQ  $33, x
+	XORQ  x, h
+	IMULQ prime2, h
+	MOVQ  h, x
+	SHRQ  $29, x
+	XORQ  x, h
+	IMULQ ·primes+16(SB), h
+	MOVQ  h, x
+	SHRQ  $32, x
+	XORQ  x, h
+
+	MOVQ h, ret+24(FP)
 	RET
 
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
 // func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
 	// Load fixed primes needed for round.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
 
 	// Load slice.
-	MOVQ b_base+8(FP), CX
-	MOVQ b_len+16(FP), DX
-	LEAQ (CX)(DX*1), BX
-	SUBQ $32, BX
+	MOVQ b_base+8(FP), p
+	MOVQ b_len+16(FP), n
+	LEAQ (p)(n*1), end
+	SUBQ $32, end
 
 	// Load vN from d.
-	MOVQ d+0(FP), AX
-	MOVQ 0(AX), R8   // v1
-	MOVQ 8(AX), R9   // v2
-	MOVQ 16(AX), R10 // v3
-	MOVQ 24(AX), R11 // v4
+	MOVQ s+0(FP), d
+	MOVQ 0(d), v1
+	MOVQ 8(d), v2
+	MOVQ 16(d), v3
+	MOVQ 24(d), v4
 
 	// We don't need to check the loop condition here; this function is
 	// always called with at least one block of data to process.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ CX, BX
-	JLE  blockLoop
+	blockLoop()
 
 	// Copy vN back to d.
-	MOVQ R8, 0(AX)
-	MOVQ R9, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R11, 24(AX)
-
-	// The number of bytes written is CX minus the old base pointer.
-	SUBQ b_base+8(FP), CX
-	MOVQ CX, ret+32(FP)
+	MOVQ v1, 0(d)
+	MOVQ v2, 8(d)
+	MOVQ v3, 16(d)
+	MOVQ v4, 24(d)
+
+	// The number of bytes written is p minus the old base pointer.
+	SUBQ b_base+8(FP), p
+	MOVQ p, ret+32(FP)
 
 	RET

+ 183 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s

@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest	R1
+#define h	R2 // return value
+#define p	R3 // input pointer
+#define n	R4 // input length
+#define nblocks	R5 // n / 32
+#define prime1	R7
+#define prime2	R8
+#define prime3	R9
+#define prime4	R10
+#define prime5	R11
+#define v1	R12
+#define v2	R13
+#define v3	R14
+#define v4	R15
+#define x1	R20
+#define x2	R21
+#define x3	R22
+#define x4	R23
+
+#define round(acc, x) \
+	MADD prime2, acc, x, acc \
+	ROR  $64-31, acc         \
+	MUL  prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	MUL prime2, x \
+	ROR $64-31, x \
+	MUL prime1, x
+
+#define mergeRound(acc, x) \
+	round0(x)                     \
+	EOR  x, acc                   \
+	MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+	LSR     $5, n, nblocks  \
+	PCALIGN $16             \
+	loop:                   \
+	LDP.P   16(p), (x1, x2) \
+	LDP.P   16(p), (x3, x4) \
+	round(v1, x1)           \
+	round(v2, x2)           \
+	round(v3, x3)           \
+	round(v4, x4)           \
+	SUB     $1, nblocks     \
+	CBNZ    nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+	LDP b_base+0(FP), (p, n)
+
+	LDP  ·primes+0(SB), (prime1, prime2)
+	LDP  ·primes+16(SB), (prime3, prime4)
+	MOVD ·primes+32(SB), prime5
+
+	CMP  $32, n
+	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+	BLT  afterLoop
+
+	ADD  prime1, prime2, v1
+	MOVD prime2, v2
+	MOVD $0, v3
+	NEG  prime1, v4
+
+	blockLoop()
+
+	ROR $64-1, v1, x1
+	ROR $64-7, v2, x2
+	ADD x1, x2
+	ROR $64-12, v3, x3
+	ROR $64-18, v4, x4
+	ADD x3, x4
+	ADD x2, x4, h
+
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
+
+afterLoop:
+	ADD n, h
+
+	TBZ   $4, n, try8
+	LDP.P 16(p), (x1, x2)
+
+	round0(x1)
+
+	// NOTE: here and below, sequencing the EOR after the ROR (using a
+	// rotated register) is worth a small but measurable speedup for small
+	// inputs.
+	ROR  $64-27, h
+	EOR  x1 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+	round0(x2)
+	ROR  $64-27, h
+	EOR  x2 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+try8:
+	TBZ    $3, n, try4
+	MOVD.P 8(p), x1
+
+	round0(x1)
+	ROR  $64-27, h
+	EOR  x1 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+try4:
+	TBZ     $2, n, try2
+	MOVWU.P 4(p), x2
+
+	MUL  prime1, x2
+	ROR  $64-23, h
+	EOR  x2 @> 64-23, h, h
+	MADD h, prime3, prime2, h
+
+try2:
+	TBZ     $1, n, try1
+	MOVHU.P 2(p), x3
+	AND     $255, x3, x1
+	LSR     $8, x3, x2
+
+	MUL prime5, x1
+	ROR $64-11, h
+	EOR x1 @> 64-11, h, h
+	MUL prime1, h
+
+	MUL prime5, x2
+	ROR $64-11, h
+	EOR x2 @> 64-11, h, h
+	MUL prime1, h
+
+try1:
+	TBZ   $0, n, finalize
+	MOVBU (p), x4
+
+	MUL prime5, x4
+	ROR $64-11, h
+	EOR x4 @> 64-11, h, h
+	MUL prime1, h
+
+finalize:
+	EOR h >> 33, h
+	MUL prime2, h
+	EOR h >> 29, h
+	MUL prime3, h
+	EOR h >> 32, h
+
+	MOVD h, ret+24(FP)
+	RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+	LDP ·primes+0(SB), (prime1, prime2)
+
+	// Load state. Assume v[1-4] are stored contiguously.
+	MOVD d+0(FP), digest
+	LDP  0(digest), (v1, v2)
+	LDP  16(digest), (v3, v4)
+
+	LDP b_base+8(FP), (p, n)
+
+	blockLoop()
+
+	// Store updated state.
+	STP (v1, v2), 0(digest)
+	STP (v3, v4), 16(digest)
+
+	BIC  $31, n
+	MOVD n, ret+32(FP)
+	RET

+ 2 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go → vendor/github.com/cespare/xxhash/v2/xxhash_asm.go

@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
 // +build !appengine
 // +build gc
 // +build !purego

+ 11 - 11
vendor/github.com/cespare/xxhash/v2/xxhash_other.go

@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
 
 package xxhash
 
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
 	var h uint64
 
 	if n >= 32 {
-		v1 := prime1v + prime2
+		v1 := primes[0] + prime2
 		v2 := prime2
 		v3 := uint64(0)
-		v4 := -prime1v
+		v4 := -primes[0]
 		for len(b) >= 32 {
 			v1 = round(v1, u64(b[0:8:len(b)]))
 			v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
 
 	h += uint64(n)
 
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
 	}
 

+ 1 - 0
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go

@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 // This file contains the safe implementations of otherwise unsafe-using code.

+ 33 - 21
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go

@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 // This file encapsulates usage of unsafe.
@@ -6,41 +7,52 @@
 package xxhash
 
 import (
-	"reflect"
 	"unsafe"
 )
 
-// Notes:
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
 //
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
 //
-// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
 //
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+// See https://github.com/golang/go/issues/42739 for discussion.
 
 // Sum64String computes the 64-bit xxHash digest of s.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
 	return Sum64(b)
 }
 
 // WriteString adds more data to d. It always returns len(s), nil.
 // It may be faster than Write([]byte(s)) by avoiding a copy.
 func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
 }

+ 71 - 0
vendor/github.com/emicklei/go-restful/v3/.gitignore

@@ -0,0 +1,71 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+restful.html
+
+*.out
+
+tmp.prof
+
+go-restful.test
+
+examples/restful-basic-authentication
+
+examples/restful-encoding-filter
+
+examples/restful-filters
+
+examples/restful-hello-world
+
+examples/restful-resource-functions
+
+examples/restful-serve-static
+
+examples/restful-user-service
+
+*.DS_Store
+examples/restful-user-resource
+
+examples/restful-multi-containers
+
+examples/restful-form-handling
+
+examples/restful-CORS-filter
+
+examples/restful-options-filter
+
+examples/restful-curly-router
+
+examples/restful-cpuprofiler-service
+
+examples/restful-pre-post-filters
+
+curly.prof
+
+examples/restful-NCSA-logging
+
+examples/restful-html-template
+
+s.html
+restful-path-tail
+.idea

+ 1 - 0
vendor/github.com/emicklei/go-restful/v3/.goconvey

@@ -0,0 +1 @@
+ignore

+ 13 - 0
vendor/github.com/emicklei/go-restful/v3/.travis.yml

@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.x
+
+before_install:
+  - go test -v
+
+script:
+  - go test -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)

+ 396 - 0
vendor/github.com/emicklei/go-restful/v3/CHANGES.md

@@ -0,0 +1,396 @@
+# Change history of go-restful
+
+## [v3.11.0] - 2023-08-19
+
+- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
+
+## [v3.10.2] - 2023-03-09 - DO NOT USE
+
+- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0
+  see comment in Readme how to customize this behaviour.
+
+## [v3.10.1] - 2022-11-19 - DO NOT USE
+
+- fix broken 3.10.0 by using path package for joining paths
+
+## [v3.10.0] - 2022-10-11 - BROKEN
+
+- changed tokenizer to match std route match behavior; do not trimright the path (#511)
+- Add MIME_ZIP (#512)
+- Add MIME_ZIP and HEADER_ContentDisposition (#513)
+- Changed how to get query parameter issue #510
+
+## [v3.9.0] - 2022-07-21
+
+- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
+
+## [v3.8.0] - 2022-06-06
+
+- use exact matching of allowed domain entries, issue #489 (#493)
+	- this changes fixes [security] Authorization Bypass Through User-Controlled Key
+	  by changing the behaviour of the AllowedDomains setting in the CORS filter.
+	  To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc
+	  callback mechanism which is called when a simple domain match fails. 
+- add test and fix for POST without body and Content-type, issue #492 (#496)
+- [Minor] Bad practice to have a mix of Receiver types. (#491)
+
+## [v3.7.2] - 2021-11-24
+
+- restored FilterChain (#482 by SVilgelm)
+
+
+## [v3.7.1] - 2021-10-04
+
+- fix problem with contentEncodingEnabled setting (#479)
+
+## [v3.7.0] - 2021-09-24
+
+- feat(parameter): adds additional openapi mappings (#478)
+
+## [v3.6.0] - 2021-09-18
+
+- add support for vendor extensions (#477 thx erraggy)
+
+## [v3.5.2] - 2021-07-14
+
+- fix removing absent route from webservice (#472)
+
+## [v3.5.1] - 2021-04-12
+
+- fix handling no match access selected path
+- remove obsolete field
+
+## [v3.5.0] - 2021-04-10
+
+- add check for wildcard (#463) in CORS
+- add access to Route from Request, issue #459 (#462)
+
+## [v3.4.0] - 2020-11-10
+
+- Added OPTIONS to WebService
+
+## [v3.3.2] - 2020-01-23
+
+- Fixed duplicate compression in dispatch. #449
+
+
+## [v3.3.1] - 2020-08-31
+
+- Added check on writer to prevent compression of response twice. #447
+
+## [v3.3.0] - 2020-08-19
+
+- Enable content encoding on Handle and ServeHTTP (#446)
+- List available representations in 406 body (#437)
+- Convert to string using rune() (#443)
+
+## [v3.2.0] - 2020-06-21
+
+- 405 Method Not Allowed must have Allow header (#436) (thx Bracken <abdawson@gmail.com>)
+- add field allowedMethodsWithoutContentType (#424)
+
+## [v3.1.0]
+
+- support describing response headers (#426)
+- fix openapi examples (#425)
+
+v3.0.0
+
+- fix: use request/response resulting from filter chain
+- add Go module
+  Module consumer should use github.com/emicklei/go-restful/v3 as import path
+
+v2.10.0
+
+- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>)
+- fixed static example (thanks Arthur <yang_yapo@126.com>)
+- simplify code (thanks Christian Muehlhaeuser <muesli@gmail.com>)
+- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben <amim.knabben@gmail.com>)
+
+v2.9.6
+
+- small optimization in filter code
+
+v2.11.1
+
+- fix WriteError return value (#415)
+
+v2.11.0 
+
+- allow prefix and suffix in path variable expression (#414)
+
+v2.9.6
+
+- support google custome verb (#413)
+
+v2.9.5
+
+- fix panic in Response.WriteError if err == nil
+
+v2.9.4
+
+- fix issue #400 , parsing mime type quality
+- Route Builder added option for contentEncodingEnabled (#398)
+
+v2.9.3
+
+- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
+
+v2.9.2
+
+- Reduce allocations in per-request methods to improve performance (#395)
+
+v2.9.1
+
+- Fix issue with default responses and invalid status code 0. (#393)
+
+v2.9.0
+
+- add per Route content encoding setting (overrides container setting)
+
+v2.8.0
+
+- add Request.QueryParameters()
+- add json-iterator (via build tag)
+- disable vgo module (until log is moved)
+
+v2.7.1
+
+- add vgo module
+
+v2.6.1
+
+- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
+
+v2.6.0
+
+- Make JSR 311 routing and path param processing consistent
+- Adding description to RouteBuilder.Reads()
+- Update example for Swagger12 and OpenAPI
+
+2017-09-13
+
+- added route condition functions using `.If(func)` in route building.
+
+2017-02-16
+
+- solved issue #304, make operation names unique
+
+2017-01-30
+ 
+	[IMPORTANT] For swagger users, change your import statement to:	
+	swagger "github.com/emicklei/go-restful-swagger12"
+
+- moved swagger 1.2 code to go-restful-swagger12
+- created TAG 2.0.0
+
+2017-01-27
+
+- remove defer request body close
+- expose Dispatch for testing filters and Routefunctions
+- swagger response model cannot be array 
+- created TAG 1.0.0
+
+2016-12-22
+
+- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
+
+2016-11-26
+
+- Default change! now use CurlyRouter (was RouterJSR311)
+- Default change! no more caching of request content
+- Default change! do not recover from panics
+
+2016-09-22
+
+- fix the DefaultRequestContentType feature
+
+2016-02-14
+
+- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
+- add constructors for custom entity accessors for xml and json 
+
+2015-09-27
+
+- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
+
+2015-09-25
+
+- fixed problem with changing Header after WriteHeader (issue 235)
+
+2015-09-14
+
+- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
+- added support for custom EntityReaderWriters.
+
+2015-08-06
+
+- add support for reading entities from compressed request content
+- use sync.Pool for compressors of http response and request body
+- add Description to Parameter for documentation in Swagger UI
+
+2015-03-20
+
+- add configurable logging
+
+2015-03-18
+
+- if not specified, the Operation is derived from the Route function
+
+2015-03-17
+
+- expose Parameter creation functions
+- make trace logger an interface
+- fix OPTIONSFilter
+- customize rendering of ServiceError
+- JSR311 router now handles wildcards
+- add Notes to Route
+
+2014-11-27
+
+- (api add) PrettyPrint per response. (as proposed in #167)
+
+2014-11-12
+
+- (api add) ApiVersion(.) for documentation in Swagger UI
+
+2014-11-10
+
+- (api change) struct fields tagged with "description" show up in Swagger UI
+
+2014-10-31
+
+- (api change) ReturnsError -> Returns
+- (api add)    RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
+- fix swagger nested structs
+- sort Swagger response messages by code
+
+2014-10-23
+
+- (api add) ReturnsError allows you to document Http codes in swagger
+- fixed problem with greedy CurlyRouter
+- (api add) Access-Control-Max-Age in CORS
+- add tracing functionality (injectable) for debugging purposes
+- support JSON parse 64bit int 
+- fix empty parameters for swagger
+- WebServicesUrl is now optional for swagger
+- fixed duplicate AccessControlAllowOrigin in CORS
+- (api change) expose ServeMux in container
+- (api add) added AllowedDomains in CORS
+- (api add) ParameterNamed for detailed documentation
+
+2014-04-16
+
+- (api add) expose constructor of Request for testing.
+
+2014-06-27
+
+- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons).
+
+2014-07-03
+
+- (api add) CORS can be configured with a list of allowed domains
+
+2014-03-12
+
+- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
+
+2014-02-26
+
+- (api add) Request now provides information about the matched Route, see method SelectedRoutePath 
+
+2014-02-17
+
+- (api change) renamed parameter constants (go-lint checks)
+
+2014-01-10
+
+- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+2014-01-07
+
+- (api change) Write* methods in Response now return the error or nil.
+- added example of serving HTML from a Go template.
+- fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+2013-11-13
+
+- (api add) Response knows how many bytes are written to the response body.
+
+2013-10-29
+
+- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
+
+2013-10-04
+
+- (api add) Response knows what HTTP status has been written
+- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
+
+2013-09-12
+
+- (api change) Router interface simplified
+- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
+
+2013-08-05
+ - add OPTIONS support
+ - add CORS support
+
+2013-08-27
+
+- fixed some reported issues (see github)
+- (api change) deprecated use of WriteError; use WriteErrorString instead
+
+2014-04-15
+
+- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+2013-08-08
+
+- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+- (api add) the swagger package has be extended to have a UI per container.
+- if panic is detected then a small stack trace is printed (thanks to runner-mei)
+- (api add) WriteErrorString to Response
+
+Important API changes:
+
+- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
+- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
+ 
+ 
+2013-07-06
+
+- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+
+2013-06-19
+
+- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+
+2013-06-03
+
+- (api change) removed Dispatcher interface, hide PathExpression
+- changed receiver names of type functions to be more idiomatic Go
+
+2013-06-02
+
+- (optimize) Cache the RegExp compilation of Paths.
+
+2013-05-22
+	
+- (api add) Added support for request/response filter functions
+
+2013-05-18
+
+
+- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+
+[2012-11-14 .. 2013-05-18>
+ 
+- See https://github.com/emicklei/go-restful/commits
+
+2012-11-14
+
+- Initial commit
+
+

+ 22 - 0
vendor/github.com/emicklei/go-restful/v3/LICENSE

@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 8 - 0
vendor/github.com/emicklei/go-restful/v3/Makefile

@@ -0,0 +1,8 @@
+all: test
+
+test:
+	go vet .
+	go test -cover -v .
+
+ex:
+	find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {}

+ 112 - 0
vendor/github.com/emicklei/go-restful/v3/README.md

@@ -0,0 +1,112 @@
+go-restful
+==========
+package for building REST-style Web Services using Google Go
+
+[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
+[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
+[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
+
+- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+    
+### Usage
+
+#### Without Go Modules
+
+All versions up to `v2.*.*` (on the master) are not supporting Go modules.
+
+```
+import (
+	restful "github.com/emicklei/go-restful"
+)
+```
+
+#### Using Go Modules
+
+As of version `v3.0.0` (on the v3 branch), this package supports Go modules.
+
+```
+import (
+	restful "github.com/emicklei/go-restful/v3"
+)
+```
+
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+	Path("/users").
+	Consumes(restful.MIME_XML, restful.MIME_JSON).
+	Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+	Doc("get a user").
+	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+	Writes(User{}))		
+...
+	
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+	id := request.PathParameter("user-id")
+	...
+}
+```
+	
+[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) 
+		
+### Features
+
+- Routes for request &#8594; function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support
+- Configurable router:
+	- (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*})
+	- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
+- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
+- Response API for writing structs to JSON/XML and setting headers
+- Customizable encoding using EntityReaderWriter registration
+- Filters for intercepting the request &#8594; response flow on Service or Route level
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
+- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function
+
+## How to customize
+There are several hooks to customize the behavior of the go-restful package.
+
+- Router algorithm
+- Panic recovery
+- JSON decoder
+- Trace logging
+- Compression
+- Encoders for other serializers
+- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` 
+- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` 
+
+## Resources
+
+- [Example programs](./examples)
+- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+Type ```git shortlog -s``` for a full list of contributors.
+
+© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome.

+ 13 - 0
vendor/github.com/emicklei/go-restful/v3/SECURITY.md

@@ -0,0 +1,13 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| v3.7.x     | :white_check_mark: |
+| < v3.0.1   | :x:                |
+
+## Reporting a Vulnerability
+
+Create an Issue and put the label `[security]` in the title of the issue.
+Valid reported security issues are expected to be solved within a week.

+ 1 - 0
vendor/github.com/emicklei/go-restful/v3/Srcfile

@@ -0,0 +1 @@
+{"SkipDirs": ["examples"]}

+ 10 - 0
vendor/github.com/emicklei/go-restful/v3/bench_test.sh

@@ -0,0 +1,10 @@
+#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
+
+go test -c
+./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
+./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
+
+#go tool pprof go-restful.test tmp.prof
+go tool pprof go-restful.test curly.prof
+
+

+ 127 - 0
vendor/github.com/emicklei/go-restful/v3/compress.go

@@ -0,0 +1,127 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bufio"
+	"compress/gzip"
+	"compress/zlib"
+	"errors"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+)
+
+// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+	writer     http.ResponseWriter
+	compressor io.WriteCloser
+	encoding   string
+}
+
+// Header is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+	return c.writer.Header()
+}
+
+// WriteHeader is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+	c.writer.WriteHeader(status)
+}
+
+// Write is part of http.ResponseWriter interface
+// It is passed through the compressor
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+	if c.isCompressorClosed() {
+		return -1, errors.New("Compressing error: tried to write data using closed compressor")
+	}
+	return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+	return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+	if c.isCompressorClosed() {
+		return errors.New("Compressing error: tried to close already closed compressor")
+	}
+
+	c.compressor.Close()
+	if ENCODING_GZIP == c.encoding {
+		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+	}
+	if ENCODING_DEFLATE == c.encoding {
+		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+	}
+	// gc hint needed?
+	c.compressor = nil
+	return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+	return nil == c.compressor
+}
+
+// Hijack implements the Hijacker interface
+// This is especially useful when combining Container.EnabledContentEncoding
+// in combination with websockets (for instance gorilla/websocket)
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	hijacker, ok := c.writer.(http.Hijacker)
+	if !ok {
+		return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+	}
+	return hijacker.Hijack()
+}
+
+// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+// It also inspects the httpWriter whether its content-encoding is already set (non-empty).
+func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) {
+	if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" {
+		return false, ""
+	}
+	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+	gi := strings.Index(header, ENCODING_GZIP)
+	zi := strings.Index(header, ENCODING_DEFLATE)
+	// use in order of appearance
+	if gi == -1 {
+		return zi != -1, ENCODING_DEFLATE
+	} else if zi == -1 {
+		return gi != -1, ENCODING_GZIP
+	} else {
+		if gi < zi {
+			return true, ENCODING_GZIP
+		}
+		return true, ENCODING_DEFLATE
+	}
+}
+
+// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+	c := new(CompressingResponseWriter)
+	c.writer = httpWriter
+	var err error
+	if ENCODING_GZIP == encoding {
+		w := currentCompressorProvider.AcquireGzipWriter()
+		w.Reset(httpWriter)
+		c.compressor = w
+		c.encoding = ENCODING_GZIP
+	} else if ENCODING_DEFLATE == encoding {
+		w := currentCompressorProvider.AcquireZlibWriter()
+		w.Reset(httpWriter)
+		c.compressor = w
+		c.encoding = ENCODING_DEFLATE
+	} else {
+		return nil, errors.New("Unknown encoding:" + encoding)
+	}
+	return c, err
+}

+ 103 - 0
vendor/github.com/emicklei/go-restful/v3/compressor_cache.go

@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"compress/gzip"
+	"compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+	gzipWriters     chan *gzip.Writer
+	gzipReaders     chan *gzip.Reader
+	zlibWriters     chan *zlib.Writer
+	writersCapacity int
+	readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new, with filled cache,  BoundedCachedCompressors.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+	b := &BoundedCachedCompressors{
+		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
+		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
+		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
+		writersCapacity: writersCapacity,
+		readersCapacity: readersCapacity,
+	}
+	for ix := 0; ix < writersCapacity; ix++ {
+		b.gzipWriters <- newGzipWriter()
+		b.zlibWriters <- newZlibWriter()
+	}
+	for ix := 0; ix < readersCapacity; ix++ {
+		b.gzipReaders <- newGzipReader()
+	}
+	return b
+}
+
+// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+	var writer *gzip.Writer
+	select {
+	case writer, _ = <-b.gzipWriters:
+	default:
+		// return a new unmanaged one
+		writer = newGzipWriter()
+	}
+	return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+	// forget the unmanaged ones
+	if len(b.gzipWriters) < b.writersCapacity {
+		b.gzipWriters <- w
+	}
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+	var reader *gzip.Reader
+	select {
+	case reader, _ = <-b.gzipReaders:
+	default:
+		// return a new unmanaged one
+		reader = newGzipReader()
+	}
+	return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+	// forget the unmanaged ones
+	if len(b.gzipReaders) < b.readersCapacity {
+		b.gzipReaders <- r
+	}
+}
+
+// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+	var writer *zlib.Writer
+	select {
+	case writer, _ = <-b.zlibWriters:
+	default:
+		// return a new unmanaged one
+		writer = newZlibWriter()
+	}
+	return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+	// forget the unmanaged ones
+	if len(b.zlibWriters) < b.writersCapacity {
+		b.zlibWriters <- w
+	}
+}

+ 91 - 0
vendor/github.com/emicklei/go-restful/v3/compressor_pools.go

@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bytes"
+	"compress/gzip"
+	"compress/zlib"
+	"sync"
+)
+
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
// (The identifier misspells "Compressors"; it is kept as-is for API compatibility.)
type SyncPoolCompessors struct {
	GzipWriterPool *sync.Pool // pool of *gzip.Writer, seeded via newGzipWriter
	GzipReaderPool *sync.Pool // pool of *gzip.Reader, seeded via newGzipReader
	ZlibWriterPool *sync.Pool // pool of *zlib.Writer, seeded via newZlibWriter
}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+	return &SyncPoolCompessors{
+		GzipWriterPool: &sync.Pool{
+			New: func() interface{} { return newGzipWriter() },
+		},
+		GzipReaderPool: &sync.Pool{
+			New: func() interface{} { return newGzipReader() },
+		},
+		ZlibWriterPool: &sync.Pool{
+			New: func() interface{} { return newZlibWriter() },
+		},
+	}
+}
+
// AcquireGzipWriter fetches a *gzip.Writer from the pool; Reset it before use.
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
	return s.GzipWriterPool.Get().(*gzip.Writer)
}

// ReleaseGzipWriter returns a *gzip.Writer to the pool for reuse.
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
	s.GzipWriterPool.Put(w)
}

// AcquireGzipReader fetches a *gzip.Reader from the pool; Reset it before use.
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
	return s.GzipReaderPool.Get().(*gzip.Reader)
}

// ReleaseGzipReader returns a *gzip.Reader to the pool for reuse.
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
	s.GzipReaderPool.Put(r)
}

// AcquireZlibWriter fetches a *zlib.Writer from the pool; Reset it before use.
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
	return s.ZlibWriterPool.Get().(*zlib.Writer)
}

// ReleaseZlibWriter returns a *zlib.Writer to the pool for reuse.
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
	s.ZlibWriterPool.Put(w)
}
+
+func newGzipWriter() *gzip.Writer {
+	// create with an empty bytes writer; it will be replaced before using the gzipWriter
+	writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+	if err != nil {
+		panic(err.Error())
+	}
+	return writer
+}
+
+func newGzipReader() *gzip.Reader {
+	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+	// we can safely use currentCompressProvider because it is set on package initialization.
+	w := currentCompressorProvider.AcquireGzipWriter()
+	defer currentCompressorProvider.ReleaseGzipWriter(w)
+	b := new(bytes.Buffer)
+	w.Reset(b)
+	w.Flush()
+	w.Close()
+	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+	if err != nil {
+		panic(err.Error())
+	}
+	return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+	if err != nil {
+		panic(err.Error())
+	}
+	return writer
+}

+ 54 - 0
vendor/github.com/emicklei/go-restful/v3/compressors.go

@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"compress/gzip"
+	"compress/zlib"
+)
+
// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
	// Returns a *gzip.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireGzipWriter() *gzip.Writer

	// Releases an acquired *gzip.Writer.
	ReleaseGzipWriter(w *gzip.Writer)

	// Returns a *gzip.Reader which needs to be released later.
	AcquireGzipReader() *gzip.Reader

	// Releases an acquired *gzip.Reader.
	// (Parameter renamed w -> r: interface parameter names are documentation
	// only in Go, so this is caller-safe.)
	ReleaseGzipReader(r *gzip.Reader)

	// Returns a *zlib.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireZlibWriter() *zlib.Writer

	// Releases an acquired *zlib.Writer.
	ReleaseZlibWriter(w *zlib.Writer)
}
+
+// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+	currentCompressorProvider = NewSyncPoolCompessors()
+}
+
// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors and may be replaced through
// SetCompressorProvider.
func CurrentCompressorProvider() CompressorProvider {
	return currentCompressorProvider
}
+
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
// It panics when p is nil so the package always holds a usable provider.
// NOTE(review): the variable is not synchronized; presumably this is meant to
// be called once during setup, before serving traffic — confirm with callers.
func SetCompressorProvider(p CompressorProvider) {
	if p == nil {
		panic("cannot set compressor provider to nil")
	}
	currentCompressorProvider = p
}

+ 32 - 0
vendor/github.com/emicklei/go-restful/v3/constants.go

@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
// MIME types, HTTP header names, and content-coding tokens used throughout
// the package. The ALL_CAPS style predates Go naming conventions and is kept
// for API compatibility.
const (
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_ZIP   = "application/zip"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	// Standard HTTP and CORS header names.
	HEADER_Allow                         = "Allow"
	HEADER_Accept                        = "Accept"
	HEADER_Origin                        = "Origin"
	HEADER_ContentType                   = "Content-Type"
	HEADER_ContentDisposition            = "Content-Disposition"
	HEADER_LastModified                  = "Last-Modified"
	HEADER_AcceptEncoding                = "Accept-Encoding"
	HEADER_ContentEncoding               = "Content-Encoding"
	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"

	// Content-coding tokens for Accept-Encoding / Content-Encoding.
	ENCODING_GZIP    = "gzip"
	ENCODING_DEFLATE = "deflate"
)

Некоторые файлы не были показаны из-за большого количества измененных файлов