소스 검색

fix: golangci-lint warnings

Signed-off-by: squat <lserven@gmail.com>
squat 2 달 전
부모
커밋
04ecc6f039
13개의 변경된 파일에 166줄 추가 그리고 157줄 삭제
  1. 9 9
      cmd/docs-gen/main.go
  2. 11 2
      cmd/kg/handlers.go
  3. 3 3
      cmd/kg/main.go
  4. 24 24
      cmd/kg/webhook.go
  5. 20 20
      cmd/kgctl/connect_linux.go
  6. 1 1
      cmd/kgctl/showconf.go
  7. 7 7
      pkg/iptables/iptables.go
  8. 30 30
      pkg/k8s/backend.go
  9. 2 2
      pkg/k8s/backend_test.go
  10. 6 6
      pkg/mesh/cni.go
  11. 45 45
      pkg/mesh/mesh.go
  12. 6 6
      pkg/mesh/topology.go
  13. 2 2
      pkg/route/route.go

+ 9 - 9
cmd/docs-gen/main.go

@@ -47,7 +47,7 @@ var (
 
 func toSectionLink(name string) string {
 	name = strings.ToLower(name)
-	name = strings.Replace(name, " ", "-", -1)
+	name = strings.ReplaceAll(name, " ", "-")
 	return name
 }
 
@@ -120,7 +120,7 @@ func parseDocumentationFrom(srcs []string) []KubeTypes {
 				for _, field := range structType.Fields.List {
 					// Skip fields that are not tagged.
 					if field.Tag == nil {
-						os.Stderr.WriteString(fmt.Sprintf("Tag is nil, skipping field: %v of type %v\n", field, field.Type))
+						_, _ = fmt.Fprintf(os.Stderr, "Tag is nil, skipping field: %v of type %v\n", field, field.Type)
 						continue
 					}
 					// Treat inlined fields separately as we don't want the original types to appear in the doc.
@@ -158,7 +158,7 @@ func astFrom(filePath string) *doc.Package {
 	}
 
 	m[filePath] = f
-	apkg, _ := ast.NewPackage(fset, m, nil, nil)
+	apkg, _ := ast.NewPackage(fset, m, nil, nil) //nolint:all
 
 	return doc.New(apkg, "", 0)
 }
@@ -174,7 +174,7 @@ func fmtRawDoc(rawDoc string) string {
 	// Ignore all lines after ---
 	rawDoc = strings.Split(rawDoc, "---")[0]
 
-	for _, line := range strings.Split(rawDoc, "\n") {
+	for line := range strings.SplitSeq(rawDoc, "\n") {
 		line = strings.TrimRight(line, " ")
 		leading := strings.TrimLeft(line, " ")
 		switch {
@@ -195,11 +195,11 @@ func fmtRawDoc(rawDoc string) string {
 	}
 
 	postDoc := strings.TrimRight(buffer.String(), "\n")
-	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
-	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
-	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
-	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
-	postDoc = strings.Replace(postDoc, "|", "\\|", -1)
+	postDoc = strings.ReplaceAll(postDoc, "\\\"", "\"") // replace user's \" to "
+	postDoc = strings.ReplaceAll(postDoc, "\"", "\\\"") // Escape "
+	postDoc = strings.ReplaceAll(postDoc, "\n", "\\n")
+	postDoc = strings.ReplaceAll(postDoc, "\t", "\\t")
+	postDoc = strings.ReplaceAll(postDoc, "|", "\\|")
 
 	return postDoc
 }

+ 11 - 2
cmd/kg/handlers.go

@@ -74,6 +74,7 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	dot, err := topo.Dot()
 	if err != nil {
 		http.Error(w, fmt.Sprintf("failed to generate graph: %v", err), http.StatusInternalServerError)
+		return
 	}
 
 	buf := bytes.NewBufferString(dot)
@@ -85,7 +86,11 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	case "dot", "gv":
 		// If the raw dot data is requested, return it as string.
 		// This allows client-side rendering rather than server-side.
-		w.Write(buf.Bytes())
+		_, err = w.Write(buf.Bytes())
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to generate graph: %v", err), http.StatusInternalServerError)
+			return
+		}
 		return
 
 	case "svg", "png", "bmp", "fig", "gif", "json", "ps":
@@ -140,7 +145,11 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	w.Header().Add("content-type", mimeType)
-	w.Write(output)
+	_, err = w.Write(output)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
 }
 
 func healthHandler(w http.ResponseWriter, _ *http.Request) {

+ 3 - 3
cmd/kg/main.go

@@ -292,7 +292,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 			}
 			return nil
 		}, func(error) {
-			l.Close()
+			_ = l.Close()
 		})
 	}
 
@@ -300,7 +300,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 		ctx, cancel := context.WithCancel(context.Background())
 		// Start the mesh.
 		g.Add(func() error {
-			logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
+			_ = logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
 			if err := m.Run(ctx); err != nil {
 				return fmt.Errorf("error: Kilo exited unexpectedly: %v", err)
 			}
@@ -319,7 +319,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
 			for {
 				select {
 				case <-term:
-					logger.Log("msg", "caught interrupt; gracefully cleaning up; see you next time!")
+					_ = logger.Log("msg", "caught interrupt; gracefully cleaning up; see you next time!")
 					return nil
 				case <-cancel:
 					return nil

+ 24 - 24
cmd/kg/webhook.go

@@ -91,11 +91,11 @@ var (
 )
 
 func validationHandler(w http.ResponseWriter, r *http.Request) {
-	level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
+	_ = level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
 		errorCounter.Inc()
-		level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
+		_ = level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
 		http.Error(w, err.Error(), http.StatusBadRequest)
 		return
 	}
@@ -106,7 +106,7 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if contentType != "application/json" {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("received Content-Type=%s, expected application/json", contentType)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
@@ -117,14 +117,14 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("Request could not be decoded: %v", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
 	if *gvk != v1.SchemeGroupVersion.WithKind("AdmissionReview") {
 		errorCounter.Inc()
 		msg := "only API v1 is supported"
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
@@ -139,17 +139,17 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err := json.Unmarshal(rawExtension.Raw, &peer); err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("could not unmarshal extension to peer spec: %v:", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusBadRequest)
 		return
 	}
 
 	if err := peer.Validate(); err == nil {
-		level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
+		_ = level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.Name)
 		validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "allowed"}).Inc()
 		response.Response.Allowed = true
 	} else {
-		level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
+		_ = level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.Name)
 		validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "denied"}).Inc()
 		response.Response.Result = &metav1.Status{
 			Message: err.Error(),
@@ -160,14 +160,14 @@ func validationHandler(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		errorCounter.Inc()
 		msg := fmt.Sprintf("failed to marshal response: %v", err)
-		level.Error(logger).Log("err", msg)
+		_ = level.Error(logger).Log("err", msg)
 		http.Error(w, msg, http.StatusInternalServerError)
 		return
 	}
 
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write(res); err != nil {
-		level.Error(logger).Log("err", err, "msg", "failed to write response")
+		_ = level.Error(logger).Log("err", err, "msg", "failed to write response")
 	}
 }
 
@@ -204,27 +204,27 @@ func webhook(_ *cobra.Command, _ []string) error {
 
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
+				_ = level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
 				err := msrv.ListenAndServe()
-				level.Info(logger).Log("msg", "metrics server exited", "err", err)
+				_ = level.Info(logger).Log("msg", "metrics server exited", "err", err)
 				return err
 
 			},
 			func(err error) {
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
-				level.Info(logger).Log("msg", "shutting down metrics server gracefully")
+				_ = level.Info(logger).Log("msg", "shutting down metrics server gracefully")
 				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 				defer func() {
 					cancel()
 				}()
 				if err := msrv.Shutdown(ctx); err != nil {
-					level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
-					msrv.Close()
+					_ = level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
+					_ = msrv.Close()
 				}
 			},
 		)
@@ -239,26 +239,26 @@ func webhook(_ *cobra.Command, _ []string) error {
 		}
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
+				_ = level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
 				err := srv.ListenAndServeTLS(certPath, keyPath)
-				level.Info(logger).Log("msg", "webhook server exited", "err", err)
+				_ = level.Info(logger).Log("msg", "webhook server exited", "err", err)
 				return err
 			},
 			func(err error) {
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
-				level.Info(logger).Log("msg", "shutting down webhook server gracefully")
+				_ = level.Info(logger).Log("msg", "shutting down webhook server gracefully")
 				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 				defer func() {
 					cancel()
 				}()
 				if err := srv.Shutdown(ctx); err != nil {
-					level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
-					srv.Close()
+					_ = level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
+					_ = srv.Close()
 				}
 			},
 		)

+ 20 - 20
cmd/kgctl/connect_linux.go

@@ -115,7 +115,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if len(args) > 0 {
 		peerName = args[0]
 	} else {
-		level.Debug(logger).Log("msg", "no peer name provided; using hostname")
+		_ = level.Debug(logger).Log("msg", "no peer name provided; using hostname")
 		if peerName, err = os.Hostname(); err != nil {
 			return fmt.Errorf("could not determine hostname: %w", err)
 		}
@@ -146,7 +146,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 		}
 	}
 	publicKey := privateKey.PublicKey()
-	level.Info(logger).Log("msg", "generated public key", "key", publicKey)
+	_ = level.Info(logger).Log("msg", "generated public key", "key", publicKey)
 
 	if _, err := opts.kc.KiloV1alpha1().Peers().Get(ctx, peerName, metav1.GetOptions{}); apierrors.IsNotFound(err) {
 		peer := &v1alpha1.Peer{
@@ -162,15 +162,15 @@ func runConnect(cmd *cobra.Command, args []string) error {
 		if _, err := opts.kc.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{}); err != nil {
 			return fmt.Errorf("failed to create peer: %w", err)
 		}
-		level.Info(logger).Log("msg", "created peer", "peer", peerName)
+		_ = level.Info(logger).Log("msg", "created peer", "peer", peerName)
 		if connectOpts.cleanUp {
 			defer func() {
 				ctxWithTimeout, cancelWithTimeout := context.WithTimeout(context.Background(), 10*time.Second)
 				defer cancelWithTimeout()
 				if err := opts.kc.KiloV1alpha1().Peers().Delete(ctxWithTimeout, peerName, metav1.DeleteOptions{}); err != nil {
-					level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
+					_ = level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
 				} else {
-					level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
+					_ = level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
 				}
 			}()
 		}
@@ -183,7 +183,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return fmt.Errorf("failed to create wg interface: %w", err)
 	}
-	level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
+	_ = level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
 
 	table := route.NewTable()
 	if connectOpts.cleanUp {
@@ -193,7 +193,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	if err := iproute.SetAddress(iface, &connectOpts.allowedIP); err != nil {
 		return err
 	}
-	level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
+	_ = level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
 
 	if err := iproute.Set(iface, true); err != nil {
 		return err
@@ -213,7 +213,7 @@ func runConnect(cmd *cobra.Command, args []string) error {
 					select {
 					case err, ok := <-errCh:
 						if ok {
-							level.Error(logger).Log("err", err.Error())
+							_ = level.Error(logger).Log("err", err.Error())
 						} else {
 							return nil
 						}
@@ -226,9 +226,9 @@ func runConnect(cmd *cobra.Command, args []string) error {
 				cancel()
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
 			},
 		)
@@ -236,10 +236,10 @@ func runConnect(cmd *cobra.Command, args []string) error {
 	{
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "starting syncer")
+				_ = level.Info(logger).Log("msg", "starting syncer")
 				for {
 					if err := sync(table, peerName, privateKey, iface, logger); err != nil {
-						level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
+						_ = level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
 					}
 					select {
 					case <-time.After(connectOpts.resyncPeriod):
@@ -251,9 +251,9 @@ func runConnect(cmd *cobra.Command, args []string) error {
 				cancel()
 				var serr run.SignalError
 				if ok := errors.As(err, &serr); ok {
-					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+					_ = level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
 				} else {
-					level.Error(logger).Log("msg", "received error", "err", err.Error())
+					_ = level.Error(logger).Log("msg", "received error", "err", err.Error())
 				}
 			})
 	}
@@ -268,13 +268,13 @@ func runConnect(cmd *cobra.Command, args []string) error {
 
 func cleanUp(iface int, t *route.Table, logger log.Logger) {
 	if err := iproute.Set(iface, false); err != nil {
-		level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
+		_ = level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
 	}
 	if err := iproute.RemoveInterface(iface); err != nil {
-		level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
+		_ = level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
 	}
 	if err := t.CleanUp(); err != nil {
-		level.Error(logger).Log("failed to clean up routes: %v", err)
+		_ = level.Error(logger).Log("failed to clean up routes: %v", err)
 	}
 }
 
@@ -342,7 +342,7 @@ func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int
 	if err != nil {
 		return err
 	}
-	defer wgClient.Close()
+	defer func() { _ = wgClient.Close() }()
 
 	current, err := wgClient.Device(connectOpts.interfaceName)
 	if err != nil {
@@ -356,9 +356,9 @@ func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int
 		// If the key is empty, then it's the first time we are running
 		// so don't bother printing a diff.
 		if current.PrivateKey != [wgtypes.KeyLen]byte{} {
-			level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
+			_ = level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
 		}
-		level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
+		_ = level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
 		if err := wgClient.ConfigureDevice(connectOpts.interfaceName, conf.WGConfig()); err != nil {
 			return err
 		}

+ 1 - 1
cmd/kgctl/showconf.go

@@ -193,7 +193,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 		}
 	}
 	if !found {
-		_, err := os.Stderr.WriteString(fmt.Sprintf("Node %q is not a leader node\n", hostname))
+		_, err := fmt.Fprintf(os.Stderr, "Node %q is not a leader node\n", hostname)
 		return err
 	}
 

+ 7 - 7
pkg/iptables/iptables.go

@@ -35,7 +35,7 @@ func ipv6Disabled() (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	defer f.Close()
+	defer func() { _ = f.Close() }()
 	disabled := make([]byte, 1)
 	if _, err = io.ReadFull(f, disabled); err != nil {
 		return false, err
@@ -145,7 +145,7 @@ func (r *rule) Append(client Client) error {
 func (r *rule) Delete(client Client) error {
 	// Ignore the returned error as an error likely means
 	// that the rule doesn't exist, which is fine.
-	client.Delete(r.table, r.chain, r.spec...)
+	_ = client.Delete(r.table, r.chain, r.spec...)
 	return nil
 }
 
@@ -210,7 +210,7 @@ func (c *chain) Delete(client Client) error {
 	}
 	// Ignore the returned error as an error likely means
 	// that the chain doesn't exist, which is fine.
-	client.DeleteChain(c.table, c.chain)
+	_ = client.DeleteChain(c.table, c.chain)
 	return nil
 }
 
@@ -223,7 +223,7 @@ func (c *chain) Exists(client Client) (bool, error) {
 	case err == nil:
 		// If there was no error adding a new chain, then it did not exist.
 		// Delete it and return false.
-		client.DeleteChain(c.table, c.chain)
+		_ = client.DeleteChain(c.table, c.chain)
 		return false, nil
 	case ok && se.ExitStatus() == existsErr:
 		return true, nil
@@ -317,7 +317,7 @@ func New(opts ...ControllerOption) (*Controller, error) {
 			return nil, fmt.Errorf("failed to check IPv6 status: %v", err)
 		}
 		if disabled {
-			level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
+			_ = level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
 			c.v6 = &fakeClient{}
 		} else {
 			v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
@@ -380,7 +380,7 @@ func (c *Controller) reconcileAppendRules(rc ruleCache) error {
 			return fmt.Errorf("failed to check if rule exists: %v", err)
 		}
 		if !ok {
-			level.Info(c.logger).Log("msg", fmt.Sprintf("applying %d iptables rules", len(c.appendRules)-i))
+			_ = level.Info(c.logger).Log("msg", fmt.Sprintf("applying %d iptables rules", len(c.appendRules)-i))
 			if err := c.resetFromIndex(i, c.appendRules); err != nil {
 				return fmt.Errorf("failed to add rule: %v", err)
 			}
@@ -397,7 +397,7 @@ func (c *Controller) reconcilePrependRules(rc ruleCache) error {
 			return fmt.Errorf("failed to check if rule exists: %v", err)
 		}
 		if !ok {
-			level.Info(c.logger).Log("msg", "prepending iptables rule")
+			_ = level.Info(c.logger).Log("msg", "prepending iptables rule")
 			if err := r.Prepend(c.client(r.Proto())); err != nil {
 				return fmt.Errorf("failed to prepend rule: %v", err)
 			}

+ 30 - 30
pkg/k8s/backend.go

@@ -162,7 +162,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error {
 	}); !ok {
 		return errors.New("failed to sync node cache")
 	}
-	nb.informer.AddEventHandler(
+	_, err := nb.informer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				n, ok := obj.(*v1.Node)
@@ -195,7 +195,7 @@ func (nb *nodeBackend) Init(ctx context.Context) error {
 			},
 		},
 	)
-	return nil
+	return err
 }
 
 // List gets all the Nodes in the cluster.
@@ -218,29 +218,29 @@ func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) er
 		return fmt.Errorf("failed to find node: %v", err)
 	}
 	n := old.DeepCopy()
-	n.ObjectMeta.Annotations[endpointAnnotationKey] = node.Endpoint.String()
+	n.Annotations[endpointAnnotationKey] = node.Endpoint.String()
 	if node.InternalIP == nil {
-		n.ObjectMeta.Annotations[internalIPAnnotationKey] = ""
+		n.Annotations[internalIPAnnotationKey] = ""
 	} else {
-		n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
+		n.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
 	}
-	n.ObjectMeta.Annotations[keyAnnotationKey] = node.Key.String()
-	n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
+	n.Annotations[keyAnnotationKey] = node.Key.String()
+	n.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
 	if node.WireGuardIP == nil {
-		n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = ""
+		n.Annotations[wireGuardIPAnnotationKey] = ""
 	} else {
-		n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = node.WireGuardIP.String()
+		n.Annotations[wireGuardIPAnnotationKey] = node.WireGuardIP.String()
 	}
 	if node.DiscoveredEndpoints == nil {
-		n.ObjectMeta.Annotations[discoveredEndpointsKey] = ""
+		n.Annotations[discoveredEndpointsKey] = ""
 	} else {
 		discoveredEndpoints, err := json.Marshal(node.DiscoveredEndpoints)
 		if err != nil {
 			return err
 		}
-		n.ObjectMeta.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
+		n.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
 	}
-	n.ObjectMeta.Annotations[granularityKey] = string(node.Granularity)
+	n.Annotations[granularityKey] = string(node.Granularity)
 	oldData, err := json.Marshal(old)
 	if err != nil {
 		return err
@@ -275,37 +275,37 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	if err != nil {
 		subnet = nil
 	}
-	_, leader := node.ObjectMeta.Annotations[leaderAnnotationKey]
+	_, leader := node.Annotations[leaderAnnotationKey]
 	// Allow the region to be overridden by an explicit location.
-	location, ok := node.ObjectMeta.Annotations[locationAnnotationKey]
+	location, ok := node.Annotations[locationAnnotationKey]
 	if !ok {
-		location = node.ObjectMeta.Labels[topologyLabel]
+		location = node.Labels[topologyLabel]
 	}
 	// Allow the endpoint to be overridden.
-	endpoint := wireguard.ParseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
+	endpoint := wireguard.ParseEndpoint(node.Annotations[forceEndpointAnnotationKey])
 	if endpoint == nil {
-		endpoint = wireguard.ParseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
+		endpoint = wireguard.ParseEndpoint(node.Annotations[endpointAnnotationKey])
 	}
 	// Allow the internal IP to be overridden.
-	internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey])
+	internalIP := normalizeIP(node.Annotations[forceInternalIPAnnotationKey])
 	if internalIP == nil {
-		internalIP = normalizeIP(node.ObjectMeta.Annotations[internalIPAnnotationKey])
+		internalIP = normalizeIP(node.Annotations[internalIPAnnotationKey])
 	}
 	// Set the ForceInternalIP flag, if force-internal-ip annotation was set to "".
 	noInternalIP := false
-	if s, ok := node.ObjectMeta.Annotations[forceInternalIPAnnotationKey]; ok && (s == "" || s == "-") {
+	if s, ok := node.Annotations[forceInternalIPAnnotationKey]; ok && (s == "" || s == "-") {
 		noInternalIP = true
 		internalIP = nil
 	}
 	// Set Wireguard PersistentKeepalive setting for the node.
 	var persistentKeepalive time.Duration
-	if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; ok {
+	if keepAlive, ok := node.Annotations[persistentKeepaliveKey]; ok {
 		// We can ignore the error, because p will be set to 0 if an error occures.
 		p, _ := strconv.ParseInt(keepAlive, 10, 64)
 		persistentKeepalive = time.Duration(p) * time.Second
 	}
 	var lastSeen int64
-	if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok {
+	if ls, ok := node.Annotations[lastSeenAnnotationKey]; !ok {
 		lastSeen = 0
 	} else {
 		if lastSeen, err = strconv.ParseInt(ls, 10, 64); err != nil {
@@ -313,7 +313,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		}
 	}
 	var discoveredEndpoints map[string]*net.UDPAddr
-	if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok {
+	if de, ok := node.Annotations[discoveredEndpointsKey]; ok {
 		err := json.Unmarshal([]byte(de), &discoveredEndpoints)
 		if err != nil {
 			discoveredEndpoints = nil
@@ -321,7 +321,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	}
 	// Set allowed IPs for a location.
 	var allowedLocationIPs []net.IPNet
-	if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok {
+	if str, ok := node.Annotations[allowedLocationIPsKey]; ok {
 		for _, ip := range strings.Split(str, ",") {
 			if ipnet := normalizeIP(ip); ipnet != nil {
 				allowedLocationIPs = append(allowedLocationIPs, *ipnet)
@@ -329,7 +329,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		}
 	}
 	var meshGranularity mesh.Granularity
-	if gr, ok := node.ObjectMeta.Annotations[granularityKey]; ok {
+	if gr, ok := node.Annotations[granularityKey]; ok {
 		meshGranularity = mesh.Granularity(gr)
 		switch meshGranularity {
 		case mesh.LogicalGranularity:
@@ -340,7 +340,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 	}
 
 	// TODO log some error or warning.
-	key, _ := wgtypes.ParseKey(node.ObjectMeta.Annotations[keyAnnotationKey])
+	key, _ := wgtypes.ParseKey(node.Annotations[keyAnnotationKey])
 
 	return &mesh.Node{
 		// Endpoint and InternalIP should only ever fail to parse if the
@@ -362,7 +362,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
 		// WireGuardIP can fail to parse if the node is not a leader or if
 		// the node's agent has not yet reconciled. In either case, the IP
 		// will parse as nil.
-		WireGuardIP:         normalizeIP(node.ObjectMeta.Annotations[wireGuardIPAnnotationKey]),
+		WireGuardIP:         normalizeIP(node.Annotations[wireGuardIPAnnotationKey]),
 		DiscoveredEndpoints: discoveredEndpoints,
 		AllowedLocationIPs:  allowedLocationIPs,
 		Granularity:         meshGranularity,
@@ -403,7 +403,7 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
 
 	key, err := wgtypes.ParseKey(peer.Spec.PublicKey)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
+		_ = level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
 	}
 	var psk *wgtypes.Key
 	if k, err := wgtypes.ParseKey(peer.Spec.PresharedKey); err != nil {
@@ -458,7 +458,7 @@ func (pb *peerBackend) Init(ctx context.Context) error {
 	}); !ok {
 		return errors.New("failed to sync peer cache")
 	}
-	pb.informer.AddEventHandler(
+	_, err := pb.informer.AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1alpha1.Peer)
@@ -491,7 +491,7 @@ func (pb *peerBackend) Init(ctx context.Context) error {
 			},
 		},
 	)
-	return nil
+	return err
 }
 
 // List gets all the Peers in the cluster.

+ 2 - 2
pkg/k8s/backend_test.go

@@ -316,8 +316,8 @@ func TestTranslateNode(t *testing.T) {
 		},
 	} {
 		n := &v1.Node{}
-		n.ObjectMeta.Annotations = tc.annotations
-		n.ObjectMeta.Labels = tc.labels
+		n.Annotations = tc.annotations
+		n.Labels = tc.labels
 		n.Spec.PodCIDR = tc.subnet
 		node := translateNode(n, RegionLabelKey)
 		if diff := pretty.Compare(node, tc.out); diff != "" {

+ 6 - 6
pkg/mesh/cni.go

@@ -52,13 +52,13 @@ func (m *Mesh) updateCNIConfig() {
 	n := m.nodes[m.hostname]
 	m.mu.Unlock()
 	if n == nil || n.Subnet == nil {
-		level.Debug(m.logger).Log("msg", "local node does not have a valid subnet assigned")
+		_ = level.Debug(m.logger).Log("msg", "local node does not have a valid subnet assigned")
 		return
 	}
 
 	cidr, err := getCIDRFromCNI(m.cniPath)
 	if err != nil {
-		level.Warn(m.logger).Log("msg", "failed to get CIDR from CNI file; overwriting it", "err", err.Error())
+		_ = level.Warn(m.logger).Log("msg", "failed to get CIDR from CNI file; overwriting it", "err", err.Error())
 	}
 
 	if ipNetsEqual(cidr, n.Subnet) {
@@ -66,14 +66,14 @@ func (m *Mesh) updateCNIConfig() {
 	}
 
 	if cidr == nil {
-		level.Info(m.logger).Log("msg", "CIDR in CNI file is empty")
+		_ = level.Info(m.logger).Log("msg", "CIDR in CNI file is empty")
 	} else {
-		level.Info(m.logger).Log("msg", "CIDR in CNI file is not empty; overwriting", "old", cidr.String(), "new", n.Subnet.String())
+		_ = level.Info(m.logger).Log("msg", "CIDR in CNI file is not empty; overwriting", "old", cidr.String(), "new", n.Subnet.String())
 	}
 
-	level.Info(m.logger).Log("msg", "setting CIDR in CNI file", "CIDR", n.Subnet.String())
+	_ = level.Info(m.logger).Log("msg", "setting CIDR in CNI file", "CIDR", n.Subnet.String())
 	if err := setCIDRInCNI(m.cniPath, n.Subnet); err != nil {
-		level.Warn(m.logger).Log("msg", "failed to set CIDR in CNI file", "err", err.Error())
+		_ = level.Warn(m.logger).Log("msg", "failed to set CIDR in CNI file", "err", err.Error())
 	}
 }
 

+ 45 - 45
pkg/mesh/mesh.go

@@ -100,7 +100,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 	privateB = bytes.Trim(privateB, "\n")
 	private, err := wgtypes.ParseKey(string(privateB))
 	if err != nil {
-		level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
+		_ = level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
 		if private, err = wgtypes.GeneratePrivateKey(); err != nil {
 			return nil, err
 		}
@@ -150,10 +150,10 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 				return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
 			}
 		}
-		level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
+		_ = level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
 	} else {
 		enc = encapsulation.Noop(enc.Strategy())
-		level.Debug(logger).Log("msg", "running without a private IP address")
+		_ = level.Debug(logger).Log("msg", "running without a private IP address")
 	}
 	var externalIP *net.IPNet
 	if prioritisePrivateAddr && privateIP != nil {
@@ -161,7 +161,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 	} else {
 		externalIP = publicIP
 	}
-	level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
+	_ = level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
 	ipTables, err := iptables.New(iptables.WithRegisterer(registerer), iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
 	if err != nil {
 		return nil, fmt.Errorf("failed to IP tables controller: %v", err)
@@ -235,7 +235,7 @@ func (m *Mesh) Run(ctx context.Context) error {
 			m.nodes[m.hostname] = n
 			m.updateCNIConfig()
 		} else {
-			level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
+			_ = level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
 		}
 	}
 	if err := m.Peers().Init(ctx); err != nil {
@@ -259,7 +259,7 @@ func (m *Mesh) Run(ctx context.Context) error {
 				return
 			}
 			if err != nil {
-				level.Error(m.logger).Log("error", err)
+				_ = level.Error(m.logger).Log("error", err)
 				m.errorCounter.WithLabelValues("run").Inc()
 			}
 		}
@@ -296,9 +296,9 @@ func (m *Mesh) Run(ctx context.Context) error {
 
 func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	logger := log.With(m.logger, "event", e.Type)
-	level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
+	_ = level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
 	if isSelf(m.hostname, e.Node) {
-		level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
+		_ = level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
 		m.handleLocal(ctx, e.Node)
 		return
 	}
@@ -307,7 +307,7 @@ func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	if !e.Node.Ready() {
 		// Trace non ready nodes with their presence in the mesh.
 		_, ok := m.nodes[e.Node.Name]
-		level.Debug(logger).Log("msg", "received non ready node", "node", e.Node, "in-mesh", ok)
+		_ = level.Debug(logger).Log("msg", "received non ready node", "node", e.Node, "in-mesh", ok)
 	}
 	switch e.Type {
 	case AddEvent:
@@ -325,14 +325,14 @@ func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
 	}
 	m.mu.Unlock()
 	if diff {
-		level.Info(logger).Log("node", e.Node)
+		_ = level.Info(logger).Log("node", e.Node)
 		m.applyTopology()
 	}
 }
 
 func (m *Mesh) syncPeers(e *PeerEvent) {
 	logger := log.With(m.logger, "event", e.Type)
-	level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
+	_ = level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
 	var diff bool
 	m.mu.Lock()
 	// Peers are indexed by public key.
@@ -340,7 +340,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
 	if !e.Peer.Ready() {
 		// Trace non ready peer with their presence in the mesh.
 		_, ok := m.peers[key]
-		level.Debug(logger).Log("msg", "received non ready peer", "peer", e.Peer, "in-mesh", ok)
+		_ = level.Debug(logger).Log("msg", "received non ready peer", "peer", e.Peer, "in-mesh", ok)
 	}
 	switch e.Type {
 	case AddEvent:
@@ -360,7 +360,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
 	}
 	m.mu.Unlock()
 	if diff {
-		level.Info(logger).Log("peer", e.Peer)
+		_ = level.Info(logger).Log("peer", e.Peer)
 		m.applyTopology()
 	}
 }
@@ -372,26 +372,26 @@ func (m *Mesh) checkIn(ctx context.Context) {
 	defer m.mu.Unlock()
 	n := m.nodes[m.hostname]
 	if n == nil {
-		level.Debug(m.logger).Log("msg", "no local node found in backend")
+		_ = level.Debug(m.logger).Log("msg", "no local node found in backend")
 		return
 	}
 	oldTime := n.LastSeen
 	n.LastSeen = time.Now().Unix()
 	if err := m.Nodes().Set(ctx, m.hostname, n); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
 		m.errorCounter.WithLabelValues("checkin").Inc()
 		// Revert time.
 		n.LastSeen = oldTime
 		return
 	}
-	level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
+	_ = level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
 }
 
 func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
 	// Allow the IPs to be overridden.
 	if !n.Endpoint.Ready() {
 		e := wireguard.NewEndpoint(m.externalIP.IP, m.port)
-		level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
+		_ = level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
 		n.Endpoint = e
 	}
 	if n.InternalIP == nil && !n.NoInternalIP {
@@ -417,13 +417,13 @@ func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
 		Granularity:         m.granularity,
 	}
 	if !nodesAreEqual(n, local) {
-		level.Debug(m.logger).Log("msg", "local node differs from backend")
+		_ = level.Debug(m.logger).Log("msg", "local node differs from backend")
 		if err := m.Nodes().Set(ctx, m.hostname, local); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
 			m.errorCounter.WithLabelValues("local").Inc()
 			return
 		}
-		level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
+		_ = level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
 	}
 	m.mu.Lock()
 
@@ -446,7 +446,7 @@ func (m *Mesh) applyTopology() {
 	defer m.mu.Unlock()
 	// If we can't resolve an endpoint, then fail and retry later.
 	if err := m.resolveEndpoints(); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -482,23 +482,23 @@ func (m *Mesh) applyTopology() {
 	// Find the Kilo interface name.
 	link, err := linkByIndex(m.kiloIface)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
 
 	wgClient, err := wgctrl.New()
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
-	defer wgClient.Close()
+	defer func() { _ = wgClient.Close() }()
 
 	// wgDevice is the current configuration of the wg interface.
 	wgDevice, err := wgClient.Device(m.kiloIfaceName)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -507,7 +507,7 @@ func (m *Mesh) applyTopology() {
 	nodes[m.hostname].DiscoveredEndpoints = natEndpoints
 	t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port(), m.priv, m.subnet, m.serviceCIDRs, nodes[m.hostname].PersistentKeepalive, m.logger)
 	if err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
@@ -541,20 +541,20 @@ func (m *Mesh) applyTopology() {
 		// If we are handling local routes, ensure the local
 		// tunnel has an IP address.
 		if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
 	}
 	if err := m.ipTables.Set(ipRules); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 		return
 	}
 	if t.leader {
 		m.leaderGuage.Set(1)
 		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
@@ -563,24 +563,24 @@ func (m *Mesh) applyTopology() {
 		conf := t.Conf()
 		equal, diff := conf.Equal(wgDevice)
 		if !equal {
-			level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff)
-			level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig())
+			_ = level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff)
+			_ = level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig())
 			if err := wgClient.ConfigureDevice(m.kiloIfaceName, conf.WGConfig()); err != nil {
-				level.Error(m.logger).Log("error", err)
+				_ = level.Error(m.logger).Log("error", err)
 				m.errorCounter.WithLabelValues("apply").Inc()
 				return
 			}
 		}
 		if err := iproute.Set(m.kiloIface, true); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
 	} else {
 		m.leaderGuage.Set(0)
-		level.Debug(m.logger).Log("msg", "local node is not the leader")
+		_ = level.Debug(m.logger).Log("msg", "local node is not the leader")
 		if err := iproute.Set(m.kiloIface, false); err != nil {
-			level.Error(m.logger).Log("error", err)
+			_ = level.Error(m.logger).Log("error", err)
 			m.errorCounter.WithLabelValues("apply").Inc()
 			return
 		}
@@ -589,23 +589,23 @@ func (m *Mesh) applyTopology() {
 	// on the WireGuard interface.
 	routes, rules := t.Routes(link.Attrs().Name, m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
 	if err := m.table.Set(routes, rules); err != nil {
-		level.Error(m.logger).Log("error", err)
+		_ = level.Error(m.logger).Log("error", err)
 		m.errorCounter.WithLabelValues("apply").Inc()
 	}
 }
 
 func (m *Mesh) cleanUp() {
 	if err := m.ipTables.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 	if err := m.table.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 	if m.cleanUpIface {
 		if err := iproute.RemoveInterface(m.kiloIface); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
@@ -613,7 +613,7 @@ func (m *Mesh) cleanUp() {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
 		if err := m.Nodes().CleanUp(ctx, m.hostname); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
@@ -621,12 +621,12 @@ func (m *Mesh) cleanUp() {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
 		if err := m.Peers().CleanUp(ctx, m.hostname); err != nil {
-			level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
+			_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
 			m.errorCounter.WithLabelValues("cleanUp").Inc()
 		}
 	}
 	if err := m.enc.CleanUp(); err != nil {
-		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
+		_ = level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
 		m.errorCounter.WithLabelValues("cleanUp").Inc()
 	}
 }
@@ -694,7 +694,7 @@ func nodesAreEqual(a, b *Node) bool {
 }
 
 func peersAreEqual(a, b *Peer) bool {
-	if !(a != nil) == (b != nil) {
+	if (a != nil) != (b != nil) {
 		return false
 	}
 	if a == b {
@@ -813,7 +813,7 @@ func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *
 	}
 	for _, n := range nodes {
 		if peer, ok := keys[n.Key.String()]; ok && n.PersistentKeepalive != time.Duration(0) {
-			level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime)
+			_ = level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime)
 			// Don't update the endpoint, if there was never any handshake.
 			if !peer.LastHandshakeTime.Equal(time.Time{}) {
 				natEndpoints[n.Key.String()] = peer.Endpoint
@@ -827,6 +827,6 @@ func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *
 			}
 		}
 	}
-	level.Debug(logger).Log("msg", "Discovered WireGuard NAT Endpoints", "DiscoveredEndpoints", natEndpoints)
+	_ = level.Debug(logger).Log("msg", "Discovered WireGuard NAT Endpoints", "DiscoveredEndpoints", natEndpoints)
 	return natEndpoints
 }

+ 6 - 6
pkg/mesh/topology.go

@@ -193,7 +193,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
 			privateIPs:          privateIPs,
 			allowedLocationIPs:  allowedLocationIPs,
 		})
-		level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
+		_ = level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
 
 	}
 	// Sort the Topology segments so the result is stable.
@@ -241,7 +241,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
 		segment.allowedLocationIPs = t.filterAllowedLocationIPs(segment.allowedLocationIPs, segment.location)
 	}
 
-	level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
+	_ = level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
 	return &t, nil
 }
 
@@ -257,7 +257,7 @@ CheckIPs:
 			if location != s.location {
 				for _, i := range s.allowedLocationIPs {
 					if intersect(ip, i) {
-						level.Warn(t.logger).Log("msg", "overlapping allowed location IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+						_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 						continue CheckIPs
 					}
 				}
@@ -265,14 +265,14 @@ CheckIPs:
 			// Check if allowed location IPs intersect with the allowed IPs.
 			for _, i := range s.allowedIPs {
 				if intersect(ip, i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with allowed IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with allowed IPnets", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 					continue CheckIPs
 				}
 			}
 			// Check if allowed location IPs intersect with the private IPs of the segment.
 			for _, i := range s.privateIPs {
 				if ip.Contains(i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with privateIP", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with privateIP", "IP", ip.String(), "IP2", i.String(), "segment-location", s.location)
 					continue CheckIPs
 				}
 			}
@@ -281,7 +281,7 @@ CheckIPs:
 		for _, p := range t.peers {
 			for _, i := range p.AllowedIPs {
 				if intersect(ip, i) {
-					level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with peer IPnet", "IP", ip.String(), "IP2", i.String(), "peer", p.Name)
+					_ = level.Warn(t.logger).Log("msg", "overlapping allowed location IPnet with peer IPnet", "IP", ip.String(), "IP2", i.String(), "peer", p.Name)
 					continue CheckIPs
 				}
 			}

+ 2 - 2
pkg/route/route.go

@@ -109,7 +109,7 @@ func (t *Table) Run(stop <-chan struct{}) (<-chan error, error) {
 			// Watch for deleted routes to reconcile this table's routes.
 			case unix.RTM_DELROUTE:
 				// Filter out invalid routes.
-				if e.Route.Dst == nil {
+				if e.Dst == nil {
 					continue
 				}
 				t.mu.Lock()
@@ -118,7 +118,7 @@ func (t *Table) Run(stop <-chan struct{}) (<-chan error, error) {
 					case *netlink.Route:
 						// If any deleted route's destination matches a destination
 						// in the table, reset the corresponding route just in case.
-						if r.Dst.IP.Equal(e.Route.Dst.IP) && r.Dst.Mask.String() == e.Route.Dst.Mask.String() {
+						if r.Dst.IP.Equal(e.Dst.IP) && r.Dst.Mask.String() == e.Dst.Mask.String() {
 							if err := t.addRoute(r); err != nil {
 								nonBlockingSend(t.errors, fmt.Errorf("failed add route: %v", err))
 							}