
Merge pull request #403 from cozystack/feature/internal-cidr-flag

feat(kg): add --internal-cidr flag to filter IP auto-detection
Lucas Servén Marín committed 2 months ago
Commit 840176b1b4

+ 12 - 1
cmd/kg/main.go

@@ -120,6 +120,7 @@ var (
 	topologyLabel         string
 	port                  int
 	serviceCIDRsRaw       []string
+	internalCIDRsRaw      []string
 	subnet                string
 	resyncPeriod          time.Duration
 	iptablesForwardRule   bool
@@ -152,6 +153,7 @@ func init() {
 	cmd.Flags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
 	cmd.Flags().IntVar(&port, "port", mesh.DefaultKiloPort, "The port over which WireGuard peers should communicate.")
 	cmd.Flags().StringSliceVar(&serviceCIDRsRaw, "service-cidr", nil, "The service CIDR for the Kubernetes cluster. Can be provided optionally to avoid masquerading packets sent to service IPs. Can be specified multiple times.")
+	cmd.Flags().StringSliceVar(&internalCIDRsRaw, "internal-cidr", nil, "CIDRs to consider for internal IP auto-detection. If specified, only IPs within these CIDRs will be used. Can be specified multiple times.")
 	cmd.Flags().StringVar(&subnet, "subnet", mesh.DefaultKiloSubnet.String(), "CIDR from which to allocate addresses for WireGuard interfaces.")
 	cmd.Flags().DurationVar(&resyncPeriod, "resync-period", 30*time.Second, "How often should the Kilo controllers reconcile?")
 	cmd.Flags().BoolVar(&iptablesForwardRule, "iptables-forward-rules", false, "Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!")
@@ -266,7 +268,16 @@ func runRoot(_ *cobra.Command, _ []string) error {
 		serviceCIDRs = append(serviceCIDRs, s)
 	}
 
-	m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, cleanUp, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, serviceCIDRs, log.With(logger, "component", "kilo"), registry)
+	var internalCIDRs []*net.IPNet
+	for _, internalCIDR := range internalCIDRsRaw {
+		_, s, err := net.ParseCIDR(internalCIDR)
+		if err != nil {
+			return fmt.Errorf("failed to parse %q as CIDR: %v", internalCIDR, err)
+		}
+		internalCIDRs = append(internalCIDRs, s)
+	}
+
+	m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, cleanUp, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, internalCIDRs, serviceCIDRs, log.With(logger, "component", "kilo"), registry)
 	if err != nil {
 		return fmt.Errorf("failed to create Kilo mesh: %v", err)
 	}
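For illustration, here is a minimal standalone sketch of the same validation step that the new loop performs on --internal-cidr values; the raw CIDR strings below are hypothetical examples, not taken from the change:

```go
// Sketch: validate --internal-cidr values the way runRoot does.
// The raw values here are hypothetical examples.
package main

import (
	"fmt"
	"net"
)

func main() {
	raw := []string{"10.0.0.0/8", "192.168.1.5/32", "not-a-cidr"}
	var internalCIDRs []*net.IPNet
	for _, c := range raw {
		_, n, err := net.ParseCIDR(c)
		if err != nil {
			// kg returns this error and exits; here we only print it.
			fmt.Printf("failed to parse %q as CIDR: %v\n", c, err)
			continue
		}
		internalCIDRs = append(internalCIDRs, n)
	}
	fmt.Println("parsed CIDRs:", internalCIDRs)
}
```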

+ 1 - 0
docs/kg.md

@@ -43,6 +43,7 @@ Flags:
   -h, --help                           help for kg
       --hostname string                Hostname of the node on which this process is running.
       --interface string               Name of the Kilo interface to use; if it does not exist, it will be created. (default "kilo0")
+      --internal-cidr strings          CIDRs to consider for internal IP auto-detection. If specified, only IPs within these CIDRs will be used. Can be specified multiple times.
       --iptables-forward-rules         Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!
       --kubeconfig string              Path to kubeconfig.
       --listen string                  The address at which to listen for health and metrics. (default ":1107")

+ 5 - 0
manifests/kilo-bootkube-flannel.yaml

@@ -74,11 +74,16 @@ spec:
         - --cni=false
         - --compatibility=flannel
         - --local=false
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

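In the manifests the flag is wired up via the downward API: Kubernetes expands $(NODE_IP) in the container args from the NODE_IP environment variable, which is populated from status.hostIP, so each pod passes its own node's host IP as a single-address /32 CIDR and IP auto-detection is pinned to exactly that address. A minimal sketch of what the parsed value means (the node IP below is a hypothetical example):

```go
// Sketch: a /32 CIDR contains exactly one address, so only the
// kubelet-reported host IP can match. Hypothetical node IP below.
package main

import (
	"fmt"
	"net"
)

func main() {
	_, n, _ := net.ParseCIDR("10.240.0.7/32") // stands in for $(NODE_IP)/32
	fmt.Println(n.Contains(net.ParseIP("10.240.0.7"))) // true: the node's own IP
	fmt.Println(n.Contains(net.ParseIP("10.240.0.8"))) // false: any other address
}
```

The same five-line addition is repeated in each of the manifests below.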
+ 5 - 0
manifests/kilo-k3s-cilium.yaml

@@ -106,11 +106,16 @@ spec:
         - --encapsulate=crosssubnet
         - --clean-up-interface=true
         - --log-level=all
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 5 - 0
manifests/kilo-k3s-flannel.yaml

@@ -103,11 +103,16 @@ spec:
         - --cni=false
         - --compatibility=flannel
         - --local=false
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 5 - 0
manifests/kilo-kubeadm-cilium.yaml

@@ -79,11 +79,16 @@ spec:
         - --clean-up-interface=true
         - --subnet=172.31.254.0/24
         - --log-level=all
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 5 - 0
manifests/kilo-kubeadm-flannel-userspace.yaml

@@ -88,11 +88,16 @@ spec:
         - --cni=false
         - --compatibility=flannel
         - --local=false
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 5 - 0
manifests/kilo-kubeadm-flannel.yaml

@@ -74,11 +74,16 @@ spec:
         - --cni=false
         - --compatibility=flannel
         - --local=false
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 5 - 0
manifests/kilo-typhoon-flannel.yaml

@@ -74,11 +74,16 @@ spec:
         - --cni=false
         - --compatibility=flannel
         - --local=false
+        - --internal-cidr=$(NODE_IP)/32
         env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         ports:
         - containerPort: 1107
           name: metrics

+ 16 - 1
pkg/mesh/discoverips.go

@@ -40,7 +40,8 @@ import (
 // - private IP assigned to interface of default route
 // - private IP assigned to local interface
 // - if no IP was found, return nil and an error.
-func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error) {
+// If allowedCIDRs is not empty, only IPs within these CIDRs will be considered for private IP selection.
+func getIP(hostname string, allowedCIDRs []*net.IPNet, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error) {
 	ignore := make(map[string]struct{})
 	for i := range ignoreIfaces {
 		if ignoreIfaces[i] == 0 {
@@ -144,6 +145,10 @@ func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error)
 		if _, ok := ignore[tmpPriv[i].String()]; ok {
 			continue
 		}
+		// If allowedCIDRs is specified, filter private IPs by these CIDRs.
+		if len(allowedCIDRs) > 0 && !isInCIDRs(tmpPriv[i].IP, allowedCIDRs) {
+			continue
+		}
 		priv = append(priv, tmpPriv[i])
 	}
 	for i := range tmpPub {
@@ -290,3 +295,13 @@ func defaultInterface() (*net.Interface, error) {
 
 	return nil, errors.New("failed to find default route")
 }
+
+// isInCIDRs checks if the given IP is within any of the provided CIDRs.
+func isInCIDRs(ip net.IP, cidrs []*net.IPNet) bool {
+	for _, cidr := range cidrs {
+		if cidr.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}

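As a usage sketch of the new helper, the filtering step in isolation: candidate private IPs that fall outside the allowed CIDRs are dropped by the check added to getIP. All addresses below are hypothetical examples:

```go
// Sketch: only candidate addresses inside the allowed CIDRs survive
// the new check in getIP. All values are hypothetical examples.
package main

import (
	"fmt"
	"net"
)

func isInCIDRs(ip net.IP, cidrs []*net.IPNet) bool {
	for _, cidr := range cidrs {
		if cidr.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	_, allowed, _ := net.ParseCIDR("10.0.0.0/8")
	candidates := []net.IP{net.ParseIP("10.1.2.3"), net.ParseIP("192.168.1.10")}
	for _, ip := range candidates {
		fmt.Println(ip, isInCIDRs(ip, []*net.IPNet{allowed})) // only 10.1.2.3 passes
	}
}
```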
+ 2 - 2
pkg/mesh/mesh.go

@@ -89,7 +89,7 @@ type Mesh struct {
 }
 
 // New returns a new Mesh instance.
-func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanup bool, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, serviceCIDRs []*net.IPNet, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) {
+func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanup bool, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, allowedInternalCIDRs []*net.IPNet, serviceCIDRs []*net.IPNet, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) {
 	if err := os.MkdirAll(kiloPath, 0700); err != nil {
 		return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
 	}
@@ -134,7 +134,7 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
 		}
 		kiloIface = link.Attrs().Index
 	}
-	privateIP, publicIP, err := getIP(hostname, kiloIface, enc.Index(), cniIndex)
+	privateIP, publicIP, err := getIP(hostname, allowedInternalCIDRs, kiloIface, enc.Index(), cniIndex)
 	if err != nil {
 		return nil, fmt.Errorf("failed to find public IP: %v", err)
 	}