  1. #!/usr/bin/env bash
  2. # shellcheck disable=SC1091
  3. . lib.sh
  4. # shellcheck disable=SC2153
  5. KUBECONFIG2="$KUBECONFIG"2
  6. # shellcheck disable=SC2153
  7. KIND_CLUSTER2="$KIND_CLUSTER"2
# Bring up a second kind cluster and join the two Kilo meshes:
# create cluster2, switch its Kilo DaemonSet to full-mesh granularity with
# a dedicated WireGuard subnet, then register the nodes of each cluster as
# Peers in the other so traffic can flow between the clusters.
setup_suite() {
  # Create cluster2 on its own API port (6444) with non-overlapping
  # pod (10.44.0.0/16) and service (10.45.0.0/16) CIDRs.
  KUBECONFIG=$KUBECONFIG2 KIND_CLUSTER=$KIND_CLUSTER2 create_cluster "$(build_kind_config 1 6444 10.44.0.0/16 10.45.0.0/16)"
  # Reconfigure Kilo in cluster2: full mesh granularity and a distinct
  # --subnet so its WireGuard IPs do not collide with cluster1's mesh.
  # The literal $(NODE_NAME) is expanded by the kubelet, not this shell.
  # shellcheck disable=SC2016
  KUBECONFIG=$KUBECONFIG2 _kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full","--subnet=10.6.0.0/16"]}]}}}}'
  # Wait for the patched Kilo pods to become ready before peering.
  KUBECONFIG=$KUBECONFIG2 block_until_ready_by_name kube-system kilo-userspace
  # Register the nodes in cluster1 as peers of cluster2.
  for n in $(_kubectl get no -o name | cut -d'/' -f2); do
    # Specify the service CIDR as an extra IP range that should be routable.
    # NOTE(review): assumes cluster1's service CIDR is 10.43.0.0/16 — confirm
    # against the cluster config in lib.sh.
    $KGCTL_BINARY --kubeconfig "$KUBECONFIG" showconf node "$n" --as-peer -o yaml --allowed-ips 10.43.0.0/16 | $KUBECTL_BINARY --kubeconfig "$KUBECONFIG2" apply -f -
  done
  # Register the nodes in cluster2 as peers of cluster1.
  for n in $(KUBECONFIG=$KUBECONFIG2 _kubectl get no -o name | cut -d'/' -f2); do
    # Specify the service CIDR as an extra IP range that should be routable.
    # 10.45.0.0/16 is cluster2's service CIDR from build_kind_config above.
    $KGCTL_BINARY --kubeconfig "$KUBECONFIG2" showconf node "$n" --as-peer -o yaml --allowed-ips 10.45.0.0/16 | $KUBECTL_BINARY --kubeconfig "$KUBECONFIG" apply -f -
  done
}
  24. test_multi_cluster_pod_connectivity() {
  25. for ip in $(KUBECONFIG=$KUBECONFIG2 _kubectl get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
  26. assert_equals pong "$(retry 10 5 "$ip is not yet routable" curl_pod -m 1 -s http://"$ip":8080/ping)" "should be able to make HTTP request from cluster 1 to Pod in cluster 2"
  27. done
  28. for ip in $(_kubectl get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
  29. assert_equals pong "$(KUBECONFIG="$KUBECONFIG2" retry 10 5 "$ip is not yet routable" curl_pod -m 1 -s http://"$ip":8080/ping)" "should be able to make HTTP request from cluster 2 to Pod in cluster 1"
  30. done
  31. }
# Verify that a Service in cluster1 is reachable from cluster2 by
# "mirroring" it: a selector-less Service plus a manually managed Endpoints
# object in cluster2 that points at the cluster IP of cluster1's
# Kubernetes API service.
test_multi_cluster_service_connectivity() {
  # Mirror the Kubernetes API service from cluster1 into cluster2.
  cat <<EOF | $KUBECTL_BINARY --kubeconfig "$KUBECONFIG2" apply -f -
apiVersion: v1
kind: Service
metadata:
  name: mirrored-kubernetes
spec:
  ports:
  - port: 443
---
apiVersion: v1
kind: Endpoints
metadata:
  name: mirrored-kubernetes
subsets:
  - addresses:
      - ip: $(_kubectl get service kubernetes -o jsonpath='{.spec.clusterIP}') # The cluster IP of the Kubernetes API service on cluster1.
    ports:
      - port: 443
EOF
  # Curl the mirrored service from a pod in cluster2; -k because the
  # mirrored hostname is not in the API server's certificate.
  assert_equals ok "$(KUBECONFIG="$KUBECONFIG2" retry 10 5 "service is not yet routable" curl_pod -m 1 -s -k https://mirrored-kubernetes/readyz)" "should be able to make HTTP request from cluster 2 to service in cluster 1"
}
  55. teardown_suite () {
  56. if [ -n "$E2E_SKIP_TEARDOWN_ON_FAILURE" ]; then
  57. return
  58. fi
  59. # Remove the nodes in cluster2 as peers of cluster1.
  60. for n in $(KUBECONFIG=$KUBECONFIG2 _kubectl get no -o name | cut -d'/' -f2); do
  61. _kubectl delete peer "$n"
  62. done
  63. KUBECONFIG=$KUBECONFIG2 KIND_CLUSTER=$KIND_CLUSTER2 delete_cluster
  64. }