Просмотр исходного кода

e2e: reuse kind cluster across suites

Currently, each test suite spins up its own kind cluster, which results
in longer e2e test times as each test suite needs to wait for the
cluster to be ready and for images to download. This commit creates two
new virtual test suites that are run before and after the actual e2e
tests and are responsible for creating and destroying a kind cluster
respectively. Any test suite that needs a fresh cluster can still spin
up its own using the `create_cluster` helper in the lib.sh file.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Lucas Servén Marín 4 года назад
Родитель
Commit
c9e4786893
6 изменённых файлов: 16 добавлений и 11 удалений
  1. 1 0
      .gitignore
  2. 1 1
      Makefile
  3. 0 5
      e2e/full-mesh.sh
  4. 0 5
      e2e/location-mesh.sh
  5. 7 0
      e2e/setup.sh
  6. 7 0
      e2e/teardown.sh

+ 1 - 0
.gitignore

@@ -4,3 +4,4 @@
 .push*
 bin/
 tmp/
+e2e/kind.yaml

+ 1 - 1
Makefile

@@ -208,7 +208,7 @@ $(BASH_UNIT):
 	chmod +x $@
 
 e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl
-	KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) ./e2e/full-mesh.sh ./e2e/location-mesh.sh
+	KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/teardown.sh
 
 header: .header
 	@HEADER=$$(cat .header); \

+ 0 - 5
e2e/full-mesh.sh

@@ -3,17 +3,12 @@
 . lib.sh
 
 setup_suite() {
-	create_cluster
 	# shellcheck disable=SC2016
 	$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
 	block_until_ready_by_name kube-system kilo-userspace 
 	$KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }
 
-teardown_suite () {
-	delete_cluster
-}
-
 test_full_mesh_connectivity() {
 	assert "retry 30 5 '' check_ping" "should be able to ping all Pods"
 	assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 12" "adjacency should return the right number of successful pings"

+ 0 - 5
e2e/location-mesh.sh

@@ -3,7 +3,6 @@
 . lib.sh
 
 setup_suite() {
-	create_cluster
 	# shellcheck disable=SC2016
 	$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
 	block_until_ready_by_name kube-system kilo-userspace 
@@ -25,7 +24,3 @@ test_location_mesh_peer() {
 test_mesh_granularity_auto_detect() {
 	 assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity location)"
 }
-
-teardown_suite () {
-	delete_cluster
-}

+ 7 - 0
e2e/setup.sh

@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1091
+. lib.sh
+
+setup_suite() {
+	create_cluster
+}

+ 7 - 0
e2e/teardown.sh

@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1091
+. lib.sh
+
+teardown_suite () {
+	delete_cluster
+}