Просмотр исходного кода

Add unit tests for Minion Manager RPC Server

Sergiu Miclea 3 года назад
Родитель
Commit
b36fdc3fa0

+ 3 - 3
coriolis/minion_manager/rpc/server.py

@@ -715,7 +715,7 @@ class MinionManagerServerEndpoint(object):
                 " the DB for pool '%s' for use with action '%s'. Clearing "
                 "any DB entries added so far (%s). Error was: %s",
                 minion_pool.id, action_id,
-                [m.id for m in new_machine_db_entries_added],
+                new_machine_db_entries_added,
                 utils.get_exception_details())
             try:
                 LOG.debug(
@@ -739,13 +739,13 @@ class MinionManagerServerEndpoint(object):
                     utils.get_exception_details())
             for new_machine in new_machine_db_entries_added:
                 try:
-                    db_api.delete_minion_machine(ctxt, new_machine.id)
+                    db_api.delete_minion_machine(ctxt, new_machine)
                 except Exception:
                     LOG.warn(
                         "Error occurred while removing minion machine entry "
                         "'%s' from the DB. This may leave the pool in an "
                         "inconsistent state. Error trace was: %s" % (
-                            new_machine.id, utils.get_exception_details()))
+                            new_machine, utils.get_exception_details()))
                     continue
             raise
 

+ 0 - 0
coriolis/tests/minion_manager/__init__.py


+ 0 - 0
coriolis/tests/minion_manager/rpc/__init__.py


+ 199 - 0
coriolis/tests/minion_manager/rpc/data/get_minion_pool_refresh_flow.yaml

@@ -0,0 +1,199 @@
+# perform health checks for powered on machines
+# skip health checks for non-powered on machines
+# skip health checks for non-healthcheckable (ex. IN_USE)
+- config:
+    minion_pool:
+      id: minion_pool1
+      minimum_minions: 4
+      minion_retention_strategy: delete
+      minion_machines:
+        - id: minion1
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+        - id: minion2
+          allocation_status: ERROR
+          power_status: POWERED_ERROR
+        - id: minion3
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+        - id: minion4
+          allocation_status: IN_USE
+          power_status: POWERED_ON
+  expect:
+    result:
+      flow_tasks:
+        - pool-minion_pool1-machine-minion1-healthcheck
+        - pool-minion_pool1-machine-minion3-healthcheck
+      # "include" are the db calls that must be present
+      # "exclude" are the db calls that must not be present
+      db_calls:
+        include:
+          - set_minion_machine_allocation_status:
+              id: minion1
+              allocation_status: HEALTHCHECKING
+          - set_minion_machine_allocation_status:
+              id: minion3
+              allocation_status: HEALTHCHECKING
+        exclude:
+          - set_minion_machine_allocation_status:
+              id: minion2
+              allocation_status: HEALTHCHECKING
+
+# power off machines that are not needed and are expired
+- config:
+    minion_pool:
+      id: minion_pool1
+      # minimum_minions 1 means we have too many minions
+      minimum_minions: 1
+      minion_max_idle_time: 0
+      minion_retention_strategy: poweroff
+      minion_machines:
+        - id: minion1
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+        - id: minion2
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+          # not expired
+          last_used_at: [2099, 1, 1, 0, 0, 0]
+        - id: minion3
+          allocation_status: AVAILABLE
+          power_status: POWERED_OFF
+        - id: minion4
+          allocation_status: ERROR_DEPLOYING
+          power_status: POWERED_ON
+        - id: minion5
+          allocation_status: ERROR
+          power_status: POWERED_ERROR
+  expect:
+    result:
+      flow_tasks:
+        - pool-minion_pool1-machine-minion1-power-off
+        - pool-minion_pool1-machine-minion2-healthcheck # not expired
+        - pool-minion_pool1-machine-minion4-healthcheck
+      db_calls:
+        include:
+          - set_minion_machine_allocation_status:
+              id: minion1
+              allocation_status: POWERING_OFF
+          - set_minion_machine_allocation_status:
+              id: minion2
+              allocation_status: HEALTHCHECKING
+          - set_minion_machine_allocation_status:
+              id: minion4
+              allocation_status: HEALTHCHECKING
+        exclude:
+          - set_minion_machine_allocation_status:
+              id: minion3
+              allocation_status: POWERING_OFF
+          - set_minion_machine_allocation_status:
+              id: minion5
+              allocation_status: HEALTHCHECKING
+
+# delete machines that are not needed
+- config:
+    minion_pool:
+      id: minion_pool1
+      # minimum_minions 1 means we have too many minions
+      minimum_minions: 1
+      minion_max_idle_time: 0
+      minion_retention_strategy: delete
+      minion_machines:
+        - id: minion1
+          allocation_status: AVAILABLE
+          power_status: POWERING_OFF
+        - id: minion2
+          allocation_status: ERROR
+          power_status: POWERED_ERROR
+        - id: minion3
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+        - id: minion4
+          allocation_status: ERROR_DEPLOYING
+          power_status: POWERED_ON
+  expect:
+    result:
+      flow_tasks:
+        - pool-minion_pool1-machine-minion1-deallocation
+        - pool-minion_pool1-machine-minion3-healthcheck
+        - pool-minion_pool1-machine-minion4-healthcheck
+      db_calls:
+        include:
+          - set_minion_machine_allocation_status:
+              id: minion1
+              allocation_status: DEALLOCATING
+          - set_minion_machine_allocation_status:
+              id: minion3
+              allocation_status: HEALTHCHECKING
+          - set_minion_machine_allocation_status:
+              id: minion4
+              allocation_status: HEALTHCHECKING
+        exclude:
+          - set_minion_machine_allocation_status:
+              id: minion2
+              allocation_status: HEALTHCHECKING
+          - set_minion_machine_allocation_status:
+              id: minion2
+              allocation_status: DEALLOCATING
+
+# invalid retention strategy
+- config:
+    minion_pool:
+      id: minion_pool1
+      # minimum_minions 1 means we have too many minions
+      minimum_minions: 1
+      minion_max_idle_time: 0
+      minion_retention_strategy: invalid
+      minion_machines:
+        - id: minion1
+          allocation_status: AVAILABLE
+          power_status: POWERING_OFF
+        - id: minion2
+          allocation_status: ERROR
+          power_status: POWERED_ERROR
+        - id: minion3
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+        - id: minion4
+          allocation_status: ERROR_DEPLOYING
+          power_status: POWERED_ON
+  expect:
+    exception: InvalidMinionPoolState
+
+# no machines to healthcheck
+- config:
+    minion_pool:
+      id: minion_pool1
+      # minimum_minions 1 means we have too many minions
+      minimum_minions: 1
+      minion_max_idle_time: 0
+      minion_retention_strategy: poweroff
+      minion_machines:
+        - id: minion1
+          allocation_status: AVAILABLE
+          power_status: POWERED_ON
+          # is expired, no healthcheck
+          last_used_at: [2000, 1, 1, 0, 0, 0]
+        - id: minion2
+          allocation_status: AVAILABLE
+          # not powered on, no healthcheck
+          power_status: POWERED_OFF
+        - id: minion3
+          allocation_status: ERROR
+          power_status: POWERED_ERROR
+  expect:
+    result:
+      flow_tasks:
+        - pool-minion_pool1-machine-minion1-power-off
+      db_calls:
+        include:
+          - set_minion_machine_allocation_status:
+              id: minion1
+              allocation_status: POWERING_OFF
+        exclude:
+          - set_minion_machine_allocation_status:
+              id: minion2
+              allocation_status: HEALTHCHECKING
+          - set_minion_machine_allocation_status:
+              id: minion3
+              allocation_status: HEALTHCHECKING

+ 243 - 0
coriolis/tests/minion_manager/rpc/data/make_minion_machine_allocation_subflow_for_action.yaml

@@ -0,0 +1,243 @@
+## EXCEPTIONS
+
+# requires 2 machines, but only 1 is available
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 1
+      minion_machines:
+        - allocation_status: AVAILABLE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+  expect:
+    exception: InvalidMinionPoolState
+
+# requires 2 machines, but only 1 is AVAILABLE (the other is IN_USE)
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 2
+      minion_machines:
+        - allocation_status: IN_USE
+        - allocation_status: AVAILABLE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+  expect:
+    exception: InvalidMinionPoolState
+
+# maximum_minions is too low for 4 new machines
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 6
+      minion_machines:
+        - id: machine_1
+          allocation_status: IN_USE
+        - id: machine_2
+          allocation_status: IN_USE
+        - id: machine_3
+          allocation_status: IN_USE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    exception: InvalidMinionPoolState
+
+# duplicate instance names
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 4
+      minion_machines:
+        - id: machine_1
+          allocation_status: AVAILABLE
+        - id: machine_2
+          allocation_status: AVAILABLE
+    action_instances:
+      - instance_1
+      - instance_1
+      - instance_2
+  expect:
+    exception: InvalidInput
+
+## SUCCESS
+
+# no new machines need to be allocated
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 4
+      minion_machines:
+        - id: machine_1
+          allocation_status: AVAILABLE
+        - id: machine_2
+          allocation_status: AVAILABLE
+        - id: machine_3
+          allocation_status: AVAILABLE
+        - id: machine_4
+          allocation_status: AVAILABLE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    result:
+      mappings:
+        instance_1: machine_1
+        instance_2: machine_2
+        instance_3: machine_3
+        instance_4: machine_4
+      flow_allocations:
+        - pool-minion_pool_1-machine-machine_1-healthcheck
+        - pool-minion_pool_1-machine-machine_2-healthcheck
+        - pool-minion_pool_1-machine-machine_3-healthcheck
+        - pool-minion_pool_1-machine-machine_4-healthcheck
+
+# 2 new machines are allocated
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 5
+      minion_machines:
+        - id: machine_1
+          allocation_status: AVAILABLE
+        - id: machine_2
+          allocation_status: IN_USE
+        - id: machine_3
+          allocation_status: AVAILABLE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    result:
+      mappings:
+        instance_1: machine_1
+        instance_2: machine_3
+        instance_3: new_machine
+        instance_4: new_machine
+      flow_allocations:
+        - pool-minion_pool_1-machine-machine_1-healthcheck
+        - pool-minion_pool_1-machine-machine_3-healthcheck
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+
+# 3 new machines are allocated
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 6
+      minion_machines:
+        - id: machine_1
+          allocation_status: AVAILABLE
+        - id: machine_2
+          allocation_status: IN_USE
+        - id: machine_3
+          allocation_status: IN_USE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    result:
+      mappings:
+        instance_1: machine_1
+        instance_2: new_machine
+        instance_3: new_machine
+        instance_4: new_machine
+      flow_allocations:
+        - pool-minion_pool_1-machine-machine_1-healthcheck
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+
+# 4 new machines are allocated
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 7
+      minion_machines:
+        - id: machine_1
+          allocation_status: IN_USE
+        - id: machine_2
+          allocation_status: IN_USE
+        - id: machine_3
+          allocation_status: IN_USE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    result:
+      mappings:
+        instance_1: new_machine
+        instance_2: new_machine
+        instance_3: new_machine
+        instance_4: new_machine
+      flow_allocations:
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+
+# 1 allocated
+- config:
+    minion_pool:
+      id: minion_pool_1
+      maximum_minions: 4
+      minion_machines:
+        - id: machine_1
+          allocation_status: AVAILABLE
+    action_instances:
+      instance_1:
+        name: Instance 1
+      instance_2:
+        name: Instance 2
+      instance_3:
+        name: Instance 3
+      instance_4:
+        name: Instance 4
+  expect:
+    result:
+      mappings:
+        instance_1: machine_1
+        instance_2: new_machine
+        instance_3: new_machine
+        instance_4: new_machine
+      flow_allocations:
+        - pool-minion_pool_1-machine-machine_1-healthcheck
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation
+        - pool-minion_pool_1-machine-new_machine-allocation

+ 389 - 0
coriolis/tests/minion_manager/rpc/data/validate_minion_pool_selections_for_action_config.yaml

@@ -0,0 +1,389 @@
+# valid action
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: ~
+
+# could not find minion pool (1)
+- config:
+    minion_pools:
+      - id: "invalid id"
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: NotFound
+
+# could not find minion pool (2)
+- config:
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: NotFound
+
+# maximum minions is less than number of instances
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 1
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# minion pool is not allocated
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: DEALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolState
+
+## ORIGIN TESTS ##
+
+# origin endpoint id does not match with minion pool endpoint id
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "invalid id"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# origin pool platform is not source
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# origin minion pool is not linux
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: windows
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+## DESTINATION TESTS ##
+
+# destination endpoint id does not match with minion pool endpoint id
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "invalid id"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# destination pool platform is not destination
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# destination minion pool is not linux
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: windows
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings: {}
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+## OSMORPHING TESTS ##
+
+# valid action with os morphing mappings (1)
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings:
+        instance-1: "destination-pool-1"
+      instances: ["instance-1", "instance-2"]
+  expected_exception: ~
+
+# valid action with os morphing mappings (2)
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings:
+        instance-1: "destination-pool-1"
+        instance-2: "destination-pool-1"
+      instances: ["instance-1", "instance-2"]
+  expected_exception: ~
+
+# pool belongs to origin endpoint
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings:
+        instance-1: "origin-pool-1"
+        instance-2: "destination-pool-1"
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# os morphing pool platform is not destination
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: ~
+      destination_minion_pool_id: ~
+      instance_osmorphing_minion_pool_mappings:
+        instance-1: "origin-pool-1"
+        instance-2: "destination-pool-1"
+      instances: ["instance-1", "instance-2"]
+  expected_exception: InvalidMinionPoolSelection
+
+# os morphing mapping with invalid instance ID should get ignored
+- config:
+    minion_pools:
+      - id: "origin-pool-1"
+        endpoint_id: "origin-endpoint-1"
+        platform: source
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 2
+      - id: "destination-pool-1"
+        endpoint_id: "destination-endpoint-1"
+        platform: destination
+        os_type: linux
+        status: ALLOCATED
+        maximum_minions: 3
+    action:
+      id: "action-1"
+      origin_endpoint_id: "origin-endpoint-1"
+      destination_endpoint_id: "destination-endpoint-1"
+      origin_minion_pool_id: "origin-pool-1"
+      destination_minion_pool_id: "destination-pool-1"
+      instance_osmorphing_minion_pool_mappings:
+        invalid-instance-id: "destination-pool-1"
+      instances: ["instance-1", "instance-2"]
+  expected_exception: ~

+ 335 - 0
coriolis/tests/minion_manager/rpc/test_server.py

@@ -0,0 +1,335 @@
+# Copyright 2023 Cloudbase Solutions Srl
+# All Rights Reserved.
+
+from unittest import mock
+
+import datetime
+import ddt
+import uuid
+
+from coriolis import constants
+from coriolis.db import api as db_api
+from coriolis import exception
+from coriolis.minion_manager.rpc import server
+from coriolis.tests import test_base
+from coriolis.tests import testutils
+
+
@ddt.ddt
class MinionManagerServerEndpointTestCase(test_base.CoriolisBaseTestCase):
    """Test suite for the Coriolis Minion Manager RPC server."""

    def setUp(self, *_, **__):
        super(MinionManagerServerEndpointTestCase, self).setUp()
        self.server = server.MinionManagerServerEndpoint()

    @mock.patch.object(
        server.MinionManagerServerEndpoint,
        '_check_keys_for_action_dict')
    @mock.patch.object(db_api, "get_minion_pools")
    @ddt.file_data(
        "data/validate_minion_pool_selections_for_action_config.yaml"
    )
    @ddt.unpack
    def test_validate_minion_pool_selections_for_action(
            self,
            mock_get_minion_pools,
            mock_check_keys_for_action_dict,
            config,
            expected_exception,
    ):
        """Valid pool selections for an action pass validation; invalid
        ones raise the exception type named in the YAML test data.
        """
        action = config.get("action")
        minion_pools = config.get("minion_pools", [])

        mock_get_minion_pools.return_value = [
            mock.MagicMock(
                **pool,
            ) for pool in minion_pools
        ]

        if expected_exception:
            exception_type = getattr(exception, expected_exception)
            self.assertRaises(
                exception_type,
                self.server.validate_minion_pool_selections_for_action,
                mock.sentinel.context,
                action,
            )
            return

        self.server.validate_minion_pool_selections_for_action(
            mock.sentinel.context,
            action,
        )

        mock_check_keys_for_action_dict.assert_called_once_with(
            action,
            mock.ANY,
            operation="minion pool selection validation")

        mock_get_minion_pools.assert_called_once_with(
            mock.sentinel.context,
            include_machines=False,
            include_events=False,
            include_progress_updates=False,
            to_dict=False)

    @mock.patch.object(uuid, "uuid4", return_value="new_machine")
    @mock.patch.object(
        server.MinionManagerServerEndpoint,
        "_add_minion_pool_event")
    @mock.patch.object(db_api, "add_minion_machine")
    @mock.patch.object(db_api, "set_minion_machines_allocation_statuses")
    @ddt.file_data(
        "data/make_minion_machine_allocation_subflow_for_action.yaml"
    )
    @ddt.unpack
    def test_make_minion_machine_allocation_subflow_for_action(
            self,
            mock_set_minion_machines_allocation_statuses,
            mock_add_minion_machine,
            mock_add_minion_pool_event,
            mock_uuid4,
            config,
            expect
    ):
        """Checks the machine-to-instance mappings, DB calls, pool events
        and taskflow tasks produced for each data-driven pool/action combo.
        """
        expected_exception = expect.get("exception")
        expected_result = expect.get("result")

        minion_pool = testutils.DictToObject(config.get("minion_pool"))
        action_instances = config.get("action_instances")

        args = [
            mock.sentinel.context,
            minion_pool,
            mock.sentinel.action_id,
            action_instances,
            mock.sentinel.subflow_name,
        ]

        if expected_exception:
            exception_type = getattr(exception, expected_exception)
            self.assertRaises(
                exception_type,
                self.server._make_minion_machine_allocation_subflow_for_action,
                *args)
            return

        result = self.server\
            ._make_minion_machine_allocation_subflow_for_action(
                *args)

        mappings = expected_result.get("mappings")
        expected_flow_tasks = expected_result.get("flow_allocations", [])

        num_new_machines = list(mappings.values()).count("new_machine")

        # db_api.add_minion_machine is called once for each new machine
        self.assertEqual(
            num_new_machines,
            mock_add_minion_machine.call_count)

        num_non_new_machines = len(mappings) - num_new_machines
        if num_non_new_machines:
            # db_api.set_minion_machines_allocation_statuses is called once
            # with the non-new machines
            mock_set_minion_machines_allocation_statuses\
                .assert_called_once_with(
                    mock.sentinel.context,
                    list(mappings.values())[:num_non_new_machines],
                    mock.sentinel.action_id,
                    constants.MINION_MACHINE_STATUS_RESERVED,
                    refresh_allocation_time=True)

        # _add_minion_pool_event is called once when only new or only
        # pre-existing machines are involved, and twice when both are.
        add_event_count = (
            2 if num_new_machines > 0 and num_non_new_machines > 0 else 1)

        self.assertEqual(
            add_event_count,
            mock_add_minion_pool_event.call_count)
        add_event_args = [
            mock.sentinel.context,
            minion_pool.id,
            constants.TASK_EVENT_INFO,
            mock.ANY,
        ]
        if add_event_count == 1:
            mock_add_minion_pool_event.assert_called_once_with(
                *add_event_args)
        else:
            mock_add_minion_pool_event.assert_has_calls([
                mock.call(*add_event_args),
                mock.call(*add_event_args),
            ])

        flow_allocations = [
            node.name for node, _ in result.get("flow").iter_nodes()]

        self.assertEqual(
            expected_flow_tasks,
            flow_allocations)

        self.assertEqual(
            mappings,
            result.get("action_instance_minion_allocation_mappings"))

    @mock.patch.object(db_api, "delete_minion_machine")
    @mock.patch.object(uuid, "uuid4", return_value="new_machine")
    @mock.patch.object(
        server.MinionManagerServerEndpoint,
        "_add_minion_pool_event")
    @mock.patch.object(db_api, "add_minion_machine")
    @mock.patch.object(db_api, "set_minion_machines_allocation_statuses")
    def test_make_minion_machine_allocation_subflow_for_action_delete(
            self,
            mock_set_minion_machines_allocation_statuses,
            mock_add_minion_machine,
            mock_add_minion_pool_event,
            mock_uuid4,
            mock_delete_minion_machine,
    ):
        """Special case of the test above: when adding the last minion
        machine DB entry fails, the already-added entries are deleted and
        the pre-existing machines are reset to AVAILABLE.
        """
        minion_pool = testutils.DictToObject({
            "id": "minion_pool_1",
            "maximum_minions": 5,
            "minion_machines": [
                {"id": "machine_1", "allocation_status": "AVAILABLE"},
            ]
        })
        action_instances = {
            "instance_1": {"name": "Instance 1"},
            "instance_2": {"name": "Instance 2"},
            "instance_3": {"name": "Instance 3"},
            "instance_4": {"name": "Instance 4"},
        }

        # two machines are added, but the third one fails
        mock_add_minion_machine.side_effect = [
            mock.Mock(), mock.Mock(), exception.CoriolisException]

        self.assertRaises(
            exception.CoriolisException,
            self.server._make_minion_machine_allocation_subflow_for_action,
            mock.sentinel.context,
            minion_pool,
            mock.sentinel.action_id,
            action_instances,
            mock.sentinel.subflow_name
        )

        # two machines are deleted
        delete_call = mock.call(mock.sentinel.context, "new_machine")
        mock_delete_minion_machine.assert_has_calls([delete_call, delete_call])

        mock_set_minion_machines_allocation_statuses.assert_has_calls(
            [
                mock.call(
                    mock.sentinel.context,
                    ["machine_1"],
                    mock.sentinel.action_id,
                    constants.MINION_MACHINE_STATUS_RESERVED,
                    refresh_allocation_time=True),
                mock.call(
                    mock.sentinel.context,
                    ["machine_1"],
                    None,
                    constants.MINION_MACHINE_STATUS_AVAILABLE,
                    refresh_allocation_time=False),
            ])

    @mock.patch.object(db_api, "set_minion_machine_allocation_status")
    @mock.patch.object(
        server.MinionManagerServerEndpoint,
        "_add_minion_pool_event")
    @mock.patch.object(
        server.MinionManagerServerEndpoint,
        "_get_minion_pool")
    @ddt.file_data(
        "data/get_minion_pool_refresh_flow.yaml"
    )
    @ddt.unpack
    def test_get_minion_pool_refresh_flow(
            self,
            mock_get_minion_pool,
            mock_add_minion_pool_event,
            mock_set_minion_machine_allocation_status,
            config,
            expect,
    ):
        """Checks the refresh flow's tasks and the allocation-status DB
        calls that must (and must not) be made for each pool fixture.
        """
        minion_pool_dict = config.get("minion_pool", {})
        if minion_pool_dict:
            # fixture timestamps are stored as [Y, M, D, ...] component lists
            for minion in minion_pool_dict.get("minion_machines", []):
                if minion.get("last_used_at"):
                    minion["last_used_at"] = datetime.datetime(
                        *minion["last_used_at"])
        minion_pool = testutils.DictToObject(minion_pool_dict, {})

        expected_result = expect.get("result", {})
        expect_exception = expect.get("exception")
        expected_flow_tasks = expected_result.get("flow_tasks", [])
        expected_db_calls = expected_result.get("db_calls", {})
        expected_db_calls_include = expected_db_calls.get("include", [])
        expected_db_calls_exclude = expected_db_calls.get("exclude", [])

        mock_get_minion_pool.return_value = minion_pool

        if expect_exception:
            exception_type = getattr(exception, expect_exception)
            self.assertRaises(
                exception_type,
                self.server._get_minion_pool_refresh_flow,
                mock.sentinel.context,
                minion_pool,
                requery=False,
            )
            mock_get_minion_pool.assert_not_called()
            return

        flow = self.server._get_minion_pool_refresh_flow(
            mock.sentinel.context,
            minion_pool,
            requery=True,
        )

        mock_get_minion_pool.assert_called_once_with(
            mock.sentinel.context,
            minion_pool.id,
            include_machines=True,
            include_events=False,
            include_progress_updates=False,
        )

        # Test the flow returned by the function
        flow_tasks = [node.name for node, _ in flow.iter_nodes()]
        self.assertEqual(
            expected_flow_tasks,
            flow_tasks)

        # Test DB calls that should be made
        for call in expected_db_calls_include:
            for method, args in call.items():
                if method == "set_minion_machine_allocation_status":
                    mock_set_minion_machine_allocation_status\
                        .assert_any_call(
                            mock.sentinel.context,
                            args.get("id"),
                            args.get("allocation_status"))

        # Test DB calls that should not be made. assertNotIn (rather than a
        # bare `assert`) keeps the check active under `python -O` and gives
        # unittest-style failure output.
        for call in expected_db_calls_exclude:
            for method, args in call.items():
                if method == "set_minion_machine_allocation_status":
                    self.assertNotIn(
                        mock.call(
                            mock.sentinel.context,
                            args.get("id"),
                            args.get("allocation_status")),
                        mock_set_minion_machine_allocation_status.mock_calls,
                        f"Unexpected call to {method}, args: {args}")