Browse source

Add 'deployments' and update 'replicas' middle APIs.

Signed-off-by: Nashwan Azhari <nazhari@cloudbasesolutions.com>
Nashwan Azhari, 2 years ago
parent
commit
f48664f3e4

+ 0 - 0
coriolis/deployments/__init__.py


+ 35 - 0
coriolis/deployments/api.py

@@ -0,0 +1,35 @@
+# Copyright 2024 Cloudbase Solutions Srl
+# All Rights Reserved.
+
+from coriolis.conductor.rpc import client as rpc_client
+
+
class API(object):
    """Thin facade exposing deployment operations over the conductor RPC."""

    def __init__(self):
        # Every public method simply delegates to the conductor service.
        self._rpc_client = rpc_client.ConductorClient()

    def deploy_replica_instances(self, ctxt, replica_id,
                                 instance_osmorphing_minion_pool_mappings,
                                 clone_disks=False, force=False,
                                 skip_os_morphing=False, user_scripts=None):
        """Start deploying the given replica's instances."""
        mappings = instance_osmorphing_minion_pool_mappings
        return self._rpc_client.deploy_replica_instances(
            ctxt, replica_id,
            instance_osmorphing_minion_pool_mappings=mappings,
            clone_disks=clone_disks, force=force,
            skip_os_morphing=skip_os_morphing,
            user_scripts=user_scripts)

    def delete(self, ctxt, deployment_id):
        """Delete the deployment with the given ID."""
        self._rpc_client.delete_deployment(ctxt, deployment_id)

    def cancel(self, ctxt, deployment_id, force):
        """Cancel the deployment with the given ID."""
        self._rpc_client.cancel_deployment(ctxt, deployment_id, force)

    def get_deployments(self, ctxt, include_tasks=False,
                        include_task_info=False):
        """List all deployments visible in the given context."""
        return self._rpc_client.get_deployments(
            ctxt, include_tasks, include_task_info=include_task_info)

    def get_deployment(self, ctxt, deployment_id, include_task_info=False):
        """Fetch a single deployment by its ID."""
        return self._rpc_client.get_deployment(
            ctxt, deployment_id, include_task_info=include_task_info)

+ 101 - 0
coriolis/deployments/manager.py

@@ -0,0 +1,101 @@
+# Copyright 2017 Cloudbase Solutions Srl
+# All Rights Reserved.
+
+import gc
+import sys
+
+import eventlet
+from oslo_log import log as logging
+from oslo_utils import units
+
+from coriolis import events
+from coriolis.providers import backup_writers
+from coriolis import qemu_reader
+from coriolis import utils
+
+LOG = logging.getLogger(__name__)
+
+
def _copy_volume(volume, disk_image_reader, backup_writer, event_manager):
    """Stream one disk image's data through the backup writer.

    Walks the image block by block; unallocated blocks (and, when the
    volume is flagged as zeroed, zero blocks) are skipped by seeking the
    writer forward instead of writing data. Progress is reported through
    a percentage step on the event manager.
    """
    disk_id = volume["disk_id"]
    # for now we assume it is a local file
    path = volume["disk_image_uri"]
    # "zeroed" signals the target is already zero-filled, so zero blocks
    # may be safely skipped rather than written.
    skip_zeroes = volume.get("zeroed", False)

    with backup_writer.open("", disk_id) as writer:
        with disk_image_reader.open(path) as reader:
            disk_size = reader.disk_size

            perc_step = event_manager.add_percentage_step(
                "Copying data of disk %s" % disk_id, disk_size)

            offset = 0
            max_block_size = 10 * units.Mi  # 10 MB

            while offset < disk_size:
                allocated, zero_block, block_size = reader.get_block_status(
                    offset, max_block_size)
                # NOTE: `and` binds tighter than `or`, so this reads as
                # "unallocated, OR (zero block AND zeroes may be skipped)".
                if not allocated or zero_block and skip_zeroes:
                    if not allocated:
                        LOG.debug(
                            "Unallocated block detected: %s", block_size)
                    else:
                        LOG.debug("Skipping zero block: %s", block_size)
                    # Skip the block: advance past it and move the writer
                    # so subsequent writes land at the right offset.
                    offset += block_size
                    writer.seek(offset)
                else:
                    buf = reader.read(offset, block_size)
                    writer.write(buf)
                    # Advance by what was actually read, which may be less
                    # than block_size.
                    offset += len(buf)
                    # Drop the buffer reference and collect eagerly to keep
                    # peak memory low while copying large disks.
                    buf = None
                    gc.collect()

                event_manager.set_percentage_step(
                    perc_step, offset)
+
+
+def _copy_wrapper(job_args):
+    disk_id = job_args[0].get("disk_id")
+    try:
+        return _copy_volume(*job_args), disk_id, False
+    except BaseException:
+        return sys.exc_info(), disk_id, True
+
+
def copy_disk_data(target_conn_info, volumes_info, event_handler):
    """Sync every volume's data to the target host over SSH.

    Waits for the target's SSH port to become reachable, then copies all
    disks concurrently through a greenpool. The first disk that fails
    aborts the whole operation by re-raising its captured exception.
    """
    # TODO(gsamfira): the disk image should be an URI that can either be
    # local (file://) or remote (https://, ftp://, smb://, nfs:// etc).
    # This must happen if we are to implement multi-worker scenarios: the
    # disk sync task is not guaranteed to run on the node where the import
    # happened, and wherever the image ends up it might be exposed directly
    # via NFS/iSCSI/etc., letting us skip a local download entirely.
    evt_mgr = events.EventManager(event_handler)

    host = target_conn_info["ip"]
    ssh_port = target_conn_info.get("port", 22)
    user = target_conn_info["username"]
    private_key = target_conn_info.get("pkey")
    secret = target_conn_info.get("password")

    evt_mgr.progress_update("Waiting for connectivity on %s:%s" % (
        host, ssh_port))
    utils.wait_for_port_connectivity(host, ssh_port)

    writer = backup_writers.SSHBackupWriter(
        host, ssh_port, user, private_key, secret, volumes_info)
    reader = qemu_reader.QEMUDiskImageReader()

    jobs = [(volume, reader, writer, evt_mgr) for volume in volumes_info]
    green_pool = eventlet.greenpool.GreenPool()
    for result, disk_id, errored in green_pool.imap(_copy_wrapper, jobs):
        # TODO(gsamfira): disk sync state is not persisted anywhere (yet),
        # so there is no point letting the remaining disks finish. Once it
        # is stored in the database, track failures and let the other
        # parallel syncs complete instead of aborting on the first error.
        if errored:
            evt_mgr.progress_update(
                "Volume \"%s\" failed to sync" % disk_id)
            raise result[0](result[1]).with_traceback(result[2])

+ 4 - 2
coriolis/replicas/api.py

@@ -8,13 +8,15 @@ class API(object):
    def __init__(self):
        # Replica operations are forwarded to the conductor service via RPC.
        self._rpc_client = rpc_client.ConductorClient()
-    def create(self, ctxt, origin_endpoint_id, destination_endpoint_id,
+    def create(self, ctxt, replica_scenario,
+               origin_endpoint_id, destination_endpoint_id,
                origin_minion_pool_id, destination_minion_pool_id,
                instance_osmorphing_minion_pool_mappings,
                source_environment, destination_environment, instances,
                network_map, storage_mappings, notes=None, user_scripts=None):
         return self._rpc_client.create_instances_replica(
-            ctxt, origin_endpoint_id, destination_endpoint_id,
+            ctxt, replica_scenario,
+            origin_endpoint_id, destination_endpoint_id,
             origin_minion_pool_id, destination_minion_pool_id,
             instance_osmorphing_minion_pool_mappings,
             source_environment, destination_environment, instances,