Răsfoiți Sursa

Merged in alexpilotti/coriolis (pull request #12)

Adds VMWare CBT backup support
Alessandro Pilotti 9 ani în urmă
părinte
comite
5ce0a67974

+ 68 - 43
coriolis/providers/openstack/__init__.py

@@ -284,6 +284,9 @@ class ImportProvider(base.BaseImportProvider):
             raise exception.CoriolisException(
                 "Glance image \"%s\" not found" % migr_image_name)
 
+        LOG.debug("Migration image name: %s", migr_image_name)
+        LOG.debug("Migration flavor name: %s", migr_flavor_name)
+
         image = nova.images.find(name=migr_image_name)
         flavor = nova.flavors.find(name=migr_flavor_name)
 
@@ -383,67 +386,86 @@ class ImportProvider(base.BaseImportProvider):
             instance.id, volume.id, volume_dev)
         _wait_for_volume(nova, volume, 'in-use')
 
-    def import_instance(self, ctxt, connection_info, target_environment,
-                        instance_name, export_info):
-        session = keystone.create_keystone_session(ctxt, connection_info)
-
-        glance_api_version = connection_info.get("image_api_version",
-                                                 GLANCE_API_VERSION)
-
-        nova = nova_client.Client(NOVA_API_VERSION, session=session)
-        glance = glance_client.Client(glance_api_version, session=session)
-        neutron = neutron_client.Client(NEUTRON_API_VERSION, session=session)
-        cinder = cinder_client.Client(CINDER_API_VERSION, session=session)
-
-        os_type = export_info.get('os_type')
-        LOG.info("os_type: %s", os_type)
+    def _get_import_config(self, target_environment, os_type):
+        config = collections.namedtuple(
+            "glance_upload",
+            "target_disk_format",
+            "container_format",
+            "hypervisor_type",
+            "fip_pool_name",
+            "network_map",
+            "keypair_name",
+            "migr_image_name",
+            "migr_flavor_name",
+            "migr_fip_pool_name",
+            "migr_network_name",
+            "flavor_name")
 
-        glance_upload = target_environment.get(
+        config.glance_upload = target_environment.get(
             "glance_upload", CONF.openstack_migration_provider.glance_upload)
-        target_disk_format = target_environment.get(
+        config.target_disk_format = target_environment.get(
             "disk_format", CONF.openstack_migration_provider.disk_format)
-        container_format = target_environment.get(
+        config.container_format = target_environment.get(
             "container_format",
             CONF.openstack_migration_provider.container_format)
-        hypervisor_type = target_environment.get(
+        config.hypervisor_type = target_environment.get(
             "hypervisor_type",
             CONF.openstack_migration_provider.hypervisor_type)
-        fip_pool_name = target_environment.get(
+        config.fip_pool_name = target_environment.get(
             "fip_pool_name", CONF.openstack_migration_provider.fip_pool_name)
-        network_map = target_environment.get("network_map", {})
-        keypair_name = target_environment.get("keypair_name")
+        config.network_map = target_environment.get("network_map", {})
+        config.keypair_name = target_environment.get("keypair_name")
 
-        migr_image_name = target_environment.get(
+        config.migr_image_name = target_environment.get(
             "migr_image_name",
             target_environment.get("migr_image_name_map", {}).get(
                 os_type,
                 CONF.openstack_migration_provider.migr_image_name_map.get(
                     os_type)))
-        migr_flavor_name = target_environment.get(
+        config.migr_flavor_name = target_environment.get(
             "migr_flavor_name",
             CONF.openstack_migration_provider.migr_flavor_name)
 
-        migr_fip_pool_name = target_environment.get(
+        config.migr_fip_pool_name = target_environment.get(
             "migr_fip_pool_name",
-            fip_pool_name or CONF.openstack_migration_provider.fip_pool_name)
-        migr_network_name = target_environment.get(
+            config.fip_pool_name or
+            CONF.openstack_migration_provider.fip_pool_name)
+        config.migr_network_name = target_environment.get(
             "migr_network_name",
             CONF.openstack_migration_provider.migr_network_name)
 
-        flavor_name = target_environment.get("flavor_name", migr_flavor_name)
+        config.flavor_name = target_environment.get(
+            "flavor_name", config.migr_flavor_name)
 
-        if not migr_image_name:
+        if not config.migr_image_name:
             raise exception.CoriolisException(
                 "No matching migration image type found")
 
-        LOG.info("Migration image name: %s", migr_image_name)
-
-        if not migr_network_name:
-            if len(network_map) != 1:
+        if not config.migr_network_name:
+            if len(config.network_map) != 1:
                 raise exception.CoriolisException(
                     'If "migr_network_name" is not provided, "network_map" '
                     'must contain exactly one entry')
-            migr_network_name = network_map.values()[0]
+            config.migr_network_name = config.network_map.values()[0]
+
+        return config
+
+    def import_instance(self, ctxt, connection_info, target_environment,
+                        instance_name, export_info):
+        session = keystone.create_keystone_session(ctxt, connection_info)
+
+        glance_api_version = connection_info.get("image_api_version",
+                                                 GLANCE_API_VERSION)
+
+        nova = nova_client.Client(NOVA_API_VERSION, session=session)
+        glance = glance_client.Client(glance_api_version, session=session)
+        neutron = neutron_client.Client(NEUTRON_API_VERSION, session=session)
+        cinder = cinder_client.Client(CINDER_API_VERSION, session=session)
+
+        os_type = export_info.get('os_type')
+        LOG.info("os_type: %s", os_type)
+
+        config = self._get_import_config(target_environment, os_type)
 
         disks_info = export_info["devices"]["disks"]
 
@@ -452,19 +474,19 @@ class ImportProvider(base.BaseImportProvider):
         ports = []
 
         try:
-            if glance_upload:
+            if config.glance_upload:
                 for disk_info in disks_info:
                     disk_path = disk_info["path"]
                     disk_file_info = utils.get_disk_info(disk_path)
 
-                    # if target_disk_format == disk_file_info["format"]:
+                    # if config.target_disk_format == disk_file_info["format"]:
                     #    target_disk_path = disk_path
                     # else:
                     #    target_disk_path = (
                     #        "%s.%s" % (os.path.splitext(disk_path)[0],
-                    #                   target_disk_format))
+                    #                   config.target_disk_format))
                     #    utils.convert_disk_format(disk_path, target_disk_path,
-                    #                              target_disk_format)
+                    #                              config.target_disk_format)
 
                     self._event_manager.progress_update(
                         "Uploading Glance image")
@@ -473,7 +495,8 @@ class ImportProvider(base.BaseImportProvider):
                     image = self._create_image(
                         glance, _get_unique_name(),
                         disk_path, disk_format,
-                        container_format, hypervisor_type)
+                        config.container_format,
+                        config.hypervisor_type)
                     images.append(image)
 
                     self._event_manager.progress_update(
@@ -495,8 +518,9 @@ class ImportProvider(base.BaseImportProvider):
                     volumes.append(volume)
 
             migr_resources = self._deploy_migration_resources(
-                nova, glance, neutron, os_type, migr_image_name,
-                migr_flavor_name, migr_network_name, migr_fip_pool_name)
+                nova, glance, neutron, os_type, config.migr_image_name,
+                config.migr_flavor_name, config.migr_network_name,
+                config.migr_fip_pool_name)
 
             nics_info = export_info["devices"].get("nics", [])
 
@@ -513,7 +537,7 @@ class ImportProvider(base.BaseImportProvider):
                     conn_info = migr_resources.get_guest_connection_info()
 
                 osmorphing_hv_type = self._get_osmorphing_hypervisor_type(
-                    hypervisor_type)
+                    config.hypervisor_type)
 
                 self._event_manager.progress_update(
                     "Preparing instance for target platform")
@@ -544,7 +568,7 @@ class ImportProvider(base.BaseImportProvider):
                                "%s, skipping", nic_info.get("name"))
                     continue
 
-                network_name = network_map.get(origin_network_name)
+                network_name = config.network_map.get(origin_network_name)
                 if not network_name:
                     raise exception.CoriolisException(
                         "Network not mapped in network_map: %s" %
@@ -557,7 +581,8 @@ class ImportProvider(base.BaseImportProvider):
                 "Creating migrated instance")
 
             self._create_target_instance(
-                nova, flavor_name, instance_name, keypair_name, ports, volumes)
+                nova, config.flavor_name, instance_name,
+                config.keypair_name, ports, volumes)
         except Exception:
             self._event_manager.progress_update("Deleting volumes")
             for volume in volumes:

+ 186 - 31
coriolis/providers/vmware_vsphere/__init__.py

@@ -1,11 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
+import contextlib
 import os
 import re
 import sys
 import time
 from urllib import request
+import uuid
 
 import eventlet
 from oslo_config import cfg
@@ -18,6 +20,7 @@ from coriolis import exception
 from coriolis.providers import base
 from coriolis.providers.vmware_vsphere import guestid
 from coriolis import schemas
+from coriolis.providers.vmware_vsphere import vixdisklib
 from coriolis import utils
 
 vmware_vsphere_opts = [
@@ -100,6 +103,24 @@ class ExportProvider(base.BaseExportProvider):
 
         return vm
 
    @utils.retry_on_error()
    def _shutdown_vm(self, vm):
        """Ensure the VM is powered off before exporting/backing up disks.

        Prefers a clean guest OS shutdown via VMware Tools when they are
        running; falls back to a hard power-off if Tools are unavailable or
        the guest shutdown does not reach the poweredOff state.
        """
        if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
            power_off = True
            if (vm.guest.toolsRunningStatus !=
                    vim.vm.GuestInfo.ToolsRunningStatus.guestToolsNotRunning):
                self._event_manager.progress_update("Shutting down guest OS")
                vm.ShutdownGuest()
                # If the VM reaches poweredOff on its own, skip the hard
                # power-off below
                if self._wait_for_vm_status(
                        vm, vim.VirtualMachinePowerState.poweredOff):
                    power_off = False

            if power_off:
                self._event_manager.progress_update(
                    "Powering off the virtual machine")
                task = vm.PowerOff()
                self._wait_for_task(task)
+
     @utils.retry_on_error()
     def _get_vm_info(self, si, instance_path):
 
@@ -116,8 +137,8 @@ class ExportProvider(base.BaseExportProvider):
         vm_info = {
             'num_cpu': vm.config.hardware.numCPU,
             'num_cores_per_socket': vm.config.hardware.numCoresPerSocket,
-            'memory_mb':  vm.config.hardware.memoryMB,
-            'firmware_type':  firmware_type_map[vm.config.firmware],
+            'memory_mb': vm.config.hardware.memoryMB,
+            'firmware_type': firmware_type_map[vm.config.firmware],
             'nested_virtualization': vm.config.nestedHVEnabled,
             'dynamic_memory_enabled':
                 not vm.config.memoryReservationLockedToMax,
@@ -129,22 +150,6 @@ class ExportProvider(base.BaseExportProvider):
 
         LOG.info("vm info: %s" % str(vm_info))
 
-        if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
-            power_off = True
-            if (vm.guest.toolsRunningStatus !=
-                    vim.vm.GuestInfo.ToolsRunningStatus.guestToolsNotRunning):
-                self._event_manager.progress_update("Shutting down guest OS")
-                vm.ShutdownGuest()
-                if self._wait_for_vm_status(
-                        vm, vim.VirtualMachinePowerState.poweredOff):
-                    power_off = False
-
-            if power_off:
-                self._event_manager.progress_update(
-                    "Powering off the virtual machine")
-                task = vm.PowerOff()
-                self._wait_for_task(task)
-
         disk_ctrls = []
         devices = [d for d in vm.config.hardware.device if
                    isinstance(d, vim.vm.device.VirtualController)]
@@ -233,6 +238,8 @@ class ExportProvider(base.BaseExportProvider):
     @utils.retry_on_error()
     def _export_disks(self, vm, export_path, context):
         disk_paths = []
+
+        self._shutdown_vm(vm)
         lease = vm.ExportVm()
         while True:
             if lease.state == vim.HttpNfcLease.State.ready:
@@ -262,7 +269,10 @@ class ExportProvider(base.BaseExportProvider):
 
                         response = request.urlopen(du.url, context=context)
                         path = os.path.join(export_path, du.targetId)
-                        disk_paths.append({'path': path, 'id': disk_key})
+                        disk_paths.append({
+                            'path': path,
+                            'id': disk_key,
+                            'format': constants.DISK_FORMAT_VMDK})
 
                         LOG.info("Downloading: %s" % path)
                         with open(path, 'wb') as f:
@@ -287,6 +297,137 @@ class ExportProvider(base.BaseExportProvider):
             else:
                 time.sleep(.1)
 
+    def _connect_vixdisklib(self, connection_info, context,
+                            vmx_spec, snapshot_ref):
+        host = connection_info["host"]
+        port = connection_info.get("port", 443)
+        username = connection_info["username"]
+        password = connection_info["password"]
+
+        thumbprint = utils.get_ssl_cert_thumbprint(context, host, port)
+
+        return vixdisklib.connect(
+            server_name=host,
+            port=port,
+            thumbprint=thumbprint,
+            username=username,
+            password=password,
+            vmx_spec=vmx_spec,
+            snapshot_ref=snapshot_ref)
+
+    def _take_vm_snapshot(self, vm, snapshot_name, memory=False, quiesce=True):
+        task = vm.CreateSnapshot_Task(
+            name=snapshot_name, memory=False, quiesce=True)
+        self._wait_for_task(task)
+        return task.info.result
+
+    def _remove_vm_snapshot(self, snapshot, remove_children=False):
+        self._wait_for_task(snapshot.RemoveSnapshot_Task(remove_children))
+
    @contextlib.contextmanager
    def _take_temp_vm_snapshot(self, vm, snapshot_name, memory=False,
                               quiesce=True):
        """Context manager yielding a snapshot that is removed on exit,
        even if the body raises.
        """
        self._event_manager.progress_update("Creating backup snapshot")
        snapshot = self._take_vm_snapshot(vm, snapshot_name, memory, quiesce)
        try:
            yield snapshot
        finally:
            self._event_manager.progress_update("Removing backup snapshot")
            self._remove_vm_snapshot(snapshot)
+
    @utils.retry_on_error()
    def _backup_snapshot_disks(self, snapshot, export_path, connection_info,
                               context, disk_paths):
        """Download the snapshot's disks incrementally using CBT.

        For each virtual disk, query the areas changed since the disk's last
        recorded change id ('*' on the first pass, i.e. the whole allocated
        disk) and write them at their offsets into a sparse raw file under
        ``export_path``. ``disk_paths`` is updated in place with the new
        change ids so a subsequent call transfers only the delta.
        """
        vm = snapshot.vm
        # VixDiskLib addresses the VM by its managed object reference
        vmx_spec = "moref=%s" % vm._GetMoId()
        snapshot_ref = snapshot._GetMoId()
        pos = 0
        sector_size = vixdisklib.VIXDISKLIB_SECTOR_SIZE
        # 40 * 2048 sectors of 512 bytes = 40 MiB per read call
        max_sectors_per_read = 40 * 2048

        with self._connect_vixdisklib(connection_info, context,
                                      vmx_spec, snapshot_ref) as conn:
            for disk in [d for d in snapshot.config.hardware.device
                         if isinstance(d, vim.vm.device.VirtualDisk)]:

                # Look for an entry recorded by a previous pass for this disk
                l = [d for d in disk_paths if d['id'] == disk.key]
                if l:
                    disk_path = l[0]
                    change_id = disk_path["change_id"]
                    path = disk_path["path"]
                    # Record the new change id for the next incremental pass
                    disk_path['change_id'] = disk.backing.changeId
                else:
                    # First pass: '*' requests all allocated disk areas
                    change_id = '*'
                    path = os.path.join(export_path, "disk-%s.raw" % disk.key)
                    disk_paths.append({
                        'path': path,
                        'id': disk.key,
                        'format': constants.DISK_FORMAT_RAW,
                        'change_id': disk.backing.changeId})

                LOG.debug("CBT change id: %s", change_id)
                changed_disk_areas = vm.QueryChangedDiskAreas(
                    snapshot_ref, disk.key, pos, change_id)

                backup_disk_path = disk.backing.fileName
                with vixdisklib.open(
                        conn, backup_disk_path) as disk_handle:

                    # Create file if it doesn't exist
                    open(path, "ab").close()

                    with open(path, "rb+") as f:
                        # Create a sparse file
                        f.truncate(disk.capacityInBytes)

                        for area in changed_disk_areas.changedArea:
                            start_sector = area.start // sector_size
                            num_sectors = area.length // sector_size

                            f.seek(area.start)

                            # Read the changed area in bounded chunks to cap
                            # buffer size
                            i = 0
                            while i < num_sectors:
                                curr_num_sectors = min(
                                    num_sectors - i, max_sectors_per_read)

                                buf = vixdisklib.get_buffer(
                                    curr_num_sectors * sector_size)

                                vixdisklib.read(
                                    disk_handle, start_sector + i,
                                    curr_num_sectors, buf)
                                i += curr_num_sectors

                                f.write(buf.raw)
+
+    def _backup_disks(self, vm, export_path, connection_info, context):
+        if not vm.config.changeTrackingEnabled:
+            raise exception.CoriolisException("Change Tracking not enabled")
+
+        disk_paths = []
+        vixdisklib.init()
+        try:
+            LOG.info("First backup pass")
+            snapshot_name = str(uuid.uuid4())
+            with self._take_temp_vm_snapshot(vm, snapshot_name) as snapshot:
+                self._backup_snapshot_disks(
+                    snapshot, export_path, connection_info, context,
+                    disk_paths)
+
+            self._shutdown_vm(vm)
+
+            LOG.info("Second backup pass")
+            snapshot_name = str(uuid.uuid4())
+            with self._take_temp_vm_snapshot(vm, snapshot_name) as snapshot:
+                self._backup_snapshot_disks(
+                    snapshot, export_path, connection_info, context,
+                    disk_paths)
+
+            return disk_paths
+        finally:
+            vixdisklib.exit()
+
     def export_instance(self, ctxt, connection_info, instance_name,
                         export_path):
         host = connection_info["host"]
@@ -295,6 +436,8 @@ class ExportProvider(base.BaseExportProvider):
         password = connection_info["password"]
         allow_untrusted = connection_info.get("allow_untrusted", False)
 
+        backup_disks = False
+
         # pyVmomi locks otherwise
         sys.modules['socket'] = eventlet.patcher.original('socket')
         ssl = eventlet.patcher.original('ssl')
@@ -312,22 +455,34 @@ class ExportProvider(base.BaseExportProvider):
                 "Retrieving virtual machine data")
             vm_info, vm = self._get_vm_info(si, instance_name)
             self._event_manager.progress_update("Exporting disks")
-            disk_paths = self._export_disks(vm, export_path, context)
+
+            # Take advantage of CBT if available
+            backup_disks = vm.config.changeTrackingEnabled
+
+            if backup_disks:
+                disk_paths = self._backup_disks(
+                    vm, export_path, connection_info, context)
+            else:
+                disk_paths = self._export_disks(vm, export_path, context)
         finally:
             connect.Disconnect(si)
 
-        self._event_manager.progress_update("Converting virtual disks format")
+        if not backup_disks:
+            self._event_manager.progress_update(
+                "Converting virtual disks format")
+            for disk_path in disk_paths:
+                path = disk_path["path"]
+                LOG.info("Converting VMDK type: %s" % path)
+                tmp_path = "%s.tmp" % path
+                self._convert_disk_type(path, tmp_path)
+                os.remove(path)
+                os.rename(tmp_path, path)
+
+        disks = vm_info["devices"]["disks"]
+
         for disk_path in disk_paths:
-            path = disk_path["path"]
-            LOG.info("Converting VMDK type: %s" % path)
-            tmp_path = "%s.tmp" % path
-            self._convert_disk_type(path, tmp_path)
-            os.remove(path)
-            os.rename(tmp_path, path)
-
-            disks = vm_info["devices"]["disks"]
             disk_info = [d for d in disks if d["id"] == disk_path["id"]][0]
-            disk_info["path"] = os.path.abspath(path)
-            disk_info["format"] = constants.DISK_FORMAT_VMDK
+            disk_info["format"] = disk_path["format"]
+            disk_info["path"] = os.path.abspath(disk_path["path"])
 
         return vm_info

+ 234 - 0
coriolis/providers/vmware_vsphere/vixdisklib.py

@@ -0,0 +1,234 @@
+# Copyright 2016 Cloudbase Solutions Srl
+# All Rights Reserved.
+
+import contextlib
+import ctypes
+import os
+
+if os.name == 'nt':
+    vixDiskLibName = 'vixDiskLib.dll'
+else:
+    vixDiskLibName = 'libvixDiskLib.so'
+
+vixDiskLib = ctypes.cdll.LoadLibrary(vixDiskLibName)
+
+
class VixDiskLibUidPasswdCreds(ctypes.Structure):
    # Username/password credentials, used with VIXDISKLIB_CRED_UID
    _fields_ = [
        ("userName", ctypes.c_char_p),
        ("password", ctypes.c_char_p),
    ]


class VixDiskLibSessionIdCreds(ctypes.Structure):
    # Session-cookie based credentials (not used by this module's connect())
    _fields_ = [
        ("cookie", ctypes.c_char_p),
        ("userName", ctypes.c_char_p),
        ("key", ctypes.c_char_p),
    ]


class VixDiskLibCreds(ctypes.Union):
    # Union of the supported credential types; credType selects the member
    _fields_ = [
        ("uid", VixDiskLibUidPasswdCreds),
        ("sessionId", VixDiskLibSessionIdCreds),
    ]


class VixDiskLibConnectParams(ctypes.Structure):
    # Mirrors the native VixDiskLibConnectParams passed to
    # VixDiskLib_ConnectEx
    _fields_ = [
        ("vmxSpec", ctypes.c_char_p),
        ("serverName", ctypes.c_char_p),
        ("thumbPrint", ctypes.c_char_p),
        # Note: this is 32bit on Windows
        ("privateUse", ctypes.c_longlong),
        ("credType", ctypes.c_uint32),
        ("creds", VixDiskLibCreds),
        ("port", ctypes.c_uint32),
        ("nfcHostPort", ctypes.c_uint32),
    ]


class VixDiskLibConnection(ctypes.Structure):
    # Opaque connection handle; fields are not accessed from Python
    _fields_ = []
+
+
# ctypes prototypes for the VixDiskLib entry points used by this module.
vixDiskLib.VixDiskLib_InitEx.argtypes = [
    ctypes.c_uint32, ctypes.c_uint32, ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p]
vixDiskLib.VixDiskLib_InitEx.restype = ctypes.c_uint64


vixDiskLib.VixDiskLib_GetErrorText.argtypes = [
    ctypes.c_uint64, ctypes.c_char_p]
vixDiskLib.VixDiskLib_GetErrorText.restype = ctypes.c_void_p

# Fix: this was previously assigned to "arg_types" (typo), setting an inert
# attribute. Declared as c_void_p (not c_char_p) because the pointer passed
# back here comes from GetErrorText's c_void_p return value.
vixDiskLib.VixDiskLib_FreeErrorText.argtypes = [ctypes.c_void_p]
vixDiskLib.VixDiskLib_FreeErrorText.restype = None

vixDiskLib.VixDiskLib_ListTransportModes.argtypes = []
vixDiskLib.VixDiskLib_ListTransportModes.restype = ctypes.c_char_p

vixDiskLib.VixDiskLib_ConnectEx.argtypes = [
    ctypes.POINTER(VixDiskLibConnectParams), ctypes.c_char, ctypes.c_char_p,
    ctypes.c_char_p, ctypes.POINTER(ctypes.c_void_p)]
vixDiskLib.VixDiskLib_ConnectEx.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_Open.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint32,
    ctypes.POINTER(ctypes.c_void_p)]
vixDiskLib.VixDiskLib_Open.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_Read.argtypes = [
    ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_char_p]
vixDiskLib.VixDiskLib_Read.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_GetMetadataKeys.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint64,
    ctypes.POINTER(ctypes.c_uint64)]
vixDiskLib.VixDiskLib_GetMetadataKeys.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_ReadMetadata.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint64,
    ctypes.POINTER(ctypes.c_uint64)]
vixDiskLib.VixDiskLib_ReadMetadata.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_Close.argtypes = [ctypes.c_void_p]
vixDiskLib.VixDiskLib_Close.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_Disconnect.argtypes = [ctypes.c_void_p]
vixDiskLib.VixDiskLib_Disconnect.restype = ctypes.c_uint64

vixDiskLib.VixDiskLib_Exit.argtypes = []
vixDiskLib.VixDiskLib_Exit.restype = None


# Library version this binding targets
VIXDISKLIB_VERSION_MAJOR = 6
VIXDISKLIB_VERSION_MINOR = 0

VIXDISKLIB_SECTOR_SIZE = 512

# credType value selecting username/password credentials
VIXDISKLIB_CRED_UID = 1

# Flags for VixDiskLib_Open
VIXDISKLIB_FLAG_OPEN_UNBUFFERED = 1
VIXDISKLIB_FLAG_OPEN_SINGLE_LINK = 2
VIXDISKLIB_FLAG_OPEN_READ_ONLY = 4

# VixError codes
VIX_OK = 0
VIX_E_BUFFER_TOOSMALL = 24
+
+
def _check_err(err, allowed_values=(VIX_OK,)):
    """Raise if a VixDiskLib error code is not in *allowed_values*.

    The human-readable message is obtained from VixDiskLib_GetErrorText and
    copied into a Python string before freeing the library-owned buffer.

    Fix: the default was a mutable list; use an immutable tuple (membership
    semantics are unchanged for callers passing their own sequence).
    """
    if err not in allowed_values:
        err_msg = vixDiskLib.VixDiskLib_GetErrorText(err, None)
        # Copy before the buffer is freed below
        err_msg_copy = str(ctypes.cast(
            err_msg, ctypes.c_char_p).value.decode())
        vixDiskLib.VixDiskLib_FreeErrorText(err_msg)
        raise Exception(err_msg_copy)
+
+
def init(config_path=None, major_ver=VIXDISKLIB_VERSION_MAJOR,
         minor_ver=VIXDISKLIB_VERSION_MINOR):
    """Initialize VixDiskLib; must be called before any other wrapper."""
    encoded_path = config_path.encode() if config_path else config_path
    err = vixDiskLib.VixDiskLib_InitEx(
        major_ver, minor_ver, None, None, None, None, encoded_path)
    _check_err(err)
+
+
def get_transport_modes():
    """Return the transport mode names supported by the library."""
    raw_modes = vixDiskLib.VixDiskLib_ListTransportModes()
    return raw_modes.decode().split(':')
+
+
@contextlib.contextmanager
def connect(server_name, thumbprint, username, password, vmx_spec=None,
            snapshot_ref=None, read_only=True, transport_modes=None, port=443):
    """Context manager yielding a VixDiskLib connection handle.

    Connects via VixDiskLib_ConnectEx using username/password credentials
    and disconnects on exit. ``vmx_spec`` / ``snapshot_ref`` scope the
    connection to a specific VM / snapshot when provided.
    """
    connectParams = VixDiskLibConnectParams()

    connectParams.serverName = server_name.encode()
    if vmx_spec:
        connectParams.vmxSpec = vmx_spec.encode()
    if thumbprint:
        connectParams.thumbPrint = thumbprint.encode()

    connectParams.credType = VIXDISKLIB_CRED_UID
    connectParams.creds.uid.userName = username.encode()
    connectParams.creds.uid.password = password.encode()
    connectParams.port = port

    # Optional colon-separated transport mode list, e.g. "nbdssl:nbd"
    if transport_modes:
        transport_modes = transport_modes.encode()

    if snapshot_ref:
        snapshot_ref = snapshot_ref.encode()

    conn = ctypes.c_void_p()
    _check_err(vixDiskLib.VixDiskLib_ConnectEx(
        connectParams, read_only, snapshot_ref, transport_modes,
        ctypes.byref(conn)))
    try:
        yield conn
    finally:
        disconnect(conn)
+
+
@contextlib.contextmanager
def open(conn, disk_path, flags=VIXDISKLIB_FLAG_OPEN_READ_ONLY):
    """Context manager yielding an open disk handle, closed on exit.

    NOTE: intentionally shadows the builtin ``open`` in this module; callers
    use it namespaced as ``vixdisklib.open``.
    """
    disk_handle = ctypes.c_void_p()
    _check_err(vixDiskLib.VixDiskLib_Open(
        conn, disk_path.encode(), flags, ctypes.byref(disk_handle)))
    try:
        yield disk_handle
    finally:
        close(disk_handle)
+
+
def get_metadata_keys(disk_handle):
    """Return the metadata key names available on an open disk."""
    buf_len = ctypes.c_uint64()

    # First call with a NULL buffer only obtains the required size;
    # VIX_E_BUFFER_TOOSMALL is an expected outcome of that probe
    _check_err(vixDiskLib.VixDiskLib_GetMetadataKeys(
        disk_handle, None, 0, ctypes.byref(buf_len)),
        [VIX_OK, VIX_E_BUFFER_TOOSMALL])

    buf = ctypes.create_string_buffer(buf_len.value)
    _check_err(vixDiskLib.VixDiskLib_GetMetadataKeys(
        disk_handle, buf, buf_len, None))

    # Keys come back as a NUL-separated list; drop empty trailing entries
    return [k.decode() for k in buf.raw.split(b'\0') if len(k)]
+
+
def read_metadata(disk_handle, key):
    """Return the metadata value stored under *key* on an open disk."""
    key = key.encode()
    buf_len = ctypes.c_uint64()

    # Size probe: a NULL buffer yields the required length in buf_len,
    # typically reported via VIX_E_BUFFER_TOOSMALL
    _check_err(vixDiskLib.VixDiskLib_ReadMetadata(
        disk_handle, key, None, 0, ctypes.byref(buf_len)),
        [VIX_OK, VIX_E_BUFFER_TOOSMALL])

    buf = ctypes.create_string_buffer(buf_len.value)
    _check_err(vixDiskLib.VixDiskLib_ReadMetadata(
        disk_handle, key, buf, buf_len, None))

    return buf.value.decode()
+
+
def get_buffer(size):
    """Allocate a zero-initialized mutable byte buffer of *size* bytes."""
    return ctypes.create_string_buffer(size)
+
+
def read(disk_handle, start_sector, num_sectors, buf):
    """Read *num_sectors* sectors starting at *start_sector* into *buf*."""
    _check_err(vixDiskLib.VixDiskLib_Read(
        disk_handle, start_sector, num_sectors, buf))


def close(disk_handle):
    """Close an open disk handle."""
    _check_err(vixDiskLib.VixDiskLib_Close(disk_handle))


def disconnect(conn):
    """Tear down a VixDiskLib connection."""
    _check_err(vixDiskLib.VixDiskLib_Disconnect(conn))


def exit():
    # Library shutdown; counterpart of init(). Shadows the builtin exit,
    # used namespaced as vixdisklib.exit()
    vixDiskLib.VixDiskLib_Exit()

+ 13 - 0
coriolis/utils.py

@@ -9,6 +9,7 @@ import subprocess
 import time
 import traceback
 
+import OpenSSL
 from oslo_config import cfg
 from oslo_log import log as logging
 
@@ -223,3 +224,15 @@ def walk_class_hierarchy(clazz, encountered=None):
             for subsubclass in walk_class_hierarchy(subclass, encountered):
                 yield subsubclass
             yield subclass
+
+
def get_ssl_cert_thumbprint(context, host, port=443, digest_algorithm="sha1"):
    """Return the thumbprint of the SSL certificate presented by host:port.

    :param context: ssl.SSLContext used to wrap the connection.
    :param digest_algorithm: digest name, e.g. "sha1" or "sha256".
        Fix: this parameter was previously ignored ('sha1' was hardcoded).
    :returns: the colon-separated hex digest string.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ssl_sock = context.wrap_socket(sock, server_hostname=host)
    try:
        ssl_sock.connect((host, port))
        # binary_form is the only option when the certificate is not validated
        cert = ssl_sock.getpeercert(binary_form=True)
    finally:
        # Closing the SSL socket also releases the underlying socket;
        # previously the socket leaked if connect() raised
        ssl_sock.close()

    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert)
    return x509.digest(digest_algorithm).decode()

+ 1 - 0
requirements.txt

@@ -18,6 +18,7 @@ paramiko
 paste
 pbr
 psutil
+pyOpenSSL
 python-cinderclient
 python-glanceclient>=2.0.0
 python-keystoneclient