Просмотр исходного кода

Merged in aznashwan/coriolis/schemas (pull request #5)

Added JSON schemas for various parameters and implemented schema validation
Alessandro Pilotti 9 лет назад
Родитель
Commit
b3f433369c

+ 4 - 0
coriolis/api/v1/migrations.py

@@ -51,6 +51,10 @@ class MigrationController(api_wsgi.Controller):
             # TODO: use a decent exception
             raise exception.CoriolisException("Invalid connection info")
 
+        if not import_provider.validate_target_environment(
+                destination.get("target_environment", {})):
+            raise exception.CoriolisException("Invalid target environment")
+
         return origin, destination, migration["instances"]
 
     def create(self, req, body):

+ 4 - 0
coriolis/constants.py

@@ -16,10 +16,14 @@ PROVIDER_TYPE_EXPORT = 2
 
 DISK_FORMAT_VMDK = 'vmdk'
 DISK_FORMAT_RAW = 'raw'
+DISK_FORMAT_QCOW = "qcow"
 DISK_FORMAT_QCOW2 = 'qcow2'
 DISK_FORMAT_VHD = 'vhd'
 DISK_FORMAT_VHDX = 'vhdx'
 
+FIRMWARE_TYPE_BIOS = 'BIOS'
+FIRMWARE_TYPE_EFI = 'EFI'
+
 HYPERVISOR_VMWARE = "vmware"
 HYPERVISOR_HYPERV = "hyperv"
 HYPERVISOR_QEMU = "qemu"

+ 40 - 1
coriolis/providers/base.py

@@ -1,6 +1,7 @@
 import abc
 
 from coriolis import events
+from coriolis import schemas
 
 
 class BaseProvider(object):
@@ -9,17 +10,52 @@ class BaseProvider(object):
     def __init__(self, event_handler):
         self._event_manager = events.EventManager(event_handler)
 
+    @property
+    def connection_info_schema(self):
+        raise NotImplementedError("Missing connection info schema.")
+
     @abc.abstractmethod
     def validate_connection_info(self, connection_info):
-        pass
+        """ Checks the provided connection info and raises an exception
+        if it is invalid.
+        """
+        try:
+            schemas.validate_value(
+                connection_info, self.connection_info_schema)
+        except:
+            return False
+
+        return True
 
 
 class BaseImportProvider(BaseProvider):
     __metaclass__ = abc.ABCMeta
 
+    @property
+    def target_environment_schema(self):
+        raise NotImplementedError("Missing target environment schema.")
+
+    @abc.abstractmethod
+    def validate_target_environment(self, target_environment):
+        """ Checks the provided target environment info and raises an exception
+        if it is invalid.
+        """
+        try:
+            schemas.validate_value(
+                target_environment, self.target_environment_schema)
+        except:
+            return False
+
+        return True
+
+
     @abc.abstractmethod
     def import_instance(self, ctxt, connection_info, target_environment,
                         instance_name, export_info):
+        """ Imports the instance given by its name to the specified target
+        environment within the destination cloud based on the provided
+        connection and export info.
+        """
         pass
 
 
@@ -29,4 +65,7 @@ class BaseExportProvider(BaseProvider):
     @abc.abstractmethod
     def export_instance(self, ctxt, connection_info, instance_name,
                         export_path):
+        """ Exports the instance given by its name from the given source cloud
+        to the provided export directory path using the given connection info.
+        """
         pass

+ 37 - 11
coriolis/providers/openstack/__init__.py

@@ -1,3 +1,4 @@
+import collections
 import math
 import os
 import tempfile
@@ -18,6 +19,7 @@ from coriolis import exception
 from coriolis import keystone
 from coriolis.osmorphing import manager as osmorphing_manager
 from coriolis.providers import base
+from coriolis import schemas
 from coriolis import utils
 
 opts = [
@@ -85,6 +87,10 @@ MIGR_GUEST_USERNAME_WINDOWS = "admin"
 LOG = logging.getLogger(__name__)
 
 
+GlanceImage = collections.namedtuple(
+    "GlanceImage", "id format size path os_type")
+
+
 def _get_unique_name():
     return MIGRATION_TMP_FORMAT % str(uuid.uuid4())
 
@@ -180,8 +186,12 @@ class _MigrationResources(object):
 
 
 class ImportProvider(base.BaseImportProvider):
-    def validate_connection_info(self, connection_info):
-        return True
+
+    connection_info_schema = schemas.get_schema(
+        __name__, schemas.PROVIDER_CONNECTION_INFO_SCHEMA_NAME)
+
+    target_environment_schema = schemas.get_schema(
+        __name__, schemas.PROVIDER_TARGET_ENVIRONMENT_SCHEMA_NAME)
 
     def _create_image(self, glance, name, disk_path, disk_format,
                       container_format, hypervisor_type):
@@ -630,8 +640,8 @@ class ExportProvider(base.BaseExportProvider):
         'ubuntu': constants.OS_TYPE_LINUX,
     }
 
-    def validate_connection_info(self, connection_info):
-        return True
+    connection_info_schema = schemas.get_schema(
+        __name__, schemas.PROVIDER_CONNECTION_INFO_SCHEMA_NAME)
 
     @utils.retry_on_error()
     def _get_instance(self, nova, instance_name):
@@ -680,6 +690,8 @@ class ExportProvider(base.BaseExportProvider):
         image_id = instance.create_image(_get_unique_name())
         try:
             image = glance.images.get(image_id)
+            image_size = image.size
+
             if image.container_format != 'bare':
                 raise exception.CoriolisException(
                     "Unsupported container format: %s" %
@@ -705,7 +717,13 @@ class ExportProvider(base.BaseExportProvider):
             _del_image()
 
         os_type = self._get_os_type(image)
-        return image_id, image_path, image_format, os_type
+        return GlanceImage(
+            id=image_id,
+            path=image_path,
+            format=image_format,
+            os_type=os_type,
+            size=image_size
+        )
 
     def export_instance(self, ctxt, connection_info, instance_name,
                         export_path):
@@ -735,7 +753,7 @@ class ExportProvider(base.BaseExportProvider):
             nics.append({'name': iface.port_id,
                          'id': iface.port_id,
                          'mac_address': iface.mac_addr,
-                         'fixed_ips': iface.fixed_ips,
+                         'ip_addresses': [ip[0] for ip in ips],
                          'network_id': iface.net_id,
                          'network_name': net_name})
 
@@ -752,25 +770,33 @@ class ExportProvider(base.BaseExportProvider):
 
         self._event_manager.progress_update("Creating instance snapshot")
 
-        image_id, image_path, image_format, os_type = self._create_snapshot(
+        image = self._create_snapshot(
             nova, glance, instance, export_path)
 
         disks = []
-        disks.append({'format': image_format,
-                      'path': image_path,
-                      'id': image_id})
+        disks.append({
+            'format': image.format,
+            'path': image.path,
+            'size_bytes': image.size,
+            'id': image.id
+        })
 
         vm_info = {
             'num_cpu': flavor.vcpus,
             'num_cores_per_socket': 1,
             'memory_mb': flavor.ram,
+            'nested_virtualization': False,
             'name': instance_name,
-            'os_type': os_type,
+            'os_type': image.os_type,
             'id': instance.id,
             'flavor_name': flavor.name,
             'devices': {
                 "nics": nics,
                 "disks": disks,
+                "cdroms": [],
+                "serial_ports": [],
+                "floppies": [],
+                "controllers": []
             }
         }
 

+ 50 - 0
coriolis/providers/openstack/schemas/connection_info_schema.json

@@ -0,0 +1,50 @@
+{
+  "$schema": "http://cloudbase.it/coriolis/schemas/openstack_connection#",
+  "type": "object",
+  "properties": {
+    "secret_ref": {
+      "type": "string"
+    },
+    "identity_api_version": {
+      "type": "integer"
+    },
+    "username": {
+      "type": "string"
+    },
+    "password": {
+      "type": "string"
+    },
+    "project_name": {
+      "type": "string"
+    },
+    "user_domain_name": {
+      "type": "string"
+    },
+    "project_domain_name": {
+      "type": "string"
+    },
+    "auth_url": {
+      "type": "string"
+    },
+    "allow_untrusted": {
+      "type": "boolean",
+      "default": false
+    }
+  },
+  "oneOf": [
+    {
+      "required": ["secret_ref"]
+    },
+    {
+      "required": [
+        "identity_api_version",
+        "username",
+        "password",
+        "project_name",
+        "user_domain_name",
+        "project_domain_name",
+        "auth_url"
+      ]
+    }
+  ]
+}

+ 50 - 0
coriolis/providers/openstack/schemas/target_environment_schema.json

@@ -0,0 +1,50 @@
+{
+  "$schema": "http://cloudbase.it/coriolis/schemas/openstack_target_environment#",
+  "type": "object",
+  "properties": {
+    "secret_ref": {
+      "type": "string"
+    },
+    "network_map": {
+      "type": "object",
+      "properties": {
+        "VM Network Local": {
+          "type": "string"
+        },
+        "VM Network": {
+          "type": "string"
+        }
+      },
+      "required": [
+        "VM Network Local",
+        "VM Network"
+      ]
+    },
+    "flavor_name": {
+      "type": "string"
+    },
+    "fip_pool_name": {
+      "type": "string"
+    },
+    "migr_fip_pool_name": {
+      "type": "string"
+    },
+    "keypair_name": {
+      "type": "string"
+    }
+  },
+  "oneOf": [
+    {
+      "required": ["secret_ref"]
+    },
+    {
+      "required": [
+        "network_map",
+        "flavor_name",
+        "fip_pool_name",
+        "migr_fip_pool_name",
+        "keypair_name"
+      ]
+    }
+  ]
+}

+ 16 - 11
coriolis/providers/vmware_vsphere/__init__.py

@@ -14,6 +14,7 @@ from coriolis import constants
 from coriolis import exception
 from coriolis.providers import base
 from coriolis.providers.vmware_vsphere import guestid
+from coriolis import schemas
 from coriolis import utils
 
 vmware_vsphere_opts = [
@@ -29,8 +30,9 @@ LOG = logging.getLogger(__name__)
 
 
 class ExportProvider(base.BaseExportProvider):
-    def validate_connection_info(self, connection_info):
-        return True
+
+    connection_info_schema = schemas.get_schema(
+        __name__, schemas.PROVIDER_CONNECTION_INFO_SCHEMA_NAME)
 
     @utils.retry_on_error()
     def _convert_disk_type(self, disk_path, target_disk_path, target_type=0):
@@ -102,8 +104,11 @@ class ExportProvider(base.BaseExportProvider):
         vm = self._get_vm(si, instance_path)
 
         firmware_type_map = {
-            vim.vm.GuestOsDescriptor.FirmwareType.bios: 'BIOS',
-            vim.vm.GuestOsDescriptor.FirmwareType.efi: 'EFI'}
+            vim.vm.GuestOsDescriptor.FirmwareType.bios:
+                constants.FIRMWARE_TYPE_BIOS,
+            vim.vm.GuestOsDescriptor.FirmwareType.efi:
+                constants.FIRMWARE_TYPE_EFI
+        }
 
         vm_info = {
             'num_cpu': vm.config.hardware.numCPU,
@@ -161,8 +166,8 @@ class ExportProvider(base.BaseExportProvider):
         devices = [d for d in vm.config.hardware.device if
                    isinstance(d, vim.vm.device.VirtualDisk)]
         for device in devices:
-            disks.append({'size': device.capacityInBytes,
-                          'address': device.unitNumber,
+            disks.append({'size_bytes': device.capacityInBytes,
+                          'unit_number': device.unitNumber,
                           'id': device.key,
                           'controller_id': device.controllerKey})
 
@@ -170,15 +175,15 @@ class ExportProvider(base.BaseExportProvider):
         devices = [d for d in vm.config.hardware.device if
                    isinstance(d, vim.vm.device.VirtualCdrom)]
         for device in devices:
-            cdroms.append({'address': device.unitNumber, 'id': device.key,
+            cdroms.append({'unit_number': device.unitNumber, 'id': device.key,
                            'controller_id': device.controllerKey})
 
-        floppy = []
+        floppies = []
         devices = [d for d in vm.config.hardware.device if
                    isinstance(d, vim.vm.device.VirtualFloppy)]
         for device in devices:
-            floppy.append({'address': device.unitNumber, 'id': device.key,
-                           'controller_id': device.controllerKey})
+            floppies.append({'unit_number': device.unitNumber, 'id': device.key,
+                             'controller_id': device.controllerKey})
 
         nics = []
         devices = [d for d in vm.config.hardware.device if
@@ -215,7 +220,7 @@ class ExportProvider(base.BaseExportProvider):
             "controllers": disk_ctrls,
             "disks": disks,
             "cdroms": cdroms,
-            "floppy": floppy,
+            "floppies": floppies,
             "serial_ports": serial_ports
         }
         vm_info["boot_order"] = boot_order

+ 38 - 0
coriolis/providers/vmware_vsphere/schemas/connection_info_schema.json

@@ -0,0 +1,38 @@
+{
+  "$schema": "http://cloudbase.it/coriolis/schemas/vmware_vsphere_connection#",
+  "type": "object",
+  "properties": {
+    "secret_ref": {
+      "type": "string"
+    },
+    "host": {
+      "type": "string"
+    },
+    "port": {
+      "type": "integer"
+    },
+    "username": {
+      "type": "string"
+    },
+    "password": {
+      "type": "string"
+    },
+    "allow_untrusted": {
+      "type": "boolean",
+      "default": false
+    }
+  },
+  "oneOf": [
+    {
+      "required": ["secret_ref"]
+    },
+    {
+      "required": [
+        "host",
+        "port",
+        "username",
+        "password"
+      ]
+    }
+  ]
+}

+ 57 - 0
coriolis/schemas.py

@@ -0,0 +1,57 @@
+""" Defines various schemas used for validation throughout the project. """
+
+import json
+
+import logging
+import jinja2
+import jsonschema
+
+
+LOG = logging.getLogger(__name__)
+
+
+DEFAULT_SCHEMAS_DIRECTORY = "schemas"
+
+PROVIDER_CONNECTION_INFO_SCHEMA_NAME = "connection_info_schema.json"
+
+PROVIDER_TARGET_ENVIRONMENT_SCHEMA_NAME = "target_environment_schema.json"
+
+
+def get_schema(package_name, schema_name,
+               schemas_directory=DEFAULT_SCHEMAS_DIRECTORY):
+    """ Loads the schema with the given 'schema_name' using jinja2 template
+    loading from the provided 'package_name' under the given
+    'schemas_directory'.
+    """
+    template_env = jinja2.Environment(
+        loader=jinja2.PackageLoader(package_name, schemas_directory))
+
+    schema = json.loads(template_env.get_template(schema_name).render())
+
+    LOG.debug("Succesfully loaded and parsed schema '%s' from '%s'.",
+             schema_name, package_name)
+    return schema
+
+
+def validate_value(val, schema):
+    """ Simple wrapper for jsonschema.validate for usability.
+
+    NOTE: silently passes empty schemas.
+    """
+    jsonschema.validate(val, schema)
+
+
+def validate_string(string, schema):
+    """ Attempts to validate the json value provided as a string against the
+    given JSON schema.
+
+    Runs silently on success or raises an exception otherwise.
+    Silently passes empty schemas.
+    """
+    jsonschema.validate(json.loads(string), schema)
+
+
+# Global schemas:
+CORIOLIS_VM_EXPORT_INFO_SCHEMA_NAME = "vm_export_info_schema.json"
+CORIOLIS_VM_EXPORT_INFO_SCHEMA = get_schema(
+    __name__, CORIOLIS_VM_EXPORT_INFO_SCHEMA_NAME)

+ 264 - 0
coriolis/schemas/vm_export_info_schema.json

@@ -0,0 +1,264 @@
+{
+  "$schema": "http://cloudbase.it/coriolis/schemas/vm_export_info#",
+  "type": "object",
+  "properties": {
+    "num_cpu": {
+      "type": "integer",
+      "description": "Number of CPUs of the VM."
+    },
+    "num_cores_per_socket": {
+      "type": "integer",
+      "description": "Number of CPU cores per socket, if applicable."
+    },
+    "memory_mb": {
+      "type": "integer",
+      "description": "Memory of the VM in MegaBytes."
+    },
+    "name": {
+      "type": "string",
+      "description": "Human-readable name of the VM."
+    },
+    "id": {
+      "type": "string",
+      "description": "Unique identifier of the VM."
+    },
+    "dynamic_memory_enabled": {
+      "type": "boolean",
+      "description": "Indicates whether or not the VM's physical memory was allocated dynamically."
+    },
+    "os_type": {
+      "type": "string",
+      "description": "The generic type of the operating system installed on the VM.",
+      "enum": ["bsd", "linux", "osx", "solaris", "windows"]
+    },
+    "firmware_type": {
+      "type": "string",
+      "description": "The type of firmware of the VM.",
+      "enum": ["BIOS", "EFI"]
+    },
+    "nested_virtualization": {
+      "type": "boolean",
+      "description": "Indicates whether or not nested hardware accelerated virtualization is possible on the VM."
+    },
+    "guest_id": {
+      "type": "string",
+      "description": "Extra ID field for added categorisation."
+    },
+    "flavor_name": {
+      "type": "string",
+      "description": "Name of the exported VM's flavor."
+    },
+    "devices": {
+      "type": "object",
+      "description": "Contains information about all of the VM's devices.",
+      "properties": {
+        "disks": {
+          "type": "array",
+          "description": "List of all disks attached to the VM.",
+          "items": {
+            "type": "object",
+            "properties": {
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "format": {
+                "type": "string",
+                "enum": ["vmdk", "raw", "qcow", "qcow2", "vhd", "vhdx"]
+              },
+              "unit_number": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "size_bytes": {
+                "type": "integer"
+              },
+              "path": {
+                "type": "string"
+              },
+              "controller_id": {
+                "$ref": "#/definitions/numberOrString"
+              }
+            },
+            "required": [
+              "format",
+              "size_bytes",
+              "path"
+            ]
+          }
+        },
+        "cdroms": {
+          "type": "array",
+          "description": "List of all CDROM devices attached to the VM.",
+          "items": {
+            "type": "object",
+            "properties": {
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "unit_number": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "controller_id": {
+                "$ref": "#/definitions/numberOrString"
+              }
+            },
+            "required": [
+              "unit_number"
+            ]
+          }
+        },
+        "nics": {
+          "type": "array",
+          "description": "List of the network interface devices attached to the VM",
+          "items": {
+            "type": "object",
+            "properties": {
+              "network_name": {
+                "type": "string"
+              },
+              "network_id": {
+                "type": "string"
+              },
+              "name": {
+                "type": "string"
+              },
+              "ip_addresses": {
+                "type": "array",
+                "items": {
+                  "type": "string"
+                }
+              },
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "mac_address": {
+                "$ref": "#/definitions/nullableString"
+              }
+            },
+            "required": [
+              "network_name",
+              "mac_address"
+            ]
+          }
+        },
+        "serial_ports": {
+          "type": "array",
+          "description": "List of additional serial ports attached to the VM.",
+          "items": {
+            "type": "object",
+            "properties": {
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              }
+            }
+          },
+          "required": [
+            "id"
+          ]
+        },
+        "floppies": {
+          "type": "array",
+          "description": "List of all floppy devices attached to the VM.",
+          "items": {
+            "type": "object",
+            "properties": {
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "unit_number": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "controller_id": {
+                "$ref": "#/definitions/numberOrString"
+              }
+            },
+            "required": [
+              "unit_number"
+            ]
+          }
+        },
+        "controllers": {
+          "type": "array",
+          "description": "List of all disk controllers available to the VM.",
+          "items": {
+            "type": "object",
+            "properties": {
+              "type": {
+                "type": "string"
+              },
+              "bus_number": {
+                "$ref": "#/definitions/numberOrString"
+              },
+              "id": {
+                "$ref": "#/definitions/numberOrString"
+              }
+            },
+            "required": [
+              "type",
+              "bus_number",
+              "id"
+            ]
+          }
+        }
+      },
+      "required": [
+        "disks",
+        "cdroms",
+        "nics",
+        "serial_ports",
+        "floppies",
+        "controllers"
+      ]
+    },
+    "boot_order": {
+      "type": "array",
+      "description": "List specifying the boot order of the VM.",
+      "items": {
+        "type": "object",
+        "properties": {
+          "type": {
+            "type": "string"
+          },
+          "id": {
+            "$ref": "#/definitions/numberOrString"
+          }
+        },
+        "required": [
+          "type",
+          "id"
+        ]
+      }
+    }
+  },
+  "required": [
+    "id",
+    "name",
+    "num_cpu",
+    "memory_mb",
+    "os_type",
+    "nested_virtualization",
+    "devices"
+  ],
+  "definitions": {
+    "numberOrString": {
+      "oneOf": [{
+        "type": "number"
+      }, {
+        "type": "string"
+      }]
+    },
+    "nullableNumber": {
+      "oneOf": [{
+        "type": "number"
+      }, {
+        "type": "null"
+      }]
+    },
+    "nullableString": {
+      "oneOf": [{
+        "type": "string"
+      }, {
+        "type": "null"
+      }]
+    }
+  }
+}

+ 32 - 0
coriolis/schemas_exceptions.py

@@ -0,0 +1,32 @@
+""" Defines a set of exceptions possible during schema loading/validation. """
+
+import json
+
+import jinja2
+import jsonschema
+
+from coriolis import exception
+
+
+class CoriolisSchemaException(exception.CoriolisException):
+    """ Base class for all coriolis schema handling exceptions. """
+    message = "Exception occured during schema validation: %(msg)s."
+
+class CoriolisSchemaValidationError(
+        CoriolisSchemaException, jsonschema.ValidationError):
+    """ Raised when a schema validation has failed. """
+    message = "Failed to validate JSON schema: %(msg)s."
+
+
+class CoriolisSchemaParsingError(
+        CoriolisSchemaException, ValueError):
+    """ Raised when decoding of either the JSON schema or the JSON value being
+    validated occurs.
+    """
+    message = "Failed to parse JSON for schema validation: %(msg)s."
+
+
+class CoriolisSchemaLoadingException(
+        CoriolisSchemaException, jinja2.TemplateNotFound):
+    """ Raised when schema files are not found. """
+    message = "Failed to load schema: %(msg)s."

+ 0 - 0
coriolis/tests/__init__.py


+ 11 - 0
coriolis/tests/test_base.py

@@ -0,0 +1,11 @@
+""" Defines base class for all tests. """
+
+import mock
+
+from oslotest import base
+
+
+class CoriolisBaseTestCase(base.BaseTestCase):
+
+    def setUp(self):
+        super(CoriolisBaseTestCase, self).setUp()

+ 87 - 0
coriolis/tests/test_schemas.py

@@ -0,0 +1,87 @@
+import json
+import jsonschema
+import mock
+
+import jinja2
+
+from coriolis import schemas
+from coriolis.tests import test_base
+
+
+RENDERED_TEMPLATE_SENTINEL = mock.sentinel.some_string_schema
+
+
+def _get_mock_template_env():
+    temp = mock.MagicMock()
+    temp.render.return_value = RENDERED_TEMPLATE_SENTINEL
+
+    tempenv = mock.MagicMock()
+    tempenv.get_template.return_value = temp
+
+    return tempenv
+
+
+class SchemasTestCase(test_base.CoriolisBaseTestCase):
+    """ Collection of tests for the Coriolis schemas package. """
+
+    def setUp(self):
+        super(SchemasTestCase, self).setUp()
+
+    def _assert_tempenv_calls(self, mock_tempenv, temp_name):
+        mock_tempenv.get_template.assert_called_once_with(temp_name)
+        mock_tempenv.get_template().render.assert_called_once_with()
+
+    @mock.patch.object(jinja2, 'Environment')
+    @mock.patch.object(jinja2, 'PackageLoader')
+    @mock.patch.object(json, 'loads')
+    def test_get_schema(self, mock_loads, mock_loader, mock_environ):
+        test_schema_name = mock.sentinel.schema_name
+        test_package_name = mock.sentinel.package_name
+
+        test_loader = mock.sentinel.loader
+        mock_loader.return_value = test_loader
+
+        test_rendered_template = mock.sentinel.rendered_template
+        mock_template = mock.MagicMock()
+        mock_template.render.return_value = test_rendered_template
+
+        mock_env = mock.MagicMock()
+        mock_env.get_template.return_value = mock_template
+
+        mock_environ.return_value = mock_env
+
+        test_loaded_schema = mock.sentinel.loaded_schema
+        mock_loads.return_value = test_loaded_schema
+
+        res = schemas.get_schema(test_package_name, test_schema_name)
+
+        mock_loader.assert_called_once_with(
+            test_package_name, schemas.DEFAULT_SCHEMAS_DIRECTORY)
+        mock_environ.assert_called_once_with(loader=test_loader)
+        mock_env.get_template.assert_called_once_with(test_schema_name)
+        mock_loads.assert_called_once_with(test_rendered_template)
+
+        self.assertEqual(res, test_loaded_schema)
+
+    @mock.patch.object(jsonschema, 'validate')
+    def test_validate_value(self, mock_validate):
+        test_value = mock.sentinel.test_value
+        test_schema = mock.sentinel.test_schema
+
+        schemas.validate_value(test_value, test_schema)
+
+        mock_validate.assert_called_once_with(test_value, test_schema)
+
+    @mock.patch.object(json, 'loads')
+    @mock.patch.object(jsonschema, 'validate')
+    def test_validate_string(self, mock_validate, mock_loads):
+        test_value = mock.sentinel.test_value
+        test_string = mock.sentinel.test_string
+        test_schema = mock.sentinel.test_schema
+
+        mock_loads.return_value = test_value
+
+        schemas.validate_string(test_string, test_schema)
+
+        mock_loads.assert_called_once_with(test_string)
+        mock_validate.assert_called_once_with(test_value, test_schema)

+ 6 - 1
coriolis/worker/rpc/server.py

@@ -5,15 +5,16 @@ import queue
 import shutil
 import sys
 
+import psutil
 from oslo_config import cfg
 from oslo_log import log as logging
-import psutil
 
 from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis import constants
 from coriolis import events
 from coriolis import exception
 from coriolis.providers import factory
+from coriolis import schemas
 from coriolis import secrets
 from coriolis import utils
 
@@ -208,6 +209,10 @@ def _task_process(ctxt, task_id, task_type, origin, destination, instance,
             result = provider.export_instance(ctxt, connection_info, instance,
                                               export_path)
             result[TMP_DIRS_KEY] = [export_path]
+
+            # validate the outputted VM info:
+            schemas.validate_value(
+                result, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)
         else:
             result = provider.import_instance(ctxt, connection_info,
                                               target_environment, instance,

+ 1 - 0
requirements.txt

@@ -1,6 +1,7 @@
 eventlet
 keystoneauth1
 keystonemiddleware
+jsonschema
 PyMySQL
 oslo.concurrency
 oslo.config

+ 3 - 0
test-requirements.txt

@@ -0,0 +1,3 @@
+coverage
+discover
+oslotest