Browse Source

github_actions: add flake8 fixes and enabled checks

Co-Authored-By: Cristian Matiut <cmatiut@cloudbasesolutions.com>
Adrian Vladu 2 years ago
parent
commit
ac58aff52b
100 changed files with 874 additions and 770 deletions
  1. 2 1
      .github/workflows/unit-tests.yml
  2. 3 4
      coriolis/api/v1/diagnostics.py
  3. 3 3
      coriolis/api/v1/endpoint_actions.py
  4. 7 5
      coriolis/api/v1/endpoint_destination_minion_pool_options.py
  5. 3 3
      coriolis/api/v1/endpoint_destination_options.py
  6. 3 3
      coriolis/api/v1/endpoint_instances.py
  7. 3 3
      coriolis/api/v1/endpoint_networks.py
  8. 5 4
      coriolis/api/v1/endpoint_source_minion_pool_options.py
  9. 3 3
      coriolis/api/v1/endpoint_source_options.py
  10. 3 3
      coriolis/api/v1/endpoint_storage.py
  11. 6 5
      coriolis/api/v1/endpoints.py
  12. 3 3
      coriolis/api/v1/migration_actions.py
  13. 10 8
      coriolis/api/v1/migrations.py
  14. 4 4
      coriolis/api/v1/minion_pool_actions.py
  15. 19 14
      coriolis/api/v1/minion_pools.py
  16. 3 3
      coriolis/api/v1/providers.py
  17. 5 5
      coriolis/api/v1/regions.py
  18. 5 4
      coriolis/api/v1/replica_actions.py
  19. 7 7
      coriolis/api/v1/replica_schedules.py
  20. 1 1
      coriolis/api/v1/replica_tasks_execution_actions.py
  21. 3 3
      coriolis/api/v1/replica_tasks_executions.py
  22. 7 7
      coriolis/api/v1/replicas.py
  23. 6 6
      coriolis/api/v1/router.py
  24. 5 5
      coriolis/api/v1/services.py
  25. 2 2
      coriolis/api/v1/utils.py
  26. 1 1
      coriolis/api/v1/views/diagnostic_view.py
  27. 1 1
      coriolis/cache.py
  28. 3 4
      coriolis/cmd/api.py
  29. 4 5
      coriolis/cmd/conductor.py
  30. 4 5
      coriolis/cmd/minion_manager.py
  31. 1 1
      coriolis/cmd/replica_cron.py
  32. 4 5
      coriolis/cmd/scheduler.py
  33. 3 4
      coriolis/cmd/worker.py
  34. 10 10
      coriolis/conductor/rpc/client.py
  35. 129 107
      coriolis/conductor/rpc/server.py
  36. 1 1
      coriolis/conductor/rpc/utils.py
  37. 4 3
      coriolis/constants.py
  38. 9 15
      coriolis/context.py
  39. 5 4
      coriolis/cron/cron.py
  40. 1 1
      coriolis/data_transfer.py
  41. 12 11
      coriolis/db/api.py
  42. 24 17
      coriolis/db/sqlalchemy/migrate_repo/versions/001_initial.py
  43. 0 1
      coriolis/db/sqlalchemy/migrate_repo/versions/012_adds_migration_sync_fields.py
  44. 0 1
      coriolis/db/sqlalchemy/migrate_repo/versions/013_adds_task_index.py
  45. 40 33
      coriolis/db/sqlalchemy/migrate_repo/versions/016_adds_minion_vm_pools.py
  46. 0 1
      coriolis/db/sqlalchemy/migrate_repo/versions/018_adds_task_progress_idices.py
  47. 3 3
      coriolis/db/sqlalchemy/models.py
  48. 2 2
      coriolis/db/sqlalchemy/types.py
  49. 0 2
      coriolis/endpoint_options/__init__.py
  50. 4 2
      coriolis/endpoint_options/api.py
  51. 0 2
      coriolis/endpoint_resources/__init__.py
  52. 5 3
      coriolis/endpoints/api.py
  53. 4 3
      coriolis/events.py
  54. 4 5
      coriolis/exception.py
  55. 7 7
      coriolis/keystone.py
  56. 2 1
      coriolis/licensing/client.py
  57. 2 1
      coriolis/migrations/manager.py
  58. 11 9
      coriolis/minion_manager/rpc/client.py
  59. 176 141
      coriolis/minion_manager/rpc/server.py
  60. 30 23
      coriolis/minion_manager/rpc/tasks.py
  61. 0 1
      coriolis/minion_pools/api.py
  62. 2 1
      coriolis/osmorphing/base.py
  63. 1 1
      coriolis/osmorphing/centos.py
  64. 3 3
      coriolis/osmorphing/debian.py
  65. 4 4
      coriolis/osmorphing/manager.py
  66. 1 1
      coriolis/osmorphing/oracle.py
  67. 2 1
      coriolis/osmorphing/osdetect/base.py
  68. 3 2
      coriolis/osmorphing/osdetect/centos.py
  69. 0 1
      coriolis/osmorphing/osdetect/coreos.py
  70. 1 1
      coriolis/osmorphing/osdetect/rocky.py
  71. 2 2
      coriolis/osmorphing/osdetect/windows.py
  72. 8 6
      coriolis/osmorphing/osmount/base.py
  73. 13 9
      coriolis/osmorphing/osmount/windows.py
  74. 7 6
      coriolis/osmorphing/redhat.py
  75. 10 7
      coriolis/osmorphing/suse.py
  76. 2 1
      coriolis/osmorphing/ubuntu.py
  77. 9 10
      coriolis/osmorphing/windows.py
  78. 2 2
      coriolis/policy.py
  79. 6 5
      coriolis/providers/backup_writers.py
  80. 0 1
      coriolis/providers/base.py
  81. 23 22
      coriolis/providers/provider_utils.py
  82. 11 6
      coriolis/providers/replicator.py
  83. 2 1
      coriolis/qemu.py
  84. 0 1
      coriolis/regions/api.py
  85. 2 2
      coriolis/replica_cron/rpc/server.py
  86. 5 7
      coriolis/rpc.py
  87. 2 3
      coriolis/scheduler/filters/trivial_filters.py
  88. 7 7
      coriolis/scheduler/rpc/client.py
  89. 5 10
      coriolis/scheduler/rpc/server.py
  90. 1 1
      coriolis/scheduler/scheduler_utils.py
  91. 6 3
      coriolis/service.py
  92. 0 1
      coriolis/services/api.py
  93. 21 22
      coriolis/taskflow/base.py
  94. 5 4
      coriolis/taskflow/runner.py
  95. 1 1
      coriolis/tasks/base.py
  96. 3 2
      coriolis/tasks/minion_pool_tasks.py
  97. 3 2
      coriolis/tasks/osmorphing_tasks.py
  98. 1 1
      coriolis/tasks/replica_tasks.py
  99. 71 64
      coriolis/tests/conductor/rpc/test_server.py
  100. 2 1
      coriolis/tests/db/test_api.py

+ 2 - 1
.github/workflows/unit-tests.yml

@@ -31,4 +31,5 @@ jobs:
     - name: Run unit tests with tox
       shell: bash
       run: |
-        tox -e py3 -v
+        tox -e py3,pep8 -v
+

+ 3 - 4
coriolis/api/v1/diagnostics.py

@@ -1,14 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-import logging
-
-from coriolis import exception
-from coriolis.api import wsgi as api_wsgi
 from coriolis.api.v1.views import diagnostic_view
+from coriolis.api import wsgi as api_wsgi
 from coriolis.diagnostics import api
 from coriolis.policies import diagnostics
 
+import logging
+
 
 LOG = logging.getLogger(__name__)
 

+ 3 - 3
coriolis/api/v1/endpoint_actions.py

@@ -1,13 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from webob import exc
-
-from coriolis import exception
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoints import api
+from coriolis import exception
 from coriolis.policies import endpoints as endpoint_policies
 
+from webob import exc
+
 
 class EndpointActionsController(api_wsgi.Controller):
     def __init__(self):

+ 7 - 5
coriolis/api/v1/endpoint_destination_minion_pool_options.py

@@ -1,13 +1,13 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_options_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_options import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)
@@ -35,9 +35,11 @@ class EndpointDestinationMinionPoolOptionsController(api_wsgi.Controller):
         else:
             options = {}
 
-        return endpoint_options_view.destination_minion_pool_options_collection(
+        return (endpoint_options_view.
+                destination_minion_pool_options_collection)(
             req,
-            self._minion_pool_options_api.get_endpoint_destination_minion_pool_options(
+            (self._minion_pool_options_api.
+             get_endpoint_destination_minion_pool_options)(
                 context, endpoint_id, env=env, option_names=options))
 
 

+ 3 - 3
coriolis/api/v1/endpoint_destination_options.py

@@ -1,13 +1,13 @@
 # Copyright 2018 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_options_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_options import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)

+ 3 - 3
coriolis/api/v1/endpoint_instances.py

@@ -1,14 +1,14 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api import common
 from coriolis.api.v1.views import endpoint_resources_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_resources import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 LOG = logging.getLogger(__name__)
 

+ 3 - 3
coriolis/api/v1/endpoint_networks.py

@@ -1,13 +1,13 @@
 # Copyright 2017 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_resources_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_resources import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 LOG = logging.getLogger(__name__)
 

+ 5 - 4
coriolis/api/v1/endpoint_source_minion_pool_options.py

@@ -1,13 +1,13 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_options_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_options import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)
@@ -37,7 +37,8 @@ class EndpointSourceMinionPoolOptionsController(api_wsgi.Controller):
 
         return endpoint_options_view.source_minion_pool_options_collection(
             req,
-            self._minion_pool_options_api.get_endpoint_source_minion_pool_options(
+            (self._minion_pool_options_api.
+             get_endpoint_source_minion_pool_options)(
                 context, endpoint_id, env=env, option_names=options))
 
 

+ 3 - 3
coriolis/api/v1/endpoint_source_options.py

@@ -1,13 +1,13 @@
 # Copyright 2019 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_options_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_options import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)

+ 3 - 3
coriolis/api/v1/endpoint_storage.py

@@ -1,13 +1,13 @@
 # Copyright 2018 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
-from coriolis import utils
 from coriolis.api.v1.views import endpoint_resources_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoint_resources import api
 from coriolis.policies import endpoints as endpoint_policies
+from coriolis import utils
+
+from oslo_log import log as logging
 
 LOG = logging.getLogger(__name__)
 

+ 6 - 5
coriolis/api/v1/endpoints.py

@@ -1,16 +1,17 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import exception
-from coriolis.api.v1.views import endpoint_view
 from coriolis.api.v1 import utils as api_utils
+from coriolis.api.v1.views import endpoint_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoints import api
+from coriolis import exception
 from coriolis.policies import endpoints as endpoint_policies
 
+from oslo_log import log as logging
+from webob import exc
+
+
 LOG = logging.getLogger(__name__)
 
 

+ 3 - 3
coriolis/api/v1/migration_actions.py

@@ -1,13 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from webob import exc
-
-from coriolis import exception
 from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
 from coriolis.migrations import api
 from coriolis.policies import migrations as migration_policies
 
+from webob import exc
+
 
 class MigrationActionsController(api_wsgi.Controller):
     def __init__(self):

+ 10 - 8
coriolis/api/v1/migrations.py

@@ -1,18 +1,18 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_config import cfg as conf
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import exception
 from coriolis.api.v1 import utils as api_utils
 from coriolis.api.v1.views import migration_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoints import api as endpoints_api
+from coriolis import exception
 from coriolis.migrations import api
 from coriolis.policies import migrations as migration_policies
 
+from oslo_config import cfg as conf
+from oslo_log import log as logging
+from webob import exc
+
 
 MIGRATIONS_API_OPTS = [
     conf.BoolOpt("include_task_info_in_migrations_api",
@@ -53,7 +53,8 @@ class MigrationController(api_wsgi.Controller):
             req, self._migration_api.get_migrations(
                 context,
                 include_tasks=CONF.api.include_task_info_in_migrations_api,
-                include_task_info=CONF.api.include_task_info_in_migrations_api))
+                include_task_info=CONF.api.include_task_info_in_migrations_api
+            ))
 
     def index(self, req):
         return self._list(req)
@@ -143,8 +144,9 @@ class MigrationController(api_wsgi.Controller):
             # NOTE: destination environment for replica should have been
             # validated upon its creation.
             migration = self._migration_api.deploy_replica_instances(
-                context, replica_id, instance_osmorphing_minion_pool_mappings, clone_disks,
-                force, skip_os_morphing, user_scripts=user_scripts)
+                context, replica_id, instance_osmorphing_minion_pool_mappings,
+                clone_disks, force, skip_os_morphing,
+                user_scripts=user_scripts)
         else:
             (origin_endpoint_id,
              destination_endpoint_id,

+ 4 - 4
coriolis/api/v1/minion_pool_actions.py

@@ -1,13 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from webob import exc
-
-from coriolis import exception
 from coriolis.api.v1.views import minion_pool_view
 from coriolis.api import wsgi as api_wsgi
-from coriolis.policies import minion_pools as minion_pool_policies
+from coriolis import exception
 from coriolis.minion_pools import api
+from coriolis.policies import minion_pools as minion_pool_policies
+
+from webob import exc
 
 
 class MinionPoolActionsController(api_wsgi.Controller):

+ 19 - 14
coriolis/api/v1/minion_pools.py

@@ -1,17 +1,17 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import constants
-from coriolis import exception
-from coriolis.api.v1.views import minion_pool_view
 from coriolis.api.v1 import utils as api_utils
+from coriolis.api.v1.views import minion_pool_view
 from coriolis.api import wsgi as api_wsgi
+from coriolis import constants
 from coriolis.endpoints import api as endpoints_api
-from coriolis.policies import minion_pools as pools_policies
+from coriolis import exception
 from coriolis.minion_pools import api
+from coriolis.policies import minion_pools as pools_policies
+
+from oslo_log import log as logging
+from webob import exc
 
 LOG = logging.getLogger(__name__)
 
@@ -65,15 +65,16 @@ class MinionPoolController(api_wsgi.Controller):
             if maximum_minions < minimum_minions:
                 raise Exception(
                     "'maximum_minions' value (%s) must be at least as large as"
-                    " the 'minimum_minions' value (%s)." % (
-                        maximum_minions, minimum_minions))
+                    " the 'minimum_minions' value (%s)." %
+                    (maximum_minions, minimum_minions))
         if minion_max_idle_time is not None:
             if minion_max_idle_time <= 0:
                 raise Exception(
                     "'minion_max_idle_time' must be a strictly positive "
                     "integer. Got: %s" % maximum_minions)
 
-    @api_utils.format_keyerror_message(resource='minion_pool', method='create')
+    @api_utils.format_keyerror_message(resource='minion_pool',
+                                       method='create')
     def _validate_create_body(self, ctxt, body):
         minion_pool = body["minion_pool"]
         name = minion_pool["name"]
@@ -104,7 +105,8 @@ class MinionPoolController(api_wsgi.Controller):
             self._endpoints_api.validate_endpoint_source_minion_pool_options(
                 ctxt, endpoint_id, environment_options)
         elif pool_platform == constants.PROVIDER_PLATFORM_DESTINATION:
-            self._endpoints_api.validate_endpoint_destination_minion_pool_options(
+            (self._endpoints_api.
+             validate_endpoint_destination_minion_pool_options)(
                 ctxt, endpoint_id, environment_options)
 
         minimum_minions = minion_pool.get("minimum_minions", 1)
@@ -141,7 +143,8 @@ class MinionPoolController(api_wsgi.Controller):
             minion_max_idle_time, minion_retention_strategy, notes=notes,
             skip_allocation=skip_allocation))
 
-    @api_utils.format_keyerror_message(resource='minion_pool', method='update')
+    @api_utils.format_keyerror_message(resource='minion_pool',
+                                       method='update')
     def _validate_update_body(self, id, context, body):
         minion_pool = body["minion_pool"]
         if 'endpoint_id' in minion_pool:
@@ -173,12 +176,14 @@ class MinionPoolController(api_wsgi.Controller):
             if 'environment_options' in vals:
                 if minion_pool['platform'] == (
                         constants.PROVIDER_PLATFORM_SOURCE):
-                    self._endpoints_api.validate_endpoint_source_minion_pool_options(
+                    (self._endpoints_api.
+                     validate_endpoint_source_minion_pool_options)(
                         context, minion_pool['endpoint_id'],
                         vals['environment_options'])
                 elif minion_pool['platform'] == (
                         constants.PROVIDER_PLATFORM_DESTINATION):
-                    self._endpoints_api.validate_endpoint_destination_minion_pool_options(
+                    (self._endpoints_api.
+                     validate_endpoint_destination_minion_pool_options)(
                         context, minion_pool['endpoint_id'],
                         vals['environment_options'])
                 else:

+ 3 - 3
coriolis/api/v1/providers.py

@@ -1,11 +1,11 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-
 from coriolis.api import wsgi as api_wsgi
-from coriolis.providers import api
 from coriolis.policies import general as general_policies
+from coriolis.providers import api
+
+from oslo_log import log as logging
 
 LOG = logging.getLogger(__name__)
 

+ 5 - 5
coriolis/api/v1/regions.py

@@ -1,16 +1,16 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import exception
-from coriolis.api.v1.views import region_view
 from coriolis.api.v1 import utils as api_utils
+from coriolis.api.v1.views import region_view
 from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
 from coriolis.policies import regions as region_policies
 from coriolis.regions import api
 
+from oslo_log import log as logging
+from webob import exc
+
 LOG = logging.getLogger(__name__)
 
 

+ 5 - 4
coriolis/api/v1/replica_actions.py

@@ -1,14 +1,14 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from webob import exc
-
-from coriolis import exception
 from coriolis.api.v1.views import replica_tasks_execution_view
 from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
 from coriolis.policies import replicas as replica_policies
 from coriolis.replicas import api
 
+from webob import exc
+
 
 class ReplicaActionsController(api_wsgi.Controller):
     def __init__(self):
@@ -18,7 +18,8 @@ class ReplicaActionsController(api_wsgi.Controller):
     @api_wsgi.action('delete-disks')
     def _delete_disks(self, req, id, body):
         context = req.environ['coriolis.context']
-        context.can(replica_policies.get_replicas_policy_label("delete_disks"))
+        context.can(
+            replica_policies.get_replicas_policy_label("delete_disks"))
         try:
             return replica_tasks_execution_view.single(
                 req, self._replica_api.delete_disks(context, id))

+ 7 - 7
coriolis/api/v1/replica_schedules.py

@@ -1,19 +1,19 @@
 # Copyright 2017 Cloudbase Solutions Srl
 # All Rights Reserved.
 
+from coriolis.api.v1.views import replica_schedule_view
+from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
+from coriolis.policies import replica_schedules as schedules_policies
+from coriolis.replica_cron import api
+from coriolis import schemas
+
 import jsonschema
 from oslo_log import log as logging
 from oslo_utils import strutils
 from oslo_utils import timeutils
 from webob import exc
 
-from coriolis import exception
-from coriolis import schemas
-from coriolis.api.v1.views import replica_schedule_view
-from coriolis.api import wsgi as api_wsgi
-from coriolis.policies import replica_schedules as schedules_policies
-from coriolis.replica_cron import api
-
 
 LOG = logging.getLogger(__name__)
 

+ 1 - 1
coriolis/api/v1/replica_tasks_execution_actions.py

@@ -3,8 +3,8 @@
 
 from webob import exc
 
-from coriolis import exception
 from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
 from coriolis.policies import replica_tasks_executions as execution_policies
 from coriolis.replica_tasks_executions import api
 

+ 3 - 3
coriolis/api/v1/replica_tasks_executions.py

@@ -1,13 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from webob import exc
-
 from coriolis.api.v1.views import replica_tasks_execution_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis import exception
-from coriolis.replica_tasks_executions import api
 from coriolis.policies import replica_tasks_executions as executions_policies
+from coriolis.replica_tasks_executions import api
+
+from webob import exc
 
 
 class ReplicaTasksExecutionController(api_wsgi.Controller):

+ 7 - 7
coriolis/api/v1/replicas.py

@@ -1,19 +1,18 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_config import cfg as conf
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import exception
 from coriolis.api.v1 import utils as api_utils
-from coriolis.api.v1.views import replica_view
 from coriolis.api.v1.views import replica_tasks_execution_view
+from coriolis.api.v1.views import replica_view
 from coriolis.api import wsgi as api_wsgi
 from coriolis.endpoints import api as endpoints_api
+from coriolis import exception
 from coriolis.policies import replicas as replica_policies
 from coriolis.replicas import api
 
+from oslo_config import cfg as conf
+from oslo_log import log as logging
+from webob import exc
 
 REPLICA_API_OPTS = [
     conf.BoolOpt("include_task_info_in_replicas_api",
@@ -158,7 +157,8 @@ class ReplicaController(api_wsgi.Controller):
 
         backend_mappings = original_storage_mappings.get(
             'backend_mappings', [])
-        new_backend_mappings = new_storage_mappings.get('backend_mappings', [])
+        new_backend_mappings = new_storage_mappings.get(
+            'backend_mappings', [])
         new_backend_mapping_sources = [mapping['source'] for mapping in
                                        new_backend_mappings]
 

+ 6 - 6
coriolis/api/v1/router.py

@@ -16,8 +16,8 @@ from coriolis.api.v1 import endpoint_storage
 from coriolis.api.v1 import endpoints
 from coriolis.api.v1 import migration_actions
 from coriolis.api.v1 import migrations
-from coriolis.api.v1 import minion_pools
 from coriolis.api.v1 import minion_pool_actions
+from coriolis.api.v1 import minion_pools
 from coriolis.api.v1 import provider_schemas
 from coriolis.api.v1 import providers
 from coriolis.api.v1 import regions
@@ -89,11 +89,11 @@ class APIRouter(api.APIRouter):
 
         self.resources['endpoint_destination_minion_pool_options'] = \
             endpoint_destination_minion_pool_options.create_resource()
-        mapper.resource('minion_pool_options',
-                        'endpoints/{endpoint_id}/destination-minion-pool-options',
-                        controller=(
-                            self.resources[
-                                'endpoint_destination_minion_pool_options']))
+        mapper.resource(
+            'minion_pool_options',
+            'endpoints/{endpoint_id}/destination-minion-pool-options',
+            controller=(self.resources
+                        ['endpoint_destination_minion_pool_options']))
 
         endpoint_actions_resource = endpoint_actions.create_resource()
         self.resources['endpoint_actions'] = endpoint_actions_resource

+ 5 - 5
coriolis/api/v1/services.py

@@ -1,16 +1,16 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from oslo_log import log as logging
-from webob import exc
-
-from coriolis import exception
-from coriolis.api.v1.views import service_view
 from coriolis.api.v1 import utils as api_utils
+from coriolis.api.v1.views import service_view
 from coriolis.api import wsgi as api_wsgi
+from coriolis import exception
 from coriolis.policies import services as service_policies
 from coriolis.services import api
 
+from oslo_log import log as logging
+from webob import exc
+
 LOG = logging.getLogger(__name__)
 
 

+ 2 - 2
coriolis/api/v1/utils.py

@@ -153,7 +153,7 @@ def validate_instances_list_for_transfer(instances):
         inst: count for (inst, count) in appearances.items() if count > 1}
     if duplicates:
         raise exception.InvalidInput(
-            "Transfer action instances (%s) list contained duplicates: %s " % (
-                instances, duplicates))
+            "Transfer action instances (%s) list contained duplicates: %s " %
+            (instances, duplicates))
 
     return instances

+ 1 - 1
coriolis/api/v1/views/diagnostic_view.py

@@ -3,7 +3,7 @@
 
 
 def single(req, diag):
-    return {"diagnostic":  diag}
+    return {"diagnostic": diag}
 
 
 def collection(req, diag):

+ 1 - 1
coriolis/cache.py

@@ -3,8 +3,8 @@
 
 from coriolis import exception
 
-from oslo_config import cfg
 from oslo_cache import core as cache
+from oslo_config import cfg
 
 opts = [
     cfg.BoolOpt('caching', default=False),

+ 3 - 4
coriolis/cmd/api.py

@@ -10,10 +10,9 @@ from coriolis import service
 from coriolis import utils
 
 api_opts = [
-    cfg.IntOpt('worker_count',
-               min=1, default=processutils.get_worker_count(),
-               help='Number of processes in which the service will be running')
-]
+    cfg.IntOpt(
+        'worker_count', min=1, default=processutils.get_worker_count(),
+        help='Number of processes in which the service will be running')]
 
 CONF = cfg.CONF
 CONF.register_opts(api_opts, 'api')

+ 4 - 5
coriolis/cmd/conductor.py

@@ -6,16 +6,15 @@ import sys
 from oslo_concurrency import processutils
 from oslo_config import cfg
 
-from coriolis import constants
 from coriolis.conductor.rpc import server as rpc_server
+from coriolis import constants
 from coriolis import service
 from coriolis import utils
 
 conductor_opts = [
-    cfg.IntOpt('worker_count',
-               min=1, default=processutils.get_worker_count(),
-               help='Number of processes in which the service will be running')
-]
+    cfg.IntOpt(
+        'worker_count', min=1, default=processutils.get_worker_count(),
+        help='Number of processes in which the service will be running')]
 
 CONF = cfg.CONF
 CONF.register_opts(conductor_opts, 'conductor')

+ 4 - 5
coriolis/cmd/minion_manager.py

@@ -6,15 +6,14 @@ import sys
 from oslo_config import cfg
 
 from coriolis import constants
+from coriolis.minion_manager.rpc import server as rpc_server
 from coriolis import service
 from coriolis import utils
-from coriolis.minion_manager.rpc import server as rpc_server
 
 minion_manager_opts = [
-    cfg.IntOpt('worker_count',
-               min=1, default=1,
-               help='Number of processes in which the service will be running')
-]
+    cfg.IntOpt(
+        'worker_count', min=1, default=1,
+        help='Number of processes in which the service will be running')]
 
 CONF = cfg.CONF
 CONF.register_opts(minion_manager_opts, 'minion_manager')

+ 1 - 1
coriolis/cmd/replica_cron.py

@@ -6,9 +6,9 @@ import sys
 from oslo_config import cfg
 
 from coriolis import constants
+from coriolis.replica_cron.rpc import server as rpc_server
 from coriolis import service
 from coriolis import utils
-from coriolis.replica_cron.rpc import server as rpc_server
 
 CONF = cfg.CONF
 

+ 4 - 5
coriolis/cmd/scheduler.py

@@ -6,15 +6,14 @@ import sys
 from oslo_config import cfg
 
 from coriolis import constants
+from coriolis.scheduler.rpc import server as rpc_server
 from coriolis import service
 from coriolis import utils
-from coriolis.scheduler.rpc import server as rpc_server
 
 scheduler_opts = [
-    cfg.IntOpt('worker_count',
-               min=1, default=1,
-               help='Number of processes in which the service will be running')
-]
+    cfg.IntOpt(
+        'worker_count', min=1, default=1,
+        help='Number of processes in which the service will be running')]
 
 CONF = cfg.CONF
 CONF.register_opts(scheduler_opts, 'scheduler')

+ 3 - 4
coriolis/cmd/worker.py

@@ -12,10 +12,9 @@ from coriolis import utils
 from coriolis.worker.rpc import server as rpc_server
 
 worker_opts = [
-    cfg.IntOpt('worker_count',
-               min=1, default=processutils.get_worker_count(),
-               help='Number of processes in which the service will be running')
-]
+    cfg.IntOpt(
+        'worker_count', min=1, default=processutils.get_worker_count(),
+        help='Number of processes in which the service will be running')]
 
 CONF = cfg.CONF
 CONF.register_opts(worker_opts, 'worker')

+ 10 - 10
coriolis/conductor/rpc/client.py

@@ -242,11 +242,10 @@ class ConductorClient(rpc.BaseRPCClient):
             source_environment=source_environment,
             user_scripts=user_scripts)
 
-    def deploy_replica_instances(self, ctxt, replica_id,
-                                 instance_osmorphing_minion_pool_mappings=None,
-                                 clone_disks=False,
-                                 force=False, skip_os_morphing=False,
-                                 user_scripts=None):
+    def deploy_replica_instances(
+            self, ctxt, replica_id,
+            instance_osmorphing_minion_pool_mappings=None, clone_disks=False,
+            force=False, skip_os_morphing=False, user_scripts=None):
         return self._call(
             ctxt, 'deploy_replica_instances', replica_id=replica_id,
             instance_osmorphing_minion_pool_mappings=(
@@ -286,8 +285,8 @@ class ConductorClient(rpc.BaseRPCClient):
             exception_details=exception_details)
 
     def add_task_event(self, ctxt, task_id, level, message):
-        self._cast(
-            ctxt, 'add_task_event', task_id=task_id, level=level, message=message)
+        self._cast(ctxt, 'add_task_event', task_id=task_id,
+                   level=level, message=message)
 
     def add_task_progress_update(
             self, ctxt, task_id, message, initial_step=0, total_steps=0,
@@ -306,8 +305,8 @@ class ConductorClient(rpc.BaseRPCClient):
         self._cast(
             ctxt, 'update_task_progress_update', task_id=task_id,
             progress_update_index=progress_update_index,
-            new_current_step=new_current_step, new_total_steps=new_total_steps,
-            new_message=new_message)
+            new_current_step=new_current_step,
+            new_total_steps=new_total_steps, new_message=new_message)
 
     def create_replica_schedule(self, ctxt, replica_id,
                                 schedule, enabled, exp_date,
@@ -426,7 +425,8 @@ class ConductorClient(rpc.BaseRPCClient):
     def report_replica_minions_allocation_error(
             self, ctxt, replica_id, minion_allocation_error_details):
         self._call(
-            ctxt, 'report_replica_minions_allocation_error', replica_id=replica_id,
+            ctxt, 'report_replica_minions_allocation_error',
+            replica_id=replica_id,
             minion_allocation_error_details=minion_allocation_error_details)
 
     def confirm_migration_minions_allocation(

+ 129 - 107
coriolis/conductor/rpc/server.py

@@ -9,6 +9,7 @@ import uuid
 from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_log import log as logging
+
 from coriolis import constants
 from coriolis import context
 from coriolis.db import api as db_api
@@ -216,7 +217,7 @@ class ConductorServerEndpoint(object):
                     rpc_worker_client.WorkerClient.from_service_definition(
                         wrk, timeout=10))
                 for wrk in worker_services})
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Exception occurred while listing worker services for "
                 "diagnostics fetching. Exception was: %s",
@@ -226,20 +227,20 @@ class ConductorServerEndpoint(object):
         for (service_name, service_client) in client_objects.items():
             try:
                 diagnostics.append(service_client.get_diagnostics(ctxt))
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Exception occurred while fetching diagnostics for service"
                     " '%s'. Exception was: %s",
                     service_name, utils.get_exception_details())
 
-        worker_diagnostics = []
         for worker_service in self._scheduler_client.get_workers_for_specs(
                 ctxt):
-            worker_rpc = rpc_worker_client.WorkerClient.from_service_definition(
-                worker_service)
+            worker_rpc = (
+                rpc_worker_client.WorkerClient.from_service_definition(
+                    worker_service))
             try:
                 diagnostics.append(worker_rpc.get_diagnostics(ctxt))
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Exception occurred while fetching diagnostics for "
                     "worker service '%s'. Error was: %s",
@@ -359,11 +360,12 @@ class ConductorServerEndpoint(object):
                 db_api.update_endpoint(
                     ctxt, endpoint.id, {
                         "mapped_regions": mapped_regions})
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
-                    "Error adding region mappings during new endpoint creation "
-                    "(name: %s), cleaning up endpoint and all created "
-                    "mappings for regions: %s", endpoint.name, mapped_regions)
+                    "Error adding region mappings during new endpoint "
+                    "creation (name: %s), cleaning up endpoint and all "
+                    "created mappings for regions: %s",
+                    endpoint.name, mapped_regions)
                 db_api.delete_endpoint(ctxt, endpoint.id)
                 raise
 
@@ -591,11 +593,12 @@ class ConductorServerEndpoint(object):
             retry_count=5, retry_period=2, random_choice=True):
         worker_service = None
         try:
-            worker_service = self._scheduler_client.get_worker_service_for_task(
-                ctxt, {"id": task.id, "task_type": task.task_type},
-                origin_endpoint, destination_endpoint,
-                retry_count=retry_count, retry_period=retry_period,
-                random_choice=random_choice)
+            worker_service = (
+                self._scheduler_client.get_worker_service_for_task(
+                    ctxt, {"id": task.id, "task_type": task.task_type},
+                    origin_endpoint, destination_endpoint,
+                    retry_count=retry_count, retry_period=retry_period,
+                    random_choice=random_choice))
         except Exception as ex:
             LOG.debug(
                 "Failed to get worker service for task '%s'. Updating status "
@@ -650,7 +653,7 @@ class ConductorServerEndpoint(object):
                         destination=destination,
                         instance=task.instance,
                         task_info=task_info.get(task.instance, {}))
-                except Exception as ex:
+                except Exception:
                     LOG.warn(
                         "Error occured while starting new task '%s'. "
                         "Cancelling execution '%s'. Error was: %s",
@@ -753,7 +756,8 @@ class ConductorServerEndpoint(object):
                         "will never get queued. Already processed tasks are: "
                         "%s. Tasks left: %s" % (
                             execution.id, execution.type, instance,
-                            processed_tasks_type_map, remaining_tasks_deps_map))
+                            processed_tasks_type_map, remaining_tasks_deps_map
+                        ))
 
                 # mapping for task_info fields modified by each task:
                 modified_fields_by_queued_tasks = {}
@@ -763,7 +767,8 @@ class ConductorServerEndpoint(object):
                     for new_field in _check_task_cls_param_requirements(
                             task, task_info_keys):
                         if new_field not in modified_fields_by_queued_tasks:
-                            modified_fields_by_queued_tasks[new_field] = [task]
+                            modified_fields_by_queued_tasks[new_field] = [
+                                task]
                         else:
                             modified_fields_by_queued_tasks[new_field].append(
                                 task)
@@ -793,9 +798,8 @@ class ConductorServerEndpoint(object):
                 LOG.debug(
                     "Successfully processed following tasks for instance '%s' "
                     "for execution %s (type '%s') for any state conflict "
-                    "checks: %s",
-                    instance, execution.id, execution.type, [
-                        (t.id, t.task_type) for t in queued_tasks])
+                    "checks: %s", instance, execution.id, execution.type,
+                    [(t.id, t.task_type) for t in queued_tasks])
             LOG.debug(
                 "Successfully checked all tasks for instance '%s' as part of "
                 "execution '%s' (type '%s') for any state conflicts: %s",
@@ -876,7 +880,7 @@ class ConductorServerEndpoint(object):
                     "origin_minion_connection_info": None})
                 validate_origin_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY,
+                    constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY,  # noqa: E501
                     execution,
                     depends_on=[
                         get_instance_info_task.id,
@@ -900,7 +904,7 @@ class ConductorServerEndpoint(object):
                     "destination_minion_backup_writer_connection_info": None})
                 validate_destination_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY,
+                    constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY,  # noqa: E501
                     execution,
                     depends_on=[
                         validate_replica_destination_inputs_task.id])
@@ -1020,7 +1024,8 @@ class ConductorServerEndpoint(object):
         else:
             self._begin_tasks(ctxt, replica, execution)
 
-        return self.get_replica_tasks_execution(ctxt, replica_id, execution.id)
+        return self.get_replica_tasks_execution(
+            ctxt, replica_id, execution.id)
 
     @replica_synchronized
     def get_replica_tasks_executions(self, ctxt, replica_id,
@@ -1144,7 +1149,8 @@ class ConductorServerEndpoint(object):
         LOG.info("Replica tasks execution created: %s", execution.id)
 
         self._begin_tasks(ctxt, replica, execution)
-        return self.get_replica_tasks_execution(ctxt, replica_id, execution.id)
+        return self.get_replica_tasks_execution(
+            ctxt, replica_id, execution.id)
 
     @staticmethod
     def _check_endpoints(ctxt, origin_endpoint, destination_endpoint):
@@ -1169,7 +1175,8 @@ class ConductorServerEndpoint(object):
                                  network_map, storage_mappings, notes=None,
                                  user_scripts=None):
         origin_endpoint = self.get_endpoint(ctxt, origin_endpoint_id)
-        destination_endpoint = self.get_endpoint(ctxt, destination_endpoint_id)
+        destination_endpoint = self.get_endpoint(
+            ctxt, destination_endpoint_id)
         self._check_endpoints(ctxt, origin_endpoint, destination_endpoint)
 
         replica = models.Replica()
@@ -1271,11 +1278,10 @@ class ConductorServerEndpoint(object):
         return provider_types["types"]
 
     @replica_synchronized
-    def deploy_replica_instances(self, ctxt, replica_id,
-                                 clone_disks, force,
-                                 instance_osmorphing_minion_pool_mappings=None,
-                                 skip_os_morphing=False,
-                                 user_scripts=None):
+    def deploy_replica_instances(
+            self, ctxt, replica_id, clone_disks, force,
+            instance_osmorphing_minion_pool_mappings=None,
+            skip_os_morphing=False, user_scripts=None):
         replica = self._get_replica(ctxt, replica_id, include_task_info=True)
         self._check_reservation_for_transfer(
             replica, licensing_client.RESERVATION_TYPE_REPLICA)
@@ -1376,7 +1382,7 @@ class ConductorServerEndpoint(object):
                     "osmorphing_minion_connection_info": None})
                 validate_osmorphing_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY,
+                    constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY,  # noqa: E501
                     execution, depends_on=[
                         validate_replica_deployment_inputs_task.id])
                 last_validation_task = validate_osmorphing_minion_task
@@ -1404,7 +1410,7 @@ class ConductorServerEndpoint(object):
                     osmorphing_vol_attachment_deps.extend(depends_on)
                     attach_osmorphing_minion_volumes_task = self._create_task(
                         instance,
-                        constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION,
+                        constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION,  # noqa: E501
                         execution, depends_on=osmorphing_vol_attachment_deps)
                     last_osmorphing_resources_deployment_task = (
                         attach_osmorphing_minion_volumes_task)
@@ -1435,7 +1441,7 @@ class ConductorServerEndpoint(object):
                         migration.instance_osmorphing_minion_pool_mappings):
                     detach_osmorphing_minion_volumes_task = self._create_task(
                         instance,
-                        constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION,
+                        constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION,  # noqa: E501
                         execution, depends_on=[
                             attach_osmorphing_minion_volumes_task.id,
                             task_osmorphing.id],
@@ -1451,7 +1457,8 @@ class ConductorServerEndpoint(object):
                     depends_on.append(release_osmorphing_minion_task.id)
                 else:
                     task_delete_os_morphing_resources = self._create_task(
-                        instance, constants.TASK_TYPE_DELETE_OS_MORPHING_RESOURCES,
+                        instance,
+                        constants.TASK_TYPE_DELETE_OS_MORPHING_RESOURCES,
                         execution, depends_on=[
                             task_deploy_os_morphing_resources.id,
                             task_osmorphing.id],
@@ -1507,9 +1514,10 @@ class ConductorServerEndpoint(object):
             with lockutils.lock(
                     constants.MIGRATION_LOCK_NAME_FORMAT % migration.id,
                     external=True):
-                self._minion_manager_client.allocate_minion_machines_for_migration(
-                    ctxt, migration, include_transfer_minions=False,
-                    include_osmorphing_minions=True)
+                (self._minion_manager_client
+                     .allocate_minion_machines_for_migration(
+                         ctxt, migration, include_transfer_minions=False,
+                         include_osmorphing_minions=True))
                 self._set_tasks_execution_status(
                     ctxt, execution,
                     constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS)
@@ -1532,8 +1540,10 @@ class ConductorServerEndpoint(object):
         return ret
 
     def _deallocate_minion_machines_for_action(self, ctxt, action):
-        return self._minion_manager_client.deallocate_minion_machines_for_action(
-            ctxt, action.base_id)
+        return (
+            self._minion_manager_client.deallocate_minion_machines_for_action(
+                ctxt, action.base_id)
+        )
 
     def _check_minion_pools_for_action(self, ctxt, action):
         self._minion_manager_client.validate_minion_pool_selections_for_action(
@@ -1558,27 +1568,28 @@ class ConductorServerEndpoint(object):
                     "origin_minion_connection_info": (
                         instance_origin_minion['connection_info'])})
 
-            instance_destination_minion = instance_minion_machines.get(
+            instance_dst_minion = instance_minion_machines.get(
                 'destination_minion')
-            if instance_destination_minion:
-                action.info[instance].update({
-                    "destination_minion_machine_id": instance_destination_minion['id'],
+            if instance_dst_minion:
+                instance_dst_minion_dict = {
+                    "destination_minion_machine_id": instance_dst_minion['id'],
                     "destination_minion_provider_properties": (
-                        instance_destination_minion['provider_properties']),
+                        instance_dst_minion['provider_properties']),
                     "destination_minion_connection_info": (
-                        instance_destination_minion['connection_info']),
+                        instance_dst_minion['connection_info']),
                     "destination_minion_backup_writer_connection_info": (
-                        instance_destination_minion['backup_writer_connection_info'])})
+                        instance_dst_minion['backup_writer_connection_info'])}
+                action.info[instance].update(instance_dst_minion_dict)
 
-            instance_osmorphing_minion = instance_minion_machines.get(
+            osmorph_min = instance_minion_machines.get(
                 'osmorphing_minion')
-            if instance_osmorphing_minion:
+            if osmorph_min:
                 action.info[instance].update({
-                    "osmorphing_minion_machine_id": instance_osmorphing_minion['id'],
+                    "osmorphing_minion_machine_id": osmorph_min['id'],
                     "osmorphing_minion_provider_properties": (
-                        instance_osmorphing_minion['provider_properties']),
+                        osmorph_min['provider_properties']),
                     "osmorphing_minion_connection_info": (
-                        instance_osmorphing_minion['connection_info'])})
+                        osmorph_min['connection_info'])})
 
         # update the action info for all of the instances:
         for instance in minion_machine_allocations:
@@ -1701,8 +1712,9 @@ class ConductorServerEndpoint(object):
         execution = self._get_execution_for_migration(
             ctxt, migration, requery=False)
         LOG.warn(
-            "Error occured while allocating minion machines for Migration '%s'. "
-            "Cancelling the current Execution ('%s'). Error was: %s",
+            "Error occured while allocating minion machines for "
+            "Migration '%s'. Cancelling the current Execution ('%s'). "
+            "Error was: %s",
             migration_id, execution.id, minion_allocation_error_details)
         self._cancel_tasks_execution(
             ctxt, execution, requery=True)
@@ -1710,16 +1722,16 @@ class ConductorServerEndpoint(object):
             ctxt, execution,
             constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS)
 
-    def migrate_instances(self, ctxt, origin_endpoint_id,
-                          destination_endpoint_id, origin_minion_pool_id,
-                          destination_minion_pool_id,
-                          instance_osmorphing_minion_pool_mappings,
-                          source_environment, destination_environment,
-                          instances, network_map, storage_mappings,
-                          replication_count, shutdown_instances=False,
-                          notes=None, skip_os_morphing=False, user_scripts=None):
+    def migrate_instances(
+            self, ctxt, origin_endpoint_id, destination_endpoint_id,
+            origin_minion_pool_id, destination_minion_pool_id,
+            instance_osmorphing_minion_pool_mappings, source_environment,
+            destination_environment, instances, network_map, storage_mappings,
+            replication_count, shutdown_instances=False, notes=None,
+            skip_os_morphing=False, user_scripts=None):
         origin_endpoint = self.get_endpoint(ctxt, origin_endpoint_id)
-        destination_endpoint = self.get_endpoint(ctxt, destination_endpoint_id)
+        destination_endpoint = self.get_endpoint(
+            ctxt, destination_endpoint_id)
         self._check_endpoints(ctxt, origin_endpoint, destination_endpoint)
 
         destination_provider_types = self._get_provider_types(
@@ -1807,7 +1819,7 @@ class ConductorServerEndpoint(object):
                     "origin_minion_connection_info": None})
                 validate_origin_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY,
+                    constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY,  # noqa: E501
                     execution,
                     depends_on=migration_resources_task_deps)
                 migration_resources_task_ids.append(
@@ -1840,7 +1852,7 @@ class ConductorServerEndpoint(object):
                     "destination_minion_connection_info": None,
                     "destination_minion_backup_writer_connection_info": None})
                 ttyp = (
-                    constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY)
+                    constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY)  # noqa: E501
                 validate_destination_minion_task = self._create_task(
                     instance, ttyp, execution, depends_on=[
                         validate_migration_destination_inputs_task.id])
@@ -1874,7 +1886,7 @@ class ConductorServerEndpoint(object):
                     "osmorphing_minion_connection_info": None})
                 validate_osmorphing_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY,
+                    constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY,  # noqa: E501
                     execution, depends_on=[
                         validate_migration_destination_inputs_task.id])
                 migration_resources_task_ids.append(
@@ -1909,7 +1921,7 @@ class ConductorServerEndpoint(object):
             if migration.origin_minion_pool_id:
                 release_origin_minion_task = self._create_task(
                     instance,
-                    constants.TASK_TYPE_RELEASE_SOURCE_MINION,
+                    constants.TASK_TYPE_RELEASE_SOURCE_MINION,  # noqa: E501
                     execution,
                     depends_on=[
                         validate_origin_minion_task.id,
@@ -1935,14 +1947,15 @@ class ConductorServerEndpoint(object):
 
             target_resources_cleanup_task = None
             if migration.destination_minion_pool_id:
-                detach_volumes_from_destination_minion_task = self._create_task(
-                    instance,
-                    constants.TASK_TYPE_DETACH_VOLUMES_FROM_DESTINATION_MINION,
-                    execution,
-                    depends_on=[
-                        attach_destination_minion_disks_task.id,
-                        last_sync_task.id],
-                    on_error=True)
+                detach_volumes_from_destination_minion_task = (
+                    self._create_task(
+                        instance,
+                        constants.TASK_TYPE_DETACH_VOLUMES_FROM_DESTINATION_MINION,  # noqa: E501
+                        execution,
+                        depends_on=[
+                            attach_destination_minion_disks_task.id,
+                            last_sync_task.id],
+                        on_error=True))
 
                 release_destination_minion_task = self._create_task(
                     instance,
@@ -1983,7 +1996,7 @@ class ConductorServerEndpoint(object):
                     osmorphing_vol_attachment_deps.extend(depends_on)
                     attach_osmorphing_minion_volumes_task = self._create_task(
                         instance,
-                        constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION,
+                        constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION,  # noqa: E501
                         execution, depends_on=osmorphing_vol_attachment_deps)
                     last_osmorphing_resources_deployment_task = (
                         attach_osmorphing_minion_volumes_task)
@@ -2014,7 +2027,7 @@ class ConductorServerEndpoint(object):
                         migration.instance_osmorphing_minion_pool_mappings):
                     detach_osmorphing_minion_volumes_task = self._create_task(
                         instance,
-                        constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION,
+                        constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION,  # noqa: E501
                         execution, depends_on=[
                             attach_osmorphing_minion_volumes_task.id,
                             task_osmorphing.id],
@@ -2031,12 +2044,14 @@ class ConductorServerEndpoint(object):
                     osmorphing_resources_cleanup_task = (
                         release_osmorphing_minion_task)
                 else:
-                    task_delete_os_morphing_resources = self._create_task(
-                        instance, constants.TASK_TYPE_DELETE_OS_MORPHING_RESOURCES,
-                        execution, depends_on=[
-                            task_deploy_os_morphing_resources.id,
-                            task_osmorphing.id],
-                        on_error=True)
+                    task_delete_os_morphing_resources = (
+                        self._create_task(
+                            instance, constants.TASK_TYPE_DELETE_OS_MORPHING_RESOURCES,  # noqa: E501
+                            execution, depends_on=[
+                                task_deploy_os_morphing_resources.id,
+                                task_osmorphing.id],
+                            on_error=True))
+
                     depends_on.append(task_delete_os_morphing_resources.id)
                     osmorphing_resources_cleanup_task = (
                         task_delete_os_morphing_resources)
@@ -2087,9 +2102,11 @@ class ConductorServerEndpoint(object):
             with lockutils.lock(
                     constants.MIGRATION_LOCK_NAME_FORMAT % migration.id,
                     external=True):
-                self._minion_manager_client.allocate_minion_machines_for_migration(
-                    ctxt, migration, include_transfer_minions=True,
-                    include_osmorphing_minions=not skip_os_morphing)
+                (self._minion_manager_client
+                    .allocate_minion_machines_for_migration(
+                        ctxt, migration, include_transfer_minions=True,
+                        include_osmorphing_minions=not skip_os_morphing)
+                 )
                 self._set_tasks_execution_status(
                     ctxt, execution,
                     constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS)
@@ -2470,7 +2487,8 @@ class ConductorServerEndpoint(object):
                         constants.TASK_STATUS_CANCELED_FROM_DEADLOCK,
                         exception_details=TASK_DEADLOCK_ERROR_MESSAGE)
             LOG.warn(
-                "Marking deadlocked execution '%s' as DEADLOCKED", execution.id)
+                "Marking deadlocked execution '%s' as DEADLOCKED",
+                execution.id)
             self._set_tasks_execution_status(
                 ctxt, execution, constants.EXECUTION_STATUS_DEADLOCKED)
             LOG.error(
@@ -2611,9 +2629,9 @@ class ConductorServerEndpoint(object):
                 LOG.error(
                     "No info present for instance '%s' in action '%s' for task"
                     " '%s' (type '%s') of execution '%s' (type '%s'). "
-                    "Defaulting to empty dict." % (
-                        task.instance, action.id, task.id, task.task_type,
-                        execution.id, execution.type))
+                    "Defaulting to empty dict." %
+                    (task.instance, action.id, task.id, task.task_type,
+                     execution.id, execution.type))
                 task_info = {}
             else:
                 task_info = action.info[task.instance]
@@ -2636,7 +2654,7 @@ class ConductorServerEndpoint(object):
                     execution.id)
                 started_tasks.append(task.id)
                 return constants.TASK_STATUS_PENDING
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Error occured while starting new task '%s'. "
                     "Cancelling execution '%s'. Error was: %s",
@@ -2751,8 +2769,8 @@ class ConductorServerEndpoint(object):
                                 "tasks have been finalized and there are "
                                 "no non-error parents to directly depend on, "
                                 "but one or more on-error tasks have completed"
-                                " successfully: %s",
-                                task.id, parent_task_statuses)
+                                " successfully: %s", task.id,
+                                parent_task_statuses)
                             task_statuses[task.id] = _start_task(task)
                         # start on-error tasks only if at least one non-error
                         # parent task has completed successfully:
@@ -2950,8 +2968,8 @@ class ConductorServerEndpoint(object):
                     # as they are in the DB:
                     LOG.info(
                         "All tasks of the '%s' Replica update procedure have "
-                        "completed successfully.  Setting the updated parameter "
-                        "values on the parent Replica itself.",
+                        "completed successfully.  Setting the updated "
+                        "parameter values on the parent Replica itself.",
                         execution.action_id)
                     # NOTE: considering all the instances of the Replica get
                     # the same params, it doesn't matter which instance's
@@ -2989,7 +3007,8 @@ class ConductorServerEndpoint(object):
                 task_info['destination_minion_machine_id'],
                 task.id, task_type, updated_values)
             db_api.update_minion_machine(
-                ctxt, task_info['destination_minion_machine_id'], updated_values)
+                ctxt, task_info['destination_minion_machine_id'],
+                updated_values)
 
         elif task_type in (
                 constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION,
@@ -3078,7 +3097,7 @@ class ConductorServerEndpoint(object):
             LOG.error(
                 "Received confirmation that presumably cancelling task '%s' "
                 "(status '%s') has just completed successfully. "
-                "This should have never happened and indicates that its worker "
+                "This should have never happened, indicates that its worker "
                 "host ('%s') has either failed to cancel it properly, or it "
                 "was completed before the cancellation request was received. "
                 "Please check the worker logs for more details. "
@@ -3087,7 +3106,8 @@ class ConductorServerEndpoint(object):
                 task.id, task.status, task.host,
                 constants.TASK_STATUS_CANCELED_AFTER_COMPLETION)
             db_api.set_task_status(
-                ctxt, task_id, constants.TASK_STATUS_CANCELED_AFTER_COMPLETION,
+                ctxt, task_id,
+                constants.TASK_STATUS_CANCELED_AFTER_COMPLETION,
                 exception_details=(
                     "The worker host for this task ('%s') has either failed "
                     "at cancelling it or the cancellation request arrived "
@@ -3097,7 +3117,7 @@ class ConductorServerEndpoint(object):
                         task.host)))
         elif task.status == constants.TASK_STATUS_FAILED_TO_CANCEL:
             LOG.error(
-                "Received confirmation that presumably '%s' task '%s' has just "
+                "Received confirmation '%s' task '%s' has presumably just "
                 "completed successfully. Marking as '%s' and processing its "
                 "result as if it had completed normally.",
                 task.status, task.id,
@@ -3137,7 +3157,8 @@ class ConductorServerEndpoint(object):
                     execution.type] % execution.action_id,
                 external=True):
             action_id = execution.action_id
-            action = db_api.get_action(ctxt, action_id, include_task_info=True)
+            action = db_api.get_action(
+                ctxt, action_id, include_task_info=True)
 
             updated_task_info = None
             if task_result:
@@ -3179,7 +3200,7 @@ class ConductorServerEndpoint(object):
                         newly_started_tasks))
             else:
                 LOG.debug(
-                    "No new tasks were started for execution '%s' for instance "
+                    "No new tasks started for execution '%s' for instance "
                     "'%s' following the successful completion of task '%s'.",
                     execution.id, task.instance, task.id)
 
@@ -3204,8 +3225,8 @@ class ConductorServerEndpoint(object):
                     msg = (
                         "%s Please note that any cleanup operations this task "
                         "should have included will need to performed manually "
-                        "once the debugging process has been completed." % (
-                            msg))
+                        "once the debugging process has been completed." %
+                        (msg))
                 db_api.set_task_status(
                     ctxt, subtask.id,
                     constants.TASK_STATUS_CANCELED_FOR_DEBUGGING,
@@ -3503,8 +3524,8 @@ class ConductorServerEndpoint(object):
         if replica_status not in valid_statuses:
             raise exception.InvalidReplicaState(
                 'Replica Schedule cannot be deleted while the Replica is in '
-                '%s state. Please wait for the Replica execution to finish' % (
-                    replica_status))
+                '%s state. Please wait for the Replica execution to finish' %
+                (replica_status))
         db_api.delete_replica_schedule(
             ctxt, replica_id, schedule_id, None,
             lambda ctxt, sched: self._cleanup_schedule_resources(
@@ -3620,7 +3641,8 @@ class ConductorServerEndpoint(object):
 
         self._begin_tasks(ctxt, replica, execution)
 
-        return self.get_replica_tasks_execution(ctxt, replica_id, execution.id)
+        return self.get_replica_tasks_execution(
+            ctxt, replica_id, execution.id)
 
     def get_diagnostics(self, ctxt):
         diagnostics = utils.get_diagnostics_info()
@@ -3711,7 +3733,7 @@ class ConductorServerEndpoint(object):
                 db_api.update_service(
                     ctxt, service.id, {
                         "mapped_regions": mapped_regions})
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Error adding region mappings during new service "
                     "registration (host: %s), cleaning up endpoint and "

+ 1 - 1
coriolis/conductor/rpc/utils.py

@@ -46,7 +46,7 @@ def check_create_registration_for_service(
                     mapped_regions=mapped_regions, providers=providers,
                     specs=specs)
             return worker_service
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Failed to register service with specs %s. Retrying again in "
                 "%d seconds. Error was: %s", props, retry_period,

+ 4 - 3
coriolis/constants.py

@@ -268,8 +268,8 @@ OS_TYPE_UNKNOWN = "unknown"
 
 DEFAULT_OS_TYPE = OS_TYPE_LINUX
 
-VALID_OS_TYPES = [
-    OS_TYPE_BSD, OS_TYPE_LINUX, OS_TYPE_OS_X, OS_TYPE_SOLARIS, OS_TYPE_WINDOWS]
+VALID_OS_TYPES = [OS_TYPE_BSD, OS_TYPE_LINUX,
+                  OS_TYPE_OS_X, OS_TYPE_SOLARIS, OS_TYPE_WINDOWS]
 
 TMP_DIRS_KEY = "__tmp_dirs"
 
@@ -360,7 +360,8 @@ ACTIVE_MINION_POOL_STATUSES = [
     MINION_POOL_STATUS_DEALLOCATING_MACHINES,
     MINION_POOL_STATUS_DEALLOCATING_SHARED_RESOURCES]
 
-MINION_MACHINE_IDENTIFIER_FORMAT = "coriolis-pool-%(pool_id)s-minion-%(minion_id)s"
+MINION_MACHINE_IDENTIFIER_FORMAT = (
+    "coriolis-pool-%(pool_id)s-minion-%(minion_id)s")
 MINION_MACHINE_STATUS_UNINITIALIZED = "UNINITIALIZED"
 MINION_MACHINE_STATUS_HEALTHCHECKING = "HEALTHCHECKING"
 MINION_MACHINE_STATUS_ALLOCATING = "ALLOCATING"

+ 9 - 15
coriolis/context.py

@@ -22,21 +22,15 @@ class RequestContext(context.RequestContext):
                  show_deleted=None, trust_id=None,
                  delete_trust_id=False, **kwargs):
 
-        super(RequestContext, self).__init__(auth_token=auth_token,
-                                             user=user,
-                                             project_id=project_id,
-                                             domain_name=domain_name,
-                                             domain_id=domain_id,
-                                             user_domain_name=user_domain_name,
-                                             user_domain_id=user_domain_id,
-                                             project_domain_name=(
-                                                 project_domain_name),
-                                             project_domain_id=(
-                                                 project_domain_id),
-                                             is_admin=is_admin,
-                                             show_deleted=show_deleted,
-                                             request_id=request_id,
-                                             overwrite=overwrite)
+        super(
+            RequestContext, self).__init__(
+            auth_token=auth_token, user=user, project_id=project_id,
+            domain_name=domain_name, domain_id=domain_id,
+            user_domain_name=user_domain_name, user_domain_id=user_domain_id,
+            project_domain_name=(project_domain_name),
+            project_domain_id=(project_domain_id),
+            is_admin=is_admin, show_deleted=show_deleted,
+            request_id=request_id, overwrite=overwrite)
         self.roles = roles or []
         self.project_name = project_name
         self.remote_address = remote_address

+ 5 - 4
coriolis/cron/cron.py

@@ -108,7 +108,8 @@ class CronJob(object):
                   'minute', 'second', 'dow')
         dt_fields = dict(zip(fields, dt.timetuple()))
 
-        pairs = [(dt_fields[i], self.schedule.get(i)) for i in SCHEDULE_FIELDS]
+        pairs = [(dt_fields[i], self.schedule.get(i))
+                 for i in SCHEDULE_FIELDS]
         compared = self._compare(pairs)
         return False not in compared
 
@@ -211,9 +212,9 @@ class Cron(object):
             # TODO(gsamfira): send this to the controller and update
             # the logs table...or do something much more meaningful
             if error:
-                LOG.error("Job %(job_desc)s exited with error: %(job_err)r" % {
-                    "job_desc": desc,
-                    "job_err": error})
+                LOG.error(
+                    "Job %(job_desc)s exited with error: %(job_err)r" %
+                    {"job_desc": desc, "job_err": error})
             if result:
                 LOG.info("Job %(desc)s returned: %(ret)r" % {
                     "desc": desc,

+ 1 - 1
coriolis/data_transfer.py

@@ -10,9 +10,9 @@ import zlib
 import requests
 import requests_unixsocket
 
-from urllib import parse
 from oslo_config import cfg
 from oslo_log import log as logging
+from urllib import parse
 
 from coriolis import constants
 from coriolis import exception

+ 12 - 11
coriolis/db/api.py

@@ -65,7 +65,8 @@ def _model_query(context, *args):
     return session.query(*args)
 
 
-def _update_sqlalchemy_object_fields(obj, updateable_fields, values_to_update):
+def _update_sqlalchemy_object_fields(
+        obj, updateable_fields, values_to_update):
     """ Updates the given 'values_to_update' on the provided sqlalchemy object
     as long as they are included as 'updateable_fields'.
     :param obj: object: sqlalchemy object
@@ -182,7 +183,7 @@ def update_endpoint(context, endpoint_id, updated_values):
                     region_to_unmap, endpoint_id)
                 delete_endpoint_region_mapping(
                     context, endpoint_id, region_to_unmap)
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Exception occurred while attempting to unmap region '%s' "
                     "from endpoint '%s'. Ignoring. Error was: %s",
@@ -227,7 +228,7 @@ def update_endpoint(context, endpoint_id, updated_values):
                 mapping.endpoint_id = endpoint_id
                 add_endpoint_region_mapping(context, mapping)
                 newly_mapped_regions.append(region_id)
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Exception occurred while adding region mapping for '%s' to "
                 "endpoint '%s'. Cleaning up created mappings (%s). Error was: "
@@ -240,7 +241,7 @@ def update_endpoint(context, endpoint_id, updated_values):
     try:
         _update_sqlalchemy_object_fields(
             endpoint, updateable_fields, updated_values)
-    except Exception as ex:
+    except Exception:
         LOG.warn(
             "Exception occurred while updating fields of endpoint '%s'. "
             "Cleaning ""up created mappings (%s). Error was: %s",
@@ -886,7 +887,8 @@ def add_task_progress_update(
 def update_task_progress_update(
         context, task_id, update_index, new_current_step,
         new_total_steps=None, new_message=None):
-    task_progress_update = _get_progress_update(context, task_id, update_index)
+    task_progress_update = _get_progress_update(
+        context, task_id, update_index)
     if not task_progress_update:
         raise exception.NotFound(
             "Could not find progress update for task with ID '%s' and "
@@ -1042,7 +1044,7 @@ def get_region_mappings_for_endpoint(
         models.EndpointRegionMapping.endpoint_id == endpoint_id)
     if enabled_regions_only:
         q = q.filter(
-            models.Region.enabled == True)
+            models.Region.enabled == True)  # noqa: E712
     return q.all()
 
 
@@ -1108,7 +1110,7 @@ def update_service(context, service_id, updated_values):
                     region_to_unmap, service_id)
                 delete_service_region_mapping(
                     context, service_id, region_to_unmap)
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Exception occurred while attempting to unmap region '%s' "
                     "from service '%s'. Ignoring. Error was: %s",
@@ -1153,7 +1155,7 @@ def update_service(context, service_id, updated_values):
                 mapping.service_id = service_id
                 add_service_region_mapping(context, mapping)
                 newly_mapped_regions.append(region_id)
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Exception occurred while adding region mapping for '%s' to "
                 "service '%s'. Cleaning up created mappings (%s). Error was: "
@@ -1166,7 +1168,7 @@ def update_service(context, service_id, updated_values):
     try:
         _update_sqlalchemy_object_fields(
             service, updateable_fields, updated_values)
-    except Exception as ex:
+    except Exception:
         LOG.warn(
             "Exception occurred while updating fields of service '%s'. "
             "Cleaning ""up created mappings (%s). Error was: %s",
@@ -1244,7 +1246,7 @@ def get_region_mappings_for_service(
         models.ServiceRegionMapping.service_id == service_id)
     if enabled_regions_only:
         q = q.filter(
-            models.Region.enabled == True)
+            models.Region.enabled == True)  # noqa: E712
     return q.all()
 
 
@@ -1349,7 +1351,6 @@ def set_minion_machines_allocation_statuses(
 
 @enginefacade.writer
 def delete_minion_machine(context, minion_machine_id):
-    minion_machine = get_minion_machine(context, minion_machine_id)
     # TODO(aznashwan): update models to be soft-delete-aware to
     # avoid needing to hard-delete here:
     count = _soft_delete_aware_query(context, models.MinionMachine).filter_by(

+ 24 - 17
coriolis/db/sqlalchemy/migrate_repo/versions/001_initial.py

@@ -45,29 +45,36 @@ def upgrade(migrate_engine):
     )
 
     task = sqlalchemy.Table(
-        'task', meta,
-        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
-                          default=lambda: str(uuid.uuid4())),
+        'task', meta, sqlalchemy.Column(
+            'id', sqlalchemy.String(36),
+            primary_key=True, default=lambda: str(uuid.uuid4())),
         sqlalchemy.Column('created_at', sqlalchemy.DateTime),
         sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
         sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
         sqlalchemy.Column('deleted', sqlalchemy.String(36)),
-        sqlalchemy.Column("execution_id", sqlalchemy.String(36),
-                          sqlalchemy.ForeignKey(
-                              'tasks_execution.id'),
-                          nullable=False),
-        sqlalchemy.Column("instance", sqlalchemy.String(1024), nullable=False),
-        sqlalchemy.Column("host", sqlalchemy.String(1024), nullable=True),
-        sqlalchemy.Column("process_id", sqlalchemy.Integer, nullable=True),
-        sqlalchemy.Column("status", sqlalchemy.String(100), nullable=False),
-        sqlalchemy.Column("task_type", sqlalchemy.String(100),
-                          nullable=False),
-        sqlalchemy.Column("exception_details", sqlalchemy.Text, nullable=True),
+        sqlalchemy.Column(
+            "execution_id", sqlalchemy.String(36),
+            sqlalchemy.ForeignKey('tasks_execution.id'),
+            nullable=False),
+        sqlalchemy.Column(
+            "instance", sqlalchemy.String(1024),
+            nullable=False),
+        sqlalchemy.Column(
+            "host", sqlalchemy.String(1024),
+            nullable=True),
+        sqlalchemy.Column(
+            "process_id", sqlalchemy.Integer, nullable=True),
+        sqlalchemy.Column(
+            "status", sqlalchemy.String(100),
+            nullable=False),
+        sqlalchemy.Column(
+            "task_type", sqlalchemy.String(100),
+            nullable=False),
+        sqlalchemy.Column(
+            "exception_details", sqlalchemy.Text, nullable=True),
         sqlalchemy.Column("depends_on", sqlalchemy.Text, nullable=True),
         sqlalchemy.Column("on_error", sqlalchemy.Boolean, nullable=True),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
+        mysql_engine='InnoDB', mysql_charset='utf8')
 
     tasks_execution = sqlalchemy.Table(
         'tasks_execution', meta,

+ 0 - 1
coriolis/db/sqlalchemy/migrate_repo/versions/012_adds_migration_sync_fields.py

@@ -1,5 +1,4 @@
 import sqlalchemy
-from sqlalchemy import types
 
 
 def upgrade(migrate_engine):

+ 0 - 1
coriolis/db/sqlalchemy/migrate_repo/versions/013_adds_task_index.py

@@ -1,5 +1,4 @@
 import sqlalchemy
-from sqlalchemy import types
 
 
 def upgrade(migrate_engine):

+ 40 - 33
coriolis/db/sqlalchemy/migrate_repo/versions/016_adds_minion_vm_pools.py

@@ -10,8 +10,6 @@ def upgrade(migrate_engine):
     meta = sqlalchemy.MetaData()
     meta.bind = migrate_engine
 
-    endpoint = sqlalchemy.Table(
-        'endpoint', meta, autoload=True)
     base_transfer_action = sqlalchemy.Table(
         'base_transfer_action', meta, autoload=True)
 
@@ -25,36 +23,42 @@ def upgrade(migrate_engine):
     # add table for pool lifecycles:
     tables.append(
         sqlalchemy.Table(
-            'minion_pool',
-            meta,
-            sqlalchemy.Column(
+            'minion_pool', meta, sqlalchemy.Column(
                 "id", sqlalchemy.String(36),
-                default=lambda: str(uuid.uuid4()), primary_key=True),
+                default=lambda: str(uuid.uuid4()),
+                primary_key=True),
             sqlalchemy.Column("notes", sqlalchemy.Text, nullable=True),
             sqlalchemy.Column(
-                "user_id", sqlalchemy.String(255), nullable=False),
+                "user_id", sqlalchemy.String(255),
+                nullable=False),
             sqlalchemy.Column(
-                "project_id", sqlalchemy.String(255), nullable=False),
+                "project_id", sqlalchemy.String(255),
+                nullable=False),
             sqlalchemy.Column(
-                "maintenance_trust_id", sqlalchemy.String(255), nullable=True),
+                "maintenance_trust_id", sqlalchemy.String(255),
+                nullable=True),
             sqlalchemy.Column('created_at', sqlalchemy.DateTime),
             sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
             sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
             sqlalchemy.Column('deleted', sqlalchemy.String(36)),
             sqlalchemy.Column(
-                "name", sqlalchemy.String(255), nullable=False),
+                "name", sqlalchemy.String(255),
+                nullable=False),
             sqlalchemy.Column(
                 "endpoint_id", sqlalchemy.String(36),
-                sqlalchemy.ForeignKey('endpoint.id'), nullable=False),
+                sqlalchemy.ForeignKey('endpoint.id'),
+                nullable=False),
             sqlalchemy.Column(
                 "environment_options", sqlalchemy.Text, nullable=False),
             sqlalchemy.Column(
-                "os_type", sqlalchemy.String(255), nullable=False),
+                "os_type", sqlalchemy.String(255),
+                nullable=False),
             sqlalchemy.Column(
-                "platform", sqlalchemy.String(255), nullable=True),
+                "platform", sqlalchemy.String(255),
+                nullable=True),
             sqlalchemy.Column(
-                "status", sqlalchemy.String(255), nullable=False,
-                default=lambda: "UNKNOWN"),
+                "status", sqlalchemy.String(255),
+                nullable=False, default=lambda: "UNKNOWN"),
             sqlalchemy.Column(
                 "shared_resources", sqlalchemy.Text, nullable=True),
             sqlalchemy.Column(
@@ -121,24 +125,27 @@ def upgrade(migrate_engine):
         mysql_engine='InnoDB',
         mysql_charset='utf8'))
 
-    tables.append(sqlalchemy.Table(
-        'minion_pool_progress_update', meta,
-        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
-                          default=lambda: str(uuid.uuid4())),
-        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
-        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
-        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
-        sqlalchemy.Column('index', sqlalchemy.Integer, default=0),
-        sqlalchemy.Column('deleted', sqlalchemy.String(36)),
-        sqlalchemy.Column("pool_id", sqlalchemy.String(36),
-                          sqlalchemy.ForeignKey('minion_pool.id'),
-                          nullable=False),
-        sqlalchemy.Column(
-            "current_step", sqlalchemy.BigInteger, nullable=False),
-        sqlalchemy.Column("total_steps", sqlalchemy.BigInteger, nullable=True),
-        sqlalchemy.Column("message", sqlalchemy.Text, nullable=True),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'))
+    tables.append(
+        sqlalchemy.Table(
+            'minion_pool_progress_update', meta, sqlalchemy.Column(
+                'id', sqlalchemy.String(36),
+                primary_key=True, default=lambda: str(uuid.uuid4())),
+            sqlalchemy.Column('created_at', sqlalchemy.DateTime),
+            sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
+            sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
+            sqlalchemy.Column('index', sqlalchemy.Integer, default=0),
+            sqlalchemy.Column('deleted', sqlalchemy.String(36)),
+            sqlalchemy.Column(
+                "pool_id", sqlalchemy.String(36),
+                sqlalchemy.ForeignKey('minion_pool.id'),
+                nullable=False),
+            sqlalchemy.Column(
+                "current_step", sqlalchemy.BigInteger, nullable=False),
+            sqlalchemy.Column(
+                "total_steps", sqlalchemy.BigInteger, nullable=True),
+            sqlalchemy.Column(
+                "message", sqlalchemy.Text, nullable=True),
+            mysql_engine='InnoDB', mysql_charset='utf8'))
 
     # add the pool option properties for the transfer:
     origin_minion_pool_id = sqlalchemy.Column(

+ 0 - 1
coriolis/db/sqlalchemy/migrate_repo/versions/018_adds_task_progress_idices.py

@@ -1,5 +1,4 @@
 import sqlalchemy
-from sqlalchemy import types
 
 
 def upgrade(migrate_engine):

+ 3 - 3
coriolis/db/sqlalchemy/models.py

@@ -45,7 +45,7 @@ class TaskEvent(BASE, models.TimestampMixin, models.SoftDeleteMixin,
 
 
 class MinionPoolEvent(BASE, models.TimestampMixin, models.SoftDeleteMixin,
-                models.ModelBase):
+                      models.ModelBase):
     __tablename__ = 'minion_pool_event'
 
     id = sqlalchemy.Column(sqlalchemy.String(36),
@@ -538,8 +538,8 @@ class MinionMachine(BASE, models.TimestampMixin, models.ModelBase,
 
 
 class MinionPool(
-            BASE, models.TimestampMixin, models.ModelBase,
-            models.SoftDeleteMixin):
+    BASE, models.TimestampMixin, models.ModelBase,
+    models.SoftDeleteMixin):
     __tablename__ = 'minion_pool'
 
     id = sqlalchemy.Column(

+ 2 - 2
coriolis/db/sqlalchemy/types.py

@@ -56,7 +56,7 @@ class Bson(Blob):
 
     def process_bind_param(self, value, dialect):
         return zlib.compress(
-                jsonutils.dumps(value).encode('utf-8'))
+            jsonutils.dumps(value).encode('utf-8'))
 
     def process_result_value(self, value, dialect):
         if value is None:
@@ -64,7 +64,7 @@ class Bson(Blob):
         data = None
         try:
             data = zlib.decompress(value)
-        except:
+        except Exception:
             data = value
         return jsonutils.loads(data)
 

+ 0 - 2
coriolis/endpoint_options/__init__.py

@@ -1,2 +0,0 @@
-# Copyright 2020 Cloudbase Solutions Srl
-# All Rights Reserved.

+ 4 - 2
coriolis/endpoint_options/api.py

@@ -23,10 +23,12 @@ class API(object):
 
     def get_endpoint_source_minion_pool_options(
             self, ctxt, endpoint_id, env=None, option_names=None):
-        return self._rpc_minion_manager_client.get_endpoint_source_minion_pool_options(
+        return (self._rpc_minion_manager_client.
+                get_endpoint_source_minion_pool_options)(
             ctxt, endpoint_id, env, option_names)
 
     def get_endpoint_destination_minion_pool_options(
             self, ctxt, endpoint_id, env=None, option_names=None):
-        return self._rpc_minion_manager_client.get_endpoint_destination_minion_pool_options(
+        return (self._rpc_minion_manager_client.
+                get_endpoint_destination_minion_pool_options)(
             ctxt, endpoint_id, env, option_names)

+ 0 - 2
coriolis/endpoint_resources/__init__.py

@@ -1,2 +0,0 @@
-# Copyright 2020 Cloudbase Solutions Srl
-# All Rights Reserved.

+ 5 - 3
coriolis/endpoints/api.py

@@ -1,9 +1,9 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from coriolis import utils
 from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis.minion_manager.rpc import client as rpc_minion_manager_client
+from coriolis import utils
 
 
 class API(object):
@@ -48,12 +48,14 @@ class API(object):
     @utils.bad_request_on_error("Invalid source minion pool environment: %s")
     def validate_endpoint_source_minion_pool_options(
             self, ctxt, endpoint_id, pool_environment):
-        return self._rpc_minion_manager_client.validate_endpoint_source_minion_pool_options(
+        return (self._rpc_minion_manager_client.
+                validate_endpoint_source_minion_pool_options)(
             ctxt, endpoint_id, pool_environment)
 
     @utils.bad_request_on_error(
         "Invalid destination minion pool environment: %s")
     def validate_endpoint_destination_minion_pool_options(
             self, ctxt, endpoint_id, pool_environment):
-        return self._rpc_minion_manager_client.validate_endpoint_destination_minion_pool_options(
+        return (self._rpc_minion_manager_client.
+                validate_endpoint_destination_minion_pool_options)(
             ctxt, endpoint_id, pool_environment)

+ 4 - 3
coriolis/events.py

@@ -56,18 +56,19 @@ class EventManager(object, with_metaclass(abc.ABCMeta)):
         if initial_step > 0 and total_steps > 0:
             perc = int(initial_step * 100 // total_steps)
         self._perc_steps[progress_update_id] = _PercStepData(
-                progress_update_id, perc, initial_step, total_steps)
+            progress_update_id, perc, initial_step, total_steps)
 
         return self._perc_steps[progress_update_id]
 
     def set_percentage_step(self, step, new_current_step):
         perc_step = self._perc_steps.get(
-                step.progress_update_id, None)
+            step.progress_update_id, None)
         if perc_step is None:
             return
 
         if perc_step.last_value > new_current_step:
-            LOG.warn("rollback for perc update %s not allowed" % step.progress_update_id)
+            LOG.warn("rollback for perc update %s not allowed" %
+                     step.progress_update_id)
             return
 
         perc = 0

+ 4 - 5
coriolis/exception.py

@@ -300,7 +300,8 @@ class OSMorphingToolsNotFound(NotFound):
 
 class OSDetectToolsNotFound(NotFound):
     message = _(
-        'No "%(os_type)s" OS detect tools were able to identify the OS for this VM. '
+        'No "%(os_type)s" OS detect tools were able to identify the OS for '
+        'this VM. '
         'This would indicate that it was either not possible to determine the '
         'exact OS release, or this OS release is not supported by Coriolis. '
         'Suggestions include performing any needed OSMorphing steps manually '
@@ -506,8 +507,7 @@ class OSMorphingSSHOperationTimeout(OSMorphingOperationTimeout):
         "Coriolis may have encountered connection issues to the minion machine"
         " or the command execution time exceeds the timeout set. Try extending"
         " the timeout by editing the 'default_osmorphing_operation_timeout' "
-        "in Coriolis' static configuration file."
-    )
+        "in Coriolis' static configuration file.")
 
 
 class OSMorphingWinRMOperationTimeout(OSMorphingOperationTimeout):
@@ -516,5 +516,4 @@ class OSMorphingWinRMOperationTimeout(OSMorphingOperationTimeout):
         "Coriolis may have encountered connection issues to the minion machine"
         " or the command execution time exceeds the timeout set. Try extending"
         " the timeout by editing the 'default_osmorphing_operation_timeout' "
-        "in Coriolis' static configuration file."
-    )
+        "in Coriolis' static configuration file.")

+ 7 - 7
coriolis/keystone.py

@@ -65,13 +65,13 @@ def create_trust(ctxt):
     trustor_proj_id = ctxt.project_id
     roles = ctxt.roles
 
-    LOG.debug("Granting Keystone trust. Trustor: %(trustor_user_id)s, trustee:"
-              " %(trustee_user_id)s, project: %(trustor_proj_id)s, roles:"
-              " %(roles)s",
-              {"trustor_user_id": trustor_user_id,
-               "trustee_user_id": trustee_user_id,
-               "trustor_proj_id": trustor_proj_id,
-               "roles": roles})
+    LOG.debug(
+        "Granting Keystone trust. Trustor: %(trustor_user_id)s, trustee:"
+        " %(trustee_user_id)s, project: %(trustor_proj_id)s, roles:"
+        " %(roles)s",
+        {"trustor_user_id": trustor_user_id,
+         "trustee_user_id": trustee_user_id,
+         "trustor_proj_id": trustor_proj_id, "roles": roles})
 
     # Trusts are not supported before Keystone v3
     client = kc_v3.Client(session=session)

+ 2 - 1
coriolis/licensing/client.py

@@ -140,7 +140,8 @@ class LicensingClient(object):
             response_key=response_key,
             appliance_scoped=appliance_scoped)
 
-    def _delete(self, resource, body, response_key=None, appliance_scoped=True):
+    def _delete(self, resource, body, response_key=None,
+                appliance_scoped=True):
         return self._do_req(
             "DELETE", resource, body=body,
             response_key=response_key,

+ 2 - 1
coriolis/migrations/manager.py

@@ -37,7 +37,8 @@ def _copy_volume(volume, disk_image_reader, backup_writer, event_manager):
                     offset, max_block_size)
                 if not allocated or zero_block and skip_zeroes:
                     if not allocated:
-                        LOG.debug("Unallocated block detected: %s", block_size)
+                        LOG.debug(
+                            "Unallocated block detected: %s", block_size)
                     else:
                         LOG.debug("Skipping zero block: %s", block_size)
                     offset += block_size

+ 11 - 9
coriolis/minion_manager/rpc/client.py

@@ -26,15 +26,16 @@ CONF.register_opts(MINION_MANAGER_OPTS, 'minion_manager')
 class MinionManagerClient(rpc.BaseRPCClient):
 
     def __init__(self, timeout=None):
-        target = messaging.Target(topic='coriolis_minion_manager', version=VERSION)
+        target = messaging.Target(
+            topic='coriolis_minion_manager', version=VERSION)
         if timeout is None:
             timeout = CONF.minion_manager.minion_mananger_rpc_timeout
         super(MinionManagerClient, self).__init__(
             target, timeout=timeout)
 
     def add_minion_pool_progress_update(
-            self, ctxt, minion_pool_id, message, initial_step=0, total_steps=0,
-            return_event=False):
+            self, ctxt, minion_pool_id, message, initial_step=0,
+            total_steps=0, return_event=False):
         operation = self._cast
         if return_event:
             operation = self._call
@@ -44,14 +45,14 @@ class MinionManagerClient(rpc.BaseRPCClient):
             initial_step=initial_step, total_steps=total_steps)
 
     def update_minion_pool_progress_update(
-            self, ctxt, minion_pool_id, progress_update_index, new_current_step,
-            new_total_steps=None, new_message=None):
+            self, ctxt, minion_pool_id, progress_update_index,
+            new_current_step, new_total_steps=None, new_message=None):
         self._cast(
             ctxt, 'update_minion_pool_progress_update',
             minion_pool_id=minion_pool_id,
             progress_update_index=progress_update_index,
-            new_current_step=new_current_step, new_total_steps=new_total_steps,
-            new_message=new_message)
+            new_current_step=new_current_step,
+            new_total_steps=new_total_steps, new_message=new_message)
 
     def add_minion_pool_event(self, ctxt, minion_pool_id, level, message):
         return self._cast(
@@ -81,13 +82,14 @@ class MinionManagerClient(rpc.BaseRPCClient):
             include_osmorphing_minions=include_osmorphing_minions)
 
     def deallocate_minion_machine(self, ctxt, minion_machine_id):
-         return self._cast(
+        return self._cast(
             ctxt, 'deallocate_minion_machine',
             minion_machine_id=minion_machine_id)
 
     def deallocate_minion_machines_for_action(self, ctxt, action_id):
         return self._cast(
-            ctxt, 'deallocate_minion_machines_for_action', action_id=action_id)
+            ctxt, 'deallocate_minion_machines_for_action',
+            action_id=action_id)
 
     def create_minion_pool(
             self, ctxt, name, endpoint_id, pool_platform, pool_os_type,

+ 176 - 141
coriolis/minion_manager/rpc/server.py

@@ -13,21 +13,21 @@ from taskflow.patterns import graph_flow
 from taskflow.patterns import linear_flow
 from taskflow.patterns import unordered_flow
 
+from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis import constants
 from coriolis import context
-from coriolis import exception
-from coriolis import keystone
-from coriolis import utils
-from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis.cron import cron
 from coriolis.db import api as db_api
 from coriolis.db.sqlalchemy import models
+from coriolis import exception
+from coriolis import keystone
 from coriolis.minion_manager.rpc import client as rpc_minion_manager_client
-from coriolis.minion_manager.rpc import tasks as minion_manager_tasks
+from coriolis.minion_manager.rpc import tasks as minion_mgr_tasks
 from coriolis.minion_manager.rpc import utils as minion_manager_utils
 from coriolis.scheduler.rpc import client as rpc_scheduler_client
 from coriolis.taskflow import runner as taskflow_runner
 from coriolis.taskflow import utils as taskflow_utils
+from coriolis import utils
 from coriolis.worker.rpc import client as rpc_worker_client
 
 
@@ -74,7 +74,7 @@ class MinionManagerServerEndpoint(object):
             self._cron = cron.Cron()
             self._init_pools_refresh_cron_jobs()
             self._cron.start()
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "A fatal exception occurred while attempting to set up cron "
                 "jobs for automatic pool refreshing. Automatic refreshing will"
@@ -110,7 +110,7 @@ class MinionManagerServerEndpoint(object):
                 "server startup.", minion_pool.id)
             try:
                 self._register_refresh_jobs_for_minion_pool(minion_pool)
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "An Exception occurred while setting up automatic "
                     "refreshing for minion pool with ID '%s'. Error was: %s",
@@ -119,7 +119,8 @@ class MinionManagerServerEndpoint(object):
     def _register_refresh_jobs_for_minion_pool(
             self, minion_pool, period_minutes=None):
         if period_minutes is None:
-            period_minutes = CONF.minion_manager.minion_pool_default_refresh_period_minutes
+            period_minutes = (
+                CONF.minion_manager.minion_pool_default_refresh_period_minutes)
 
         if period_minutes < 0:
             LOG.warn(
@@ -136,8 +137,8 @@ class MinionManagerServerEndpoint(object):
 
         if period_minutes > 60:
             LOG.warn(
-                "Selected pool refresh period_minutes is greater than 60, defaulting "
-                "to 10. Original value was: %s", period_minutes)
+                "Selected pool refresh period_minutes is greater than 60, "
+                "defaulting to 10. Original value was: %s", period_minutes)
             period_minutes = 10
         admin_ctxt = context.get_admin_context(
             minion_pool.maintenance_trust_id)
@@ -165,7 +166,7 @@ class MinionManagerServerEndpoint(object):
             minion_pool.id)
         try:
             self._cron.unregister_jobs_with_prefix(job_prefix)
-        except Exception as ex:
+        except Exception:
             if not raise_on_error:
                 LOG.warn(
                     "Exception occurred while unregistering minion pool "
@@ -221,12 +222,14 @@ class MinionManagerServerEndpoint(object):
             self, ctxt, endpoint_id, env, option_names):
         endpoint = self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
 
-        worker_service = self._rpc_scheduler_client.get_worker_service_for_specs(
-            ctxt, enabled=True,
-            region_sets=[[reg['id'] for reg in endpoint['mapped_regions']]],
-            provider_requirements={
-                endpoint['type']: [
-                    constants.PROVIDER_TYPE_SOURCE_MINION_POOL]})
+        worker_service = (
+            self._rpc_scheduler_client.get_worker_service_for_specs(
+                ctxt, enabled=True,
+                region_sets=[[r['id'] for r in endpoint['mapped_regions']]],
+                provider_requirements={
+                    endpoint['type']: [
+                        constants.PROVIDER_TYPE_SOURCE_MINION_POOL]})
+        )
         worker_rpc = rpc_worker_client.WorkerClient.from_service_definition(
             worker_service)
 
@@ -238,12 +241,13 @@ class MinionManagerServerEndpoint(object):
             self, ctxt, endpoint_id, env, option_names):
         endpoint = self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
 
-        worker_service = self._rpc_scheduler_client.get_worker_service_for_specs(
-            ctxt, enabled=True,
-            region_sets=[[reg['id'] for reg in endpoint['mapped_regions']]],
-            provider_requirements={
-                endpoint['type']: [
-                    constants.PROVIDER_TYPE_DESTINATION_MINION_POOL]})
+        worker_service = (
+            self._rpc_scheduler_client.get_worker_service_for_specs(
+                ctxt, enabled=True,
+                region_sets=[[r['id'] for r in endpoint['mapped_regions']]],
+                provider_requirements={
+                    endpoint['type']: [
+                        constants.PROVIDER_TYPE_DESTINATION_MINION_POOL]}))
         worker_rpc = rpc_worker_client.WorkerClient.from_service_definition(
             worker_service)
 
@@ -255,12 +259,13 @@ class MinionManagerServerEndpoint(object):
             self, ctxt, endpoint_id, pool_environment):
         endpoint = self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
 
-        worker_service = self._rpc_scheduler_client.get_worker_service_for_specs(
-            ctxt, enabled=True,
-            region_sets=[[reg['id'] for reg in endpoint['mapped_regions']]],
-            provider_requirements={
-                endpoint['type']: [
-                    constants.PROVIDER_TYPE_SOURCE_MINION_POOL]})
+        worker_service = (
+            self._rpc_scheduler_client.get_worker_service_for_specs(
+                ctxt, enabled=True,
+                region_sets=[[r['id'] for r in endpoint['mapped_regions']]],
+                provider_requirements={
+                    endpoint['type']: [
+                        constants.PROVIDER_TYPE_SOURCE_MINION_POOL]}))
         worker_rpc = rpc_worker_client.WorkerClient.from_service_definition(
             worker_service)
 
@@ -271,12 +276,13 @@ class MinionManagerServerEndpoint(object):
             self, ctxt, endpoint_id, pool_environment):
         endpoint = self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
 
-        worker_service = self._rpc_scheduler_client.get_worker_service_for_specs(
-            ctxt, enabled=True,
-            region_sets=[[reg['id'] for reg in endpoint['mapped_regions']]],
-            provider_requirements={
-                endpoint['type']: [
-                    constants.PROVIDER_TYPE_DESTINATION_MINION_POOL]})
+        worker_service = (
+            self._rpc_scheduler_client.get_worker_service_for_specs(
+                ctxt, enabled=True,
+                region_sets=[[r['id'] for r in endpoint['mapped_regions']]],
+                provider_requirements={
+                    endpoint['type']: [
+                        constants.PROVIDER_TYPE_DESTINATION_MINION_POOL]}))
         worker_rpc = rpc_worker_client.WorkerClient.from_service_definition(
             worker_service)
 
@@ -294,7 +300,8 @@ class MinionManagerServerEndpoint(object):
         self._add_minion_pool_event(ctxt, minion_pool_id, level, message)
 
     def _add_minion_pool_progress_update(
-            self, ctxt, minion_pool_id, message, initial_step=0, total_steps=0):
+            self, ctxt, minion_pool_id, message, initial_step=0,
+            total_steps=0):
         LOG.info(
             "Adding pool progress update for %s: %s", minion_pool_id, message)
         db_api.add_minion_pool_progress_update(
@@ -303,7 +310,8 @@ class MinionManagerServerEndpoint(object):
 
     @minion_manager_utils.minion_pool_synchronized_op
     def add_minion_pool_progress_update(
-            self, ctxt, minion_pool_id, message, initial_step=0, total_steps=0):
+            self, ctxt, minion_pool_id, message, initial_step=0,
+            total_steps=0):
         self._add_minion_pool_progress_update(
             ctxt, minion_pool_id, message, initial_step=initial_step,
             total_steps=total_steps)
@@ -351,12 +359,14 @@ class MinionManagerServerEndpoint(object):
             for pool in db_api.get_minion_pools(
                 ctxt, include_machines=False, include_events=False,
                 include_progress_updates=False, to_dict=False)}
+
         def _get_pool(pool_id):
             pool = minion_pools.get(pool_id)
             if not pool:
                 raise exception.NotFound(
                     "Could not find minion pool with ID '%s'." % pool_id)
             return pool
+
         def _check_pool_minion_count(
                 minion_pool, instances, minion_pool_type=""):
             desired_minion_count = len(instances)
@@ -364,7 +374,7 @@ class MinionManagerServerEndpoint(object):
                 raise exception.InvalidMinionPoolState(
                     "Minion Pool '%s' is an invalid state ('%s') to be "
                     "used as a %s pool for action '%s'. The pool must be "
-                    "in '%s' status."  % (
+                    "in '%s' status." % (
                         minion_pool.id, minion_pool.status,
                         minion_pool_type.lower(), action['id'],
                         constants.MINION_POOL_STATUS_ALLOCATED))
@@ -469,7 +479,7 @@ class MinionManagerServerEndpoint(object):
                 else:
                     osmorphing_pool_mappings[pool_id].append(instance_id)
 
-            for (pool_id, instances_to_osmorph) in osmorphing_pool_mappings.items():
+            for (pool_id, instances) in osmorphing_pool_mappings.items():
                 osmorphing_pool = _get_pool(pool_id)
                 if osmorphing_pool.endpoint_id != (
                         action['destination_endpoint_id']):
@@ -477,7 +487,7 @@ class MinionManagerServerEndpoint(object):
                         "The selected OSMorphing minion pool for instances %s"
                         " ('%s') belongs to a different Coriolis endpoint "
                         "('%s') than the destination endpoint ('%s')" % (
-                            instances_to_osmorph, pool_id,
+                            instances, pool_id,
                             osmorphing_pool.endpoint_id,
                             action['destination_endpoint_id']))
                 if osmorphing_pool.platform != (
@@ -486,17 +496,17 @@ class MinionManagerServerEndpoint(object):
                         "The selected OSMorphing minion pool for instances %s "
                         "('%s') is configured as a '%s' pool. The pool must "
                         "be of type %s to be used for OSMorphing." % (
-                            instances_to_osmorph, pool_id,
+                            instances, pool_id,
                             osmorphing_pool.platform,
                             constants.PROVIDER_PLATFORM_DESTINATION))
                 _check_pool_minion_count(
-                    osmorphing_pool, instances_to_osmorph,
+                    osmorphing_pool, instances,
                     minion_pool_type="OSMorphing")
                 LOG.debug(
                     "Successfully validated compatibility of destination "
                     "minion pool '%s' for use as OSMorphing minion for "
                     "instances %s during action '%s'." % (
-                        pool_id, instances_to_osmorph, action['id']))
+                        pool_id, instances, action['id']))
         LOG.debug(
             "Successfully validated minion pool selections for action '%s' "
             "with properties: %s", action['id'], action)
@@ -504,8 +514,9 @@ class MinionManagerServerEndpoint(object):
     def allocate_minion_machines_for_replica(
             self, ctxt, replica):
         try:
-            minion_allocations = self._run_machine_allocation_subflow_for_action(
-                ctxt, replica, constants.TRANSFER_ACTION_TYPE_REPLICA,
+            self._run_machine_allocation_subflow_for_action(
+                ctxt, replica,
+                constants.TRANSFER_ACTION_TYPE_REPLICA,
                 include_transfer_minions=True,
                 include_osmorphing_minions=False)
         except Exception as ex:
@@ -543,8 +554,9 @@ class MinionManagerServerEndpoint(object):
                 [constants.MINION_MACHINE_STATUS_UNINITIALIZED])
             self.deallocate_minion_machines_for_action(
                 ctxt, migration['id'])
-            self._rpc_conductor_client.report_migration_minions_allocation_error(
-                ctxt, migration['id'], str(ex))
+            (self._rpc_conductor_client
+                .report_migration_minions_allocation_error(
+                    ctxt, migration['id'], str(ex)))
             raise
 
     def _make_minion_machine_allocation_subflow_for_action(
@@ -563,13 +575,14 @@ class MinionManagerServerEndpoint(object):
         """
         currently_available_machines = [
             machine for machine in minion_pool.minion_machines
-            if machine.allocation_status == constants.MINION_MACHINE_STATUS_AVAILABLE]
+            if machine.allocation_status
+            == constants.MINION_MACHINE_STATUS_AVAILABLE]
         extra_available_machine_slots = (
             minion_pool.maximum_minions - len(minion_pool.minion_machines))
         num_instances = len(action_instances)
         num_currently_available_machines = len(currently_available_machines)
-        if num_instances > (len(currently_available_machines) + (
-                                extra_available_machine_slots)):
+        if (num_instances > (len(currently_available_machines) + (
+                extra_available_machine_slots))):
             raise exception.InvalidMinionPoolState(
                 "Minion pool '%s' is unable to accommodate the requested "
                 "number of machines (%s) for transfer action '%s', as it only "
@@ -594,7 +607,8 @@ class MinionManagerServerEndpoint(object):
                         "Excluding minion machine '%s' from search for use "
                         "action '%s'", machine.id, action_id)
                     continue
-                if machine.allocation_status != constants.MINION_MACHINE_STATUS_AVAILABLE:
+                if (machine.allocation_status !=
+                        constants.MINION_MACHINE_STATUS_AVAILABLE):
                     LOG.debug(
                         "Minion machine with ID '%s' is in status '%s' "
                         "instead of the expected '%s'. Skipping for use "
@@ -616,8 +630,8 @@ class MinionManagerServerEndpoint(object):
                 raise exception.InvalidInput(
                     "Instance with identifier '%s' passed twice for "
                     "minion machine allocation from pool '%s' for action "
-                    "'%s'. Full instances list was: %s" % (
-                        instance, minion_pool.id, action_id, action_instances))
+                    "'%s'. Full instances list was: %s" %
+                    (instance, minion_pool.id, action_id, action_instances))
             minion_machine = _select_machine(
                 minion_pool, exclude=instance_minion_allocations.values())
             if minion_machine:
@@ -656,7 +670,7 @@ class MinionManagerServerEndpoint(object):
 
                 instance_minion_allocations[instance] = new_machine_id
                 allocation_subflow.add(
-                    minion_manager_tasks.AllocateMinionMachineTask(
+                    minion_mgr_tasks.AllocateMinionMachineTask(
                         minion_pool.id, new_machine_id, minion_pool.platform,
                         allocate_to_action=action_id,
                         raise_on_cleanup_failure=False,
@@ -685,8 +699,8 @@ class MinionManagerServerEndpoint(object):
             if machine_db_entries_to_add:
                 for new_machine in machine_db_entries_to_add:
                     LOG.info(
-                        "Adding new minion machine with ID '%s' to the DB for pool "
-                        "'%s' for use with action '%s'.",
+                        "Adding new minion machine with ID '%s' to the DB "
+                        "for pool '%s' for use with action '%s'.",
                         new_machine_id, minion_pool.id, action_id)
                     db_api.add_minion_machine(ctxt, new_machine)
                     new_machine_db_entries_added.append(new_machine.id)
@@ -695,7 +709,7 @@ class MinionManagerServerEndpoint(object):
                     "The following new minion machines will be created for use"
                     " in transfer action '%s': %s" % (
                         action_id, [m.id for m in machine_db_entries_to_add]))
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Exception occurred while adding new minion machine entries to"
                 " the DB for pool '%s' for use with action '%s'. Clearing "
@@ -726,7 +740,7 @@ class MinionManagerServerEndpoint(object):
             for new_machine in new_machine_db_entries_added:
                 try:
                     db_api.delete_minion_machine(ctxt, new_machine.id)
-                except Exception as ex:
+                except Exception:
                     LOG.warn(
                         "Error occurred while removing minion machine entry "
                         "'%s' from the DB. This may leave the pool in an "
@@ -768,26 +782,32 @@ class MinionManagerServerEndpoint(object):
         allocation_confirmation_reporting_task_class = None
         if action_type == constants.TRANSFER_ACTION_TYPE_MIGRATION:
             allocation_flow_name_format = (
-                minion_manager_tasks.MINION_POOL_MIGRATION_ALLOCATION_FLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_MIGRATION_ALLOCATION_FLOW_NAME_FORMAT))
             allocation_failure_reporting_task_class = (
-                minion_manager_tasks.ReportMinionAllocationFailureForMigrationTask)
+                minion_mgr_tasks.ReportMinionAllocationFailureForMigrationTask)
             allocation_confirmation_reporting_task_class = (
-                minion_manager_tasks.ConfirmMinionAllocationForMigrationTask)
+                minion_mgr_tasks.ConfirmMinionAllocationForMigrationTask)
             machines_allocation_subflow_name_format = (
-                minion_manager_tasks.MINION_POOL_MIGRATION_ALLOCATION_SUBFLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_MIGRATION_ALLOCATION_SUBFLOW_NAME_FORMAT))
             machine_action_allocation_subflow_name_format = (
-                minion_manager_tasks.MINION_POOL_ALLOCATE_MACHINES_FOR_MIGRATION_SUBFLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_ALLOCATE_MACHINES_FOR_MIGRATION_SUBFLOW_NAME_FORMAT))  # noqa: E501
         elif action_type == constants.TRANSFER_ACTION_TYPE_REPLICA:
             allocation_flow_name_format = (
-                minion_manager_tasks.MINION_POOL_REPLICA_ALLOCATION_FLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_REPLICA_ALLOCATION_FLOW_NAME_FORMAT))
             allocation_failure_reporting_task_class = (
-                minion_manager_tasks.ReportMinionAllocationFailureForReplicaTask)
+                minion_mgr_tasks.ReportMinionAllocationFailureForReplicaTask)
             allocation_confirmation_reporting_task_class = (
-                minion_manager_tasks.ConfirmMinionAllocationForReplicaTask)
+                minion_mgr_tasks.ConfirmMinionAllocationForReplicaTask)
             machines_allocation_subflow_name_format = (
-                minion_manager_tasks.MINION_POOL_REPLICA_ALLOCATION_SUBFLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_REPLICA_ALLOCATION_SUBFLOW_NAME_FORMAT))
             machine_action_allocation_subflow_name_format = (
-                minion_manager_tasks.MINION_POOL_ALLOCATE_MACHINES_FOR_REPLICA_SUBFLOW_NAME_FORMAT)
+                (minion_mgr_tasks.
+                    MINION_POOL_ALLOCATE_MACHINES_FOR_REPLICA_SUBFLOW_NAME_FORMAT))  # noqa: E501
         else:
             raise exception.InvalidInput(
                 "Unknown transfer action type '%s'" % action_type)
@@ -807,7 +827,6 @@ class MinionManagerServerEndpoint(object):
         # define subflow for all the pool minions allocations:
         machines_subflow = unordered_flow.Flow(
             machines_allocation_subflow_name_format % action['id'])
-        new_pools_machines_db_entries = {}
         pools_used = []
 
         def _check_pool_allocation_status(
@@ -816,7 +835,7 @@ class MinionManagerServerEndpoint(object):
                 raise exception.InvalidMinionPoolState(
                     "Minion Pool '%s' is an invalid state ('%s') to be "
                     "used as a %s pool for action '%s'. The pool must be "
-                    "in '%s' status."  % (
+                    "in '%s' status." % (
                         minion_pool.id, minion_pool.status,
                         minion_pool_type.lower(), action['id'],
                         constants.MINION_POOL_STATUS_ALLOCATED))
@@ -838,8 +857,9 @@ class MinionManagerServerEndpoint(object):
                     ctxt, minion_pool, endpoint_dict)
 
                 # add subflow for machine allocations from origin pool:
-                subflow_name = machine_action_allocation_subflow_name_format % (
-                    minion_pool.id, action['id'])
+                subflow_name = (
+                    machine_action_allocation_subflow_name_format % (
+                        minion_pool.id, action['id']))
                 # NOTE: required to avoid internal taskflow conflicts
                 subflow_name = "origin-%s" % subflow_name
                 allocations_subflow_result = (
@@ -875,8 +895,9 @@ class MinionManagerServerEndpoint(object):
                         ctxt, minion_pool, endpoint_dict))
 
                 # add subflow for machine allocations from destination pool:
-                subflow_name = machine_action_allocation_subflow_name_format % (
-                    minion_pool.id, action['id'])
+                subflow_name = (
+                    machine_action_allocation_subflow_name_format % (
+                        minion_pool.id, action['id']))
                 # NOTE: required to avoid internal taskflow conflicts
                 subflow_name = "destination-%s" % subflow_name
                 allocations_subflow_result = (
@@ -920,7 +941,8 @@ class MinionManagerServerEndpoint(object):
                         "Reusing destination minion pool with ID '%s' for the "
                         "following instances which had it selected as an "
                         "OSMorphing pool for action '%s': %s",
-                        osmorphing_pool_id, action['id'], action_instance_ids)
+                        osmorphing_pool_id, action['id'],
+                        action_instance_ids)
                     for instance in action_instance_ids:
                         instance_machine_allocations[
                             instance]['osmorphing_minion_id'] = (
@@ -940,24 +962,28 @@ class MinionManagerServerEndpoint(object):
                         minion_pool, "OSMorphing")
                     endpoint_dict = self._rpc_conductor_client.get_endpoint(
                         ctxt, minion_pool.endpoint_id)
-                    osmorphing_pool_store = self._get_pool_initial_taskflow_store_base(
-                        ctxt, minion_pool, endpoint_dict)
+                    osmorphing_pool_store = (
+                        self._get_pool_initial_taskflow_store_base(
+                            ctxt, minion_pool, endpoint_dict))
 
                     # add subflow for machine allocations from osmorphing pool:
-                    subflow_name = machine_action_allocation_subflow_name_format % (
-                        minion_pool.id, action['id'])
+                    subflow_name = (
+                        machine_action_allocation_subflow_name_format % (
+                            minion_pool.id, action['id']))
                     # NOTE: required to avoid internal taskflow conflicts
                     subflow_name = "osmorphing-%s" % subflow_name
                     allocations_subflow_result = (
-                        self._make_minion_machine_allocation_subflow_for_action(
+                        self._make_minion_machine_allocation_subflow_for_action(  # noqa: E501
                             ctxt, minion_pool, action['id'],
                             action_instance_ids,
-                            subflow_name, inject_for_tasks=osmorphing_pool_store))
+                            subflow_name,
+                            inject_for_tasks=osmorphing_pool_store))
                     machines_subflow.add(allocations_subflow_result['flow'])
 
                     # register each instances' osmorphing minion:
-                    osmorphing_machine_allocations = allocations_subflow_result[
-                        'action_instance_minion_allocation_mappings']
+                    osmorphing_machine_allocations = (
+                        allocations_subflow_result[
+                            'action_instance_minion_allocation_mappings'])
                     for (action_instance_id, allocated_minion_id) in (
                             osmorphing_machine_allocations.items()):
                         instance_machine_allocations[
@@ -1031,7 +1057,8 @@ class MinionManagerServerEndpoint(object):
             if machine.pool_id in exclude_pools:
                 LOG.debug(
                     "Skipping deletion of machine '%s' (status '%s') from "
-                    "whitelisted pool '%s'", machine.id, machine.allocation_status,
+                    "whitelisted pool '%s'",
+                    machine.id, machine.allocation_status,
                     machine.pool_id)
                 continue
 
@@ -1041,12 +1068,14 @@ class MinionManagerServerEndpoint(object):
                 pool_machine_mappings[machine.pool_id].append(machine)
 
         for (pool_id, machines) in pool_machine_mappings.items():
-            with minion_manager_utils.get_minion_pool_lock(
-                   pool_id, external=True):
+            with (minion_manager_utils.
+                  get_minion_pool_lock(pool_id, external=True)):
                 for machine in machines:
                     LOG.debug(
-                        "Deleting machine with ID '%s' (pool '%s', status '%s') "
-                        "from the DB.", machine.id, pool_id, machine.allocation_status)
+                        "Deleting machine with ID '%s' "
+                        "(pool '%s', status '%s') "
+                        "from the DB.", machine.id,
+                        pool_id, machine.allocation_status)
                     db_api.delete_minion_machine(ctxt, machine.id)
 
     def deallocate_minion_machine(self, ctxt, minion_machine_id):
@@ -1063,8 +1092,8 @@ class MinionManagerServerEndpoint(object):
         machine_allocated_status = constants.MINION_MACHINE_STATUS_IN_USE
         with minion_manager_utils.get_minion_pool_lock(
                 minion_machine.pool_id, external=True):
-            if minion_machine.allocation_status != machine_allocated_status or (
-                    not minion_machine.allocated_action):
+            if (minion_machine.allocation_status != machine_allocated_status
+                    or not minion_machine.allocated_action):
                 LOG.warn(
                     "Minion machine '%s' was either in an improper status (%s)"
                     ", or did not have an associated action ('%s') for "
@@ -1078,7 +1107,7 @@ class MinionManagerServerEndpoint(object):
                 minion_machine.allocation_status)
             db_api.update_minion_machine(
                 ctxt, minion_machine.id, {
-                    "allocation_status": constants.MINION_MACHINE_STATUS_AVAILABLE,
+                    "allocation_status": constants.MINION_MACHINE_STATUS_AVAILABLE,  # noqa: E501
                     "allocated_action": None})
             LOG.debug(
                 "Successfully deallocated minion machine with '%s'.",
@@ -1118,19 +1147,20 @@ class MinionManagerServerEndpoint(object):
                         LOG.warn(
                             "Found minion machine '%s' in pool '%s' which "
                             "is in '%s' status. Removing from the DB "
-                            "entirely." % (
-                                machine.id, pool_id, machine.allocation_status))
+                            "entirely." %
+                            (machine.id, pool_id, machine.allocation_status))
                         db_api.delete_minion_machine(
                             ctxt, machine.id)
                         LOG.info(
                             "Successfully deleted minion machine entry '%s' "
-                            "from pool '%s' from the DB.", machine.id, pool_id)
+                            "from pool '%s' from the DB.", machine.id,
+                            pool_id)
                         continue
                     LOG.debug(
                         "Going to mark minion machine '%s' (current status "
                         "'%s') of pool '%s' as available following machine "
-                        "deallocation request for action '%s'.",
-                        machine.id, machine.allocation_status, pool_id, action_id)
+                        "deallocation request for action '%s'.", machine.id,
+                        machine.allocation_status, pool_id, action_id)
                     machine_ids_to_deallocate.append(machine.id)
 
                 LOG.info(
@@ -1148,18 +1178,19 @@ class MinionManagerServerEndpoint(object):
 
     def _get_healtchcheck_flow_for_minion_machine(
             self, minion_pool, minion_machine, allocate_to_action=None,
-            machine_status_on_success=constants.MINION_MACHINE_STATUS_AVAILABLE,
+            machine_status_on_success=constants.MINION_MACHINE_STATUS_AVAILABLE,  # noqa: E501
             power_on_machine=True, inject_for_tasks=None):
         """ Returns a taskflow graph flow with a healtcheck task
         and redeployment subflow on error. """
         # define healthcheck subflow for each machine:
         machine_healthcheck_subflow = graph_flow.Flow(
-            minion_manager_tasks.MINION_POOL_HEALTHCHECK_MACHINE_SUBFLOW_NAME_FORMAT % (
+            (minion_mgr_tasks.
+                MINION_POOL_HEALTHCHECK_MACHINE_SUBFLOW_NAME_FORMAT) % (
                 minion_pool.id, minion_machine.id))
 
         # add healtcheck task to healthcheck subflow:
         machine_healthcheck_task = (
-            minion_manager_tasks.HealthcheckMinionMachineTask(
+            minion_mgr_tasks.HealthcheckMinionMachineTask(
                 minion_pool.id, minion_machine.id, minion_pool.platform,
                 machine_status_on_success=machine_status_on_success,
                 inject=inject_for_tasks,
@@ -1172,7 +1203,7 @@ class MinionManagerServerEndpoint(object):
         if power_on_machine:
             if minion_machine.power_status == (
                     constants.MINION_MACHINE_POWER_STATUS_POWERED_OFF):
-                power_on_task = minion_manager_tasks.PowerOnMinionMachineTask(
+                power_on_task = minion_mgr_tasks.PowerOnMinionMachineTask(
                     minion_pool.id, minion_machine.id, minion_pool.platform,
                     inject=inject_for_tasks,
                     # we prevent a raise here as the healthcheck subflow
@@ -1202,14 +1233,15 @@ class MinionManagerServerEndpoint(object):
 
         # define reallocation subflow:
         machine_reallocation_subflow = linear_flow.Flow(
-            minion_manager_tasks.MINION_POOL_REALLOCATE_MACHINE_SUBFLOW_NAME_FORMAT % (
+            (minion_mgr_tasks.
+                MINION_POOL_REALLOCATE_MACHINE_SUBFLOW_NAME_FORMAT) % (
                 minion_pool.id, minion_machine.id))
         machine_reallocation_subflow.add(
-            minion_manager_tasks.DeallocateMinionMachineTask(
+            minion_mgr_tasks.DeallocateMinionMachineTask(
                 minion_pool.id, minion_machine.id, minion_pool.platform,
                 inject=inject_for_tasks))
         machine_reallocation_subflow.add(
-            minion_manager_tasks.AllocateMinionMachineTask(
+            minion_mgr_tasks.AllocateMinionMachineTask(
                 minion_pool.id, minion_machine.id, minion_pool.platform,
                 allocate_to_action=allocate_to_action,
                 inject=inject_for_tasks))
@@ -1225,7 +1257,7 @@ class MinionManagerServerEndpoint(object):
             machine_healthcheck_task, machine_reallocation_subflow,
             # NOTE: this is required to prevent any parent flows from skipping:
             decider_depth=taskflow_deciders.Depth.FLOW,
-            decider=minion_manager_tasks.MinionMachineHealtchcheckDecider(
+            decider=minion_mgr_tasks.MinionMachineHealtchcheckDecider(
                 minion_pool.id, minion_machine.id,
                 on_successful_healthcheck=False))
 
@@ -1252,7 +1284,7 @@ class MinionManagerServerEndpoint(object):
         max_minions_to_deallocate = (
             len([
                 mid for mid in machine_statuses
-                if machine_statuses[mid] not in ignorable_machine_statuses]) - (
+                if machine_statuses[mid] not in ignorable_machine_statuses]) - (  # noqa: E501
                     minion_pool.minimum_minions))
         LOG.debug(
             "Determined minion pool '%s' machine deallocation number to be %d "
@@ -1262,7 +1294,7 @@ class MinionManagerServerEndpoint(object):
 
         # define refresh flow and process all relevant machines:
         pool_refresh_flow = unordered_flow.Flow(
-            minion_manager_tasks.MINION_POOL_REFRESH_FLOW_NAME_FORMAT % (
+            minion_mgr_tasks.MINION_POOL_REFRESH_FLOW_NAME_FORMAT % (
                 minion_pool.id))
         now = timeutils.utcnow()
         machines_to_deallocate = []
@@ -1295,10 +1327,10 @@ class MinionManagerServerEndpoint(object):
             # deallocate the machine if it is expired:
             if max_minions_to_deallocate > 0 and minion_expired:
                 if minion_pool.minion_retention_strategy == (
-                        constants.MINION_POOL_MACHINE_RETENTION_STRATEGY_POWEROFF):
+                        constants.MINION_POOL_MACHINE_RETENTION_STRATEGY_POWEROFF):  # noqa: E501
                     if machine.power_status in (
                             constants.MINION_MACHINE_POWER_STATUS_POWERED_OFF,
-                            constants.MINION_MACHINE_POWER_STATUS_POWERING_OFF):
+                            constants.MINION_MACHINE_POWER_STATUS_POWERING_OFF):  # noqa: E501
                         LOG.debug(
                             "Skipping powering off minion machine '%s' of pool"
                             " '%s' as it is already in powered off state.",
@@ -1313,17 +1345,17 @@ class MinionManagerServerEndpoint(object):
                         "deallocation count %d excluding the current machine)",
                         machine.id, minion_pool.id, max_minions_to_deallocate)
                     pool_refresh_flow.add(
-                        minion_manager_tasks.PowerOffMinionMachineTask(
+                        minion_mgr_tasks.PowerOffMinionMachineTask(
                             minion_pool.id, machine.id, minion_pool.platform,
                             fail_on_error=False,
                             status_once_powered_off=(
                                 constants.MINION_MACHINE_STATUS_AVAILABLE)))
                 elif minion_pool.minion_retention_strategy == (
-                        constants.MINION_POOL_MACHINE_RETENTION_STRATEGY_DELETE):
+                        constants.MINION_POOL_MACHINE_RETENTION_STRATEGY_DELETE):  # noqa: E501
                     pool_refresh_flow.add(
-                        minion_manager_tasks.DeallocateMinionMachineTask(
-                                minion_pool.id, machine.id,
-                                minion_pool.platform))
+                        minion_mgr_tasks.DeallocateMinionMachineTask(
+                            minion_pool.id, machine.id,
+                            minion_pool.platform))
                 else:
                     raise exception.InvalidMinionPoolState(
                         "Unknown minion pool retention strategy '%s' for pool "
@@ -1347,7 +1379,7 @@ class MinionManagerServerEndpoint(object):
 
         # update DB entries for all machines and emit relevant events:
         if skipped_machines:
-            base_msg =  (
+            base_msg = (
                 "The following minion machines were skipped during the "
                 "refreshing of the minion pool as they were in other "
                 "statuses than the serviceable ones: %s")
@@ -1420,8 +1452,8 @@ class MinionManagerServerEndpoint(object):
             ctxt, minion_pool, requery=False)
         if not refresh_flow:
             msg = (
-                "There are no minion machine refresh operations to be performed "
-                "at this time")
+                "There are no minion machine refresh operations to be "
+                "performed at this time")
             db_api.add_minion_pool_event(
                 ctxt, minion_pool.id, constants.TASK_EVENT_INFO, msg)
             return self._get_minion_pool(ctxt, minion_pool.id)
@@ -1443,16 +1475,16 @@ class MinionManagerServerEndpoint(object):
         """
         # create task flow:
         allocation_flow = linear_flow.Flow(
-            minion_manager_tasks.MINION_POOL_ALLOCATION_FLOW_NAME_FORMAT % (
+            minion_mgr_tasks.MINION_POOL_ALLOCATION_FLOW_NAME_FORMAT % (
                 minion_pool.id))
 
         # transition pool to VALIDATING:
-        allocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+        allocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
             minion_pool.id, constants.MINION_POOL_STATUS_VALIDATING_INPUTS,
             status_to_revert_to=constants.MINION_POOL_STATUS_ERROR))
 
         # add pool options validation task:
-        allocation_flow.add(minion_manager_tasks.ValidateMinionPoolOptionsTask(
+        allocation_flow.add(minion_mgr_tasks.ValidateMinionPoolOptionsTask(
             # NOTE: we pass in the ID of the minion pool itself as both
             # the task ID and the instance ID for tasks which are strictly
             # pool-related.
@@ -1461,13 +1493,13 @@ class MinionManagerServerEndpoint(object):
             minion_pool.platform))
 
         # transition pool to 'DEPLOYING_SHARED_RESOURCES':
-        allocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+        allocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
             minion_pool.id,
             constants.MINION_POOL_STATUS_ALLOCATING_SHARED_RESOURCES))
 
         # add pool shared resources deployment task:
         allocation_flow.add(
-            minion_manager_tasks.AllocateSharedPoolResourcesTask(
+            minion_mgr_tasks.AllocateSharedPoolResourcesTask(
                 minion_pool.id, minion_pool.id, minion_pool.platform,
                 # NOTE: the shared resource deployment task will always get
                 # run by itself so it is safe to have it override task_info:
@@ -1475,18 +1507,18 @@ class MinionManagerServerEndpoint(object):
 
         # add subflow for deploying all of the minion machines:
         fmt = (
-            minion_manager_tasks.MINION_POOL_ALLOCATE_MINIONS_SUBFLOW_NAME_FORMAT)
+            minion_mgr_tasks.MINION_POOL_ALLOCATE_MINIONS_SUBFLOW_NAME_FORMAT)
         machines_flow = unordered_flow.Flow(fmt % minion_pool.id)
         pool_machine_ids = []
         for _ in range(minion_pool.minimum_minions):
             machine_id = str(uuid.uuid4())
             pool_machine_ids.append(machine_id)
             machines_flow.add(
-                minion_manager_tasks.AllocateMinionMachineTask(
+                minion_mgr_tasks.AllocateMinionMachineTask(
                     minion_pool.id, machine_id, minion_pool.platform))
         # NOTE: bool(flow) == False if the flow has no child flows/tasks:
         if machines_flow:
-            allocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+            allocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
                 minion_pool.id,
                 constants.MINION_POOL_STATUS_ALLOCATING_MACHINES))
             LOG.debug(
@@ -1499,7 +1531,7 @@ class MinionManagerServerEndpoint(object):
                 "pool with ID '%s'", minion_pool.id)
 
         # transition pool to ALLOCATED:
-        allocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+        allocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
             minion_pool.id, constants.MINION_POOL_STATUS_ALLOCATED))
 
         return allocation_flow
@@ -1607,19 +1639,20 @@ class MinionManagerServerEndpoint(object):
             mch for mch in minion_pool.minion_machines
             if mch.allocation_status not in unused_machine_states}
         if used_machines and raise_if_in_use:
+            mch_id = {mch.id: mch.allocation_status for mch in used_machines}
             raise exception.InvalidMinionPoolState(
                 "Minion pool '%s' has one or more machines which are in an"
                 " active state: %s" % (
-                    minion_pool.id, {
-                        mch.id: mch.allocation_status for mch in used_machines}))
+                    minion_pool.id,
+                    mch_id))
         return used_machines
 
     @minion_manager_utils.minion_pool_synchronized_op
     def allocate_minion_pool(self, ctxt, minion_pool_id):
         LOG.info("Attempting to allocate Minion Pool '%s'.", minion_pool_id)
         minion_pool = self._get_minion_pool(
-            ctxt, minion_pool_id, include_events=False, include_machines=False,
-            include_progress_updates=False)
+            ctxt, minion_pool_id, include_events=False,
+            include_machines=False, include_progress_updates=False)
         endpoint_dict = self._rpc_conductor_client.get_endpoint(
             ctxt, minion_pool.endpoint_id)
         acceptable_allocation_statuses = [
@@ -1629,9 +1662,9 @@ class MinionManagerServerEndpoint(object):
             raise exception.InvalidMinionPoolState(
                 "Minion machines for pool '%s' cannot be allocated as the pool"
                 " is in '%s' state instead of the expected %s. Please "
-                "force-deallocate the pool and try again." % (
-                    minion_pool_id, minion_pool.status,
-                    acceptable_allocation_statuses))
+                "force-deallocate the pool and try again." %
+                (minion_pool_id, minion_pool.status,
+                 acceptable_allocation_statuses))
 
         allocation_flow = self._get_minion_pool_allocation_flow(minion_pool)
         initial_store = self._get_pool_initial_taskflow_store_base(
@@ -1669,43 +1702,45 @@ class MinionManagerServerEndpoint(object):
         """
         # create task flow:
         deallocation_flow = linear_flow.Flow(
-            minion_manager_tasks.MINION_POOL_DEALLOCATION_FLOW_NAME_FORMAT % (
+            minion_mgr_tasks.MINION_POOL_DEALLOCATION_FLOW_NAME_FORMAT % (
                 minion_pool.id))
 
         # add subflow for deallocating all of the minion machines:
         fmt = (
-            minion_manager_tasks.MINION_POOL_DEALLOCATE_MACHINES_SUBFLOW_NAME_FORMAT)
+            (minion_mgr_tasks.
+                MINION_POOL_DEALLOCATE_MACHINES_SUBFLOW_NAME_FORMAT))
         machines_flow = unordered_flow.Flow(fmt % minion_pool.id)
         for machine in minion_pool.minion_machines:
             machines_flow.add(
-                minion_manager_tasks.DeallocateMinionMachineTask(
+                minion_mgr_tasks.DeallocateMinionMachineTask(
                     minion_pool.id, machine.id, minion_pool.platform,
                     raise_on_cleanup_failure=raise_on_error))
         # NOTE: bool(flow) == False if the flow has no child flows/tasks:
         if machines_flow:
             # transition pool to DEALLOCATING_MACHINES:
-            deallocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+            deallocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
                 minion_pool.id,
                 constants.MINION_POOL_STATUS_DEALLOCATING_MACHINES,
                 status_to_revert_to=constants.MINION_POOL_STATUS_ERROR))
             deallocation_flow.add(machines_flow)
         else:
             LOG.debug(
-                "No machines for pool '%s' require deallocating.", minion_pool.id)
+                "No machines for pool '%s' require deallocating.",
+                minion_pool.id)
 
         # transition pool to DEALLOCATING_SHARED_RESOURCES:
-        deallocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+        deallocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
             minion_pool.id,
             constants.MINION_POOL_STATUS_DEALLOCATING_SHARED_RESOURCES,
             status_to_revert_to=constants.MINION_POOL_STATUS_ERROR))
 
         # add pool shared resources deletion task:
         deallocation_flow.add(
-            minion_manager_tasks.DeallocateSharedPoolResourcesTask(
+            minion_mgr_tasks.DeallocateSharedPoolResourcesTask(
                 minion_pool.id, minion_pool.id, minion_pool.platform))
 
         # transition pool to DEALLOCATED:
-        deallocation_flow.add(minion_manager_tasks.UpdateMinionPoolStatusTask(
+        deallocation_flow.add(minion_mgr_tasks.UpdateMinionPoolStatusTask(
             minion_pool.id, constants.MINION_POOL_STATUS_DEALLOCATED))
 
         return deallocation_flow
@@ -1739,7 +1774,7 @@ class MinionManagerServerEndpoint(object):
             if not force:
                 raise exception.InvalidMinionPoolState(
                     "Minion pool '%s' cannot be deallocated as the pool"
-                    " is in '%s' state instead of one of the expected %s"% (
+                    " is in '%s' state instead of one of the expected %s" % (
                         minion_pool_id, minion_pool.status,
                         acceptable_deallocation_statuses))
             else:

+ 30 - 23
coriolis/minion_manager/rpc/tasks.py

@@ -8,15 +8,16 @@ import copy
 from oslo_log import log as logging
 from oslo_utils import timeutils
 
-from coriolis import constants
-from coriolis import exception
-from coriolis import utils
 from coriolis.conductor.rpc import client as rpc_conductor_client
+from coriolis import constants
 from coriolis.db import api as db_api
 from coriolis.db.sqlalchemy import models
+from coriolis import exception
 from coriolis.minion_manager.rpc import client as rpc_minion_manager_client
 from coriolis.minion_manager.rpc import utils as minion_manager_utils
 from coriolis.taskflow import base as coriolis_taskflow_base
+from coriolis import utils
+
 from taskflow.types import failure
 
 
@@ -184,8 +185,9 @@ class ReportMinionAllocationFailureForMigrationTask(
         _BaseReportMinionAllocationFailureForActionTask):
 
     def _get_task_name(self, action_id):
-        return MINION_POOL_REPORT_MIGRATION_ALLOCATION_FAILURE_TASK_NAME_FORMAT % (
-            action_id)
+        return (
+            MINION_POOL_REPORT_MIGRATION_ALLOCATION_FAILURE_TASK_NAME_FORMAT
+            % (action_id))
 
     def _report_machine_allocation_failure(
             self, context, action_id, failure_str):
@@ -197,8 +199,9 @@ class ReportMinionAllocationFailureForReplicaTask(
         _BaseReportMinionAllocationFailureForActionTask):
 
     def _get_task_name(self, action_id):
-        return MINION_POOL_REPORT_REPLICA_ALLOCATION_FAILURE_TASK_NAME_FORMAT % (
-            action_id)
+        return (
+            MINION_POOL_REPORT_REPLICA_ALLOCATION_FAILURE_TASK_NAME_FORMAT
+            % (action_id))
 
     def _report_machine_allocation_failure(
             self, context, action_id, failure_str):
@@ -401,8 +404,9 @@ class ConfirmMinionAllocationForMigrationTask(
         return "migration"
 
     def _get_task_name(self, action_id):
-        return MINION_POOL_CONFIRM_MIGRATION_MINION_ALLOCATION_TASK_NAME_FORMAT % (
-            action_id)
+        return (
+            MINION_POOL_CONFIRM_MIGRATION_MINION_ALLOCATION_TASK_NAME_FORMAT
+            % (action_id))
 
     def _confirm_machine_allocation_for_action(
             self, context, action_id, machine_allocations):
@@ -417,8 +421,9 @@ class ConfirmMinionAllocationForReplicaTask(
         return "replica"
 
     def _get_task_name(self, action_id):
-        return MINION_POOL_CONFIRM_REPLICA_MINION_ALLOCATION_TASK_NAME_FORMAT % (
-            action_id)
+        return (
+            MINION_POOL_CONFIRM_REPLICA_MINION_ALLOCATION_TASK_NAME_FORMAT
+            % (action_id))
 
     def _confirm_machine_allocation_for_action(
             self, context, action_id, machine_allocations):
@@ -623,7 +628,7 @@ class AllocateSharedPoolResourcesTask(BaseMinionManangerTask):
             resource_deployment_task_type = (
                 constants.TASK_TYPE_SET_UP_DESTINATION_POOL_SHARED_RESOURCES)
             resource_cleanup_task_type = (
-                constants.TASK_TYPE_TEAR_DOWN_DESTINATION_POOL_SHARED_RESOURCES)
+                constants.TASK_TYPE_TEAR_DOWN_DESTINATION_POOL_SHARED_RESOURCES)  # noqa: E501
         super(AllocateSharedPoolResourcesTask, self).__init__(
             minion_pool_id, minion_machine_id, resource_deployment_task_type,
             cleanup_task_runner_type=resource_cleanup_task_type, **kwargs)
@@ -698,7 +703,7 @@ class DeallocateSharedPoolResourcesTask(BaseMinionManangerTask):
             constants.TASK_TYPE_TEAR_DOWN_SOURCE_POOL_SHARED_RESOURCES)
         if minion_pool_type != constants.PROVIDER_PLATFORM_SOURCE:
             resource_deallocation_task = (
-                constants.TASK_TYPE_TEAR_DOWN_DESTINATION_POOL_SHARED_RESOURCES)
+                constants.TASK_TYPE_TEAR_DOWN_DESTINATION_POOL_SHARED_RESOURCES)  # noqa: E501
         super(DeallocateSharedPoolResourcesTask, self).__init__(
             minion_pool_id, minion_machine_id, resource_deallocation_task,
             **kwargs)
@@ -769,7 +774,8 @@ class AllocateMinionMachineTask(BaseMinionManangerTask):
                     "Minion machine entry with ID '%s' already exists within "
                     "the DB and it is in '%s' status instead of the expected "
                     "'%s' status. Existing machine's properties are: %s" % (
-                        self._minion_machine_id, minion_machine.allocation_status,
+                        self._minion_machine_id,
+                        minion_machine.allocation_status,
                         constants.MINION_MACHINE_STATUS_UNINITIALIZED,
                         minion_machine.to_dict()))
             if minion_machine.pool_id != self._minion_pool_id:
@@ -919,10 +925,8 @@ class AllocateMinionMachineTask(BaseMinionManangerTask):
                     "[Task %s] Removing minion machine entry with ID '%s' for "
                     "minion pool '%s' from the DB as part of reversion of its "
                     "allocation task. Machine properties at deletion time "
-                    "were: %s",
-                    self._task_name, self._minion_machine_id,
-                    self._minion_pool_id,
-                    machine_db_entry.to_dict())
+                    "were: %s", self._task_name, self._minion_machine_id,
+                    self._minion_pool_id, machine_db_entry.to_dict())
                 if not minion_provider_properties and (
                         machine_db_entry.provider_properties):
                     minion_provider_properties = (
@@ -963,8 +967,9 @@ class AllocateMinionMachineTask(BaseMinionManangerTask):
                     context, origin, destination, cleanup_info, **kwargs)
             except Exception:
                 log_msg = (
-                    "[Task '%s'] Exception occurred while attempting to revert "
-                    "deployment of minion machine with ID '%s' for pool '%s'." % (
+                    "[Task '%s'] Exception occurred while attempting to "
+                    "revert deployment of minion machine with ID '%s' "
+                    "for pool '%s'." % (
                         self._task_name, self._minion_machine_id,
                         self._minion_pool_id))
                 if not self._raise_on_cleanup_failure:
@@ -1064,7 +1069,7 @@ class HealthcheckMinionMachineTask(BaseMinionManangerTask):
     def __init__(
             self, minion_pool_id, minion_machine_id, minion_pool_type,
             fail_on_error=False,
-            machine_status_on_success=constants.MINION_MACHINE_STATUS_AVAILABLE,
+            machine_status_on_success=constants.MINION_MACHINE_STATUS_AVAILABLE,  # noqa: E501
             **kwargs):
         self._fail_on_error = fail_on_error
         self._machine_status_on_success = machine_status_on_success
@@ -1233,7 +1238,8 @@ class PowerOnMinionMachineTask(BaseMinionManangerTask):
         machine = self._get_minion_machine(
             context, self._minion_machine_id, raise_if_not_found=True)
 
-        if machine.power_status == constants.MINION_MACHINE_POWER_STATUS_POWERED_ON:
+        if (machine.power_status ==
+                constants.MINION_MACHINE_POWER_STATUS_POWERED_ON):
             LOG.debug(
                 "[Task '%s'] Minion machine with ID '%s' from pool '%s' is "
                 "already marked as powered on. Returning early." % (
@@ -1241,7 +1247,8 @@ class PowerOnMinionMachineTask(BaseMinionManangerTask):
                     self._minion_pool_id))
             return task_info
 
-        if machine.power_status != constants.MINION_MACHINE_POWER_STATUS_POWERED_OFF:
+        if (machine.power_status !=
+                constants.MINION_MACHINE_POWER_STATUS_POWERED_OFF):
             raise exception.InvalidMinionMachineState(
                 "Minion machine with ID '%s' from pool '%s' is in '%s' state "
                 "instead of the expected '%s' required for it to be powered "

+ 0 - 1
coriolis/minion_pools/api.py

@@ -1,7 +1,6 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from coriolis import utils
 from coriolis.minion_manager.rpc import client as rpc_client
 
 

+ 2 - 1
coriolis/osmorphing/base.py

@@ -21,7 +21,8 @@ LOG = logging.getLogger(__name__)
 # Required OS release fields which are expected from the OSDetect tools.
 # 'schemas.CORIOLIS_DETECTED_OS_MORPHING_INFO_SCHEMA' schema:
 REQUIRED_DETECTED_OS_FIELDS = [
-    "os_type", "distribution_name", "release_version", "friendly_release_name"]
+    "os_type", "distribution_name", "release_version",
+    "friendly_release_name"]
 
 
 class BaseOSMorphingTools(object, with_metaclass(abc.ABCMeta)):

+ 1 - 1
coriolis/osmorphing/centos.py

@@ -2,8 +2,8 @@
 # All Rights Reserved.
 
 
-from coriolis.osmorphing import redhat
 from coriolis.osmorphing.osdetect import centos as centos_detect
+from coriolis.osmorphing import redhat
 
 
 CENTOS_DISTRO_IDENTIFIER = centos_detect.CENTOS_DISTRO_IDENTIFIER

+ 3 - 3
coriolis/osmorphing/debian.py

@@ -2,14 +2,14 @@
 # All Rights Reserved.
 
 import os
-from io import StringIO
-
 import yaml
 
+from io import StringIO
+
 from coriolis import exception
-from coriolis import utils
 from coriolis.osmorphing import base
 from coriolis.osmorphing.osdetect import debian as debian_osdetect
+from coriolis import utils
 
 
 DEBIAN_DISTRO_IDENTIFIER = debian_osdetect.DEBIAN_DISTRO_IDENTIFIER

+ 4 - 4
coriolis/osmorphing/manager.py

@@ -8,10 +8,10 @@ from oslo_log import log as logging
 
 from coriolis import events
 from coriolis import exception
-from coriolis import schemas
 from coriolis.osmorphing import base as base_osmorphing
-from coriolis.osmorphing.osmount import factory as osmount_factory
 from coriolis.osmorphing.osdetect import manager as osdetect_manager
+from coriolis.osmorphing.osmount import factory as osmount_factory
+from coriolis import schemas
 
 
 opts = [
@@ -187,8 +187,8 @@ def morph_image(origin_provider, destination_provider, connection_info,
             export_os_morphing_tools.set_environment(environment)
         else:
             LOG.debug(
-                "No compatible OSMorphing tools class found for export provider "
-                "'%s'", type(origin_provider).__name__)
+                "No compatible OSMorphing tools class found for export "
+                "provider '%s'", type(origin_provider).__name__)
     except exception.OSMorphingToolsNotFound:
         LOG.warn(
             "No tools found for export provider of type: %s",

+ 1 - 1
coriolis/osmorphing/oracle.py

@@ -3,8 +3,8 @@
 
 import uuid
 
-from coriolis.osmorphing import redhat
 from coriolis.osmorphing.osdetect import oracle as oracle_detect
+from coriolis.osmorphing import redhat
 
 
 ORACLE_DISTRO_IDENTIFIER = oracle_detect.ORACLE_DISTRO_IDENTIFIER

+ 2 - 1
coriolis/osmorphing/osdetect/base.py

@@ -13,7 +13,8 @@ from coriolis import utils
 # Required OS release fields to be returned as declared in the
 # 'schemas.CORIOLIS_DETECTED_OS_MORPHING_INFO_SCHEMA' schema:
 REQUIRED_DETECTED_OS_FIELDS = [
-    "os_type", "distribution_name", "release_version", "friendly_release_name"]
+    "os_type", "distribution_name", "release_version",
+    "friendly_release_name"]
 
 
 class BaseOSDetectTools(object, with_metaclass(abc.ABCMeta)):

+ 3 - 2
coriolis/osmorphing/osdetect/centos.py

@@ -3,9 +3,9 @@
 
 import re
 
-from oslo_log import log as logging
 from coriolis import constants
 from coriolis.osmorphing.osdetect import base
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)
@@ -28,7 +28,8 @@ class CentOSOSDetectTools(base.BaseLinuxOSDetectTools):
                     distro, version, _, _ = m.groups()
                     if CENTOS_DISTRO_IDENTIFIER not in distro:
                         LOG.debug(
-                            "Distro does not appear to be a CentOS: %s", distro)
+                            "Distro does not appear to be a CentOS: %s",
+                            distro)
                         return {}
 
                     distribution_name = CENTOS_DISTRO_IDENTIFIER

+ 0 - 1
coriolis/osmorphing/osdetect/coreos.py

@@ -15,7 +15,6 @@ class CoreOSOSDetectTools(base.BaseLinuxOSDetectTools):
         os_release = self._get_os_release()
         osid = os_release.get("ID")
         if osid == "coreos":
-            name = os_release.get("NAME")
             version = os_release.get("VERSION_ID")
             info = {
                 "os_type": constants.OS_TYPE_LINUX,

+ 1 - 1
coriolis/osmorphing/osdetect/rocky.py

@@ -3,9 +3,9 @@
 
 import re
 
-from oslo_log import log as logging
 from coriolis import constants
 from coriolis.osmorphing.osdetect import base
+from oslo_log import log as logging
 
 
 LOG = logging.getLogger(__name__)

+ 2 - 2
coriolis/osmorphing/osdetect/windows.py

@@ -11,8 +11,8 @@ from oslo_log import log as logging
 
 from coriolis import constants
 from coriolis import exception
-from coriolis import utils
 from coriolis.osmorphing.osdetect import base
+from coriolis import utils
 
 
 WINDOWS_SERVER_IDENTIFIER = "Server"
@@ -99,7 +99,7 @@ class WindowsOSDetectTools(base.BaseOSDetectTools):
              edition_id,
              installation_type,
              product_name) = self._get_image_version_info()
-        except exception.CoriolisException as ex:
+        except exception.CoriolisException:
             LOG.debug(
                 "Exception during Windows OS detection: %s",
                 utils.get_exception_details())

+ 8 - 6
coriolis/osmorphing/osmount/base.py

@@ -81,8 +81,8 @@ class BaseSSHOSMountTools(BaseOSMountTools):
         utils.wait_for_port_connectivity(ip, port)
 
         self._event_manager.progress_update(
-            "Connecting through SSH to OSMorphing host on: %(ip)s:%(port)s" % (
-                {"ip": ip, "port": port}))
+            "Connecting through SSH to OSMorphing host on: %(ip)s:%(port)s" %
+            ({"ip": ip, "port": port}))
         ssh = paramiko.SSHClient()
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         ssh.connect(hostname=ip, port=port, username=username, pkey=pkey,
@@ -152,7 +152,8 @@ class BaseLinuxOSMountTools(BaseSSHOSMountTools):
                         "VG with name '%s' already detected. Renaming VG with "
                         "UUID '%s' to '%s' to avoid conflicts",
                         vg_name, vg_uuid, new_name)
-                    self._exec_cmd("sudo vgrename %s %s" % (vg_uuid, new_name))
+                    self._exec_cmd("sudo vgrename %s %s" %
+                                   (vg_uuid, new_name))
                     vgs[new_name] = pv_name
             else:
                 LOG.warning("Ignoring improper `vgs` output entry: %s", line)
@@ -279,8 +280,8 @@ class BaseLinuxOSMountTools(BaseSSHOSMountTools):
                         "%s. Only LVM volumes or devices referenced by UUID=* "
                         "or /dev/disk/by-uuid/* notation are supported. "
                         "Devicemapper paths for LVM volumes are also "
-                        "supported. Skipping mounting directory." % (
-                            mountpoint, device))
+                        "supported. Skipping mounting directory." %
+                        (mountpoint, device))
                     continue
             if mountpoint in skip_mounts:
                 LOG.debug(
@@ -614,7 +615,8 @@ class BaseLinuxOSMountTools(BaseSSHOSMountTools):
                 self._exec_cmd('sudo umount %s' % d)
 
         dev_fs = "%s/%s" % (root_dir.rstrip('/'), "dev")
-        self._exec_cmd('mountpoint -q %s && sudo umount %s' % (dev_fs, dev_fs))
+        self._exec_cmd('mountpoint -q %s && sudo umount %s' %
+                       (dev_fs, dev_fs))
         self._exec_cmd(
             'mountpoint -q %s && sudo umount %s' % (root_dir, root_dir))
 

+ 13 - 9
coriolis/osmorphing/osmount/windows.py

@@ -77,9 +77,12 @@ class WindowsMountTools(base.BaseOSMountTools):
         # Disk Group also imports the other ones), so we must take care to
         # re-status before performing any operation on any disk => O(n**2)
         disk_list = self._run_diskpart_script(disk_list_script)
-        servicable_disk_ids = [m.group(1) for m in [
-            re.match(search_disk_entry_re, l) for l in disk_list.split("\r\n")]
-            if m is not None]
+        servicable_disk_ids = [
+            m.group(1)
+            for m
+            in
+            [re.match(search_disk_entry_re, d)
+             for d in disk_list.split("\r\n")] if m is not None]
         LOG.debug(
             "Servicing disks with status '%s' (%s) from disk list: %s",
             status, servicable_disk_ids, disk_list)
@@ -101,8 +104,8 @@ class WindowsMountTools(base.BaseOSMountTools):
                             LOG.warn(
                                 "Exception ocurred while servicing disk '%s' "
                                 "with status '%s'.Skipping running script '%s'"
-                                ". Error message: %s" % (
-                                    disk_id, status, script, ex))
+                                ". Error message: %s" %
+                                (disk_id, status, script, ex))
                         else:
                             raise
                     break
@@ -117,7 +120,7 @@ class WindowsMountTools(base.BaseOSMountTools):
             vol_details = self._run_diskpart_script(vol_detail_script)
             vol_disk_re = r"^\*?\s+Disk ([0-9]+)\s+"
             return [m.group(1) for m in [
-                re.match(vol_disk_re, l) for l in vol_details.split('\r\n')]
+                re.match(vol_disk_re, v) for v in vol_details.split('\r\n')]
                 if m is not None]
 
         volume_list_script = "LIST VOLUME\r\nEXIT"
@@ -215,12 +218,13 @@ class WindowsMountTools(base.BaseOSMountTools):
 
         volume_list = self._run_diskpart_script(volume_list_script)
         unhidden_volume_ids = [m.group(1) for m in [
-            re.match(volume_entry_re, l) for l in volume_list.split("\r\n")]
+            re.match(volume_entry_re, v) for v in volume_list.split("\r\n")]
             if m is not None and "HIDDEN" not in m.group(2).upper()]
         for vol_id in unhidden_volume_ids:
             try:
                 LOG.info(
-                    "Clearing NODEFAULTDRIVELETTER flag on volume %s" % vol_id)
+                    "Clearing NODEFAULTDRIVELETTER flag on volume %s" %
+                    vol_id)
                 script = enable_default_drive_letter_script_fmt % vol_id
                 self._run_diskpart_script(script)
             except Exception as ex:
@@ -239,7 +243,7 @@ class WindowsMountTools(base.BaseOSMountTools):
         self._set_volumes_drive_letter()
         self._refresh_storage()
         fs_roots = utils.retry_on_error(sleep_seconds=5)(self._get_fs_roots)(
-                fail_if_empty=True)
+            fail_if_empty=True)
         system_drive = self._get_system_drive()
 
         for fs_root in [r for r in fs_roots if not r[:-1] == system_drive]:

+ 7 - 6
coriolis/osmorphing/redhat.py

@@ -8,10 +8,10 @@ import uuid
 from oslo_log import log as logging
 
 from coriolis import exception
-from coriolis import utils
 from coriolis.osmorphing import base
 from coriolis.osmorphing.osdetect import centos as centos_detect
 from coriolis.osmorphing.osdetect import redhat as redhat_detect
+from coriolis import utils
 
 
 RED_HAT_DISTRO_IDENTIFIER = redhat_detect.RED_HAT_DISTRO_IDENTIFIER
@@ -54,10 +54,10 @@ class BaseRedHatMorphingTools(base.BaseLinuxOSMorphingTools):
     def __init__(self, conn, os_root_dir, os_root_dev,
                  hypervisor, event_manager, detected_os_info,
                  osmorphing_parameters, operation_timeout=None):
-        super(BaseRedHatMorphingTools, self).__init__(
-            conn, os_root_dir, os_root_dev,
-            hypervisor, event_manager, detected_os_info, osmorphing_parameters,
-            operation_timeout)
+        super(
+            BaseRedHatMorphingTools, self).__init__(
+            conn, os_root_dir, os_root_dev, hypervisor, event_manager,
+            detected_os_info, osmorphing_parameters, operation_timeout)
 
     def disable_predictable_nic_names(self):
         cmd = 'grubby --update-kernel=ALL --args="%s"'
@@ -160,7 +160,8 @@ class BaseRedHatMorphingTools(base.BaseLinuxOSMorphingTools):
         for ifcfgf in all_ifcfg_files:
             if not re.match(regex, ifcfgf):
                 LOG.debug(
-                    "Skipping ifcfg file with unknown filename '%s'." % ifcfgf)
+                    "Skipping ifcfg file with unknown filename '%s'." %
+                    ifcfgf)
                 continue
 
             if interfaces and not any([i in ifcfgf for i in interfaces]):

+ 10 - 7
coriolis/osmorphing/suse.py

@@ -8,9 +8,9 @@ import uuid
 from oslo_log import log as logging
 
 from coriolis import exception
-from coriolis import utils
 from coriolis.osmorphing import base
 from coriolis.osmorphing.osdetect import suse as suse_detect
+from coriolis import utils
 
 
 LOG = logging.getLogger(__name__)
@@ -118,8 +118,8 @@ class BaseSUSEMorphingTools(base.BaseLinuxOSMorphingTools):
             raise exception.CoriolisException(
                 "Failed to activate SLES module: %s. Please check whether the "
                 "SUSE system registration is still valid on the source VM "
-                "and retry. Review logs for more details. Error was: %s" % (
-                    module, str(err))) from err
+                "and retry. Review logs for more details. Error was: %s" %
+                (module, str(err))) from err
 
     def _add_cloud_tools_repo(self):
         repo_suffix = ""
@@ -135,7 +135,8 @@ class BaseSUSEMorphingTools(base.BaseLinuxOSMorphingTools):
     def _get_repos(self):
         repos = {}
         repos_list = self._exec_cmd_chroot(
-            "zypper repos -u | awk -F '|' '/^\s[0-9]+/ {print $2 $7}'").decode()
+            "zypper repos -u | awk -F '|' '/^\s[0-9]+/ {print $2 $7}'"
+        ).decode()
         for repo in repos_list.splitlines():
             alias, uri = repo.strip().split()
             repos[alias] = uri
@@ -146,8 +147,9 @@ class BaseSUSEMorphingTools(base.BaseLinuxOSMorphingTools):
         repos = self._get_repos()
         if repos.get(alias):
             if repos[alias] == uri:
-                LOG.debug('Repo with alias %s already exists and has the same '
-                          'URI. Enabling', alias)
+                LOG.debug(
+                    'Repo with alias %s already exists and has the same '
+                    'URI. Enabling', alias)
                 self._event_manager.progress_update(
                     "Enabling repository: %s" % alias)
                 self._exec_cmd_chroot(
@@ -184,7 +186,8 @@ class BaseSUSEMorphingTools(base.BaseLinuxOSMorphingTools):
     def uninstall_packages(self, package_names):
         try:
             self._exec_cmd_chroot(
-                'zypper --non-interactive remove %s' % " ".join(package_names))
+                'zypper --non-interactive remove %s' %
+                " ".join(package_names))
         except Exception:
             self._event_manager.progress_update(
                 "Error occured while uninstalling packages. Ignoring")

+ 2 - 1
coriolis/osmorphing/ubuntu.py

@@ -101,7 +101,8 @@ class BaseUbuntuMorphingTools(debian.BaseDebianMorphingTools):
                     LOG.debug(
                         "Renamed interface '%s' to '%s' in '%s'",
                         iface_name, new_iface_name, config_path_chroot)
-                new_config = copy.deepcopy(ethernet_configurations[iface_name])
+                new_config = copy.deepcopy(
+                    ethernet_configurations[iface_name])
                 if set_dhcp:
                     new_config["dhcp4"] = True
                     new_config["dhcp6"] = True

+ 9 - 10
coriolis/osmorphing/windows.py

@@ -13,9 +13,9 @@ from oslo_log import log as logging
 
 from coriolis import constants
 from coriolis import exception
-from coriolis import utils
 from coriolis.osmorphing import base
 from coriolis.osmorphing.osdetect import windows as windows_osdetect
+from coriolis import utils
 
 
 LOG = logging.getLogger(__name__)
@@ -148,7 +148,7 @@ $NICS_INFO = ConvertFrom-Json $nics_info_json
 $IPS_INFO = ConvertFrom-Json $ips_info_json
 
 Invoke-Main $NICS_INFO $IPS_INFO
-""" # noqa
+"""  # noqa
 
 
 class BaseWindowsMorphingTools(base.BaseOSMorphingTools):
@@ -221,7 +221,8 @@ class BaseWindowsMorphingTools(base.BaseOSMorphingTools):
             return self._conn.exec_command(
                 dism_path,
                 ["/add-driver", "/image:%s" % self._os_root_dir,
-                 "/driver:\"%s\"" % driver_path, "/recurse", "/forceunsigned"])
+                 "/driver:\"%s\"" % driver_path, "/recurse",
+                 "/forceunsigned"])
         except Exception as ex:
             dism_log_path = "%s\\Windows\\Logs\\DISM\\dism.log" % (
                 self._get_worker_os_drive_path())
@@ -234,7 +235,8 @@ class BaseWindowsMorphingTools(base.BaseOSMorphingTools):
                     driver_path, dism_log_path, dism_log_contents)
             else:
                 LOG.warn(
-                    "Could not find DISM error logs for failure:'%s'", str(ex))
+                    "Could not find DISM error logs for failure:'%s'",
+                    str(ex))
             raise
 
     def _mount_disk_image(self, path):
@@ -314,12 +316,9 @@ class BaseWindowsMorphingTools(base.BaseOSMorphingTools):
             "16 -Type DWord -Force;"
             "New-ItemProperty -Path '%(path)s' -Name 'ErrorControl' -Value "
             "0 -Type DWord -Force" %
-            {"path": registry_path,
-             "image_path": image_path,
-             "display_name": display_name,
-             "description": description,
-             "depends_on": depends_on_ps,
-             "service_account": service_account,
+            {"path": registry_path, "image_path": image_path,
+             "display_name": display_name, "description": description,
+             "depends_on": depends_on_ps, "service_account": service_account,
              "start_mode": start_mode},
             ignore_stdout=True)
 

+ 2 - 2
coriolis/policy.py

@@ -8,7 +8,6 @@ from oslo_log import log as logging
 from oslo_policy import policy
 
 from coriolis import exception
-from coriolis import utils
 from coriolis.policies import base
 from coriolis.policies import diagnostics
 from coriolis.policies import endpoints
@@ -16,10 +15,11 @@ from coriolis.policies import general
 from coriolis.policies import migrations
 from coriolis.policies import minion_pools
 from coriolis.policies import regions
-from coriolis.policies import replicas
 from coriolis.policies import replica_schedules
 from coriolis.policies import replica_tasks_executions
+from coriolis.policies import replicas
 from coriolis.policies import services
+from coriolis import utils
 
 
 LOG = logging.getLogger(__name__)

+ 6 - 5
coriolis/providers/backup_writers.py

@@ -7,11 +7,11 @@ import copy
 import datetime
 import errno
 import os
+import shutil
 import tempfile
 import threading
 import time
 import uuid
-import shutil
 
 import eventlet
 from oslo_config import cfg
@@ -368,8 +368,8 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
 
         if self._exception:
             raise exception.CoriolisException(
-                    "Failed to write data. See log "
-                    "for details.") from self._exception
+                "Failed to write data. See log "
+                "for details.") from self._exception
 
         payload = {
             "offset": self._offset,
@@ -482,7 +482,8 @@ class SSHBackupWriter(BaseBackupWriter):
 
         if not matching_devs:
             base_msg = (
-                "Could not locate disk with ID '%s' in volumes_info" % disk_id)
+                "Could not locate disk with ID '%s' in volumes_info" %
+                disk_id)
             LOG.error("%s: %s", base_msg, self._volumes_info)
             raise exception.CoriolisException(base_msg)
         elif len(matching_devs) > 1:
@@ -923,7 +924,7 @@ class HTTPBackupWriterBootstrapper(object):
                        "srv_key": cert_paths["srv_key"],
                        "srv_cert": cert_paths["srv_crt"],
                        "listen_port": self._writer_port,
-            }
+        }
         self._change_binary_se_context(ssh)
         utils.create_service(
             ssh, cmdline, _CORIOLIS_HTTP_WRITER_CMD, start=True)

+ 0 - 1
coriolis/providers/base.py

@@ -201,7 +201,6 @@ class BaseInstanceProvider(BaseProvider):
         """
         raise exception.OSMorphingToolsNotFound(os_type=os_type)
 
-
     def get_custom_os_detect_tools(self, os_type, osmorphing_info):
         """ Returns a list of custom OSDetect classes which inherit from
         coriolis.osmorphing.osdetect.base.BaseOSDetectTools.

+ 23 - 22
coriolis/providers/provider_utils.py

@@ -80,7 +80,8 @@ def get_storage_mapping_for_disk(
                     disk_info)
         else:
             LOG.debug(
-                "No 'storage_backend_identifier' set for disk '%s'", disk_info)
+                "No 'storage_backend_identifier' set for disk '%s'",
+                disk_info)
 
     # 3) use provided default:
     if not mapped_backend:
@@ -111,24 +112,24 @@ def get_storage_mapping_for_disk(
 
 def check_changed_storage_mappings(volumes_info, old_storage_mappings,
                                    new_storage_mappings):
-        if not volumes_info:
-            return
-
-        old_backend_mappings = old_storage_mappings.get('backend_mappings', [])
-        old_disk_mappings = old_storage_mappings.get('disk_mappings', [])
-        new_backend_mappings = new_storage_mappings.get('backend_mappings', [])
-        new_disk_mappings = new_storage_mappings.get('disk_mappings', [])
-
-        old_backend_mappings_set = [
-            tuple(mapping.values()) for mapping in old_backend_mappings]
-        old_disk_mappings_set = [
-            tuple(mapping.values()) for mapping in old_disk_mappings]
-        new_backend_mappings_set = [
-            tuple(mapping.values()) for mapping in new_backend_mappings]
-        new_disk_mappings_set = [
-            tuple(mapping.values()) for mapping in new_disk_mappings]
-
-        if (old_backend_mappings_set != new_backend_mappings_set or
-                old_disk_mappings_set != new_disk_mappings_set):
-            raise exception.CoriolisException("Modifying storage mappings is "
-                                              "not supported.")
+    if not volumes_info:
+        return
+
+    old_backend_mappings = old_storage_mappings.get('backend_mappings', [])
+    old_disk_mappings = old_storage_mappings.get('disk_mappings', [])
+    new_backend_mappings = new_storage_mappings.get('backend_mappings', [])
+    new_disk_mappings = new_storage_mappings.get('disk_mappings', [])
+
+    old_backend_mappings_set = [
+        tuple(mapping.values()) for mapping in old_backend_mappings]
+    old_disk_mappings_set = [
+        tuple(mapping.values()) for mapping in old_disk_mappings]
+    new_backend_mappings_set = [
+        tuple(mapping.values()) for mapping in new_backend_mappings]
+    new_disk_mappings_set = [
+        tuple(mapping.values()) for mapping in new_disk_mappings]
+
+    if (old_backend_mappings_set != new_backend_mappings_set or
+            old_disk_mappings_set != new_disk_mappings_set):
+        raise exception.CoriolisException("Modifying storage mappings is "
+                                          "not supported.")

+ 11 - 6
coriolis/providers/replicator.py

@@ -11,6 +11,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import units
 from sshtunnel import SSHTunnelForwarder
+
 import paramiko
 import requests
 
@@ -374,10 +375,12 @@ class Replicator(object):
         new_device_paths = None
         for i in range(retry_count):
             new_disks_status = self._cli.get_status()
-            new_device_paths = [dev['device-path'] for dev in new_disks_status]
+            new_device_paths = [dev['device-path']
+                                for dev in new_disks_status]
             LOG.debug(
                 "Polled devices while waiting for disk '%s' to attach "
-                "(try %d/%d): %s", disk_id, i+1, retry_count, new_device_paths)
+                "(try %d/%d): %s", disk_id, i + 1, retry_count,
+                new_device_paths)
 
             # check for missing/multiple new device paths:
             missing_device_paths = (
@@ -389,7 +392,8 @@ class Replicator(object):
                         dev for dev in previous_disks_status
                         if dev['device-path'] in missing_device_paths])
 
-            new_device_paths = set(new_device_paths) - set(previous_device_paths)
+            new_device_paths = set(
+                new_device_paths) - set(previous_device_paths)
             if new_device_paths:
                 break
             else:
@@ -433,8 +437,8 @@ class Replicator(object):
                 perc_step = perc_steps.get(devName)
                 if perc_step is None:
                     perc_step = self._event_manager.add_percentage_step(
-                        "Performing chunking for disk %s (total size %.2f MB)" % (
-                            devName, dev_size), 100)
+                        "Performing chunking for disk %s (total size %.2f MB)"
+                        % (devName, dev_size), 100)
                     perc_steps[devName] = perc_step
                 perc_done = vol["checksum-status"]["percentage"]
                 self._event_manager.set_percentage_step(
@@ -837,7 +841,8 @@ class Replicator(object):
 
         total = 0
         with self._cli._cli.get(diskUri, stream=True,
-                                timeout=CONF.replicator.default_requests_timeout) as dw:
+                                timeout=(CONF.replicator.
+                                         default_requests_timeout)) as dw:
             with open(path, 'wb') as dsk:
                 for chunk in dw.iter_content(chunk_size=self._chunk_size):
                     if chunk:

+ 2 - 1
coriolis/qemu.py

@@ -54,7 +54,8 @@ _libqemu.qemu_init_exec_dir.argtypes = [ctypes.c_char_p]
 _libqemu.qemu_init_exec_dir.restype = None
 qemu_init_exec_dir = _libqemu.qemu_init_exec_dir
 
-_libqemu.qemu_init_main_loop.argtypes = [ctypes.POINTER(ctypes.POINTER(Error))]
+_libqemu.qemu_init_main_loop.argtypes = [
+    ctypes.POINTER(ctypes.POINTER(Error))]
 _libqemu.qemu_init_main_loop.res_type = ctypes.c_int
 qemu_init_main_loop = _libqemu.qemu_init_main_loop
 

+ 0 - 1
coriolis/regions/api.py

@@ -1,7 +1,6 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from coriolis import utils
 from coriolis.conductor.rpc import client as rpc_client
 
 

+ 2 - 2
coriolis/replica_cron/rpc/server.py

@@ -6,11 +6,11 @@ import json
 from oslo_log import log as logging
 from oslo_utils import timeutils
 
+from coriolis.conductor.rpc import client as rpc_client
 from coriolis import context
+from coriolis.cron import cron
 from coriolis import exception
 from coriolis import utils
-from coriolis.conductor.rpc import client as rpc_client
-from coriolis.cron import cron
 
 LOG = logging.getLogger(__name__)
 

+ 5 - 7
coriolis/rpc.py

@@ -1,15 +1,13 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-import contextlib
-
+import coriolis.exception
 import oslo_messaging as messaging
+
 from oslo_config import cfg
 from oslo_log import log as logging
 
-import coriolis.exception
 from coriolis import context
-from coriolis import utils
 
 
 rpc_opts = [
@@ -102,9 +100,9 @@ class BaseRPCClient(object):
 
     def _rpc_client(self):
         return messaging.RPCClient(
-                self._transport, self._target,
-                serializer=self._serializer,
-                timeout=self._timeout)
+            self._transport, self._target,
+            serializer=self._serializer,
+            timeout=self._timeout)
 
     def _call(self, ctxt, method, **kwargs):
         client = self._rpc_client()

+ 2 - 3
coriolis/scheduler/filters/trivial_filters.py

@@ -3,7 +3,6 @@
 
 from oslo_log import log as logging
 
-from coriolis import constants
 from coriolis.scheduler.filters import base
 
 
@@ -98,8 +97,8 @@ class ProviderTypesFilter(base.BaseServiceFilter):
         for platform_type in self._provider_requirements:
             if platform_type not in service.providers:
                 LOG.debug(
-                    "Service with ID '%s' does not have a provider for platform "
-                    "type '%s'", service.id, platform_type)
+                    "Service with ID '%s' does not have a provider for "
+                    "platform type '%s'", service.id, platform_type)
                 return 0
 
             available_types = service.providers[

+ 7 - 7
coriolis/scheduler/rpc/client.py

@@ -1,18 +1,18 @@
 # Copyright 2016 Cloudbase Solutions Srl
 # All Rights Reserved.
 
+import oslo_messaging as messaging
 import random
 import time
 
-import oslo_messaging as messaging
-from oslo_config import cfg
-from oslo_log import log as logging
-
 from coriolis import constants
 from coriolis import exception
 from coriolis import rpc
-from coriolis import utils
 from coriolis.tasks import factory as tasks_factory
+from coriolis import utils
+
+from oslo_config import cfg
+from oslo_log import log as logging
 
 
 VERSION = "1.0"
@@ -163,11 +163,11 @@ class SchedulerClient(rpc.BaseRPCClient):
                     worker_service['id'], task['id'], task['task_type'],
                     origin_endpoint['id'], destination_endpoint['id'])
                 return worker_service
-            except Exception as ex:
+            except Exception:
                 LOG.warn(
                     "Failed to schedule task with ID '%s' (attempt %d/%d). "
                     "Waiting %d seconds and then retrying. Error was: %s",
-                    task['id'], i+1, retry_count, retry_period,
+                    task['id'], i + 1, retry_count, retry_period,
                     utils.get_exception_details())
                 time.sleep(retry_period)
 

+ 5 - 10
coriolis/scheduler/rpc/server.py

@@ -1,20 +1,15 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-import copy
-import functools
-import random
-import uuid
-
 from oslo_config import cfg
 from oslo_log import log as logging
 
+from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis import constants
+from coriolis.db import api as db_api
 from coriolis import exception
-from coriolis import utils
-from coriolis.conductor.rpc import client as rpc_conductor_client
 from coriolis.scheduler.filters import trivial_filters
-from coriolis.db import api as db_api
+from coriolis import utils
 
 
 VERSION = "1.0"
@@ -57,7 +52,6 @@ class SchedulerServerEndpoint(object):
 
         scores = []
 
-
         service_ids = [service.id for service in services]
         LOG.debug(
             "Running following filters on worker services '%s': %s",
@@ -156,7 +150,8 @@ class SchedulerServerEndpoint(object):
                         "None of the selected Regions (%s) are enabled or "
                         "otherwise usable." % region_set)
                 filters.append(
-                    trivial_filters.RegionsFilter(region_set, any_region=True))
+                    trivial_filters.RegionsFilter(
+                        region_set, any_region=True))
         if provider_requirements:
             filters.append(
                 trivial_filters.ProviderTypesFilter(provider_requirements))

+ 1 - 1
coriolis/scheduler/scheduler_utils.py

@@ -10,7 +10,6 @@ from coriolis.db import api as db_api
 from coriolis import exception
 from coriolis.replica_cron.rpc import client as rpc_cron_client
 from coriolis.scheduler.rpc import client as rpc_scheduler_client
-from coriolis import utils
 from coriolis.worker.rpc import client as rpc_worker_client
 
 
@@ -58,6 +57,7 @@ def get_any_worker_service(
         return service
     return db_api.get_service(ctxt, service['id'])
 
+
 def get_worker_rpc_for_host(host, *client_args, **client_kwargs):
     rpc_client_class = RPC_TOPIC_TO_CLIENT_CLASS_MAP[
         constants.WORKER_MAIN_MESSAGING_TOPIC]

+ 6 - 3
coriolis/service.py

@@ -42,6 +42,7 @@ def get_worker_count_from_args(argv):
     --worker-process-count is not present), as well as the unprocessed args.
     """
     parser = argparse.ArgumentParser()
+
     def _check_positive_worker_count(worker_count):
         count = int(worker_count)
         if count <= 0:
@@ -50,9 +51,10 @@ def get_worker_count_from_args(argv):
                 "got: %s" % worker_count)
         return count
     parser.add_argument(
-        '--worker-process-count', metavar='N', type=_check_positive_worker_count,
+        '--worker-process-count', metavar='N',
+        type=_check_positive_worker_count,
         help="Number of worker processes for this service. Defaults to the "
-             "number of logical CPU cores on the system.")
+        "number of logical CPU cores on the system.")
     args, unknown_args = parser.parse_known_args(args=argv)
     return args.worker_process_count, unknown_args
 
@@ -76,7 +78,8 @@ def check_locks_dir_empty():
 
     if not os.path.exists(locks_dir):
         LOG.warn(
-            "Configured 'lock_path' directory '%s' does NOT exist!", locks_dir)
+            "Configured 'lock_path' directory '%s' does NOT exist!",
+            locks_dir)
         return
 
     if not os.path.isdir(locks_dir):

+ 0 - 1
coriolis/services/api.py

@@ -1,7 +1,6 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-from coriolis import utils
 from coriolis.conductor.rpc import client as rpc_client
 
 

+ 21 - 22
coriolis/taskflow/base.py

@@ -8,22 +8,21 @@ from taskflow.types import failure
 
 from coriolis import constants
 from coriolis import exception
-from coriolis import utils
-from coriolis.tasks import factory as tasks_factory
 from coriolis.scheduler.rpc import client as rpc_scheduler_client
+from coriolis.tasks import factory as tasks_factory
+from coriolis import utils
 from coriolis.worker.rpc import client as rpc_worker_client
 
 
 TASK_RETURN_VALUE_FORMAT = "%s-result" % (
-        constants.TASK_LOCK_NAME_FORMAT)
+    constants.TASK_LOCK_NAME_FORMAT)
 LOG = logging.getLogger()
 
 taskflow_opts = [
-    cfg.IntOpt("worker_task_execution_timeout",
-               default=3600,
-               help="Number of seconds until Coriolis tasks which are executed"
-                    "remotely on a Worker Service through taskflow timeout.")
-]
+    cfg.IntOpt(
+        "worker_task_execution_timeout", default=3600,
+        help="Number of seconds until Coriolis tasks which are executed"
+        "remotely on a Worker Service through taskflow timeout.")]
 
 CONF = cfg.CONF
 CONF.register_opts(taskflow_opts, 'taskflow')
@@ -154,8 +153,8 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
             cleanup_task_deps = list(
                 set(
                     cleanup_task_runner.get_required_task_info_properties(
-                        )).difference(
-                            main_task_runner.get_returned_task_info_properties()))
+                    )).difference(
+                    main_task_runner.get_returned_task_info_properties()))
             new_requires.extend(cleanup_task_deps)
 
         kwargs['requires'] = new_requires
@@ -173,8 +172,8 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
             cleanup_task_res = list(
                 set(
                     cleanup_task_runner.get_returned_task_info_properties(
-                        )).difference(
-                            main_task_runner.get_returned_task_info_properties()))
+                    )).difference(
+                    main_task_runner.get_returned_task_info_properties()))
             new_provides.extend(cleanup_task_res)
 
         kwargs['provides'] = new_provides
@@ -191,9 +190,9 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
             ctxt, task_info, origin, destination, retry_count=retry_count,
             retry_period=retry_period, random_choice=True)
         LOG.debug(
-            "[Task '%s'] Was offered the following worker service for executing "
-            "Taskflow worker task '%s': %s",
-                self._task_name, task_id, worker_service['id'])
+            "[Task '%s'] Was offered the following worker service for "
+            "executing Taskflow worker task '%s': %s",
+            self._task_name, task_id, worker_service['id'])
 
         return rpc_worker_client.WorkerClient.from_service_definition(
             worker_service, timeout=rpc_timeout)
@@ -216,7 +215,7 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
                 "successfully run and returned the following info: %s" % (
                     self._task_name, task_id, task_type, res))
             return res
-        except Exception as ex:
+        except Exception:
             LOG.debug(
                 "[Task %s] Exception occurred while executing task '%s' "
                 "(type '%s') on the worker service: %s", self._task_name,
@@ -241,9 +240,9 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
 
         try:
             res = self._execute_task(
-                context, self._task_id, self._cleanup_task_runner_type, origin,
-                destination, task_info)
-        except Exception as ex:
+                context, self._task_id, self._cleanup_task_runner_type,
+                origin, destination, task_info)
+        except Exception:
             LOG.warn(
                 "Task cleanup for '%s' (main task type '%s', cleanup task type"
                 "'%s') has failed with the following trace: %s",
@@ -255,6 +254,6 @@ class BaseRunWorkerTask(BaseCoriolisTaskflowTask):
 
         LOG.debug(
             "Reversion of taskflow task '%s' (ID '%s') was successfully "
-            "executed using task runner '%s' with the following result: %s" % (
-                self._task_name, self._task_id, self._cleanup_task_runner_type,
-                res))
+            "executed using task runner '%s' with the following result: %s" %
+            (self._task_name, self._task_id, self._cleanup_task_runner_type,
+             res))

+ 5 - 4
coriolis/taskflow/runner.py

@@ -4,9 +4,9 @@
 # NOTE: we neeed to make sure eventlet is imported:
 import multiprocessing
 import sys
-from logging import handlers
+import eventlet  # noqa
 
-import eventlet  #noqa
+from logging import handlers
 from oslo_config import cfg
 from oslo_log import log as logging
 from six.moves import queue
@@ -76,10 +76,11 @@ class TaskFlowRunner(object):
         LOG.debug("Running flow with name '%s'", flow.name)
         try:
             engine.run()
-        except Exception as ex:
+        except Exception:
             LOG.warn(
                 "Fatal error occurred while attempting to run flow '%s'. "
-                "Full trace was: %s", flow.name, utils.get_exception_details())
+                "Full trace was: %s", flow.name,
+                utils.get_exception_details())
             raise
         LOG.info(
             "Successfully ran flow with name '%s'. Statistics were: %s",

+ 1 - 1
coriolis/tasks/base.py

@@ -10,8 +10,8 @@ from six import with_metaclass
 
 from coriolis import constants
 from coriolis import exception
-from coriolis import utils
 from coriolis.providers import factory as providers_factory
+from coriolis import utils
 
 serialization_opts = [
     cfg.StrOpt('temp_keypair_password',

+ 3 - 2
coriolis/tasks/minion_pool_tasks.py

@@ -530,7 +530,8 @@ class AttachVolumesToDestinationMinionTask(
         return TARGET_MINION_TASK_INFO_FIELD_MAPPINGS
 
 
-class DetachVolumesFromDestinationMinionTask(AttachVolumesToDestinationMinionTask):
+class DetachVolumesFromDestinationMinionTask(
+    AttachVolumesToDestinationMinionTask):
 
     @classmethod
     def _get_provider_disk_operation(cls, provider):
@@ -764,7 +765,7 @@ class ValidateOSMorphingMinionCompatibilityTask(
 
     @classmethod
     def _get_minion_task_info_field_mappings(cls):
-        return  OSMOPRHING_MINION_TASK_INFO_FIELD_MAPPINGS
+        return OSMOPRHING_MINION_TASK_INFO_FIELD_MAPPINGS
 
 
 class _BaseReleaseMinionTask(base.TaskRunner):

+ 3 - 2
coriolis/tasks/osmorphing_tasks.py

@@ -5,9 +5,9 @@ from oslo_log import log as logging
 
 from coriolis import constants
 from coriolis import exception
-from coriolis import schemas
 from coriolis.osmorphing import manager as osmorphing_manager
 from coriolis.providers import factory as providers_factory
+from coriolis import schemas
 from coriolis.tasks import base
 
 
@@ -108,7 +108,8 @@ class DeployOSMorphingResourcesTask(base.TaskRunner):
         instance_deployment_info = task_info["instance_deployment_info"]
 
         import_info = provider.deploy_os_morphing_resources(
-            ctxt, connection_info, target_environment, instance_deployment_info)
+            ctxt, connection_info, target_environment,
+            instance_deployment_info)
 
         schemas.validate_value(
             import_info, schemas.CORIOLIS_OS_MORPHING_RESOURCES_SCHEMA,

+ 1 - 1
coriolis/tasks/replica_tasks.py

@@ -6,8 +6,8 @@ from oslo_log import log as logging
 from coriolis import constants
 from coriolis import events
 from coriolis import exception
-from coriolis.providers import factory as providers_factory
 from coriolis.providers import backup_writers
+from coriolis.providers import factory as providers_factory
 from coriolis import schemas
 from coriolis.tasks import base
 from coriolis import utils

+ 71 - 64
coriolis/tests/conductor/rpc/test_server.py

@@ -7,17 +7,25 @@ import uuid
 
 from unittest import mock
 
-from coriolis import constants, exception, schemas, utils
 from coriolis.conductor.rpc import server
+from coriolis import constants
 from coriolis.db import api as db_api
 from coriolis.db.sqlalchemy import models
+from coriolis import exception
 from coriolis.licensing import client as licensing_client
-from coriolis.tests import test_base, testutils
+from coriolis import schemas
+from coriolis.tests import test_base
+from coriolis.tests import testutils
+from coriolis import utils
 from coriolis.worker.rpc import client as rpc_worker_client
 from oslo_concurrency import lockutils
 from oslo_config import cfg
 
 
+class CoriolisTestException(Exception):
+    pass
+
+
 @ddt.ddt
 class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     """Test suite for the Coriolis Conductor RPC server."""
@@ -31,9 +39,10 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     )
     @mock.patch.object(server.ConductorServerEndpoint, "_scheduler_client")
     def test_get_all_diagnostics(self, mock_scheduler_client, _):
-        mock_scheduler_client.get_workers_for_specs.side_effect = Exception()
+        mock_scheduler_client.get_workers_for_specs.side_effect = (
+            CoriolisTestException())
         self.assertRaises(
-            Exception,
+            CoriolisTestException,
             lambda: self.server.get_all_diagnostics(mock.sentinel.context),
         )
         mock_scheduler_client.get_workers_for_specs.side_effect = None
@@ -139,9 +148,9 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         mock_delete_endpoint.assert_not_called()
 
         # mapped_regions exist and there's an error updating the endpoint
-        mock_update_endpoint.side_effect = Exception()
+        mock_update_endpoint.side_effect = CoriolisTestException()
         self.assertRaises(
-            Exception,
+            CoriolisTestException,
             lambda: self.server.create_endpoint(
                 mock.sentinel.context,
                 mock.sentinel.name,
@@ -368,7 +377,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
                 mock_get_endpoint.return_value.connection_info,
                 mock.sentinel.environment,
                 mock.sentinel.option_names,
-                )
+            )
 
         self.assertEqual(
             options,
@@ -521,7 +530,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
                 mock.sentinel.context,
                 mock_get_endpoint.return_value.type,
                 mock.sentinel.target_env,
-                )
+            )
 
     @mock.patch.object(
         server.ConductorServerEndpoint, "_get_worker_service_rpc_for_specs"
@@ -556,7 +565,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
                 mock.sentinel.context,
                 mock_get_endpoint.return_value.type,
                 mock.sentinel.source_env,
-                )
+            )
 
     @mock.patch.object(
         rpc_worker_client.WorkerClient, "from_service_definition"
@@ -567,12 +576,12 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     ):
         providers = self.server.get_available_providers(mock.sentinel.context)
         mock_service_definition.assert_called_once_with(
-            mock_scheduler_client.get_any_worker_service(mock.sentinel.context)
-        )
+            mock_scheduler_client.get_any_worker_service(
+                mock.sentinel.context))
         mock_service_definition.return_value\
             .get_available_providers.assert_called_once_with(
                 mock.sentinel.context
-                )
+            )
         self.assertEqual(
             providers,
             mock_service_definition
@@ -592,14 +601,14 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock.sentinel.provider_type,
         )
         mock_service_definition.assert_called_once_with(
-            mock_scheduler_client.get_any_worker_service(mock.sentinel.context)
-        )
+            mock_scheduler_client.get_any_worker_service(
+                mock.sentinel.context))
         mock_service_definition.return_value\
             .get_provider_schemas.assert_called_once_with(
                 mock.sentinel.context,
                 mock.sentinel.platform_name,
                 mock.sentinel.provider_type,
-                )
+            )
         self.assertEqual(
             provider_schemas,
             mock_service_definition.return_value
@@ -610,7 +619,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     @mock.patch.object(uuid, "uuid4", return_value="task_id")
     def test_create_task(
             self, mock_uuid4, mock_task_model
-    ):  # pylint: disable=unused-argument
+    ):
         task1 = mock.sentinel.task1
         task1.id = mock.sentinel.task1_id
         task2 = mock.sentinel.task2
@@ -694,7 +703,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
                 retry_count=5,
                 retry_period=2,
                 random_choice=True,
-                )
+            )
         mock_service_definition.assert_called_once_with(
             mock_scheduler_client.get_worker_service_for_task.return_value
         )
@@ -703,10 +712,10 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
 
         # Handles exception
         mock_scheduler_client.get_worker_service_for_task.side_effect = (
-            Exception("test")
+            CoriolisTestException("test")
         )
         self.assertRaises(
-            Exception,
+            CoriolisTestException,
             self.server._get_worker_service_rpc_for_task,
             mock.sentinel.context,
             task_mock,
@@ -865,7 +874,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_check_replica_running_executions,
             mock_check_minion_pools_for_action,
             mock_tasks_execution,
-            mock_uuid4,  # pylint: disable=unused-argument
+            mock_uuid4,
             mock_create_task,
             mock_check_execution_tasks_sanity,
             mock_update_transfer_action_info_for_instance,
@@ -1030,7 +1039,8 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         self.assertEqual(
             mock_tasks_execution.return_value.type,
             constants.EXECUTION_TYPE_REPLICA_EXECUTION)
-        self.assertEqual(result, mock_get_replica_tasks_execution.return_value)
+        self.assertEqual(
+            result, mock_get_replica_tasks_execution.return_value)
 
     @mock.patch.object(
         server.ConductorServerEndpoint,
@@ -1101,7 +1111,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_get_replica,
             mock_check_replica_running_executions,
             mock_tasks_execution,
-            mock_uuid4,  # pylint: disable=unused-argument
+            mock_uuid4,
             mock_create_task,
             mock_deepcopy,
             mock_check_execution_tasks_sanity,
@@ -1215,7 +1225,8 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_tasks_execution.return_value.id
         )
 
-        self.assertEqual(result, mock_get_replica_tasks_execution.return_value)
+        self.assertEqual(
+            result, mock_get_replica_tasks_execution.return_value)
 
         # raises exception if instances have no volumes info
         instances[0].get.return_value = None
@@ -1356,8 +1367,8 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_get_instance_scripts,
             mock_tasks_execution,
             mock_check_minion_pools_for_action,
-            mock_deepcopy,  # pylint: disable=unused-argument
-            mock_uuid4,  # pylint: disable=unused-argument
+            mock_deepcopy,
+            mock_uuid4,
             mock_migration,
             mock_get_provider_types,
             mock_get_endpoint,
@@ -1634,7 +1645,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_check_minion_pools_for_action,
             mock_check_create_reservation_for_transfer,
             mock_tasks_execution,
-            mock_uuid4,  # pylint: disable=unused-argument
+            mock_uuid4,
             mock_migration,
             mock_get_provider_types,
             mock_check_endpoints,
@@ -1966,11 +1977,11 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     @ddt.unpack
     def test_cancel_tasks_execution(
             self,
-            mock_worker_client,  # pylint: disable=unused-argument
+            mock_worker_client,
             mock_set_task_status,
-            mock_advance_execution_state,  # pylint: disable=unused-argument
-            mock_set_tasks_execution_status,  # pylint: disable=unused-argument
-            mock_get_tasks_execution,  # pylint: disable=unused-argument
+            mock_advance_execution_state,
+            mock_set_tasks_execution_status,
+            mock_get_tasks_execution,
             config,
             expected_status,
     ):
@@ -2102,8 +2113,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         # task status is not in accepted state
         with self.assertRaisesRegex(
                 exception.InvalidTaskState,
-                "expected statuses",
-            ):
+                "expected statuses"):
             call_set_task_host()
 
         mock_get_task.assert_called_once_with(
@@ -2131,8 +2141,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         )
         with self.assertRaisesRegex(
                 exception.InvalidTaskState,
-                "has no host",
-            ):
+                "has no host"):
             call_set_task_host()
 
     @mock.patch.object(
@@ -2341,7 +2350,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_get_task_destination,
             mock_get_action,
             mock_get_endpoint,
-            mock_set_task_status,  # pylint: disable=unused-argument
+            mock_set_task_status,
             mock_get_worker_service_rpc_for_task,
             mock_cancel_tasks_execution,
             mock_get_execution_status,
@@ -2466,8 +2475,8 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         task_info = {
             mock.sentinel.instance: {
                 'test': 'info',
-                },
-            }
+            },
+        }
         mock_get_action.return_value = mock.Mock(
             info=task_info
         )
@@ -2491,9 +2500,10 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         self.assertEqual(started_tasks, [task.id])
 
         # handles worker service rpc error
-        mock_get_worker_service_rpc_for_task.side_effect = Exception()
+        mock_get_worker_service_rpc_for_task.side_effect = (
+            CoriolisTestException())
         self.assertRaises(
-            Exception,
+            CoriolisTestException,
             call_advance_execution_state,
         )
         mock_cancel_tasks_execution.assert_called_once_with(
@@ -2538,19 +2548,18 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     @ddt.unpack
     def test_advance_execution_state_scheduled_tasks(
             self,
-            mock_get_tasks_execution,  # pylint: disable=unused-argument
-            mock_check_clean_execution_deadlock,  # pylint: disable=unused-argument
-            mock_get_task_origin,  # pylint: disable=unused-argument
-            mock_get_task_destination,  # pylint: disable=unused-argument
-            mock_get_action,  # pylint: disable=unused-argument
-            mock_get_endpoint,  # pylint: disable=unused-argument
+            mock_get_tasks_execution,
+            mock_check_clean_execution_deadlock,
+            mock_get_task_origin,
+            mock_get_task_destination,
+            mock_get_action,
+            mock_get_endpoint,
             mock_set_task_status,
-            mock_get_worker_service_rpc_for_task,  # pylint: disable=unused-argument
-            mock_cancel_tasks_execution,  # pylint: disable=unused-argument
-            mock_get_execution_status,  # pylint: disable=unused-argument
-            mock_set_tasks_execution_status,  # pylint: disable=unused-argument
-            config,
-    ):
+            mock_get_worker_service_rpc_for_task,
+            mock_cancel_tasks_execution,
+            mock_get_execution_status,
+            mock_set_tasks_execution_status,
+            config):
         tasks = config.get('tasks', [])
         execution = mock.Mock(
             status=constants.EXECUTION_STATUS_RUNNING,
@@ -2573,7 +2582,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
         )
 
         for task in tasks:
-            if not 'expected_status' in task:
+            if 'expected_status' not in task:
                 continue
             kwargs = {'exception_details': mock.ANY}
             if task['expected_status'] == constants.TASK_STATUS_PENDING:
@@ -2895,13 +2904,13 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
     @ddt.unpack
     def test_task_completed(
             self,
-            mock_lock,  # pylint: disable=unused-argument
+            mock_lock,
             mock_update_transfer_action_info,
             mock_get_action,
             mock_get_tasks_execution,
             mock_set_task_status,
             mock_get_task,
-            mock_sanitize_task_info,  # pylint: disable=unused-argument
+            mock_sanitize_task_info,
             config,
             expected_status,
     ):
@@ -2999,12 +3008,12 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_get_task,
             mock_set_task_status,
             mock_get_tasks_execution,
-            mock_get_action,  # pylint: disable=unused-argument
-            mock_lock,  # pylint: disable=unused-argument
-            mock_cancel_execution_for_osmorphing_debugging,  # pylint: disable=unused-argument
-            mock_set_tasks_execution_status,  # pylint: disable=unused-argument
-            mock_cancel_tasks_execution,  # pylint: disable=unused-argument
-            mock_check_delete_reservation_for_transfer,  # pylint: disable=unused-argument
+            mock_get_action,
+            mock_lock,
+            mock_cancel_execution_for_osmorphing_debugging,
+            mock_set_tasks_execution_status,
+            mock_cancel_tasks_execution,
+            mock_check_delete_reservation_for_transfer,
             config,
             expected_status,
     ):
@@ -3066,7 +3075,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock_set_task_status,
             mock_get_tasks_execution,
             mock_get_action,
-            mock_lock,  # pylint: disable=unused-argument
+            mock_lock,
             mock_cancel_execution_for_osmorphing_debugging,
             mock_set_tasks_execution_status,
             mock_cancel_tasks_execution,
@@ -3098,9 +3107,7 @@ class ConductorServerEndpointTestCase(test_base.CoriolisBaseTestCase):
             mock.sentinel.exception_details,
         )
         mock_cancel_execution_for_osmorphing_debugging.assert_called_once_with(
-            mock.sentinel.context,
-            mock_get_tasks_execution.return_value,
-        )
+            mock.sentinel.context, mock_get_tasks_execution.return_value, )
         self.assertEqual(2, mock_set_task_status.call_count)
         mock_set_tasks_execution_status.assert_called_once_with(
             mock.sentinel.context,

+ 2 - 1
coriolis/tests/db/test_api.py

@@ -5,7 +5,8 @@ from unittest import mock
 
 from coriolis.db import api
 from coriolis import exception
-from coriolis.tests import test_base, testutils
+from coriolis.tests import test_base
+from coriolis.tests import testutils
 
 
 class DBAPITestCase(test_base.CoriolisBaseTestCase):

Some files were not shown because too many files changed in this diff