
Drop eventlet

Eventlet is now deprecated and will soon become unsupported.

Just like OpenStack, we're going to switch from Eventlet to native
threading.

* https://wiki.openstack.org/wiki/Eventlet-removal
* https://removal.eventlet.org/guide/openstack/

This PR will:

* drop the eventlet dependency
* stop monkey-patching standard libraries
* spawn native threads instead of eventlet coroutines
	* added a few TODOs about merging some of them, since threads are
	  heavier-weight than coroutines
	* asyncio may be applicable in some cases but only if we can guarantee
	  that the callback doesn't perform any blocking operations
* use native coordination and communication primitives such as semaphores,
  locks or queues
* instead of eventlet.kill, we use stop variables or events to notify the
  threads that the work is done (see the sketch after this list)
* tell oslo.messaging and oslo.service to use threading instead of eventlet
* update coriolis-api to use cheroot (same as Ironic) instead of the
  eventlet-based WSGI server.
  * Note that we still allow using an external web server such as Apache, in
    which case the Coriolis WSGI app will be loaded instead of calling
    "coriolis-api".
Lucian Petrut committed 3 weeks ago (revision eab4357676)

+ 3 - 3
coriolis/cmd/__init__.py

@@ -1,10 +1,10 @@
 # Copyright 2017 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-import eventlet
+from oslo_service import backend
 
 from coriolis import conf
 
-
-eventlet.monkey_patch()
 conf.init_common_opts()
+
+backend.init_backend(backend.BackendType.THREADING)
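
Note that, per the oslo.service backend docs, the backend is expected to be
selected before any other oslo.service module is imported or used; a minimal
ordering sketch (assuming the legacy eventlet backend would otherwise be
picked implicitly):

    from oslo_service import backend

    # Select the threading backend first; importing other oslo.service
    # modules beforehand may initialize the legacy (eventlet) backend.
    backend.init_backend(backend.BackendType.THREADING)

    from oslo_service import service  # noqa: E402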

+ 1 - 1
coriolis/conductor/rpc/client.py

@@ -447,7 +447,7 @@ class ConductorTaskRpcEventHandler(events.BaseEventHandler):
     @property
     def _rpc_conductor_client(self):
         # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-        # oslo_messaging clients as the underlying eventlet thread queues will
+        # oslo_messaging clients as the underlying thread queues will
         # be invalidated.
         if self._rpc_conductor_client_instance is None:
             self._rpc_conductor_client_instance = ConductorClient()

+ 1 - 1
coriolis/conductor/rpc/server.py

@@ -177,7 +177,7 @@ class ConductorServerEndpoint(object):
         self._deployer_manager_client_instance = None
 
     # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-    # oslo_messaging clients as the underlying eventlet thread queues will
+    # oslo_messaging clients as the underlying thread queues will
     # be invalidated. Considering this class both serves from a "main
     # process" as well as forking child processes, it is safest to
     # instantiate the clients only when needed:

+ 15 - 25
coriolis/cron/cron.py

@@ -1,15 +1,16 @@
 import datetime
+import queue
 import sys
+import threading
 import time
 
-import eventlet
-from eventlet import semaphore
 from oslo_log import log
 from oslo_utils import timeutils
 import schedule
 
 from coriolis import exception
 from coriolis import schemas
+from coriolis import utils
 
 
 LOG = log.getLogger(__name__)
@@ -144,11 +145,11 @@ class CronJob(object):
 class Cron(object):
 
     def __init__(self):
-        self._queue = eventlet.Queue(maxsize=1000)
+        self._queue = queue.Queue(maxsize=1000)
         self._should_stop = False
         self._jobs = {}
-        self._eventlets = []
-        self._semaphore = semaphore.Semaphore(value=1)
+        self._threads = []
+        self._semaphore = threading.Semaphore(value=1)
 
     def register(self, job):
         if not isinstance(job, CronJob):
@@ -189,7 +190,7 @@ class Cron(object):
                           jobs[job].schedule)
                 if jobs[job].should_run(now):
                     LOG.debug("Spawning job %s" % job)
-                    eventlet.spawn(jobs[job].start, self._queue)
+                    utils.start_thread(jobs[job].start, args=[self._queue])
                     spawned += 1
 
         done = timeutils.utcnow()
@@ -199,12 +200,12 @@ class Cron(object):
             "jobs": spawned})
 
     def _loop(self):
-        while True:
+        while not self._should_stop:
             schedule.run_pending()
             time.sleep(.2)
 
     def _result_loop(self):
-        while True:
-            job_info = self._queue.get()
+        while not self._should_stop:
+            try:
+                job_info = self._queue.get(timeout=2)
+            except queue.Empty:
+                continue
             result = job_info["result"]
             error = job_info["error_info"]
@@ -223,7 +224,7 @@ class Cron(object):
     def _janitor(self):
         # remove expired jobs from memory. The check for expired
         # jobs runs once every minute.
-        while True:
+        while not self._should_stop:
             with self._semaphore:
                 tmp = {}
                 for job in self._jobs:
@@ -236,24 +237,13 @@ class Cron(object):
             # No need to run very often. Once a minute should do
             time.sleep(60)
 
-    def _ripper(self):
-        # Not sure if this will ever be called, but for correctness
-        # sake, thought I'd add it
-        while True:
-            if self._should_stop:
-                if len(self._eventlets):
-                    for greenthread in self._eventlets:
-                        eventlet.kill(greenthread)
-                    self._eventlets = []
-                return
-            time.sleep(.5)
-
     def start(self):
         schedule.every().minute.do(self._check_jobs)
-        self._eventlets.append(eventlet.spawn(self._loop))
-        self._eventlets.append(eventlet.spawn(self._janitor))
-        self._eventlets.append(eventlet.spawn(self._result_loop))
-        eventlet.spawn(self._ripper)
+        self._threads.append(utils.start_thread(self._loop))
+        # TODO(lpetrut): consider using "schedule" for the janitor job to avoid
+        # spawning an additional thread.
+        self._threads.append(utils.start_thread(self._janitor))
+        self._threads.append(utils.start_thread(self._result_loop))
 
     def stop(self):
         self._should_stop = True

+ 1 - 2
coriolis/deployer_manager/rpc/server.py

@@ -3,7 +3,6 @@
 
 import time
 
-import eventlet
 from oslo_config import cfg
 from oslo_log import log as logging
 
@@ -135,7 +134,7 @@ class DeployerManagerServerEndpoint:
             time.sleep(10)
 
     def _init_loop(self):
-        eventlet.spawn(self._loop)
+        utils.start_thread(self._loop)
 
     def execute_auto_deployment(
             self, ctxt, transfer_id, deployer_id, **kwargs):

+ 0 - 101
coriolis/deployments/manager.py

@@ -1,101 +0,0 @@
-# Copyright 2017 Cloudbase Solutions Srl
-# All Rights Reserved.
-
-import gc
-import sys
-
-import eventlet
-from oslo_log import log as logging
-from oslo_utils import units
-
-from coriolis import events
-from coriolis.providers import backup_writers
-from coriolis import qemu_reader
-from coriolis import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def _copy_volume(volume, disk_image_reader, backup_writer, event_manager):
-    disk_id = volume["disk_id"]
-    # for now we assume it is a local file
-    path = volume["disk_image_uri"]
-    skip_zeroes = volume.get("zeroed", False)
-
-    with backup_writer.open("", disk_id) as writer:
-        with disk_image_reader.open(path) as reader:
-            disk_size = reader.disk_size
-
-            perc_step = event_manager.add_percentage_step(
-                "Copying data of disk %s" % disk_id, disk_size)
-
-            offset = 0
-            max_block_size = 10 * units.Mi  # 10 MB
-
-            while offset < disk_size:
-                allocated, zero_block, block_size = reader.get_block_status(
-                    offset, max_block_size)
-                if not allocated or zero_block and skip_zeroes:
-                    if not allocated:
-                        LOG.debug(
-                            "Unallocated block detected: %s", block_size)
-                    else:
-                        LOG.debug("Skipping zero block: %s", block_size)
-                    offset += block_size
-                    writer.seek(offset)
-                else:
-                    buf = reader.read(offset, block_size)
-                    writer.write(buf)
-                    offset += len(buf)
-                    buf = None
-                    gc.collect()
-
-                event_manager.set_percentage_step(
-                    perc_step, offset)
-
-
-def _copy_wrapper(job_args):
-    disk_id = job_args[0].get("disk_id")
-    try:
-        return _copy_volume(*job_args), disk_id, False
-    except BaseException:
-        return sys.exc_info(), disk_id, True
-
-
-def copy_disk_data(target_conn_info, volumes_info, event_handler):
-    # TODO(gsamfira): the disk image should be an URI that can either be local
-    # (file://) or remote (https://, ftp://, smb://, nfs:// etc).
-    # This must happen if we are to implement multi-worker scenarios.
-    # In such cases, it is not guaranteed that the disk sync task
-    # will be started on the same node onto which the import
-    # happened. It may also be conceivable, that wherever the disk
-    # image ends up, we might be able to directly expose it using
-    # NFS, iSCSI or any other network protocol. In which case,
-    # we can skip downloading it locally just to sync it.
-
-    event_manager = events.EventManager(event_handler)
-
-    ip = target_conn_info["ip"]
-    port = target_conn_info.get("port", 22)
-    username = target_conn_info["username"]
-    pkey = target_conn_info.get("pkey")
-    password = target_conn_info.get("password")
-    event_manager.progress_update("Waiting for connectivity on %s:%s" % (
-        ip, port))
-    utils.wait_for_port_connectivity(ip, port)
-    backup_writer = backup_writers.SSHBackupWriter(
-        ip, port, username, pkey, password, volumes_info)
-    disk_image_reader = qemu_reader.QEMUDiskImageReader()
-
-    pool = eventlet.greenpool.GreenPool()
-    job_data = [(vol, disk_image_reader, backup_writer, event_manager)
-                for vol in volumes_info]
-    for result, disk_id, error in pool.imap(_copy_wrapper, job_data):
-        # TODO(gsamfira): There is no use in letting the other disks finish
-        # sync-ing as we don't save the state of the disk sync anywhere (yet).
-        # When/If we ever do add this info to the database, keep track of
-        # failures, and allow any other paralel sync to finish
-        if error:
-            event_manager.progress_update(
-                "Volume \"%s\" failed to sync" % disk_id)
-            raise result[0](result[1]).with_traceback(result[2])
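
The module above is dropped outright rather than ported, but for reference,
the GreenPool.imap fan-out it used maps naturally onto concurrent.futures (a
sketch under that assumption, not code from this PR):

    from concurrent.futures import ThreadPoolExecutor

    def copy_disks_threaded(job_data, copy_wrapper):
        # Hypothetical stand-in for pool.imap(_copy_wrapper, job_data):
        # map() yields results in submission order, like GreenPool.imap,
        # but the workers are native threads.
        with ThreadPoolExecutor(max_workers=max(1, len(job_data))) as pool:
            for result, disk_id, error in pool.map(copy_wrapper, job_data):
                if error:
                    raise result[0](result[1]).with_traceback(result[2])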

+ 1 - 1
coriolis/minion_manager/rpc/client.py

@@ -184,7 +184,7 @@ class MinionManagerPoolRpcEventHandler(events.BaseEventHandler):
     @property
     def _rpc_minion_manager_client(self):
         # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-        # oslo_messaging clients as the underlying eventlet thread queues will
+        # oslo_messaging clients as the underlying thread queues will
         # be invalidated.
         if self._rpc_minion_manager_client_instance is None:
             self._rpc_minion_manager_client_instance = MinionManagerClient()

+ 1 - 1
coriolis/minion_manager/rpc/server.py

@@ -182,7 +182,7 @@ class MinionManagerServerEndpoint(object):
             max_workers=25)
 
     # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-    # oslo_messaging clients as the underlying eventlet thread queues will
+    # oslo_messaging clients as the underlying thread queues will
     # be invalidated. Considering this class both serves from a "main
     # process" as well as forking child processes, it is safest to
     # instantiate the clients only when needed:

+ 1 - 1
coriolis/minion_manager/rpc/tasks.py

@@ -75,7 +75,7 @@ MINION_POOL_POWER_OFF_MACHINE_TASK_NAME_FORMAT = (
 class MinionManagerTaskEventMixin(object):
 
     # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-    # oslo_messaging clients as the underlying eventlet thread queues will
+    # oslo_messaging clients as the underlying thread queues will
     # be invalidated. Considering this class both serves from a "main
     # process" as well as forking child processes, it is safest to
     # re-instantiate the clients every time:

+ 65 - 37
coriolis/providers/backup_writers.py

@@ -8,13 +8,13 @@ import copy
 import datetime
 import errno
 import os
+import queue
 import shutil
 import tempfile
 import threading
 import time
 import uuid
 
-import eventlet
 from oslo_config import cfg
 from oslo_log import log as logging
 import paramiko
@@ -284,13 +284,18 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
         self._stderr = None
         self._offset = None
         self._ssh = None
-        self._sender_q = eventlet.Queue(maxsize=5)
-        self._enc_q = eventlet.Queue(maxsize=5)
-        self._sender_evt = None
-        self._encoder_evt = []
+        self._sender_q = queue.Queue(maxsize=5)
+        self._enc_q = queue.Queue(maxsize=5)
+        self._sender_thread = None
+        self._encoder_threads = []
         self._encoder_cnt = encoder_count
         self._exception = None
+        # Stop sequence:
+        # * once "_closing" is set, we no longer accept writes. We wait for
+        #   the queues to be emptied and then we set "_stopped".
+        # * once "_stopped" is set, the worker loops will exit.
         self._closing = False
+        self._stopped = False
 
         self._compress_transfer = compress_transfer
         if self._compress_transfer is None:
@@ -343,11 +348,10 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
 
     def _open(self):
         self._exec_helper_cmd()
-        self._sender_evt = eventlet.spawn(
-            self._sender)
+        self._sender_thread = utils.start_thread(self._sender)
         for _ in range(self._encoder_cnt):
-            self._encoder_evt.append(
-                eventlet.spawn(self._encoder))
+            self._encoder_threads.append(
+                utils.start_thread(self._encoder))
 
     def seek(self, pos):
         self._offset = pos
@@ -356,8 +360,11 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
         pass
 
     def _sender(self):
-        while True:
-            data = self._sender_q.get()
+        while not self._stopped:
+            try:
+                data = self._sender_q.get(timeout=2)
+            except queue.Empty:
+                continue
             try:
                 self._send_msg(data)
             except BaseException as err:
@@ -368,8 +375,11 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
                 del data
 
     def _encoder(self):
-        while True:
-            payload = self._enc_q.get()
+        while not self._stopped:
+            try:
+                payload = self._enc_q.get(timeout=2)
+            except queue.Empty:
+                continue
             try:
                 data = self._encode_data(
                     payload["data"],
@@ -415,6 +425,7 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
     def close(self):
         self._closing = True
         self._wait_for_queues()
+        self._stopped = True
         if self._exception:
             # We can raise here. Any SSH socket cleanup will happen
             # in _handle_exception()
@@ -427,13 +438,15 @@ class SSHBackupWriterImpl(BaseBackupWriterImpl):
             self._ssh.exec_command("sudo sync")
             self._ssh.close()
             self._ssh = None
-        if self._sender_evt:
-            eventlet.kill(self._sender_evt)
-            self._sender_evt = None
+        if self._sender_thread:
+            LOG.debug("Joining sender thread.")
+            self._sender_thread.join()
+            self._sender_thread = None
 
-        for i in self._encoder_evt:
-            eventlet.kill(i)
-        self._encoder_evt = []
+        for i in self._encoder_threads:
+            LOG.debug("Joining encoder thread.")
+            i.join()
+        self._encoder_threads = []
 
     def _handle_exception(self, ex):
         super(SSHBackupWriterImpl, self)._handle_exception(ex)
@@ -566,16 +579,21 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
         self._crt = None
         self._key = None
         self._ca = None
+        # Stop sequence:
+        # * once "_closing" is set, we no longer accept writes. We wait for
+        #   the queues to be emptied and then we set "_stopped".
+        # * once "_stopped" is set, the worker loops will exit.
         self._closing = False
+        self._stopped = False
         self._write_error = False
         self._id = None
         self._exception = None
         self._compressor_count = compressor_count
-        self._comp_q = eventlet.Queue(maxsize=5)
-        self._sender_q = eventlet.Queue(maxsize=5)
+        self._comp_q = queue.Queue(maxsize=5)
+        self._sender_q = queue.Queue(maxsize=5)
 
-        self._sender_evt = None
-        self._compressor_evt = None
+        self._sender_thread = None
+        self._compressor_threads = None
 
         self._compress_transfer = compress_transfer
         if self._compress_transfer is None:
@@ -637,13 +655,13 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
         self._closing = False
+        self._stopped = False
         self._init_session()
         self._acquire()
-        self._sender_evt = eventlet.spawn(self._sender)
+        self._sender_thread = utils.start_thread(self._sender)
         if self._compressor_count is None or self._compressor_count == 0:
             self._compressor_count = 1
-        self._compressor_evt = []
+        self._compressor_threads = []
         for _ in range(self._compressor_count):
-            self._compressor_evt.append(
-                eventlet.spawn(self._compressor))
+            self._compressor_threads.append(
+                utils.start_thread(self._compressor))
 
     def seek(self, pos):
         self._offset = pos
@@ -660,8 +678,11 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
             return
 
     def _compressor(self):
-        while True:
-            payload = self._comp_q.get()
+        while not self._stopped:
+            try:
+                payload = self._comp_q.get(timeout=2)
+            except queue.Empty:
+                continue
             send_payload = {
                 "encoding": None,
                 "offset": payload["offset"],
@@ -683,8 +704,12 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
             self._comp_q.task_done()
 
     def _sender(self):
-        while True:
-            payload = self._sender_q.get()
+        while not self._stopped:
+            try:
+                payload = self._sender_q.get(timeout=2)
+            except queue.Empty:
+                continue
             offset = copy.copy(payload["offset"])
             headers = {
                 "X-Write-Offset": str(offset),
@@ -856,6 +881,7 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
     def close(self):
         self._closing = True
         self._wait_for_queues()
+        self._stopped = True
         if self._exception:
             # There was an exception while writing. We still need to
             # release the disk.
@@ -870,13 +896,15 @@ class HTTPBackupWriterImpl(BaseBackupWriterImpl):
         if self._session:
             self._session.close()
             self._session = None
-        if self._sender_evt:
-            eventlet.kill(self._sender_evt)
-            self._sender_evt = None
-        if self._compressor_evt:
-            for i in self._compressor_evt:
-                eventlet.kill(i)
-            self._compressor_evt = None
+        if self._sender_thread:
+            LOG.debug("Joining sender thread.")
+            self._sender_thread.join()
+            self._sender_thread = None
+        if self._compressor_threads:
+            for i in self._compressor_threads:
+                LOG.debug("Joining compressor thread.")
+                i.join()
+            self._compressor_threads = None
 
 
 class HTTPBackupWriterBootstrapper(object):

+ 1 - 1
coriolis/rpc.py

@@ -60,7 +60,7 @@ def _get_transport():
 def get_server(target, endpoints, serializer=None):
     serializer = RequestContextSerializer(serializer)
     return messaging.get_rpc_server(_get_transport(), target, endpoints,
-                                    executor='eventlet',
+                                    executor='threading',
                                     serializer=serializer)
 
 

+ 21 - 11
coriolis/service.py

@@ -5,13 +5,15 @@ import argparse
 import os
 import platform
 import sys
+import threading
 
+from cheroot import wsgi
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging as messaging
 from oslo_service import service
-from oslo_service import wsgi
+from oslo_service import wsgi as oslo_wsgi
 
 from coriolis import rpc
 from coriolis import utils
@@ -106,7 +108,7 @@ def get_application():
     CONF(args[1:], project='coriolis', version="1.0.0")
     utils.setup_logging()
 
-    loader = wsgi.Loader(CONF)
+    loader = oslo_wsgi.Loader(CONF)
     return loader.load_app("coriolis-api")
 
 
@@ -129,26 +131,34 @@ class WSGIService(service.ServiceBase):
-        self._loader = wsgi.Loader(CONF)
+        self._loader = oslo_wsgi.Loader(CONF)
+        self._thread = None
         self._app = self._loader.load_app(name)
 
-        self._server = wsgi.Server(CONF,
-                                   name,
-                                   self._app,
-                                   host=self._host,
-                                   port=self._port)
+        bind_addr = (self._host, self._port)
+        self._server = wsgi.Server(
+            bind_addr=bind_addr,
+            wsgi_app=self._app,
+            server_name=name)
 
     def get_workers_count(self):
         return self._workers
 
     def start(self):
-        self._server.start()
+        # cheroot's serve() loop runs only while the "ready" flag set by
+        # prepare() is true, so bind the socket before spawning the thread.
+        self._server.prepare()
+        self._thread = threading.Thread(
+            target=self._server.serve,
+            daemon=True
+        )
+        self._thread.start()
 
     def stop(self):
-        self._server.stop()
+        if self._server:
+            self._server.stop()
+            if self._thread:
+                self._thread.join(timeout=2)
 
     def wait(self):
-        self._server.wait()
+        if self._thread:
+            self._thread.join()
 
     def reset(self):
-        self._server.reset()
+        pass
 
 
 class MessagingService(service.ServiceBase):
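
For reference, a standalone sketch of the cheroot lifecycle the service
wrapper above relies on (the handler and address are illustrative):

    from cheroot import wsgi

    def app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"ok"]

    server = wsgi.Server(("127.0.0.1", 8080), app, server_name="demo")
    server.prepare()    # bind the socket and mark the server ready
    try:
        server.serve()  # blocks until stop() clears the ready flag
    finally:
        server.stop()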

+ 13 - 3
coriolis/taskflow/runner.py

@@ -1,10 +1,8 @@
 # Copyright 2020 Cloudbase Solutions Srl
 # All Rights Reserved.
 
-# NOTE: we neeed to make sure eventlet is imported:
 import multiprocessing
 import sys
-import eventlet  # noqa
 
 from logging import handlers
 from oslo_config import cfg
@@ -127,7 +125,19 @@ class TaskFlowRunner(object):
         LOG.debug(
             "Sucessfully started background process for flow '%s' with "
             "PID: '%d'", flow.name, process.pid)
-        eventlet.spawn(self._handle_mp_log_events, process, mp_log_q)
+
+        # TODO(lpetrut): one logger thread per subprocess may be excessive when
+        # having a large number of concurrent jobs. It may be worth having a
+        # single thread aggregating logs from all subprocesses, potentially
+        # using asyncio.
+        #
+        # Note that asyncio coroutines can't directly consume multiprocessing
+        # queues, we'd probably need pipes instead. There's also the option of
+        # using select/poll/epoll directly.
+        utils.start_thread(
+            target=self._handle_mp_log_events,
+            args=(process, mp_log_q),
+            daemon=True)
 
     def run_flow_in_background(self, flow, store=None):
         """ Starts the given flow in the background in a separate process.

+ 15 - 0
coriolis/utils.py

@@ -17,6 +17,7 @@ import socket
 import string
 import subprocess
 import sys
+import threading
 import time
 import traceback
 import uuid
@@ -262,6 +263,8 @@ def write_ssh_file(ssh, remote_path, content):
     # Enabling pipelined transfers here will make
     # SFTP transfers much faster, but in combination
     # with eventlet, it seems to cause some lock-ups
+    #
+    # TODO(lpetrut): reconsider this now that eventlet is gone.
     fd.write(content)
     fd.close()
 
@@ -1048,3 +1051,15 @@ class Grub2ConfigEditor(object):
             tmp.write("%s\n" % fmt)
         tmp.seek(0)
         return tmp.read()
+
+
+def start_thread(target, args=(), kwargs=None, daemon=True):
+    """Convenience helper for one-liner thread spawning."""
+    thread = threading.Thread(
+        target=target,
+        daemon=daemon,
+        args=args,
+        kwargs=kwargs or {},
+    )
+    thread.start()
+    return thread
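
Typical usage of the helper above (the worker and queue names are
illustrative):

    jobs = queue.Queue()
    worker_thread = utils.start_thread(handle_jobs, args=(jobs,))
    # Threads are daemonic by default; join explicitly for a clean shutdown.
    worker_thread.join()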

+ 14 - 6
coriolis/worker/rpc/server.py

@@ -9,7 +9,6 @@ import signal
 import sys
 import time
 
-import eventlet
 from oslo_config import cfg
 from oslo_log import log as logging
 import psutil
@@ -54,7 +53,7 @@ class WorkerServerEndpoint(object):
     @property
     def _rpc_conductor_client(self):
         # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated
-        # oslo_messaging clients as the underlying eventlet thread queues will
+        # oslo_messaging clients as the underlying thread queues will
         # be invalidated. Considering this class both serves from a "main
         # process" as well as forking child processes, it is safest to
         # re-instantiate the client every time:
@@ -264,10 +263,19 @@ class WorkerServerEndpoint(object):
                     "Task '%s' was already in cancelling status." % task_id)
             raise
 
-        evt = eventlet.spawn(self._wait_for_process, p, mp_q)
-        eventlet.spawn(self._handle_mp_log_events, p, mp_log_q)
-
-        result = evt.wait()
+        # TODO(lpetrut): one logger thread per subprocess may be excessive when
+        # having a large number of concurrent jobs. It may be worth having a
+        # single thread aggregating logs from all subprocesses, potentially
+        # using asyncio.
+        #
+        # Note that asyncio coroutines can't directly consume multiprocessing
+        # queues, we'd probably need pipes instead. There's also the option of
+        # using select/poll/epoll directly.
+        utils.start_thread(
+            target=self._handle_mp_log_events,
+            args=(p, mp_log_q))
+
+        result = self._wait_for_process(p, mp_q)
         p.join()
 
         if result is None:

+ 5 - 1
requirements.txt

@@ -1,5 +1,4 @@
 setuptools>=65.0.0,<82  # pkg_resources removed in 82; required by sqlalchemy-migrate
-eventlet
 keystoneauth1
 keystonemiddleware
 Jinja2
@@ -46,3 +45,8 @@ taskflow
 webob
 sshtunnel
 requests-unixsocket
+# Cheroot (the CherryPy WSGI server core), also used by Ironic.
+# Used by coriolis-api when it is run directly.
+cheroot
+# Required by oslo.service when using the threading backend.
+cotyledon