Explorar o código

Merge pull request #323 from CloudVE/nuwang-provider-upgrade

Upgrade azure, openstack sdks and moto to latest
Nuwan Goonasekera hai 8 horas
pai
achega
1ec6b0f4dd

+ 32 - 0
.github/aws/permissions-policy.json

@@ -0,0 +1,32 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "EC2FullAccessUsEast1",
+      "Effect": "Allow",
+      "Action": "ec2:*",
+      "Resource": "*",
+      "Condition": {
+        "StringEqualsIfExists": {
+          "ec2:Region": "us-east-1"
+        }
+      }
+    },
+    {
+      "Sid": "S3FullAccess",
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": "*"
+    },
+    {
+      "Sid": "Route53FullAccess",
+      "Effect": "Allow",
+      "Action": [
+        "route53:*",
+        "route53domains:List*",
+        "route53domains:Get*"
+      ],
+      "Resource": "*"
+    }
+  ]
+}

+ 59 - 0
.github/aws/setup.sh

@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+# One-time setup to enable GitHub Actions OIDC -> AWS access for this repo.
+# Run with credentials that can manage IAM (admin, or a scoped IAM-admin role).
+# Re-running is safe: each step is idempotent or no-ops if already present.
+
+set -euo pipefail
+
+ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
+ROLE_NAME="cloudbridge-github-actions"
+POLICY_NAME="cloudbridge-github-actions-policy"
+REGION="us-east-1"
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+TRUST_POLICY="${SCRIPT_DIR}/trust-policy.json"
+PERMISSIONS_POLICY="${SCRIPT_DIR}/permissions-policy.json"
+
+# 1. Register GitHub's OIDC provider in IAM (no-op if it already exists).
+if ! aws iam get-open-id-connect-provider \
+      --open-id-connect-provider-arn "arn:aws:iam::${ACCOUNT_ID}:oidc-provider/token.actions.githubusercontent.com" \
+      >/dev/null 2>&1; then
+  aws iam create-open-id-connect-provider \
+    --url "https://token.actions.githubusercontent.com" \
+    --client-id-list "sts.amazonaws.com" \
+    --thumbprint-list "ffffffffffffffffffffffffffffffffffffffff"
+  # Thumbprint above is a placeholder — GitHub OIDC uses a JWKS endpoint and
+  # AWS now validates the JWKS chain server-side, so the thumbprint is no
+  # longer security-critical. Set any 40-char hex value.
+fi
+
+# 2. Render the trust policy with the real account id.
+TRUST_RENDERED="$(mktemp)"
+sed "s/ACCOUNT_ID/${ACCOUNT_ID}/g" "${TRUST_POLICY}" > "${TRUST_RENDERED}"
+
+# 3. Create or update the role.
+if aws iam get-role --role-name "${ROLE_NAME}" >/dev/null 2>&1; then
+  aws iam update-assume-role-policy \
+    --role-name "${ROLE_NAME}" \
+    --policy-document "file://${TRUST_RENDERED}"
+else
+  aws iam create-role \
+    --role-name "${ROLE_NAME}" \
+    --assume-role-policy-document "file://${TRUST_RENDERED}" \
+    --description "Assumed by GitHub Actions to run cloudbridge integration tests in ${REGION}"
+fi
+
+# 4. Attach an inline permissions policy (replaces on each run).
+aws iam put-role-policy \
+  --role-name "${ROLE_NAME}" \
+  --policy-name "${POLICY_NAME}" \
+  --policy-document "file://${PERMISSIONS_POLICY}"
+
+ROLE_ARN="arn:aws:iam::${ACCOUNT_ID}:role/${ROLE_NAME}"
+
+echo
+echo "Role ready: ${ROLE_ARN}"
+echo "Set this as a repo secret named AWS_OIDC_ROLE_ARN at:"
+echo "  https://github.com/CloudVE/cloudbridge/settings/secrets/actions"
+echo
+echo "Then remove the AWS_ACCESS_KEY and AWS_SECRET_KEY repo secrets — they are no longer used."

+ 24 - 0
.github/aws/trust-policy.json

@@ -0,0 +1,24 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AllowGitHubActionsOIDC",
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::ACCOUNT_ID:oidc-provider/token.actions.githubusercontent.com"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "token.actions.githubusercontent.com:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "token.actions.githubusercontent.com:sub": [
+            "repo:CloudVE/cloudbridge:ref:refs/heads/main",
+            "repo:CloudVE/cloudbridge:environment:cloud-integration"
+          ]
+        }
+      }
+    }
+  ]
+}

+ 12 - 6
.github/workflows/deploy.yaml

@@ -5,16 +5,22 @@ on:
   push:
     tags:
       - '*'
+
+permissions:
+  contents: read
+
 jobs:
   build-n-publish:
     name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@master
-    - name: Set up Python 3.10.12
-      uses: actions/setup-python@v1
+    - uses: actions/checkout@v6
       with:
-        python-version: 3.10.12
+        persist-credentials: false
+    - name: Set up Python 3.13
+      uses: actions/setup-python@v6
+      with:
+        python-version: 3.13
     - name: Install dependencies
       run: |
         python3 -m pip install --upgrade pip setuptools
@@ -25,13 +31,13 @@ jobs:
         twine check dist/*
         ls -l dist
     - name: Publish distribution 📦 to Test PyPI
-      uses: pypa/gh-action-pypi-publish@master
+      uses: pypa/gh-action-pypi-publish@v1.14.0
       with:
         password: ${{ secrets.TEST_PYPI_API_TOKEN }}
         repository_url: https://test.pypi.org/legacy/
         skip_existing: true
     - name: Publish distribution 📦 to PyPI
       if: github.event_name == 'release'
-      uses: pypa/gh-action-pypi-publish@master
+      uses: pypa/gh-action-pypi-publish@v1.14.0
       with:
         password: ${{ secrets.PYPI_API_TOKEN }}

+ 156 - 0
.github/workflows/integration-cloud.yaml

@@ -0,0 +1,156 @@
+name: Cloud integration tests
+
+# Runs the per-cloud integration matrix. This workflow handles untrusted PR
+# code from forks, so it relies on layered protections:
+#
+#   1. Trigger is pull_request_target with types=[labeled] — fires only when a
+#      label is *added*. A force-push to the PR head does not re-fire this.
+#   2. The `safe-to-test` label is the only one that matters (see `if:` below).
+#      Pushing new commits to the PR causes pr-label-strip.yaml to remove the
+#      label, so re-testing requires a maintainer to re-label after re-review.
+#   3. The PR head SHA is taken from the event payload, which is captured at
+#      label-add time. Even if the attacker force-pushes between label-add and
+#      the runner starting, the checkout pins to the SHA the maintainer saw.
+#   4. The `cloud-integration` GitHub Environment requires reviewer approval
+#      per run. This is the last-line defense and is enforced by GitHub before
+#      the OIDC token / cloud secrets are exposed.
+#   5. The AWS trust policy only accepts the `environment:cloud-integration`
+#      sub claim for PR runs — so even if 1–4 were bypassed, a fork PR run
+#      without our environment cannot assume the role.
+#   6. Each cloud's secrets are gated by matrix.cloud-provider so a single
+#      compromised matrix cell cannot exfiltrate other clouds' credentials.
+#
+# Push to main and workflow_dispatch run without the label gate (the
+# Environment is conditionally skipped — they are trusted contexts).
+
+on:
+  pull_request_target:
+    types: [labeled]
+  push:
+    branches: [main]
+  workflow_dispatch: {}
+
+permissions:
+  contents: read
+
+jobs:
+  cloud:
+    name: Per-cloud integration tests
+    # Gate fork PR runs by the `safe-to-test` label. Push and workflow_dispatch
+    # don't carry a `github.event.label` so the right-hand branch fires.
+    if: >-
+      github.event_name != 'pull_request_target'
+      || github.event.label.name == 'safe-to-test'
+    runs-on: ubuntu-latest
+    # The environment is only set for pull_request_target — push and
+    # workflow_dispatch are trusted contexts and don't need a per-run approval.
+    environment: ${{ github.event_name == 'pull_request_target' && 'cloud-integration' || '' }}
+    permissions:
+      id-token: write   # required for AWS OIDC
+      contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.13']
+        cloud-provider: ['aws', 'azure', 'gcp', 'openstack']
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v6
+        with:
+          # github.event.pull_request.head.sha is captured at trigger time and
+          # immutable in the event payload — pins to the SHA the maintainer
+          # reviewed when applying the label.
+          ref: ${{ github.event.pull_request.head.sha }}
+          persist-credentials: false
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+           python-version: ${{ matrix.python-version }}
+
+      - name: Cache pip dir
+        uses: actions/cache@v5
+        with:
+          path: ~/.cache/pip
+          key: pip-cache-${{ matrix.python-version }}-${{ hashFiles('**/setup.py', '**/requirements.txt') }}
+
+      - name: Install required packages
+        run: pip install tox
+
+      - name: Configure AWS credentials via OIDC
+        if: matrix.cloud-provider == 'aws'
+        uses: aws-actions/configure-aws-credentials@v6
+        with:
+          role-to-assume: ${{ secrets.AWS_OIDC_ROLE_ARN }}
+          aws-region: us-east-1
+
+      - name: Run tox
+        id: tox
+        run: tox -e py${{ matrix.python-version }}-${{ matrix.cloud-provider }}
+        env:
+          PYTHONUNBUFFERED: "True"
+          # Per-cloud secret scoping: each variable is only set in the matrix
+          # cell that needs it. Limits blast radius if a single cell is
+          # compromised.
+          # aws — credentials supplied via the OIDC step above
+          CB_VM_TYPE_AWS: ${{ matrix.cloud-provider == 'aws' && secrets.CB_VM_TYPE_AWS || '' }}
+          # azure
+          AZURE_CLIENT_ID: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_CLIENT_ID || '' }}
+          AZURE_SUBSCRIPTION_ID: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_SUBSCRIPTION_ID || '' }}
+          AZURE_SECRET: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_SECRET || '' }}
+          AZURE_TENANT: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_TENANT || '' }}
+          AZURE_RESOURCE_GROUP: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_RESOURCE_GROUP || '' }}
+          AZURE_STORAGE_ACCOUNT: ${{ matrix.cloud-provider == 'azure' && secrets.AZURE_STORAGE_ACCOUNT || '' }}
+          CB_IMAGE_AZURE: ${{ matrix.cloud-provider == 'azure' && secrets.CB_IMAGE_AZURE || '' }}
+          CB_VM_TYPE_AZURE: ${{ matrix.cloud-provider == 'azure' && secrets.CB_VM_TYPE_AZURE || '' }}
+          # gcp
+          GCP_SERVICE_CREDS_DICT: ${{ matrix.cloud-provider == 'gcp' && secrets.GCP_SERVICE_CREDS_DICT || '' }}
+          CB_IMAGE_GCP: ${{ matrix.cloud-provider == 'gcp' && secrets.CB_IMAGE_GCP || '' }}
+          CB_VM_TYPE_GCP: ${{ matrix.cloud-provider == 'gcp' && secrets.CB_VM_TYPE_GCP || '' }}
+          # openstack
+          OS_AUTH_URL: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_AUTH_URL || '' }}
+          OS_PASSWORD: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_PASSWORD || '' }}
+          OS_PROJECT_NAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_PROJECT_NAME || '' }}
+          OS_PROJECT_DOMAIN_NAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_PROJECT_DOMAIN_NAME || '' }}
+          OS_TENANT_NAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_TENANT_NAME || '' }}
+          OS_USERNAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_USERNAME || '' }}
+          OS_REGION_NAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_REGION_NAME || '' }}
+          OS_USER_DOMAIN_NAME: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_USER_DOMAIN_NAME || '' }}
+          OS_APPLICATION_CREDENTIAL_ID: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_APPLICATION_CREDENTIAL_ID || '' }}
+          OS_APPLICATION_CREDENTIAL_SECRET: ${{ matrix.cloud-provider == 'openstack' && secrets.OS_APPLICATION_CREDENTIAL_SECRET || '' }}
+          CB_IMAGE_OS: ${{ matrix.cloud-provider == 'openstack' && secrets.CB_IMAGE_OS || '' }}
+          CB_VM_TYPE_OS: ${{ matrix.cloud-provider == 'openstack' && secrets.CB_VM_TYPE_OS || '' }}
+          CB_PLACEMENT_OS: ${{ matrix.cloud-provider == 'openstack' && secrets.CB_PLACEMENT_OS || '' }}
+
+      - name: Create Build Status Badge
+        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+        uses: schneegans/dynamic-badges-action@0e50b8bad39e7e1afd3e4e9c2b7dd145fad07501 # v1.8.0
+        with:
+          auth: ${{ secrets.BUILD_STATUS_GIST_SECRET }}
+          gistID: ${{ secrets.BUILD_STATUS_GIST_ID }}
+          filename: cloudbridge_py${{ matrix.python-version }}_${{ matrix.cloud-provider }}.json
+          label: ${{ matrix.cloud-provider }}
+          message: ${{ fromJSON('["passing", "failing"]')[steps.tox.outcome != 'success'] }}
+          color: ${{ fromJSON('["green", "red"]')[steps.tox.outcome != 'success'] }}
+
+      - name: Upload coverage to Coveralls
+        if: ${{ steps.tox.outcome == 'success' }}
+        uses: coverallsapp/github-action@648a8eb78e6d50909eff900e4ec85cab4524a45b # v2.3.6
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          flag-name: run-${{ matrix.python-version }}-${{ matrix.cloud-provider }}
+          parallel: true
+          file: coverage.xml
+          format: cobertura
+
+  finish:
+    needs: cloud
+    if: ${{ always() && needs.cloud.result != 'skipped' }}
+    runs-on: ubuntu-latest
+    steps:
+    - name: Coveralls Finished
+      uses: coverallsapp/github-action@648a8eb78e6d50909eff900e4ec85cab4524a45b # v2.3.6
+      with:
+        github-token: ${{ secrets.GITHUB_TOKEN }}
+        parallel-finished: true

+ 26 - 83
.github/workflows/integration.yaml

@@ -1,6 +1,13 @@
-name: Integration tests
+name: Lint and mock tests
+
+# Runs on every push and pull request — including PRs from forks. This workflow
+# is intentionally limited to lint + the mock provider so it can run safely on
+# untrusted code (no secrets, no cloud credentials).
+#
+# Cloud-provider integration tests live in integration-cloud.yaml, which uses
+# pull_request_target plus a maintainer-applied `safe-to-test` label and a
+# protected GitHub Environment.
 
-# Run this workflow every time a new commit pushed to your repository
 on:
   push:
     branches:
@@ -10,26 +17,29 @@ on:
       - main
   workflow_dispatch: {}
 
+permissions:
+  contents: read
+
 jobs:
-  # Set the job key. The key is displayed as the job name
-  # when a job name is not provided
   lint:
     name: Lint code
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [ '3.10' ]
+        python-version: [ '3.13' ]
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
+        with:
+          persist-credentials: false
 
       - name: Setup Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
            python-version: ${{ matrix.python-version }}
 
       - name: Cache pip dir
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           path: ~/.cache/pip
           key: pip-cache-${{ matrix.python-version }}-lint
@@ -40,31 +50,25 @@ jobs:
       - name: Run tox
         run: tox -e lint
 
-  integration:
-    # Name the Job
-    name: Per-cloud integration tests
-    needs: lint
-    # Set the type of machine to run on
+  mock:
+    name: Mock-provider tests
     runs-on: ubuntu-latest
     strategy:
-      fail-fast: false
       matrix:
-        python-version: ['3.10']
-        cloud-provider: ['aws', 'azure', 'gcp', 'mock', 'openstack']
-
+        python-version: ['3.13']
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
-          ref: ${{ github.event.pull_request.head.sha }}
+          persist-credentials: false
 
       - name: Setup Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
            python-version: ${{ matrix.python-version }}
 
       - name: Cache pip dir
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           path: ~/.cache/pip
           key: pip-cache-${{ matrix.python-version }}-${{ hashFiles('**/setup.py', '**/requirements.txt') }}
@@ -73,67 +77,6 @@ jobs:
         run: pip install tox
 
       - name: Run tox
-        id: tox
-        run: tox -e py${{ matrix.python-version }}-${{ matrix.cloud-provider }}
+        run: tox -e py${{ matrix.python-version }}-mock
         env:
           PYTHONUNBUFFERED: "True"
-          # aws
-          AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
-          AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
-          CB_VM_TYPE_AWS: ${{ secrets.CB_VM_TYPE_AWS }}
-          # azure
-          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
-          AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
-          AZURE_SECRET: ${{ secrets.AZURE_SECRET }}
-          AZURE_TENANT: ${{ secrets.AZURE_TENANT }}
-          AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }}
-          AZURE_STORAGE_ACCOUNT: ${{ secrets.AZURE_STORAGE_ACCOUNT }}
-          CB_IMAGE_AZURE: ${{ secrets.CB_IMAGE_AZURE }}
-          CB_VM_TYPE_AZURE: ${{ secrets.CB_VM_TYPE_AZURE }}
-          # gcp
-          GCP_SERVICE_CREDS_DICT: ${{ secrets.GCP_SERVICE_CREDS_DICT }}
-          CB_IMAGE_GCP: ${{ secrets.CB_IMAGE_GCP }}
-          CB_VM_TYPE_GCP: ${{ secrets.CB_VM_TYPE_GCP }}
-          # openstack
-          OS_AUTH_URL: ${{ secrets.OS_AUTH_URL }}
-          OS_PASSWORD: ${{ secrets.OS_PASSWORD }}
-          OS_PROJECT_NAME: ${{ secrets.OS_PROJECT_NAME }}
-          OS_PROJECT_DOMAIN_NAME: ${{ secrets.OS_PROJECT_DOMAIN_NAME }}
-          OS_TENANT_NAME: ${{ secrets.OS_TENANT_NAME }}
-          OS_USERNAME: ${{ secrets.OS_USERNAME }}
-          OS_REGION_NAME: ${{ secrets.OS_REGION_NAME }}
-          OS_USER_DOMAIN_NAME: ${{ secrets.OS_USER_DOMAIN_NAME }}
-          OS_APPLICATION_CREDENTIAL_ID: ${{ secrets.OS_APPLICATION_CREDENTIAL_ID }}
-          OS_APPLICATION_CREDENTIAL_SECRET: ${{ secrets.OS_APPLICATION_CREDENTIAL_SECRET }}
-          CB_IMAGE_OS: ${{ secrets.CB_IMAGE_OS }}
-          CB_VM_TYPE_OS: ${{ secrets.CB_VM_TYPE_OS }}
-          CB_PLACEMENT_OS: ${{ secrets.CB_PLACEMENT_OS }}
-
-      - name: Create Build Status Badge
-        if: github.ref == 'refs/heads/master'
-        uses: schneegans/dynamic-badges-action@v1.1.0
-        with:
-          auth: ${{ secrets.BUILD_STATUS_GIST_SECRET }}
-          gistID: ${{ secrets.BUILD_STATUS_GIST_ID }}
-          filename: cloudbridge_py${{ matrix.python-version }}_${{ matrix.cloud-provider }}.json
-          label: ${{ matrix.cloud-provider }}
-          message: ${{ fromJSON('["passing", "failing"]')[steps.tox.outcome != 'success'] }}
-          color: ${{ fromJSON('["green", "red"]')[steps.tox.outcome != 'success'] }}
-
-      - name: Coveralls
-        if: ${{ steps.tox.outcome == 'success' }}
-        uses: AndreMiras/coveralls-python-action@develop
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          flag-name: run-${{ matrix.python-version }}-${{ matrix.cloud-provider }}
-          parallel: true
-
-  finish:
-    needs: integration
-    runs-on: ubuntu-latest
-    steps:
-    - name: Coveralls Finished
-      uses: AndreMiras/coveralls-python-action@develop
-      with:
-        github-token: ${{ secrets.github_token }}
-        parallel-finished: true

+ 26 - 0
.github/workflows/pr-label-strip.yaml

@@ -0,0 +1,26 @@
+name: Strip safe-to-test on PR update
+
+# When new commits land on a PR, automatically remove the `safe-to-test` label
+# so the cloud integration workflow cannot be re-triggered against unreviewed
+# code. A maintainer must re-apply the label after reviewing the new diff.
+
+on:
+  pull_request_target:
+    types: [synchronize]
+
+permissions:
+  pull-requests: write
+
+jobs:
+  strip:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Remove safe-to-test label
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GH_REPO: ${{ github.repository }}
+          PR_NUMBER: ${{ github.event.pull_request.number }}
+        # `gh pr edit --remove-label` exits non-zero if the label isn't
+        # present; that's the common case (most PRs are never labeled), so
+        # swallow it.
+        run: gh pr edit "$PR_NUMBER" --remove-label safe-to-test || true

+ 104 - 73
cloudbridge/providers/azure/azure_client.py

@@ -7,17 +7,27 @@ from cloudbridge.interfaces.exceptions import (DuplicateResourceException,
                                                ProviderConnectionException,
                                                WaitStateException)
 
+from azure.core.credentials import AzureNamedKeyCredential
 from azure.core.exceptions import (ClientAuthenticationError,
                                    HttpResponseError, ResourceExistsError,
                                    ResourceNotFoundError)
-from azure.cosmosdb.table.tableservice import TableService
+from azure.data.tables import TableServiceClient
 from azure.identity import ClientSecretCredential
 from azure.mgmt.compute import ComputeManagementClient
+from azure.mgmt.compute.models import (CreationData, Disk, DiskUpdate, Image,
+                                       ImageUpdate, Snapshot, SnapshotUpdate,
+                                       VirtualMachine, VirtualMachineUpdate)
 from azure.mgmt.devtestlabs.models import GalleryImageReference
 from azure.mgmt.network import NetworkManagementClient
+from azure.mgmt.network.models import (NetworkInterface,
+                                       NetworkSecurityGroup, PublicIPAddress,
+                                       RouteTable, SecurityRule, SubResource,
+                                       Subnet, TagsObject, VirtualNetwork)
 from azure.mgmt.resource import ResourceManagementClient
-from azure.mgmt.resource.subscriptions import SubscriptionClient
+from azure.mgmt.resource.resources.models import ResourceGroup
 from azure.mgmt.storage import StorageManagementClient
+from azure.mgmt.storage.models import Sku, StorageAccountCreateParameters
+from azure.mgmt.subscription import SubscriptionClient
 from azure.storage.blob import (BlobSasPermissions, BlobServiceClient,
                                 generate_blob_sas)
 
@@ -172,7 +182,8 @@ class AzureClient(object):
         self._compute_client = None
         self._access_key_result = None
         self._block_blob_service = None
-        self._table_service = None
+        self._table_service_client = None
+        self._public_key_table_client = None
         self._storage_account = None
 
         log.debug("azure subscription : %s", self.subscription_id)
@@ -273,15 +284,18 @@ class AzureClient(object):
     @property
     def table_service(self):
         self._get_or_create_storage_account()
-        if not self._table_service:
-            self._table_service = TableService(
+        if not self._table_service_client:
+            credential = AzureNamedKeyCredential(
                 self.storage_account,
                 self.access_key_result.keys[0].value)
-        if not self._table_service. \
-                exists(table_name=self.public_key_storage_table_name):
-            self._table_service.create_table(
-                self.public_key_storage_table_name)
-        return self._table_service
+            self._table_service_client = TableServiceClient(
+                endpoint=f"https://{self.storage_account}.table.core.windows.net/",
+                credential=credential)
+        if not self._public_key_table_client:
+            self._public_key_table_client = \
+                self._table_service_client.create_table_if_not_exists(
+                    table_name=self.public_key_storage_table_name)
+        return self._public_key_table_client
 
     def blob_client(self, container_name, blob_name):
         return self.blob_service.get_blob_client(container=container_name, blob=blob_name)
@@ -291,7 +305,7 @@ class AzureClient(object):
 
     def create_resource_group(self, name, parameters):
         return self.resource_client.resource_groups. \
-            create_or_update(name, parameters)
+            create_or_update(name, ResourceGroup(**parameters))
 
     def get_storage_account(self, storage_account):
         return self.storage_client.storage_accounts. \
@@ -299,7 +313,8 @@ class AzureClient(object):
 
     def create_storage_account(self, name, params):
         return self.storage_client.storage_accounts. \
-            begin_create(self.resource_group, name.lower(), params).result()
+            begin_create(self.resource_group, name.lower(),
+                         StorageAccountCreateParameters(**params)).result()
 
     # Create a storage account. To prevent a race condition, try
     # to get or create at least twice
@@ -315,9 +330,7 @@ class AzureClient(object):
                     self.get_storage_account(self.storage_account)
             except ResourceNotFoundError:
                 storage_account_params = {
-                    'sku': {
-                        'name': 'Standard_LRS'
-                    },
+                    'sku': Sku(name='Standard_LRS'),
                     'kind': 'storage',
                     'location': self.region_name,
                 }
@@ -362,17 +375,16 @@ class AzureClient(object):
 
     def create_vm_firewall(self, name, parameters):
         return self.network_management_client.network_security_groups. \
-            begin_create_or_update(self.resource_group, name,
-                                   parameters).result()
+            begin_create_or_update(
+                self.resource_group, name,
+                NetworkSecurityGroup(**parameters)).result()
 
     def update_vm_firewall_tags(self, fw_id, tags):
         url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                              fw_id)
         name = url_params.get(VM_FIREWALL_NAME, "")
         return self.network_management_client.network_security_groups. \
-            begin_create_or_update(self.resource_group, name,
-                                   {'tags': tags,
-                                    'location': self.region_name}).result()
+            update_tags(self.resource_group, name, TagsObject(tags=tags))
 
     def get_vm_firewall(self, fw_id):
         url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
@@ -393,9 +405,14 @@ class AzureClient(object):
         url_params = azure_helpers.parse_url(VM_FIREWALL_RESOURCE_ID,
                                              fw_id)
         vm_firewall_name = url_params.get(VM_FIREWALL_NAME, "")
+        # parameters may be either a raw dict (from VMFirewallRuleService) or
+        # an existing SecurityRule model (when overriding default rules from
+        # the firewall.default_security_rules list).
+        rule = (parameters if isinstance(parameters, SecurityRule)
+                else SecurityRule(**parameters))
         return self.network_management_client.security_rules. \
             begin_create_or_update(self.resource_group, vm_firewall_name,
-                                   rule_name, parameters).result()
+                                   rule_name, rule).result()
 
     def delete_vm_firewall_rule(self, fw_rule_id, vm_firewall):
         url_params = azure_helpers.parse_url(VM_FIREWALL_RULE_RESOURCE_ID,
@@ -476,14 +493,14 @@ class AzureClient(object):
         return self.compute_client.disks.begin_create_or_update(
             self.resource_group,
             disk_name,
-            params
+            Disk(**params)
         ).result()
 
     def create_snapshot_disk(self, disk_name, params):
         return self.compute_client.disks.begin_create_or_update(
             self.resource_group,
             disk_name,
-            params
+            Disk(**params)
         ).result()
 
     def get_disk(self, disk_id):
@@ -509,7 +526,7 @@ class AzureClient(object):
         return self.compute_client.disks.begin_update(
             self.resource_group,
             disk_name,
-            {'tags': tags}  # type: ignore
+            DiskUpdate(tags=tags)
         ).wait()
 
     def list_snapshots(self):
@@ -527,14 +544,14 @@ class AzureClient(object):
         snapshot = self.compute_client.snapshots.begin_create_or_update(
             self.resource_group,
             snapshot_name,
-            {
-                'location': volume.location,
-                'creation_data': {
-                    'create_option': 'Copy',
-                    'source_uri': volume.id
-                },
-                'tags': tags
-            }
+            Snapshot(
+                location=volume.location,
+                creation_data=CreationData(
+                    create_option='Copy',
+                    source_uri=volume.id
+                ),
+                tags=tags
+            )
         ).result()
 
         self.update_snapshot_tags(snapshot.id, tags)
@@ -554,7 +571,7 @@ class AzureClient(object):
         return self.compute_client.snapshots.begin_update(
             self.resource_group,
             snapshot_name,
-            {'tags': tags}  # type: ignore
+            SnapshotUpdate(tags=tags)
         ).wait()
 
     def is_gallery_image(self, image_id):
@@ -565,7 +582,8 @@ class AzureClient(object):
 
     def create_image(self, name, params):
         return self.compute_client.images. \
-            begin_create_or_update(self.resource_group, name, params).result()
+            begin_create_or_update(
+                self.resource_group, name, Image(**params)).result()
 
     def delete_image(self, image_id):
         url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
@@ -601,12 +619,9 @@ class AzureClient(object):
             return True
         else:
             name = url_params.get(IMAGE_NAME, "")
-            return self.compute_client.images. \
-                begin_create_or_update(self.resource_group, name,
-                                       {
-                                           'tags': tags,
-                                           'location': self.region_name
-                                       }).result()
+            return self.compute_client.images.begin_update(
+                self.resource_group, name,
+                ImageUpdate(tags=tags)).result()
 
     def list_vm_types(self):
         return self.compute_client.virtual_machine_sizes. \
@@ -625,7 +640,9 @@ class AzureClient(object):
 
     def create_network(self, name, params):
         return self.network_management_client.virtual_networks. \
-            begin_create_or_update(self.networking_resource_group, name, parameters=params).result()
+            begin_create_or_update(
+                self.networking_resource_group, name,
+                parameters=VirtualNetwork(**params)).result()
 
     def delete_network(self, network_id):
         url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
@@ -637,7 +654,8 @@ class AzureClient(object):
         url_params = azure_helpers.parse_url(NETWORK_RESOURCE_ID, network_id)
         network_name = url_params.get(NETWORK_NAME, "")
         return self.network_management_client.virtual_networks. \
-            begin_create_or_update(self.networking_resource_group, network_name, tags).result()
+            update_tags(self.networking_resource_group, network_name,
+                        TagsObject(tags=tags))
 
     def get_network_id_for_subnet(self, subnet_id):
         url_params = azure_helpers.parse_url(SUBNET_RESOURCE_ID, subnet_id)
@@ -668,7 +686,7 @@ class AzureClient(object):
                 self.networking_resource_group,
                 network_name,
                 subnet_name,
-                params
+                Subnet(**params)
             )
         subnet_info = result_create.result()
 
@@ -705,8 +723,10 @@ class AzureClient(object):
 
     def create_floating_ip(self, public_ip_name, public_ip_parameters):
         return self.network_management_client.public_ip_addresses. \
-            begin_create_or_update(self.networking_resource_group,
-                                   public_ip_name, public_ip_parameters).result()
+            begin_create_or_update(
+                self.networking_resource_group,
+                public_ip_name,
+                PublicIPAddress(**public_ip_parameters)).result()
 
     def get_floating_ip(self, public_ip_id):
         url_params = azure_helpers.parse_url(PUBLIC_IP_RESOURCE_ID,
@@ -728,7 +748,8 @@ class AzureClient(object):
                                              fip_id)
         fip_name = url_params.get(PUBLIC_IP_NAME, "")
         self.network_management_client.public_ip_addresses. \
-            begin_create_or_update(self.networking_resource_group, fip_name, tags).result()
+            update_tags(self.networking_resource_group, fip_name,
+                        TagsObject(tags=tags))
 
     def list_floating_ips(self):
         return self.network_management_client.public_ip_addresses.list(
@@ -772,14 +793,18 @@ class AzureClient(object):
 
     def create_vm(self, vm_name, params):
         return self.compute_client.virtual_machines. \
-            begin_create_or_update(self.resource_group, vm_name, params).result()
+            begin_create_or_update(
+                self.resource_group, vm_name,
+                VirtualMachine(**params)).result()
 
     def update_vm(self, vm_id, params):
         url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                              vm_id)
         vm_name = url_params.get(VM_NAME, "")
         return self.compute_client.virtual_machines. \
-            begin_create_or_update(self.resource_group, vm_name, params).wait()
+            begin_create_or_update(
+                self.resource_group, vm_name,
+                VirtualMachine(**params)).wait()
 
     def deallocate_vm(self, vm_id):
         url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
@@ -806,8 +831,9 @@ class AzureClient(object):
         url_params = azure_helpers.parse_url(VM_RESOURCE_ID,
                                              vm_id)
         vm_name = url_params.get(VM_NAME, "")
-        self.compute_client.virtual_machines. \
-            begin_create_or_update(self.resource_group, vm_name, tags).result()
+        self.compute_client.virtual_machines.begin_update(
+            self.resource_group, vm_name,
+            VirtualMachineUpdate(tags=tags)).result()
 
     def delete_nic(self, nic_id):
         nic_params = azure_helpers.\
@@ -827,11 +853,15 @@ class AzureClient(object):
         nic_params = azure_helpers.\
             parse_url(NETWORK_INTERFACE_RESOURCE_ID, nic_id)
         nic_name = nic_params.get(NETWORK_INTERFACE_NAME, "")
+        # update_nic is called with the existing NIC model (from get_nic());
+        # create_nic is called with a raw dict from services.py. Accept both.
+        nic = (params if isinstance(params, NetworkInterface)
+               else NetworkInterface(**params))
         async_nic_creation = self.network_management_client. \
             network_interfaces.begin_create_or_update(
                 self.resource_group,
                 nic_name,
-                params
+                nic
             )
         nic_info = async_nic_creation.result()
         return nic_info
@@ -841,31 +871,33 @@ class AzureClient(object):
             network_interfaces.begin_create_or_update(
                 self.resource_group,
                 nic_name,
-                params
+                NetworkInterface(**params)
             ).result()
 
     def create_public_key(self, entity):
-        return self.table_service. \
-            insert_or_replace_entity(self.public_key_storage_table_name,
-                                     entity)
+        return self.table_service.upsert_entity(entity)
 
     def get_public_key(self, name):
-        entities = self.table_service. \
-            query_entities(self.public_key_storage_table_name,
-                           "Name eq '{0}'".format(name), num_results=1)
-
-        return entities.items[0] if len(entities.items) > 0 else None
+        entities = list(self.table_service.query_entities(
+            query_filter="Name eq '{0}'".format(name),
+            results_per_page=1))
+        return entities[0] if entities else None
 
     def delete_public_key(self, entity):
-        self.table_service.delete_entity(self.public_key_storage_table_name,
-                                         entity.PartitionKey, entity.RowKey)
+        self.table_service.delete_entity(
+            partition_key=entity['PartitionKey'],
+            row_key=entity['RowKey'])
 
     def list_public_keys(self, partition_key, limit=None, marker=None):
-        entities = self.table_service. \
-            query_entities(self.public_key_storage_table_name,
-                           "PartitionKey eq '{0}'".format(partition_key),
-                           marker=marker, num_results=limit)
-        return (entities.items, entities.next_marker)
+        pager = self.table_service.query_entities(
+            query_filter="PartitionKey eq '{0}'".format(partition_key),
+            results_per_page=limit).by_page(continuation_token=marker)
+        try:
+            page = next(pager)
+        except StopIteration:
+            return ([], None)
+        items = list(page)
+        return (items, pager.continuation_token)
 
     def delete_route_table(self, route_table_name):
         self.network_management_client. \
@@ -884,9 +916,7 @@ class AzureClient(object):
             subnet_name
         )
         if subnet_info:
-            subnet_info.route_table = {
-                'id': route_table_id
-            }
+            subnet_info.route_table = SubResource(id=route_table_id)
 
             result_create = self.network_management_client. \
                 subnets.begin_create_or_update(
@@ -938,8 +968,9 @@ class AzureClient(object):
         return self.network_management_client. \
             route_tables.begin_create_or_update(
              self.resource_group,
-             route_table_name, params).result()
+             route_table_name, RouteTable(**params)).result()
 
     def update_route_table_tags(self, route_table_name, tags):
-        self.network_management_client.route_tables. \
-            begin_create_or_update(self.resource_group, route_table_name, tags).result()
+        self.network_management_client.route_tables.update_tags(
+            self.resource_group, route_table_name,
+            TagsObject(tags=tags))

+ 18 - 0
cloudbridge/providers/azure/helpers.py

@@ -1,6 +1,24 @@
+import re
+
 from cloudbridge.interfaces.exceptions import InvalidValueException
 
 
+_RG_NAME_RE = re.compile(r'(/resourceGroups/)([^/]+)', re.IGNORECASE)
+
+
+def normalize_rg_case(azure_id):
+    # Microsoft.Compute/images list_by_resource_group returns the RG segment
+    # in a case that can differ from what create/get echo back (we've seen
+    # uppercase from list, lowercase from create/get for the same RG).
+    # Lowercase just the RG-name segment so that ids from any code path
+    # compare equal. Provider/type segments and the resource name itself are
+    # preserved.
+    if not azure_id:
+        return azure_id
+    return _RG_NAME_RE.sub(
+        lambda m: m.group(1) + m.group(2).lower(), azure_id)
+
+
 # def filter_by_tag(list_items, filters):
 #     """
 #     This function filter items on the tags

+ 30 - 35
cloudbridge/providers/azure/provider.py

@@ -1,9 +1,10 @@
 import logging
 import uuid
 
-from deprecation import deprecated
+from azure.core.exceptions import HttpResponseError
+from azure.core.exceptions import ResourceNotFoundError
 
-from msrestazure.azure_exceptions import CloudError
+from deprecation import deprecated
 
 import tenacity
 
@@ -143,7 +144,7 @@ class AzureCloudProvider(BaseCloudProvider):
         return self._azure_client
 
     @tenacity.retry(stop=tenacity.stop_after_attempt(2),
-                    retry=tenacity.retry_if_exception_type(CloudError),
+                    retry=tenacity.retry_if_exception_type(HttpResponseError),
                     reraise=True)
     def _initialize(self):
         """
@@ -154,33 +155,30 @@ class AzureCloudProvider(BaseCloudProvider):
         try:
             self._azure_client.get_resource_group(self.resource_group)
 
-        except CloudError as cloud_error:
-            if cloud_error.error.error == "ResourceGroupNotFound":
-                resource_group_params = {'location': self.region_name}
-                try:
-                    self._azure_client.\
-                        create_resource_group(self.resource_group,
-                                              resource_group_params)
-                except CloudError as cloud_error2:  # pragma: no cover
-                    if cloud_error2.error.error == "AuthorizationFailed":
-                        mess = 'The following error was returned by Azure:\n' \
-                               '%s\n\nThis is likely because the Role' \
-                               'associated with the given credentials does ' \
-                               'not allow for Resource Group creation.\nA ' \
-                               'Resource Group is necessary to manage ' \
-                               'resources in Azure. You must either ' \
-                               'provide an existing Resource Group as part ' \
-                               'of the configuration, or elevate the ' \
-                               'associated role.\nFor more information on ' \
-                               'roles, see: https://docs.microsoft.com/' \
-                               'en-us/azure/role-based-access-control/' \
-                               'overview\n' % cloud_error2
-                        raise ProviderConnectionException(mess)
-                    else:
-                        raise cloud_error2
-
-            else:
-                raise cloud_error
+        except ResourceNotFoundError:
+            resource_group_params = {'location': self.region_name}
+            try:
+                self._azure_client.\
+                    create_resource_group(self.resource_group,
+                                          resource_group_params)
+            except HttpResponseError as cloud_error2:  # pragma: no cover
+                if getattr(cloud_error2, 'error', None) and \
+                        cloud_error2.error.code == "AuthorizationFailed":
+                    mess = 'The following error was returned by Azure:\n' \
+                           '%s\n\nThis is likely because the Role' \
+                           'associated with the given credentials does ' \
+                           'not allow for Resource Group creation.\nA ' \
+                           'Resource Group is necessary to manage ' \
+                           'resources in Azure. You must either ' \
+                           'provide an existing Resource Group as part ' \
+                           'of the configuration, or elevate the ' \
+                           'associated role.\nFor more information on ' \
+                           'roles, see: https://docs.microsoft.com/' \
+                           'en-us/azure/role-based-access-control/' \
+                           'overview\n' % cloud_error2
+                    raise ProviderConnectionException(mess)
+                else:
+                    raise cloud_error2
 
         """
         Verify that resource group used for network exists,
@@ -188,8 +186,5 @@ class AzureCloudProvider(BaseCloudProvider):
         """
         try:
             self._azure_client.get_resource_group(self.networking_resource_group)
-        except CloudError as cloud_error:
-            if cloud_error.error.error == "ResourceGroupNotFound":
-                self.networking_resource_group = self.resource_group
-            else:
-                raise cloud_error
+        except ResourceNotFoundError:
+            self.networking_resource_group = self.resource_group

+ 30 - 30
cloudbridge/providers/azure/resources.py

@@ -5,7 +5,7 @@ import collections
 import io
 import logging
 
-import pysftp
+import paramiko
 from cloudbridge.base.resources import (BaseAttachmentInfo, BaseBucket,
                                         BaseBucketObject, BaseFloatingIP,
                                         BaseInstance, BaseInternetGateway,
@@ -23,6 +23,8 @@ from cloudbridge.interfaces.resources import (Instance, MachineImageState,
 
 from azure.common import AzureException
 from azure.core.exceptions import ResourceNotFoundError
+from azure.mgmt.compute.models import (DataDisk, ManagedDiskParameters,
+                                       SubResource as ComputeSubResource)
 from azure.mgmt.devtestlabs.models import GalleryImageReference
 from azure.mgmt.network.models import NetworkSecurityGroup
 
@@ -427,14 +429,12 @@ class AzureVolume(BaseVolume):
             Instance) else instance
         vm = self._provider.azure_client.get_vm(instance_id)
 
-        vm.storage_profile.data_disks.append({
-            'lun': len(vm.storage_profile.data_disks),
-            'name': self._volume.name,
-            'create_option': 'attach',
-            'managed_disk': {
-                'id': self.resource_id
-            }
-        })
+        vm.storage_profile.data_disks.append(DataDisk(
+            lun=len(vm.storage_profile.data_disks),
+            name=self._volume.name,
+            create_option='attach',
+            managed_disk=ManagedDiskParameters(id=self.resource_id)
+        ))
         self._provider.azure_client.update_vm(instance_id, vm)
 
     def detach(self, force=False):
@@ -609,7 +609,7 @@ class AzureMachineImage(BaseMachineImage):
         if self.is_gallery_image:
             return azure_helpers.generate_urn(self._image)
         else:
-            return self._image.id
+            return azure_helpers.normalize_rg_case(self._image.id)
 
     @property
     def name(self):
@@ -766,7 +766,7 @@ class AzureNetwork(BaseNetwork):
         self.assert_valid_resource_label(value)
         self._network.tags.update(Label=value or "")
         self._provider.azure_client. \
-            update_network_tags(self.id, self._network)
+            update_network_tags(self.id, self._network.tags)
 
     @property
     def external(self):
@@ -964,7 +964,7 @@ class AzureSubnet(BaseSubnet):
         kwargs = {self.tag_name: value or ""}
         az_network.tags.update(**kwargs)
         self._provider.azure_client.update_network_tags(
-            az_network.id, az_network)
+            az_network.id, az_network.tags)
 
     @property
     def tag_name(self):
@@ -1090,7 +1090,7 @@ class AzureInstance(BaseInstance):
         self.assert_valid_resource_label(value)
         self._vm.tags.update(Label=value or "")
         self._provider.azure_client. \
-            update_vm_tags(self.id, self._vm)
+            update_vm_tags(self.id, self._vm.tags)
 
     @property
     def public_ips(self):
@@ -1205,7 +1205,7 @@ class AzureInstance(BaseInstance):
         chines/linux/capture-image. In azure, we need to deprovision the VM
         before capturing.
         To deprovision, login to the VM and execute the `waagent deprovision`
-        command. To do this programmatically, use pysftp to ssh into the VM
+        command. To do this programmatically, use paramiko to ssh into the VM
         and executing deprovision command. To SSH into the VM programmatically
         however, we need to pass private key file path, so we have modified the
         CloudBridge interface to pass the private key file path
@@ -1225,9 +1225,7 @@ class AzureInstance(BaseInstance):
 
         create_params = {
             'location': self._provider.region_name,
-            'source_virtual_machine': {
-                'id': self.resource_id
-            },
+            'source_virtual_machine': ComputeSubResource(id=self.resource_id),
             'tags': {'Label': label}
         }
 
@@ -1236,16 +1234,18 @@ class AzureInstance(BaseInstance):
         return AzureMachineImage(self._provider, image)
 
     def _deprovision(self, private_key_path):
-        cnopts = pysftp.CnOpts()
-        cnopts.hostkeys = None
-        if private_key_path:
-            with pysftp.\
-                    Connection(self.public_ips[0],
-                               username=self._provider.vm_default_user_name,
-                               cnopts=cnopts,
-                               private_key=private_key_path) as sftp:
-                sftp.execute('sudo waagent -deprovision -force')
-                sftp.close()
+        if not private_key_path:
+            return
+        client = paramiko.SSHClient()
+        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        try:
+            client.connect(
+                hostname=self.public_ips[0],
+                username=self._provider.vm_default_user_name,
+                key_filename=private_key_path)
+            client.exec_command('sudo waagent -deprovision -force')
+        finally:
+            client.close()
 
     def add_floating_ip(self, floating_ip):
         """
@@ -1432,11 +1432,11 @@ class AzureKeyPair(BaseKeyPair):
 
     @property
     def id(self):
-        return self._key_pair.Name
+        return self._key_pair['Name']
 
     @property
     def name(self):
-        return self._key_pair.Name
+        return self._key_pair['Name']
 
 
 class AzureRouter(BaseRouter):
@@ -1477,7 +1477,7 @@ class AzureRouter(BaseRouter):
         self._route_table.tags.update(Label=value or "")
         self._provider.azure_client. \
             update_route_table_tags(self._route_table.name,
-                                    self._route_table)
+                                    self._route_table.tags)
 
     def refresh(self):
         self._route_table = self._provider.azure_client. \

+ 111 - 100
cloudbridge/providers/azure/services.py

@@ -27,7 +27,18 @@ from cloudbridge.interfaces.resources import (MachineImage, Network, Snapshot,
                                               VMType, Volume)
 from azure.core.exceptions import ResourceNotFoundError
 
-from azure.mgmt.compute.models import DiskCreateOption
+from azure.mgmt.compute.models import (CreationData, DataDisk,
+                                       DiskCreateOption, HardwareProfile,
+                                       ImageReference, LinuxConfiguration,
+                                       ManagedDiskParameters,
+                                       NetworkInterfaceReference,
+                                       NetworkProfile, OSDisk, OSProfile,
+                                       SshConfiguration, SshPublicKey,
+                                       StorageProfile)
+from azure.mgmt.network.models import (AddressSpace,
+                                       NetworkInterfaceIPConfiguration,
+                                       PublicIPAddressSku,
+                                       PublicIPAddressSkuName, SubResource)
 
 from .resources import (AzureBucket, AzureBucketObject, AzureFloatingIP,
                         AzureInstance, AzureInternetGateway, AzureKeyPair,
@@ -387,10 +398,10 @@ class AzureVolumeService(BaseVolumeService):
         if snapshot:
             params = {
                 'location': zone_name,
-                'creation_data': {
-                    'create_option': DiskCreateOption.copy,
-                    'source_uri': snapshot.resource_id
-                },
+                'creation_data': CreationData(
+                    create_option=DiskCreateOption.copy,
+                    source_uri=snapshot.resource_id,
+                ),
                 'tags': tags
             }
 
@@ -401,9 +412,9 @@ class AzureVolumeService(BaseVolumeService):
             params = {
                 'location': zone_name,
                 'disk_size_gb': size,
-                'creation_data': {
-                    'create_option': DiskCreateOption.empty
-                },
+                'creation_data': CreationData(
+                    create_option=DiskCreateOption.empty,
+                ),
                 'tags': tags
             }
 
@@ -689,56 +700,52 @@ class AzureInstanceService(BaseInstanceService):
         if image.is_gallery_image:
             # pylint:disable=protected-access
             reference = image._image.as_dict()
-            image_ref = {
-                'publisher': reference['publisher'],
-                'offer': reference['offer'],
-                'sku': reference['sku'],
-                'version': reference['version']
-            }
+            image_ref = ImageReference(
+                publisher=reference['publisher'],
+                offer=reference['offer'],
+                sku=reference['sku'],
+                version=reference['version'],
+            )
         else:
-            image_ref = {
-                'id': image.resource_id
-            }
+            image_ref = ImageReference(id=image.resource_id)
 
-        storage_profile = {
-            'image_reference': image_ref,
-            "os_disk": {
-                "name": instance_name + '_os_disk',
-                "create_option": DiskCreateOption.from_image
-            },
-        }
+        os_disk = OSDisk(
+            name=instance_name + '_os_disk',
+            create_option=DiskCreateOption.from_image,
+        )
 
+        data_disks = None
         if launch_config:
             data_disks, root_disk_size = self._process_block_device_mappings(
                 launch_config)
-            if data_disks:
-                storage_profile['data_disks'] = data_disks
             if root_disk_size:
-                storage_profile['os_disk']['disk_size_gb'] = root_disk_size
+                os_disk.disk_size_gb = root_disk_size
 
-        return storage_profile
+        return StorageProfile(
+            image_reference=image_ref,
+            os_disk=os_disk,
+            data_disks=data_disks or None,
+        )
 
     def _process_block_device_mappings(self, launch_config):
         """
         Processes block device mapping information
-        and returns a Data disk dictionary list. If new volumes
+        and returns a DataDisk model list. If new volumes
         are requested (source is None and destination is VOLUME), they will be
         created and the relevant volume ids included in the mapping.
         """
         data_disks = []
         root_disk_size = None
 
-        def append_disk(disk_def, device_no, delete_on_terminate):
-            # In azure, there is no option to specify terminate disks
-            # (similar to AWS delete_on_terminate) on VM delete.
-            # This method uses the azure tags functionality to store
-            # the  delete_on_terminate option when the virtual machine
-            # is deleted, we parse the tags and delete accordingly
-            disk_def['lun'] = device_no
-            disk_def['tags'] = {
-                'delete_on_terminate': delete_on_terminate
-            }
-            data_disks.append(disk_def)
+        def append_disk(disk_kwargs, device_no, delete_on_terminate,
+                        managed_disk_id=None):
+            # Azure has no direct equivalent of AWS' delete_on_terminate, so
+            # the cleanup tag is recorded on the parent VM later; we just
+            # carry the flag (and the disk id) alongside the DataDisk model.
+            disk = DataDisk(lun=device_no, **disk_kwargs)
+            disk._cb_delete_on_terminate = delete_on_terminate
+            disk._cb_managed_disk_id = managed_disk_id
+            data_disks.append(disk)
 
         for device_no, device in enumerate(launch_config.block_devices):
             if device.is_volume:
@@ -749,37 +756,42 @@ class AzureInstanceService(BaseInstanceService):
                     # we are ignoring the root disk, if specified
                     if isinstance(device.source, Snapshot):
                         snapshot_vol = device.source.create_volume()
-                        disk_def = {
+                        disk_kwargs = {
                             # pylint:disable=protected-access
                             'name': snapshot_vol._volume.name,
                             'create_option': DiskCreateOption.attach,
-                            'managed_disk': {
-                                'id': snapshot_vol.id
-                            }
+                            'managed_disk': ManagedDiskParameters(
+                                id=snapshot_vol.id),
                         }
+                        append_disk(disk_kwargs, device_no,
+                                    device.delete_on_terminate,
+                                    managed_disk_id=snapshot_vol.id)
+                        continue
                     elif isinstance(device.source, Volume):
-                        disk_def = {
+                        disk_kwargs = {
                             # pylint:disable=protected-access
                             'name': device.source._volume.name,
                             'create_option': DiskCreateOption.attach,
-                            'managed_disk': {
-                                'id': device.source.id
-                            }
+                            'managed_disk': ManagedDiskParameters(
+                                id=device.source.id),
                         }
+                        append_disk(disk_kwargs, device_no,
+                                    device.delete_on_terminate,
+                                    managed_disk_id=device.source.id)
+                        continue
                     elif isinstance(device.source, MachineImage):
-                        disk_def = {
+                        disk_kwargs = {
                             # pylint:disable=protected-access
                             'name': device.source._volume.name,
                             'create_option': DiskCreateOption.from_image,
-                            'source_resource_id': device.source.id
+                            'source_resource_id': device.source.id,
                         }
                     else:
-                        disk_def = {
-                            # pylint:disable=protected-access
+                        disk_kwargs = {
                             'create_option': DiskCreateOption.empty,
-                            'disk_size_gb': device.size
+                            'disk_size_gb': device.size,
                         }
-                    append_disk(disk_def, device_no,
+                    append_disk(disk_kwargs, device_no,
                                 device.delete_on_terminate)
             else:  # device is ephemeral
                 # in azure we cannot add the ephemeral disks explicitly
@@ -825,19 +837,16 @@ class AzureInstanceService(BaseInstanceService):
 
         nic_params = {
             'location': self.provider.region_name,
-            'ip_configurations': [{
-                'name': instance_name + '_ip_config',
-                'private_ip_allocation_method': 'Dynamic',
-                'subnet': {
-                    'id': subnet_id
-                }
-            }]
+            'ip_configurations': [NetworkInterfaceIPConfiguration(
+                name=instance_name + '_ip_config',
+                private_ip_allocation_method='Dynamic',
+                subnet=SubResource(id=subnet_id),
+            )],
         }
 
         if vm_firewall_id:
-            nic_params['network_security_group'] = {
-                'id': vm_firewall_id
-            }
+            nic_params['network_security_group'] = SubResource(
+                id=vm_firewall_id)
         nic_info = self.provider.azure_client.create_nic(
             instance_name + '_nic',
             nic_params
@@ -865,41 +874,41 @@ class AzureInstanceService(BaseInstanceService):
                 name=temp_kp_name)
             temp_key_pair = key_pair
 
+        os_profile = OSProfile(
+            admin_username=self.provider.vm_default_user_name,
+            computer_name=instance_name,
+            linux_configuration=LinuxConfiguration(
+                disable_password_authentication=True,
+                ssh=SshConfiguration(public_keys=[SshPublicKey(
+                    path="/home/{}/.ssh/authorized_keys".format(
+                        self.provider.vm_default_user_name),
+                    key_data=key_pair._key_pair['Key'],
+                )]),
+            ),
+        )
+
+        tags = {'Label': label}
+        # Surface each data disk's delete-on-terminate flag onto the parent
+        # VM tags so the VM-delete path can later clean them up.
+        for disk in (storage_profile.data_disks or []):
+            tags['delete_on_terminate'] = getattr(
+                disk, '_cb_delete_on_terminate', False)
+
         params = {
             'location': zone_id,
-            'os_profile': {
-                'admin_username': self.provider.vm_default_user_name,
-                'computer_name': instance_name,
-                'linux_configuration': {
-                    "disable_password_authentication": True,
-                    "ssh": {
-                        "public_keys": [{
-                            "path":
-                                "/home/{}/.ssh/authorized_keys".format(
-                                        self.provider.vm_default_user_name),
-                                "key_data": key_pair._key_pair.Key
-                        }]
-                    }
-                }
-            },
-            'hardware_profile': {
-                'vm_size': instance_size
-            },
-            'network_profile': {
-                'network_interfaces': [{
-                    'id': nic_info.id
-                }]
-            },
+            'os_profile': os_profile,
+            'hardware_profile': HardwareProfile(vm_size=instance_size),
+            'network_profile': NetworkProfile(
+                network_interfaces=[NetworkInterfaceReference(
+                    id=nic_info.id)],
+            ),
             'storage_profile': storage_profile,
-            'tags': {'Label': label}
+            'tags': tags,
         }
 
-        for disk_def in storage_profile.get('data_disks', []):
-            params['tags'] = dict(disk_def.get('tags', {}), **params['tags'])
-
         if user_data:
             custom_data = base64.b64encode(bytes(ud, 'utf-8'))
-            params['os_profile']['custom_data'] = str(custom_data, 'utf-8')
+            params['os_profile'].custom_data = str(custom_data, 'utf-8')
 
         if not temp_key_pair:
             params['tags'].update(Key_Pair=key_pair.id)
@@ -909,9 +918,9 @@ class AzureInstanceService(BaseInstanceService):
         except Exception as e:
             # If VM creation fails, attempt to clean up intermediary resources
             self.provider.azure_client.delete_nic(nic_info.id)
-            for disk_def in storage_profile.get('data_disks', []):
-                if disk_def.get('tags', {}).get('delete_on_terminate'):
-                    disk_id = disk_def.get('managed_disk', {}).get('id')
+            for disk in (storage_profile.data_disks or []):
+                if getattr(disk, '_cb_delete_on_terminate', False):
+                    disk_id = getattr(disk, '_cb_managed_disk_id', None)
                     if disk_id:
                         vol = self.provider.storage.volumes.get(disk_id)
                         vol.delete()
@@ -1108,9 +1117,8 @@ class AzureNetworkService(BaseNetworkService):
         AzureNetwork.assert_valid_resource_label(label)
         params = {
             'location': self.provider.azure_client.region_name,
-            'address_space': {
-                'address_prefixes': [cidr_block]
-            },
+            'address_space': AddressSpace(
+                address_prefixes=[cidr_block]),
             'tags': {'Label': label}
         }
 
@@ -1225,7 +1233,7 @@ class AzureSubnetService(BaseSubnetService):
             az_network = self.provider.azure_client.get_network(net_id)
             az_network.tags.pop(sn.tag_name)
             self.provider.azure_client.update_network_tags(
-                az_network.id, az_network)
+                az_network.id, az_network.tags)
 
 
 class AzureRouterService(BaseRouterService):
@@ -1348,9 +1356,12 @@ class AzureFloatingIPService(BaseFloatingIPService):
     @dispatch(event="provider.networking.floating_ips.create",
               priority=BaseFloatingIPService.STANDARD_EVENT_PRIORITY)
     def create(self, gateway):
+        # Basic-SKU public IPs are retired in most Azure subscriptions
+        # (quota=0). Standard SKU requires Static allocation.
         public_ip_parameters = {
             'location': self.provider.azure_client.region_name,
-            'public_ip_allocation_method': 'Static'
+            'public_ip_allocation_method': 'Static',
+            'sku': PublicIPAddressSku(name=PublicIPAddressSkuName.STANDARD),
         }
 
         public_ip_name = AzureFloatingIP._generate_name_from_label(

+ 11 - 2
cloudbridge/providers/gcp/provider.py

@@ -126,6 +126,12 @@ class GCPResources(object):
             method = methods['get']
             parameters = method['parameterOrder']
 
+            # Skip resources using gRPC reserved-character placeholders
+            # ({+param}); string.Template can't handle them and cloudbridge
+            # does not address these resources.
+            if '{+' in method['path']:
+                continue
+
             # We would like to change a path like
             # {project}/regions/{region}/addresses/{address} to a pattern like
             # (PROJECT REGEX)/regions/(REGION REGEX)/addresses/(ADDRESS REGEX).
@@ -409,8 +415,11 @@ class GCPCloudProvider(BaseCloudProvider):
         try:
             return resource_url.get_resource()
         except googleapiclient.errors.HttpError as http_error:
-            if http_error.resp.status in [404]:
-                # 404 = not found
+            # 404: resource does not exist.
+            # 400: identifier failed server-side validation (e.g. a name
+            # whose format cannot match any real resource). Either way the
+            # answer to "fetch this resource" is no such resource.
+            if http_error.resp.status in (400, 404):
                 return None
             else:
                 raise

+ 4 - 12
cloudbridge/providers/mock/provider.py

@@ -6,9 +6,7 @@
     boto being hijacked, which will cause AWS to malfunction.
     See notes below.
 """
-from moto import mock_ec2
-from moto import mock_route53
-from moto import mock_s3
+from moto import mock_aws
 
 from ..aws import AWSCloudProvider
 from ...interfaces.provider import TestMockHelperMixin
@@ -32,17 +30,11 @@ class MockAWSCloudProvider(AWSCloudProvider, TestMockHelperMixin):
         """
         Let Moto take over all socket communications
         """
-        self.ec2mock = mock_ec2()
-        self.ec2mock.start()
-        self.s3mock = mock_s3()
-        self.s3mock.start()
-        self.route53mock = mock_route53()
-        self.route53mock.start()
+        self.mock_aws = mock_aws()
+        self.mock_aws.start()
 
     def tearDownMock(self):
         """
         Stop Moto intercepting all socket communications
         """
-        self.s3mock.stop()
-        self.ec2mock.stop()
-        self.route53mock.stop()
+        self.mock_aws.stop()

+ 69 - 17
cloudbridge/providers/openstack/resources.py

@@ -215,6 +215,7 @@ class OpenStackVMType(BaseVMType):
     def __init__(self, provider, os_flavor):
         super(OpenStackVMType, self).__init__(provider)
         self._os_flavor = os_flavor
+        self._extra_data = None
 
     @property
     def id(self):
@@ -254,11 +255,17 @@ class OpenStackVMType(BaseVMType):
 
     @property
     def extra_data(self):
-        extras = self._os_flavor.get_keys()
-        extras['rxtx_factor'] = self._os_flavor.rxtx_factor
-        extras['swap'] = self._os_flavor.swap
-        extras['is_public'] = self._os_flavor.is_public
-        return extras
+        # get_keys() hits Nova's /flavors/<id>/os-extra_specs endpoint.
+        # Cache the result so repeat property accesses (and family, which
+        # delegates here) don't fan out to N concurrent API calls under
+        # pytest-xdist load.
+        if self._extra_data is None:
+            extras = self._os_flavor.get_keys()
+            extras['rxtx_factor'] = self._os_flavor.rxtx_factor
+            extras['swap'] = self._os_flavor.swap
+            extras['is_public'] = self._os_flavor.is_public
+            self._extra_data = extras
+        return self._extra_data
 
 
 class OpenStackInstance(BaseInstance):
@@ -323,6 +330,36 @@ class OpenStackInstance(BaseInstance):
         self._os_instance.name = value
         self._os_instance.update(name=value or "cb-inst")
 
+    def _all_addresses(self):
+        """All IP addresses associated with this instance.
+
+        Combines the addresses Nova reports (via server.addresses /
+        ``_os_instance.networks``, populated from Nova's info_cache) with
+        any floating IPs Neutron currently has bound to the instance's
+        ports. Nova's info_cache is refreshed by a periodic task on a
+        ~60s cadence and is not re-synced on a plain server-show, so a
+        FIP attached via the Neutron API (as add_floating_ip does)
+        otherwise wouldn't show up until the next sync.
+        """
+        addrs = set()
+        for _, network_addrs in self._os_instance.networks.items():
+            for address in network_addrs:
+                addrs.add(address)
+        # Query Neutron for any floating IPs bound to this instance's
+        # ports — these may not yet be reflected in Nova's cached view.
+        try:
+            for port in self._provider.os_conn.network.ports(
+                    device_id=self.id):
+                for fip in self._provider.os_conn.network.ips(
+                        port_id=port.id):
+                    if fip.floating_ip_address:
+                        addrs.add(fip.floating_ip_address)
+        except Exception as e:
+            log.debug(
+                "Could not enumerate floating IPs for instance %s: %s",
+                self.id, e)
+        return addrs
+
     @property
     def public_ips(self):
         """
@@ -332,20 +369,16 @@ class OpenStackInstance(BaseInstance):
         # public or private, since the returned IPs are grouped by an arbitrary
         # network label. Therefore, it's necessary to parse the address and
         # determine whether it's public or private
-        return [address
-                for _, addresses in self._os_instance.networks.items()
-                for address in addresses
-                if not ipaddress.ip_address(address).is_private]
+        return [a for a in self._all_addresses()
+                if not ipaddress.ip_address(a).is_private]
 
     @property
     def private_ips(self):
         """
         Get all the private IP addresses for this instance.
         """
-        return [address
-                for _, addresses in self._os_instance.networks.items()
-                for address in addresses
-                if ipaddress.ip_address(address).is_private]
+        return [a for a in self._all_addresses()
+                if ipaddress.ip_address(a).is_private]
 
     @property
     def vm_type_id(self):
@@ -460,25 +493,44 @@ class OpenStackInstance(BaseInstance):
         """Get a floating IP object based on the supplied ID."""
         return self._provider.networking._floating_ips.get(None, floating_ip)
 
+    def _primary_port(self):
+        """Return the first Neutron port on this instance, or None."""
+        # pylint:disable=protected-access
+        return next(
+            iter(self._provider.os_conn.network.ports(device_id=self.id)),
+            None)
+
     def add_floating_ip(self, floating_ip):
         """
         Add a floating IP address to this instance.
+
+        Nova's add_floating_ip server action was removed in microversion
+        2.44 (Pike). The supported path is to set the FIP's port_id to
+        one of the server's Neutron ports.
         """
         log.debug("Adding floating IP adress: %s", floating_ip)
         fip = (floating_ip if isinstance(floating_ip, OpenStackFloatingIP)
                else self._get_fip(floating_ip))
-        self._provider.os_conn.compute.add_floating_ip_to_server(
-            self.id, fip.public_ip)
+        port = self._primary_port()
+        if not port:
+            raise Exception(
+                "Cannot add floating IP: instance {0} has no network port"
+                .format(self.id))
+        # pylint:disable=protected-access
+        self._provider.os_conn.network.update_ip(fip._ip, port_id=port.id)
 
     def remove_floating_ip(self, floating_ip):
         """
         Remove a floating IP address from this instance.
+
+        Same rationale as add_floating_ip; the Nova action endpoint is
+        gone, so detach by clearing port_id on the Neutron FIP.
         """
         log.debug("Removing floating IP adress: %s", floating_ip)
         fip = (floating_ip if isinstance(floating_ip, OpenStackFloatingIP)
                else self._get_fip(floating_ip))
-        self._provider.os_conn.compute.remove_floating_ip_from_server(
-            self.id, fip.public_ip)
+        # pylint:disable=protected-access
+        self._provider.os_conn.network.update_ip(fip._ip, port_id=None)
 
     def add_vm_firewall(self, firewall):
         """

+ 15 - 5
cloudbridge/providers/openstack/services.py

@@ -419,12 +419,22 @@ class OpenStackVolumeService(BaseVolumeService):
     @dispatch(event="provider.storage.volumes.list",
               priority=BaseVolumeService.STANDARD_EVENT_PRIORITY)
     def list(self, limit=None, marker=None):
-        cb_vols = [
-            OpenStackVolume(self.provider, vol)
-            for vol in self.provider.os_conn.block_storage.volumes(
+        try:
+            os_vols = list(self.provider.os_conn.block_storage.volumes(
                 limit=oshelpers.os_result_limit(self.provider, limit),
-                marker=marker)
-            if vol.availability_zone == self.provider.service_zone_name(self)]
+                marker=marker))
+        except NotFoundException:
+            # Cinder returns 404 when the supplied pagination marker
+            # refers to a volume that has since been deleted (e.g.,
+            # between the time a caller saw the volume in page N and
+            # asked for page N+1, or when a concurrent test deletes it).
+            # Fall back to a fresh listing.
+            if marker is None:
+                raise
+            os_vols = list(self.provider.os_conn.block_storage.volumes(
+                limit=oshelpers.os_result_limit(self.provider, limit)))
+        cb_vols = [OpenStackVolume(self.provider, vol) for vol in os_vols
+                   if vol.availability_zone == self.provider.service_zone_name(self)]
         return oshelpers.to_server_paged_list(self.provider, cb_vols, limit)
 
     @dispatch(event="provider.storage.volumes.create",

+ 1 - 0
docs/topics/contributor_guide.rst

@@ -10,6 +10,7 @@ CloudBridge Provider.
     Design Goals <design_goals.rst>
     Design Decisions <design_decisions.rst>
     Testing <testing.rst>
+    Pull Request CI <pull_request_ci.rst>
     Provider Development Walkthrough <provider_development.rst>
     Release Process <release_process.rst>
 

+ 92 - 0
docs/topics/pull_request_ci.rst

@@ -0,0 +1,92 @@
+Pull request CI and security gates
+==================================
+Two CI workflows run on pull requests, with different trust levels:
+
+.. list-table::
+   :header-rows: 1
+   :widths: 25 25 20 30
+
+   * - Workflow
+     - Trigger
+     - Runs on forks?
+     - Secrets exposed
+   * - ``Lint and mock tests`` (``integration.yaml``)
+     - every PR / push
+     - yes, always
+     - none
+   * - ``Cloud integration tests`` (``integration-cloud.yaml``)
+     - ``safe-to-test`` label, push to ``main``, manual dispatch
+     - only after maintainer approval
+     - AWS / Azure / GCP / OpenStack
+
+Fork PRs always get lint + mock feedback automatically. Cloud integration
+tests are gated behind maintainer review because they run untrusted PR code
+with access to live cloud credentials.
+
+
+For maintainers: applying ``safe-to-test``
+------------------------------------------
+Cloud integration runs against PRs are gated by the ``safe-to-test`` label.
+**Adding this label is equivalent to authorising arbitrary code execution
+against our cloud accounts.** Before applying it on a PR from a fork, audit
+the diff for anything that runs at install or test time:
+
+* ``setup.py`` / ``setup.cfg`` / ``pyproject.toml`` — any new ``cmdclass``,
+  ``entry_points``, post-install hooks, or ``extras_require`` entries that
+  pull in unfamiliar packages?
+* ``tox.ini`` — any new env definitions, ``commands`` overrides, or
+  ``setenv`` injections?
+* ``conftest.py`` and any ``__init__.py`` under ``tests/`` — these run on
+  pytest startup before fixtures even decide to run.
+* New files anywhere under ``.github/`` — workflow tampering.
+* Any new test that does outbound network IO outside the expected cloud
+  APIs (e.g., raw ``requests.post`` to an arbitrary URL).
+* Any change under ``cloudbridge/`` that calls ``subprocess``, ``os.system``,
+  ``eval``, ``exec``, or writes to disk outside the test working tree.
+
+After labeling, the workflow queues and stops at the ``cloud-integration``
+environment gate — you will get a second prompt to approve the actual run.
+Treat that as a sanity check, not the primary defence; the label was the
+real authorisation moment.
+
+If the PR is updated after labeling, the label is automatically removed by
+``pr-label-strip.yaml``. To re-test, re-audit the new diff before
+re-applying.
+
+
+One-time repo setup
+-------------------
+If you are setting up CI from scratch on a fork or new repo, these one-time
+configurations are required:
+
+
+``safe-to-test`` label
+~~~~~~~~~~~~~~~~~~~~~~
+Create the label in the repo's Issues → Labels page. Any colour. Restrict
+label management to maintainers (default for org-owned repos).
+
+
+``cloud-integration`` environment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. Repo Settings → Environments → New environment → name it
+   ``cloud-integration``.
+2. Under **Required reviewers**, add the maintainer team or users who are
+   allowed to approve cloud-integration runs.
+3. Under **Deployment branches**, select **Selected branches** and add
+   ``main`` (the workflow only ever runs from base context).
+4. The environment does not need any environment-scoped secrets — all
+   secrets live at the repo level.
+
+
+AWS OIDC role
+~~~~~~~~~~~~~
+See ``.github/aws/setup.sh``. The role's trust policy
+(``.github/aws/trust-policy.json``) accepts two sub claims:
+
+* ``repo:CloudVE/cloudbridge:ref:refs/heads/main`` — push-to-main runs.
+* ``repo:CloudVE/cloudbridge:environment:cloud-integration`` — PR runs that
+  reached the protected environment. Fork PRs that do not reach the
+  environment cannot assume this role.
+
+The repo secret ``AWS_OIDC_ROLE_ARN`` must be set to the role ARN printed by
+``setup.sh``.

+ 27 - 17
setup.py

@@ -31,34 +31,44 @@ REQS_AWS = [
 # below are compatible with each other. List individual libraries instead
 # of using the azure umbrella package to speed up installation.
 REQS_AZURE = [
-    'msrestazure<1.0.0',
-    'azure-identity<2.0.0',
-    'azure-common<2.0.0',
-    'azure-mgmt-devtestlabs<10.0.0',
-    'azure-mgmt-resource<22.0.0',
-    'azure-mgmt-compute>=27.2.0,<28.0.0',
-    'azure-mgmt-network<22.0.0',
-    'azure-mgmt-storage<21.0.0',
-    'azure-storage-blob<13.0.0',
-    'azure-cosmosdb-table<2.0.0',
-    'pysftp<1.0.0'
+    # Minimums match SDK generation tested against the model-class
+    # serialization fixes in cloudbridge/providers/azure/. Older SDKs may
+    # work but are not covered by integration tests.
+    'azure-identity>=1.20.0,<2.0.0',
+    'azure-common>=1.1.28,<2.0.0',
+    'azure-core>=1.30.0,<2.0.0',
+    'azure-mgmt-devtestlabs>=9.0.0,<10.0.0',
+    'azure-mgmt-resource>=23.0.0,<26.0.0',
+    'azure-mgmt-subscription>=3.0.0,<4.0.0',
+    'azure-mgmt-compute>=34.0.0,<39.0.0',
+    'azure-mgmt-network>=28.0.0,<31.0.0',
+    'azure-mgmt-storage>=22.0.0,<25.0.0',
+    'azure-storage-blob>=12.20.0,<13.0.0',
+    'azure-data-tables>=12.4.0,<13.0.0',
+    'paramiko<6.0.0'
 ]
 REQS_GCP = [
     'google-api-python-client>=2.0,<3.0.0'
 ]
 REQS_OPENSTACK = [
-    'openstacksdk>=0.12.0,<1.0.0',
-    'python-novaclient>=7.0.0,<19.0',
-    'python-swiftclient>=3.2.0,<5.0',
-    'python-neutronclient>=6.0.0,<9.0',
-    'python-keystoneclient>=3.13.0,<6.0'
+    # Minimums match SDK generation tested against the OpenStack
+    # provider fixes in cloudbridge/providers/openstack/. The previous
+    # floors were circa-2018 and exposed Nova/Neutron APIs (e.g. the
+    # add_floating_ip_to_server action) that are gone from any modern
+    # OpenStack deployment.
+    'openstacksdk>=3.0.0,<5.0.0',
+    'python-novaclient>=17.0.0,<20.0',
+    'python-swiftclient>=4.0.0,<5.0',
+    'python-neutronclient>=11.0.0,<13.0',
+    'python-keystoneclient>=4.0.0,<7.0'
 ]
 REQS_FULL = REQS_AWS + REQS_GCP + REQS_OPENSTACK + REQS_AZURE
 # httpretty is required with/for moto 1.0.0 or AWS tests fail
 REQS_DEV = ([
     'tox>=4.0.0',
     'pytest',
-    'moto>=3.1.18',
+    'moto[ec2,s3]>=5.0.0',
+    'packaging',
     'sphinx>=1.3.1',
     'pydevd',
     'flake8>=3.3.0',

+ 32 - 2
tests/helpers/__init__.py

@@ -44,6 +44,36 @@ def skipIfNoService(services):
     return wrap
 
 
+def skipIfMockMotoVersion(spec, reason):
+    """
+    A decorator for skipping tests when running against the mock
+    provider and the installed moto version matches `spec` (a
+    PEP 440 specifier string such as ">=5.0,<5.3"). Other providers
+    (aws / azure / gcp / openstack) always run the test, and the mock
+    provider runs it normally when moto sits outside the broken range.
+    """
+    def wrap(func):
+        @functools.wraps(func)
+        def wrapper(self, *args, **kwargs):
+            provider = getattr(self, 'provider', None)
+            if provider and getattr(provider, 'PROVIDER_ID', None) == 'mock':
+                from importlib.metadata import PackageNotFoundError
+                from importlib.metadata import version as get_version
+                from packaging.specifiers import SpecifierSet
+                from packaging.version import Version
+                try:
+                    moto_version = Version(get_version('moto'))
+                except PackageNotFoundError:
+                    moto_version = None
+                if moto_version and moto_version in SpecifierSet(spec):
+                    self.skipTest(
+                        "Skipping for mock/moto %s matching %s: %s"
+                        % (moto_version, spec, reason))
+            func(self, *args, **kwargs)
+        return wrapper
+    return wrap
+
+
 def skipIfPython(op, major, minor):
     """
     A decorator for skipping tests if the python
@@ -104,8 +134,8 @@ TEST_DATA_CONFIG = {
     "AzureCloudProvider": {
         "image":
             cb_helpers.get_env('CB_IMAGE_AZURE',
-                               'Canonical:UbuntuServer:16.04.0-LTS:latest'),
-        "vm_type": cb_helpers.get_env('CB_VM_TYPE_AZURE', 'Standard_A2_v2'),
+                               'Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts-gen2:latest'),
+        "vm_type": cb_helpers.get_env('CB_VM_TYPE_AZURE', 'Standard_DC1ds_v3'),
         "placement": cb_helpers.get_env('CB_PLACEMENT_AZURE', 'eastus'),
         "placement_cfg_key": "azure_zone_name"
     }

+ 9 - 0
tests/test_compute_service.py

@@ -33,6 +33,15 @@ class CloudComputeServiceTestCase(ProviderTestBase):
                                      self.provider.compute.instances.
                                      _service_event_pattern))
 
+    # moto 5.x regression: DescribeInstances returns an empty list
+    # immediately after RunInstances completes, so the list-after-create
+    # check in standard_interface_tests.check_list fails. A secondary
+    # symptom shows in cleanup, where post-delete state remains
+    # "deleted" instead of becoming UNKNOWN. Last observed on moto
+    # 5.2.1. Tighten the specifier when an upstream fix lands.
+    @helpers.skipIfMockMotoVersion(
+        ">=5.0.0",
+        "moto 5.x RunInstances/DescribeInstances state-sync bug")
     @helpers.skipIfNoService(['compute.instances', 'networking.networks'])
     def test_crud_instance(self):
         label = "cb-instcrud-{0}".format(helpers.get_uuid())

+ 7 - 2
tox.ini

@@ -6,11 +6,16 @@
 # running the tests.
 
 [tox]
-envlist = {py3.10,pypy}-{aws,azure,gcp,openstack,mock},lint
+envlist = {py3.13,pypy}-{aws,azure,gcp,openstack,mock},lint
 
 [testenv]
 commands = # see setup.cfg for options sent to pytest and coverage
-           coverage run --source=cloudbridge -m pytest -n 5 tests/ -v {posargs}
+           coverage run --source=cloudbridge -m pytest -v {posargs:-n 5 tests/}
+           # Combine parallel-mode data files and emit Cobertura XML for upload
+           # by coverallsapp/github-action in CI. Locally this produces
+           # coverage.xml in the project root, which IDEs can also consume.
+           coverage combine
+           coverage xml
 setenv =
     # Fix for moto import issue: https://github.com/travis-ci/travis-ci/issues/7940
     BOTO_CONFIG=/dev/null