almahmoud 7 years ago
Parent
Commit
370487fc4e

+ 2 - 2
.travis.yml

@@ -16,7 +16,7 @@ matrix:
   - python: 2.7
     env: TOX_ENV=py27-azure
   - python: 2.7
-    env: TOX_ENV=py27-gce
+    env: TOX_ENV=py27-gcp
   - python: 2.7
     env: TOX_ENV=py27-mock
   - python: 2.7
@@ -26,7 +26,7 @@ matrix:
   - python: 3.6
     env: TOX_ENV=py36-azure
   - python: 3.6
-    env: TOX_ENV=py36-gce
+    env: TOX_ENV=py36-gcp
   - python: 3.6
     env: TOX_ENV=py36-mock
   - python: 3.6

+ 4 - 4
README.rst

@@ -32,9 +32,9 @@ Build Status Tests
 .. |azure-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/7
                 :target: https://travis-ci.org/CloudVE/cloudbridge
 
-.. |gce-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/3
+.. |gcp-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/3
               :target: https://travis-ci.org/CloudVE/cloudbridge
-.. |gce-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/8
+.. |gcp-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/8
               :target: https://travis-ci.org/CloudVE/cloudbridge
 
 .. |mock-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/CloudVE/cloudbridge/branches/master/4
@@ -52,7 +52,7 @@ Build Status Tests
 +---------------------------+----------------+----------------+
 | **Amazon Web Services**   | |aws-py27|     | |aws-py36|     |
 +---------------------------+----------------+----------------+
-| **Google Compute Engine** | |gce-py27|     | |gce-py36|     |
+| **Google Cloud Platform** | |gcp-py27|     | |gcp-py36|     |
 +---------------------------+----------------+----------------+
 | **Microsoft Azure**       | |azure-py27|   | |azure-py36|   |
 +---------------------------+----------------+----------------+
@@ -88,7 +88,7 @@ exploring the API:
   print(provider.security.key_pairs.list())
 
 The exact same command (as well as any other CloudBridge method) will run with
-any of the supported providers: ``ProviderList.[AWS | AZURE | GCE | OPENSTACK]``!
+any of the supported providers: ``ProviderList.[AWS | AZURE | GCP | OPENSTACK]``!
 
 
 Citation

+ 1 - 1
cloudbridge/cloud/factory.py

@@ -15,7 +15,7 @@ log = logging.getLogger(__name__)
 class ProviderList(object):
     AWS = 'aws'
     AZURE = 'azure'
-    GCE = 'gce'
+    GCP = 'gcp'
     OPENSTACK = 'openstack'
     MOCK = 'mock'
 

+ 0 - 5
cloudbridge/cloud/providers/gce/__init__.py

@@ -1,5 +0,0 @@
-"""
-Exports from this provider
-"""
-
-from .provider import GCECloudProvider  # noqa

+ 6 - 6
cloudbridge/cloud/providers/gce/README.rst → cloudbridge/cloud/providers/gcp/README.rst

@@ -1,6 +1,6 @@
 CloudBridge support for `Google Cloud Platform`_. Compute is provided by `Google
-Compute Engine`_ (GCE). Object storage is provided by `Google Cloud Storage`_
-(GCE).
+Compute Engine`_ (GCE). Object storage is provided by `Google Cloud Storage`_
+(GCS).
 
 Security Groups
 ~~~~~~~~~~~~~~~
@@ -8,10 +8,10 @@ CloudBridge API lets you control incoming traffic to VM instances by creating
 VM firewalls, adding rules to VM firewalls, and then assigning instances to VM
 firewalls.
 
-GCE does this a little bit differently. GCE lets you assign `tags`_ to VM
+GCP does this a little bit differently. GCP lets you assign `tags`_ to VM
 instances. Tags, then, can be used for networking purposes. In particular, you
 can create `firewall rules`_ to control incoming traffic to instances having a
-specific tag. So, to add GCE support to CloudBridge, we simulate VM firewalls by
+specific tag. So, to add GCP support to CloudBridge, we simulate VM firewalls by
 tags.
 
 To make this more clear, let us consider the example of adding a rule to a
@@ -21,13 +21,13 @@ created whose ``targetTags`` is ``[vmf]``. This makes sure that the rule
 applies to all instances that have ``vmf`` as a tag (in CloudBridge language
 instances belonging to the VM firewall ``vmf``).
 
-**Note**: This implementation does not take advantage of the full power of GCE
+**Note**: This implementation does not take advantage of the full power of GCP
 firewall format and only creates firewalls with one rule and only can find or
 list firewalls with one rule. This should be OK as long as all firewalls are
 created through the CloudBridge API.
 
 .. _`Google Cloud Platform`: https://cloud.google.com/
 .. _`Google Compute Engine`: https://cloud.google.com/compute/docs
 .. _`Google Cloud Storage`: https://cloud.google.com/storage/docs
 .. _`tags`: https://cloud.google.com/compute/docs/reference/latest/instances/
    setTags

+ 5 - 0
cloudbridge/cloud/providers/gcp/__init__.py

@@ -0,0 +1,5 @@
+"""
+Exports from this provider
+"""
+
+from .provider import GCPCloudProvider  # noqa

+ 9 - 9
cloudbridge/cloud/providers/gce/helpers.py → cloudbridge/cloud/providers/gcp/helpers.py

@@ -7,8 +7,8 @@ import tenacity
 from cloudbridge.cloud.interfaces.exceptions import ProviderInternalException
 
 
-def gce_projects(provider):
-    return provider.gce_compute.projects()
+def gcp_projects(provider):
+    return provider.gcp_compute.projects()
 
 
 def iter_all(resource, **kwargs):
@@ -26,7 +26,7 @@ def get_common_metadata(provider):
     """
     Get a project's commonInstanceMetadata entry
     """
-    metadata = gce_projects(provider).get(
+    metadata = gcp_projects(provider).get(
         project=provider.project_name).execute()
     return metadata["commonInstanceMetadata"]
 
@@ -46,9 +46,9 @@ def __if_fingerprint_differs(e):
                 retry=tenacity.retry_if_exception(__if_fingerprint_differs),
                 wait=tenacity.wait_exponential(max=10),
                 reraise=True)
-def gce_metadata_save_op(provider, callback):
+def gcp_metadata_save_op(provider, callback):
     """
-    Carries out a metadata save operation. In GCE, a fingerprint based
+    Carries out a metadata save operation. In GCP, a fingerprint based
     locking mechanism is used to prevent lost updates. A new fingerprint
     is returned each time metadata is retrieved. Therefore, this method
     retrieves the metadata, invokes the provided callback with that
@@ -61,7 +61,7 @@ def gce_metadata_save_op(provider, callback):
         # allow callback to do processing on it
         callback(metadata)
         # save the metadata
-        operation = gce_projects(provider).setCommonInstanceMetadata(
+        operation = gcp_projects(provider).setCommonInstanceMetadata(
             project=provider.project_name, body=metadata).execute()
         provider.wait_for_operation(operation)
 
@@ -82,7 +82,7 @@ def modify_or_add_metadata_item(provider, key, value):
             else:
                 metadata['items'].append(entry)
 
-    gce_metadata_save_op(provider, _update_metadata_key)
+    gcp_metadata_save_op(provider, _update_metadata_key)
 
 
 # This function will raise an HttpError with message containing
@@ -97,7 +97,7 @@ def add_metadata_item(provider, key, value):
         # if not it will be already updated
         metadata['items'] = entries
 
-    gce_metadata_save_op(provider, _add_metadata_key)
+    gcp_metadata_save_op(provider, _add_metadata_key)
 
 
 def find_matching_metadata_items(provider, key_regex):
@@ -141,7 +141,7 @@ def remove_metadata_item(provider, key):
             else:
                 metadata['items'] = entries
 
-    gce_metadata_save_op(provider, _remove_metadata_by_key)
+    gcp_metadata_save_op(provider, _remove_metadata_by_key)
     return True
 
 

+ 40 - 40
cloudbridge/cloud/providers/gce/provider.py → cloudbridge/cloud/providers/gcp/provider.py

@@ -1,6 +1,6 @@
 """
 Provider implementation based on google-api-python-client library
-for GCE.
+for GCP.
 """
 import json
 import logging
@@ -18,9 +18,9 @@ from oauth2client.service_account import ServiceAccountCredentials
 from cloudbridge.cloud.base import BaseCloudProvider
 from cloudbridge.cloud.interfaces.exceptions import ProviderConnectionException
 
-from .services import GCEComputeService
-from .services import GCENetworkingService
-from .services import GCESecurityService
+from .services import GCPComputeService
+from .services import GCPNetworkingService
+from .services import GCPSecurityService
 from .services import GCPStorageService
 
 log = logging.getLogger(__name__)
@@ -192,12 +192,12 @@ class GCPResources(object):
         return parsed_url
 
 
-class GCECloudProvider(BaseCloudProvider):
+class GCPCloudProvider(BaseCloudProvider):
 
-    PROVIDER_ID = 'gce'
+    PROVIDER_ID = 'gcp'
 
     def __init__(self, config):
-        super(GCECloudProvider, self).__init__(config)
+        super(GCPCloudProvider, self).__init__(config)
 
         # Disable warnings about file_cache not being available when using
         # oauth2client >= 4.0.0.
@@ -206,44 +206,44 @@ class GCECloudProvider(BaseCloudProvider):
 
         # Initialize cloud connection fields
         self.credentials_file = self._get_config_value(
-                'gce_service_creds_file',
-                os.getenv('GCE_SERVICE_CREDS_FILE'))
+                'gcp_service_creds_file',
+                os.getenv('GCP_SERVICE_CREDS_FILE'))
         self.credentials_dict = self._get_config_value(
-                'gce_service_creds_dict',
-                json.loads(os.getenv('GCE_SERVICE_CREDS_DICT', '{}')))
+                'gcp_service_creds_dict',
+                json.loads(os.getenv('GCP_SERVICE_CREDS_DICT', '{}')))
         self.vm_default_user_name = self._get_config_value(
-            'gce_vm_default_username',
-            os.getenv('GCE_VM_DEFAULT_USERNAME', "cbuser"))
+            'gcp_vm_default_username',
+            os.getenv('GCP_VM_DEFAULT_USERNAME', "cbuser"))
 
-        # If 'gce_service_creds_dict' is not passed in from config and
+        # If 'gcp_service_creds_dict' is not passed in from config and
         # self.credentials_file is available, read and parse the json file to
         # self.credentials_dict.
         if self.credentials_file and not self.credentials_dict:
             with open(self.credentials_file) as creds_file:
                 self.credentials_dict = json.load(creds_file)
         self.default_zone = self._get_config_value(
-            'gce_default_zone',
-            os.environ.get('GCE_DEFAULT_ZONE') or 'us-central1-a')
+            'gcp_default_zone',
+            os.environ.get('GCP_DEFAULT_ZONE') or 'us-central1-a')
         self.region_name = self._get_config_value(
-            'gce_region_name',
-            os.environ.get('GCE_DEFAULT_REGION') or 'us-central1')
+            'gcp_region_name',
+            os.environ.get('GCP_DEFAULT_REGION') or 'us-central1')
 
         if self.credentials_dict and 'project_id' in self.credentials_dict:
             self.project_name = self.credentials_dict['project_id']
         else:
-            self.project_name = os.environ.get('GCE_PROJECT_NAME')
+            self.project_name = os.environ.get('GCP_PROJECT_NAME')
 
         # service connections, lazily initialized
-        self._gce_compute = None
-        self._gcs_storage = None
+        self._gcp_compute = None
+        self._gcp_storage = None
         self._credentials_cache = None
         self._compute_resources_cache = None
         self._storage_resources_cache = None
 
         # Initialize provider services
-        self._compute = GCEComputeService(self)
-        self._security = GCESecurityService(self)
-        self._networking = GCENetworkingService(self)
+        self._compute = GCPComputeService(self)
+        self._security = GCPSecurityService(self)
+        self._networking = GCPNetworkingService(self)
         self._storage = GCPStorageService(self)
 
     @property
@@ -263,22 +263,22 @@ class GCECloudProvider(BaseCloudProvider):
         return self._storage
 
     @property
-    def gce_compute(self):
-        if not self._gce_compute:
-            self._gce_compute = self._connect_gce_compute()
-        return self._gce_compute
+    def gcp_compute(self):
+        if not self._gcp_compute:
+            self._gcp_compute = self._connect_gcp_compute()
+        return self._gcp_compute
 
     @property
-    def gcs_storage(self):
-        if not self._gcs_storage:
-            self._gcs_storage = self._connect_gcs_storage()
-        return self._gcs_storage
+    def gcp_storage(self):
+        if not self._gcp_storage:
+            self._gcp_storage = self._connect_gcp_storage()
+        return self._gcp_storage
 
     @property
     def _compute_resources(self):
         if not self._compute_resources_cache:
             self._compute_resources_cache = GCPResources(
-                    self.gce_compute,
+                    self.gcp_compute,
                     project=self.project_name,
                     region=self.region_name,
                     zone=self.default_zone)
@@ -287,7 +287,7 @@ class GCECloudProvider(BaseCloudProvider):
     @property
     def _storage_resources(self):
         if not self._storage_resources_cache:
-            self._storage_resources_cache = GCPResources(self.gcs_storage)
+            self._storage_resources_cache = GCPResources(self.gcp_storage)
         return self._storage_resources_cache
 
     @property
@@ -309,23 +309,23 @@ class GCECloudProvider(BaseCloudProvider):
     def client_id(self):
         return self._credentials.service_account_email
 
-    def _connect_gcs_storage(self):
+    def _connect_gcp_storage(self):
         return discovery.build('storage', 'v1', credentials=self._credentials,
                                cache_discovery=False)
 
-    def _connect_gce_compute(self):
+    def _connect_gcp_compute(self):
         return discovery.build('compute', 'v1', credentials=self._credentials,
                                cache_discovery=False)
 
     def wait_for_operation(self, operation, region=None, zone=None):
         args = {'project': self.project_name, 'operation': operation['name']}
         if not region and not zone:
-            operations = self.gce_compute.globalOperations()
+            operations = self.gcp_compute.globalOperations()
         elif zone:
-            operations = self.gce_compute.zoneOperations()
+            operations = self.gcp_compute.zoneOperations()
             args['zone'] = zone
         else:
-            operations = self.gce_compute.regionOperations()
+            operations = self.gcp_compute.regionOperations()
             args['region'] = region
 
         while True:
@@ -362,7 +362,7 @@ class GCECloudProvider(BaseCloudProvider):
 
     def authenticate(self):
         try:
-            self.gce_compute
+            self.gcp_compute
             return True
         except Exception as e:
             raise ProviderConnectionException(

File diff suppressed because it is too large
+ 147 - 147
cloudbridge/cloud/providers/gcp/resources.py


File diff suppressed because it is too large
+ 182 - 182
cloudbridge/cloud/providers/gcp/services.py


+ 10 - 10
cloudbridge/cloud/providers/gce/subservices.py → cloudbridge/cloud/providers/gcp/subservices.py

@@ -10,30 +10,30 @@ from cloudbridge.cloud.base.subservices import BaseVMFirewallRuleSubService
 log = logging.getLogger(__name__)
 
 
-class GCSBucketObjectSubService(BaseBucketObjectSubService):
+class GCPBucketObjectSubService(BaseBucketObjectSubService):
 
     def __init__(self, provider, bucket):
-        super(GCSBucketObjectSubService, self).__init__(provider, bucket)
+        super(GCPBucketObjectSubService, self).__init__(provider, bucket)
 
 
-class GCEGatewaySubService(BaseGatewaySubService):
+class GCPGatewaySubService(BaseGatewaySubService):
     def __init__(self, provider, network):
-        super(GCEGatewaySubService, self).__init__(provider, network)
+        super(GCPGatewaySubService, self).__init__(provider, network)
 
 
-class GCEVMFirewallRuleSubService(BaseVMFirewallRuleSubService):
+class GCPVMFirewallRuleSubService(BaseVMFirewallRuleSubService):
 
     def __init__(self, provider, firewall):
-        super(GCEVMFirewallRuleSubService, self).__init__(provider, firewall)
+        super(GCPVMFirewallRuleSubService, self).__init__(provider, firewall)
 
 
-class GCEFloatingIPSubService(BaseFloatingIPSubService):
+class GCPFloatingIPSubService(BaseFloatingIPSubService):
 
     def __init__(self, provider, gateway):
-        super(GCEFloatingIPSubService, self).__init__(provider, gateway)
+        super(GCPFloatingIPSubService, self).__init__(provider, gateway)
 
 
-class GCESubnetSubService(BaseSubnetSubService):
+class GCPSubnetSubService(BaseSubnetSubService):
 
     def __init__(self, provider, network):
-        super(GCESubnetSubService, self).__init__(provider, network)
+        super(GCPSubnetSubService, self).__init__(provider, network)

+ 5 - 5
docs/getting_started.rst

@@ -85,11 +85,11 @@ Google Compute Cloud:
 
     from cloudbridge.cloud.factory import CloudProviderFactory, ProviderList
 
-    config = {'gce_project_name': 'project name',
-              'gce_service_creds_file': 'service_file.json',
-              'gce_default_zone': 'us-east1-b',  # Use desired value
-              'gce_region_name': 'us-east1'}  # Use desired value
-    provider = CloudProviderFactory().create_provider(ProviderList.GCE, config)
+    config = {'gcp_project_name': 'project name',
+              'gcp_service_creds_file': 'service_file.json',
+              'gcp_default_zone': 'us-east1-b',  # Use desired value
+              'gcp_region_name': 'us-east1'}  # Use desired value
+    provider = CloudProviderFactory().create_provider(ProviderList.GCP, config)
     image_id = 'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-v20181222'
 
 List some resources

+ 3 - 3
docs/topics/design_decisions.rst

@@ -41,8 +41,8 @@ Resource identification, naming, and labeling
   continued to use id and name, with the name being changeable for some
   resources, and read-only in others.
 
-  As CloudBridge evolved and support was added for Azure and GCE, things only
-  became more complex. Some providers (e.g. Azure and GCE) used a user-provided
+  As CloudBridge evolved and support was added for Azure and GCP, things only
+  became more complex. Some providers (e.g. Azure and GCP) used a user-provided
   value instead of an auto-generated value as an `id`, which would also be
   displayed in their respective dashboards as `Name`. This meant that they were
   treating their servers as individually named pets, instead of adopting the
@@ -65,7 +65,7 @@ Resource identification, naming, and labeling
   **Second iteration**
   However, it soon became apparent that this overloaded terminology was
   continuing to cause confusion. The `id` property in CloudBridge mapped to the
-  unchangeable `name` property in Azure and GCE, and the *name* property in
+  unchangeable `name` property in Azure and GCP, and the *name* property in
   cloudbridge sometimes mapped to a *tag* in certain providers, and a *name* in
   other providers and they were sometimes read-only, sometimes writable. In an
   attempt to disambiguate these concepts, it was then decided that perhaps

+ 1 - 1
docs/topics/object_storage.rst

@@ -5,7 +5,7 @@ unstructured data over HTTP. Object Storage is also referred to as Blob (Binary
 Large OBject) Storage by Azure, and Simple Storage Service (S3) by Amazon.
 
 Typically, you would store your objects within a Bucket, as it is known in
-AWS and GCE. A Bucket is also called a Container in OpenStack and Azure. In
+AWS and GCP. A Bucket is also called a Container in OpenStack and Azure. In
 CloudBridge, we use the term Bucket.
 
 Storing objects in a bucket

+ 12 - 12
docs/topics/procuring_credentials.rst

@@ -137,10 +137,10 @@ specific roles can also be assigned for more limited access.
 
 .. _google-creds:
 
-Google Compute Engine (GCE)
+Google Cloud Platform (GCP)
 ---------------------------
 
-For Google Compute Engine (GCE), create a service account followed by creating
+For Google Cloud Platform (GCP), create a service account followed by creating
 and downloading a key. Additional instructions are available at this link
 https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account.
 
@@ -148,36 +148,36 @@ Start off by clicking on the `Create Service Account` button on the
 IAM & admin section of the Google Cloud Console:
 https://console.cloud.google.com/iam-admin/serviceaccounts.
 
-.. figure:: captures/gce-sa-1.png
-   :alt: GCE Service Account 1
+.. figure:: captures/gcp-sa-1.png
+   :alt: GCP Service Account 1
 
 Next, we provide a name for the service account and an informative description.
 Note that the supplied name is used to create an email address for the service
 account. Once created, this email cannot be changed.
 
-.. figure:: captures/gce-sa-2.png
-   :alt: GCE Service Account 2
+.. figure:: captures/gcp-sa-2.png
+   :alt: GCP Service Account 2
 
 We then assign a role to the service account. Depending on what you will be
 doing with the service account, and CloudBridge, you can set granular access
 roles for the service account. The `Editor` role on the project is very broad
 and will allow you to exercise all of the CloudBridge capabilities.
 
-.. figure:: captures/gce-sa-3.png
-   :alt: GCE Service Account 3
+.. figure:: captures/gcp-sa-3.png
+   :alt: GCP Service Account 3
 
 After a service account has been created, we need to add a key to it.
 
-.. figure:: captures/gce-sa-4.png
-   :alt: GCE Service Account key 1
+.. figure:: captures/gcp-sa-4.png
+   :alt: GCP Service Account key 1
 
 Finally, choose the JSON format for the key when prompted. The file that is
 downloaded will be used with CloudBridge through the variables shown
 on the `Connection and Authentication Setup <setup.html>`_ page. Note that you
 may have multiple keys associated with the same service account.
 
-.. figure:: captures/gce-sa-5.png
-   :alt: GCE Service Account key 2
+.. figure:: captures/gcp-sa-5.png
+   :alt: GCP Service Account key 2
 
 The JSON credentials file will have a similar form to the example shown
 below, and can either be passed through an absolute path to the file, or

+ 32 - 32
docs/topics/provider_development.rst

@@ -5,7 +5,7 @@ for CloudBridge.
 
 
 1. We start off by creating a new folder for the provider within the
-``cloudbridge/cloud/providers`` folder. In this case: ``gce``. Further, install
+``cloudbridge/cloud/providers`` folder. In this case: ``gcp``. Further, install
 the native cloud provider Python library, here
 ``pip install google-api-python-client==1.4.2`` and a couple of its requirements
 ``oauth2client==1.5.2`` and ``pycrypto==2.6.1``.
@@ -20,21 +20,21 @@ add a class variable named ``PROVIDER_ID``.
     from cloudbridge.cloud.base import BaseCloudProvider
 
 
-    class GCECloudProvider(BaseCloudProvider):
+    class GCPCloudProvider(BaseCloudProvider):
 
-        PROVIDER_ID = 'gce'
+        PROVIDER_ID = 'gcp'
 
         def __init__(self, config):
-            super(GCECloudProvider, self).__init__(config)
+            super(GCPCloudProvider, self).__init__(config)
 
 
 
-3. Add an ``__init__.py`` to the ``cloudbridge/cloud/providers/gce`` folder
+3. Add an ``__init__.py`` to the ``cloudbridge/cloud/providers/gcp`` folder
 and export the provider.
 
 .. code-block:: python
 
-    from .provider import GCECloudProvider  # noqa
+    from .provider import GCPCloudProvider  # noqa
 
 .. tip ::
 
@@ -42,21 +42,21 @@ and export the provider.
 
 4. Next, we need to register the provider with the factory.
 This only requires that you register the provider's ID in the ``ProviderList``.
-Add GCE to the ``ProviderList`` class in ``cloudbridge/cloud/factory.py``.
+Add GCP to the ``ProviderList`` class in ``cloudbridge/cloud/factory.py``.
 
 
 5. Run the test suite. We will get the tests passing on py27 first.
 
 .. code-block:: bash
 
-    export CB_TEST_PROVIDER=gce
+    export CB_TEST_PROVIDER=gcp
     tox -e py27
 
 You should see the tests fail with the following message:
 
 .. code-block:: bash
 
-    "TypeError: Can't instantiate abstract class GCECloudProvider with abstract
+    "TypeError: Can't instantiate abstract class GCPCloudProvider with abstract
     methods storage, compute, security, network."
 
 6. Therefore, our next step is to implement these methods. We can start off by
@@ -68,22 +68,22 @@ implementing these methods in ``provider.py`` and raising a
     @property
     def compute(self):
         raise NotImplementedError(
-            "GCECloudProvider does not implement this service")
+            "GCPCloudProvider does not implement this service")
 
     @property
     def network(self):
         raise NotImplementedError(
-            "GCECloudProvider does not implement this service")
+            "GCPCloudProvider does not implement this service")
 
     @property
     def security(self):
         raise NotImplementedError(
-            "GCECloudProvider does not implement this service")
+            "GCPCloudProvider does not implement this service")
 
     @property
     def storage(self):
         raise NotImplementedError(
-            "GCECloudProvider does not implement this service")
+            "GCPCloudProvider does not implement this service")
 
 
 Running the tests now will complain as much. We will next implement each
@@ -97,10 +97,10 @@ Service in turn.
     from cloudbridge.cloud.base.services import BaseSecurityService
 
 
-    class GCESecurityService(BaseSecurityService):
+    class GCPSecurityService(BaseSecurityService):
 
         def __init__(self, provider):
-            super(GCESecurityService, self).__init__(provider)
+            super(GCPSecurityService, self).__init__(provider)
 
 
 8. We can now return this new service from the security property in
@@ -109,8 +109,8 @@ Service in turn.
 .. code-block:: python
 
     def __init__(self, config):
-        super(GCECloudProvider, self).__init__(config)
-        self._security = GCESecurityService(self)
+        super(GCPCloudProvider, self).__init__(config)
+        self._security = GCPSecurityService(self)
 
     @property
     def security(self):
@@ -125,7 +125,7 @@ tests to fail:
 
 .. code-block:: bash
 
-    "TypeError: Can't instantiate abstract class GCESecurityService with abstract
+    "TypeError: Can't instantiate abstract class GCPSecurityService with abstract
     methods key_pairs, security_groups."
 
 The Abstract Base Classes are doing their job and flagging all methods that
@@ -142,14 +142,14 @@ next implement these services.
     from cloudbridge.cloud.base.services import BaseSecurityService
 
 
-    class GCESecurityService(BaseSecurityService):
+    class GCPSecurityService(BaseSecurityService):
 
         def __init__(self, provider):
-            super(GCESecurityService, self).__init__(provider)
+            super(GCPSecurityService, self).__init__(provider)
 
             # Initialize provider services
-            self._key_pairs = GCEKeyPairService(provider)
-            self._security_groups = GCESecurityGroupService(provider)
+            self._key_pairs = GCPKeyPairService(provider)
+            self._security_groups = GCPSecurityGroupService(provider)
 
         @property
         def key_pairs(self):
@@ -160,16 +160,16 @@ next implement these services.
             return self._security_groups
 
 
-    class GCEKeyPairService(BaseKeyPairService):
+    class GCPKeyPairService(BaseKeyPairService):
 
         def __init__(self, provider):
-            super(GCEKeyPairService, self).__init__(provider)
+            super(GCPKeyPairService, self).__init__(provider)
 
 
-    class GCESecurityGroupService(BaseSecurityGroupService):
+    class GCPSecurityGroupService(BaseSecurityGroupService):
 
         def __init__(self, provider):
-            super(GCESecurityGroupService, self).__init__(provider)
+            super(GCPSecurityGroupService, self).__init__(provider)
 
 .. tip ::
 
@@ -180,7 +180,7 @@ Once again, running the tests will complain of missing methods:
 
 .. code-block:: bash
 
-    "TypeError: Can't instantiate abstract class GCEKeyPairService with abstract
+    "TypeError: Can't instantiate abstract class GCPKeyPairService with abstract
     methods create, find, get, list."
 
 11. Keep implementing the methods till the security service works, and the
@@ -200,8 +200,8 @@ dependencies.
 
 .. code-block:: python
 
-    gce_reqs = ['google-api-python-client==1.4.2']
-    full_reqs = base_reqs + aws_reqs + openstack_reqs + gce_reqs
+    gcp_reqs = ['google-api-python-client==1.4.2']
+    full_reqs = base_reqs + aws_reqs + openstack_reqs + gcp_reqs
 
 We will also register the provider in ``cloudbridge/cloud/factory.py``'s
 provider list.
@@ -212,7 +212,7 @@ provider list.
         AWS = 'aws'
         OPENSTACK = 'openstack'
         ...
-        GCE = 'gce'
+        GCP = 'gcp'
 
 .. tip ::
 
@@ -220,8 +220,8 @@ provider list.
 
 
 12. Thereafter, we create the actual connection through the sdk. In the case of
-GCE, we need a Compute API client object. We will make this connection
-available as a public property named ``gce_compute`` in the provider. We will
+GCP, we need a Compute API client object. We will make this connection
+available as a public property named ``gcp_compute`` in the provider. We will
 then lazily initialize this connection.
 
 A full implementation of the KeyPair service can now be made in a provider

+ 20 - 20
docs/topics/setup.rst

@@ -38,11 +38,11 @@ will override environment values.
     provider = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
 
 
-    ## For GCE
-    config = {'gce_service_creds_file': '<service_creds_file_name>.json'}
+    ## For GCP
+    config = {'gcp_service_creds_file': '<service_creds_file_name>.json'}
     # Alternatively, we can supply a dictionary with the credentials values
     # as the following:
-    gce_creds = {
+    gcp_creds = {
         "type": "service_account",
         "project_id": "<project_name>",
         "private_key_id": "<private_key_id>",
@@ -54,8 +54,8 @@ will override environment values.
         "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
         "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-name%40my-project.iam.gserviceaccount.com"
     }
-    config = {'gce_service_creds_dict': gce_creds}
-    provider = CloudProviderFactory().create_provider(ProviderList.GCE, config)
+    config = {'gcp_service_creds_dict': gcp_creds}
+    provider = CloudProviderFactory().create_provider(ProviderList.GCP, config)
 
 
     ## For OpenStack
@@ -142,19 +142,19 @@ Azure
 |                                     | placed.                                                  |
 +-------------------------------------+----------------------------------------------------------+
 
-GCE
+GCP
 ~~~
 
 +-------------------------+----------------------------------------------------------+
 | Variable                | Description                                              |
 +=========================+==========================================================+
-| gce_default_zone        | Default placement zone to use for the current session.   |
+| gcp_default_zone        | Default placement zone to use for the current session.   |
 |                         | Default is ``us-central1-a``.                            |
 +-------------------------+----------------------------------------------------------+
-| gce_region_name         | Default region to use for the current session. Default   |
+| gcp_region_name         | Default region to use for the current session. Default   |
 |                         | is ``us-central1``.                                      |
 +-------------------------+----------------------------------------------------------+
-| gce_vm_default_username | System user name for which supplied key pair will be     |
+| gcp_vm_default_username | System user name for which supplied key pair will be     |
 |                         | placed.                                                  |
 +-------------------------+----------------------------------------------------------+
 
@@ -216,21 +216,21 @@ https://docs.microsoft.com/en-us/azure/role-based-access-control/overview.
 | AZURE_VM_DEFAULT_USER_NAME          |           |
 +-------------------------------------+-----------+
 
-GCE
+GCP
 ~~~
 
 +------------------------+-----------+
 | Variable               | Required? |
 +========================+===========+
-| GCE_SERVICE_CREDS_DICT | ✔         |
+| GCP_SERVICE_CREDS_DICT | ✔         |
 | or                     |           |
-| GCE_SERVICE_CREDS_FILE |           |
+| GCP_SERVICE_CREDS_FILE |           |
 +------------------------+-----------+
-| GCE_DEFAULT_ZONE       |           |
+| GCP_DEFAULT_ZONE       |           |
 +------------------------+-----------+
-| GCE_PROJECT_NAME       |           |
+| GCP_PROJECT_NAME       |           |
 +------------------------+-----------+
-| GCE_REGION_NAME        |           |
+| GCP_REGION_NAME        |           |
 +------------------------+-----------+
 
 OpenStack
@@ -261,7 +261,7 @@ OpenStack
 +------------------------+-----------+
 
 Once the environment variables are set, you can create a connection as follows,
-replacing ``ProviderList.AWS`` with the desired provider (AZURE, GCE, or
+replacing ``ProviderList.AWS`` with the desired provider (AZURE, GCP, or
 OPENSTACK):
 
 .. code-block:: python
@@ -276,7 +276,7 @@ Providing access credentials in a CloudBridge config file
 CloudBridge can also read credentials from a file on your local file system.
 The file should be placed in one of two locations: ``/etc/cloudbridge.ini`` or
 ``~/.cloudbridge``. Each set of credentials should be delineated with the
-provider ID (e.g., ``openstack``, ``aws``, ``azure``, ``gce``) with the
+provider ID (e.g., ``openstack``, ``aws``, ``azure``, ``gcp``) with the
 necessary credentials being supplied in YAML format. Note that only one set
 of credentials per cloud provider type can be supplied (i.e., via this
 method, it is not possible to provide credentials for two different
@@ -295,8 +295,8 @@ OpenStack clouds).
     azure_secret: secret
     azure_resource_group: resource group
 
-    [gce]
-    gce_service_creds_file: absolute path to credentials file
+    [gcp]
+    gcp_service_creds_file: absolute path to credentials file
 
     [openstack]
     os_username: username
@@ -307,7 +307,7 @@ OpenStack clouds).
     os_project_name: project name
 
 Once the file is created, you can create a connection as follows, replacing
-``ProviderList.AWS`` with the desired provider (AZURE, GCE, or OPENSTACK):
+``ProviderList.AWS`` with the desired provider (AZURE, GCP, or OPENSTACK):
 
 .. code-block:: python
 

+ 4 - 4
test/helpers/__init__.py

@@ -128,12 +128,12 @@ TEST_DATA_CONFIG = {
         "vm_type": os.environ.get('CB_VM_TYPE_OS', 'm1.tiny'),
         "placement": os.environ.get('CB_PLACEMENT_OS', 'nova'),
     },
-    'GCECloudProvider': {
+    'GCPCloudProvider': {
         'image': ('https://www.googleapis.com/compute/v1/'
                   'projects/ubuntu-os-cloud/global/images/'
                   'ubuntu-1710-artful-v20180126'),
         'vm_type': 'f1-micro',
-        'placement': os.environ.get('GCE_DEFAULT_ZONE', 'us-central1-a'),
+        'placement': os.environ.get('GCP_DEFAULT_ZONE', 'us-central1-a'),
     },
     "AzureCloudProvider": {
         "placement":
@@ -152,8 +152,8 @@ def get_provider_test_data(provider, key):
         return TEST_DATA_CONFIG.get("AWSCloudProvider").get(key)
     elif "OpenStackCloudProvider" in provider.name:
         return TEST_DATA_CONFIG.get("OpenStackCloudProvider").get(key)
-    elif "GCECloudProvider" in provider.name:
-        return TEST_DATA_CONFIG.get("GCECloudProvider").get(key)
+    elif "GCPCloudProvider" in provider.name:
+        return TEST_DATA_CONFIG.get("GCPCloudProvider").get(key)
     elif "AzureCloudProvider" in provider.name:
         return TEST_DATA_CONFIG.get("AzureCloudProvider").get(key)
     return None

+ 1 - 1
test/test_block_store_service.py

@@ -131,7 +131,7 @@ class CloudBlockStoreServiceTestCase(ProviderTestBase):
                 self.assertEqual(test_vol.attachments.instance_id,
                                  test_instance.id)
                 if (self.provider.PROVIDER_ID != 'azure' and
-                        self.provider.PROVIDER_ID != 'gce'):
+                        self.provider.PROVIDER_ID != 'gcp'):
                     self.assertEqual(test_vol.attachments.device,
                                      "/dev/sda2")
                 test_vol.detach()

+ 2 - 2
test/test_interface.py

@@ -54,8 +54,8 @@ class CloudInterfaceTestCase(ProviderTestBase):
             cloned_config['os_password'] = "cb_dummy"
         elif self.provider.PROVIDER_ID == 'azure':
             cloned_config['azure_subscription_id'] = "cb_dummy"
-        elif self.provider.PROVIDER_ID == 'gce':
-            cloned_config['gce_service_creds_dict'] = {'dummy': 'dict'}
+        elif self.provider.PROVIDER_ID == 'gcp':
+            cloned_config['gcp_service_creds_dict'] = {'dummy': 'dict'}
 
         with self.assertRaises(ProviderConnectionException):
             cloned_provider = CloudProviderFactory().create_provider(

+ 1 - 1
test/test_network_service.py

@@ -239,7 +239,7 @@ class CloudNetworkServiceTestCase(ProviderTestBase):
             # Check basic router properties
             sit.check_standard_behaviour(
                 self, self.provider.networking.routers, router)
-            if (self.provider.PROVIDER_ID != 'gce'):
+            if (self.provider.PROVIDER_ID != 'gcp'):
                 self.assertEqual(
                     router.state, RouterState.DETACHED,
                     "Router {0} state {1} should be {2}.".format(

+ 3 - 3
tox.ini

@@ -6,7 +6,7 @@
 # running the tests.
 
 [tox]
-envlist = {py27,py36,pypy}-{aws,azure,gce,openstack,mock}
+envlist = {py27,py36,pypy}-{aws,azure,gcp,openstack,mock}
 
 [testenv]
 commands = flake8 cloudbridge test setup.py
@@ -18,14 +18,14 @@ setenv =
     BOTO_CONFIG=/dev/null
     aws: CB_TEST_PROVIDER=aws
     azure: CB_TEST_PROVIDER=azure
-    gce: CB_TEST_PROVIDER=gce
+    gcp: CB_TEST_PROVIDER=gcp
     openstack: CB_TEST_PROVIDER=openstack
     mock: CB_TEST_PROVIDER=mock
 passenv =
     PYTHONUNBUFFERED
     aws: CB_IMAGE_AWS CB_INSTANCE_TYPE_AWS CB_PLACEMENT_AWS AWS_ACCESS_KEY AWS_SECRET_KEY
     azure: AZURE_SUBSCRIPTION_ID AZURE_CLIENT_ID AZURE_SECRET AZURE_TENANT AZURE_REGION_NAME AZURE_RESOURCE_GROUP AZURE_STORAGE_ACCOUNT AZURE_VM_DEFAULT_USER_NAME AZURE_PUBLIC_KEY_STORAGE_TABLE_NAME
-    gce: CB_IMAGE_GCE CB_INSTANCE_TYPE_GCE CB_PLACEMENT_GCE GCE_DEFAULT_REGION GCE_DEFAULT_ZONE GCE_PROJECT_NAME GCE_SERVICE_CREDS_FILE GCE_SERVICE_CREDS_DICT
+    gcp: CB_IMAGE_GCP CB_INSTANCE_TYPE_GCP CB_PLACEMENT_GCP GCP_DEFAULT_REGION GCP_DEFAULT_ZONE GCP_PROJECT_NAME GCP_SERVICE_CREDS_FILE GCP_SERVICE_CREDS_DICT
     openstack:  CB_IMAGE_OS CB_INSTANCE_TYPE_OS CB_PLACEMENT_OS OS_AUTH_URL OS_PASSWORD OS_PROJECT_NAME OS_TENANT_NAME OS_USERNAME OS_REGION_NAME OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_NAME NOVA_SERVICE_NAME
     mock: CB_IMAGE_AWS CB_INSTANCE_TYPE_AWS CB_PLACEMENT_AWS AWS_ACCESS_KEY AWS_SECRET_KEY
 deps =

Some files were not shown because too many files changed in this diff