Explorar el Código

Merge pull request #108 from gvlproject/azure_dev_rebased

Azure provider merge ready
Nuwan Goonasekera hace 8 años
padre
commit
ef51ea42b5

+ 7 - 1
.travis.yml

@@ -10,14 +10,20 @@ matrix:
   include:
     - python: 2.7
       env: TOX_ENV=py27-aws
+    - python: 2.7
+      env: TOX_ENV=py27-azure
     - python: 2.7
       env: TOX_ENV=py27-openstack
     - python: 3.6
       env: TOX_ENV=py36-aws
+    - python: 3.6
+      env: TOX_ENV=py36-azure
     - python: 3.6
       env: TOX_ENV=py36-openstack
     - python: pypy-5.3.1
       env: TOX_ENV=pypy-aws
+    - python: pypy-5.3.1
+      env: TOX_ENV=pypy-azure
     - python: pypy-5.3.1
       env: TOX_ENV=pypy-openstack
 before_install:
@@ -53,7 +59,7 @@ install:
     - pip install coveralls
     - pip install codecov
 script:
-    - tox -e $TOX_ENV
+    - travis_wait 110 tox -e $TOX_ENV
 after_success:
     - |
       case "$TRAVIS_EVENT_TYPE" in

+ 9 - 9
README.rst

@@ -28,23 +28,23 @@ conditional code for each cloud.
 
 .. |aws-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/1
               :target: https://travis-ci.org/gvlproject/cloudbridge
-.. |aws-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/3
+.. |aws-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/4
               :target: https://travis-ci.org/gvlproject/cloudbridge
-.. |aws-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/5
+.. |aws-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/7
               :target: https://travis-ci.org/gvlproject/cloudbridge
 
-.. |os-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/2
+.. |os-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/3
              :target: https://travis-ci.org/gvlproject/cloudbridge
-.. |os-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/4
+.. |os-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/6
              :target: https://travis-ci.org/gvlproject/cloudbridge
-.. |os-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/6
+.. |os-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/9
              :target: https://travis-ci.org/gvlproject/cloudbridge
 
-.. |azure-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/azure_dev/2
+.. |azure-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/2
                 :target: https://travis-ci.org/gvlproject/cloudbridge/branches
-.. |azure-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/azure_dev/5
+.. |azure-py36| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/5
                 :target: https://travis-ci.org/gvlproject/cloudbridge/branches
-.. |azure-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/azure_dev/8
+.. |azure-pypy| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/master/8
                 :target: https://travis-ci.org/gvlproject/cloudbridge/branches
 
 .. |gce-py27| image:: https://travis-matrix-badges.herokuapp.com/repos/gvlproject/cloudbridge/branches/gce/3
@@ -65,7 +65,7 @@ Build Status
 +--------------------------+--------------+--------------+--------------+
 | **openstack**            | |os-py27|    | |os-py36|    | |os-pypy|    |
 +--------------------------+--------------+--------------+--------------+
-| **azure (alpha)**        | |azure-py27| | |azure-py36| | |azure-py36| |
+| **azure (beta)**         | |azure-py27| | |azure-py36| | |azure-pypy| |
 +--------------------------+--------------+--------------+--------------+
 | **gce (alpha)**          | |gce-py27|   | |gce-py36|   | |gce-pypy|   |
 +--------------------------+--------------+--------------+--------------+

+ 14 - 1
cloudbridge/cloud/base/resources.py

@@ -19,6 +19,7 @@ from cloudbridge.cloud.interfaces.resources import BucketContainer
 from cloudbridge.cloud.interfaces.resources import BucketObject
 from cloudbridge.cloud.interfaces.resources import CloudResource
 from cloudbridge.cloud.interfaces.resources import FloatingIP
+from cloudbridge.cloud.interfaces.resources import FloatingIpState
 from cloudbridge.cloud.interfaces.resources import GatewayState
 from cloudbridge.cloud.interfaces.resources import Instance
 from cloudbridge.cloud.interfaces.resources import InstanceState
@@ -976,7 +977,7 @@ class BaseSubnet(BaseCloudResource, BaseObjectLifeCycleMixin, Subnet):
             interval=interval)
 
 
-class BaseFloatingIP(BaseCloudResource, FloatingIP):
+class BaseFloatingIP(BaseCloudResource, BaseObjectLifeCycleMixin, FloatingIP):
 
     def __init__(self, provider):
         super(BaseFloatingIP, self).__init__(provider)
@@ -988,6 +989,18 @@ class BaseFloatingIP(BaseCloudResource, FloatingIP):
         """
         return self.public_ip
 
+    @property
+    def state(self):
+        return (FloatingIpState.IN_USE if self.in_use
+                else FloatingIpState.AVAILABLE)
+
+    def wait_till_ready(self, timeout=None, interval=None):
+        self.wait_for(
+            [FloatingIpState.AVAILABLE, FloatingIpState.IN_USE],
+            terminal_states=[FloatingIpState.ERROR],
+            timeout=timeout,
+            interval=interval)
+
     def __repr__(self):
         return "<CB-{0}: {1} ({2})>".format(self.__class__.__name__,
                                             self.id, self.public_ip)

+ 1 - 0
cloudbridge/cloud/factory.py

@@ -15,6 +15,7 @@ log = logging.getLogger(__name__)
 class ProviderList(object):
     AWS = 'aws'
     OPENSTACK = 'openstack'
+    AZURE = 'azure'
 
 
 class CloudProviderFactory(object):

+ 1 - 0
cloudbridge/cloud/interfaces/exceptions.py

@@ -53,6 +53,7 @@ class InvalidNameException(CloudBridgeBaseException):
     a CloudBridge resource. An example would be setting uppercase
     letters, which are not allowed in a resource name.
     """
+
     def __init__(self, msg):
         super(InvalidNameException, self).__init__(msg)
 

+ 22 - 8
cloudbridge/cloud/interfaces/resources.py

@@ -790,8 +790,8 @@ class NetworkState(object):
     :cvar UNKNOWN: Network state unknown.
     :cvar PENDING: Network is being created.
     :cvar AVAILABLE: Network is available.
-    :cvar DOWN = Network is not operational.
-    :cvar ERROR = Network errored.
+    :cvar DOWN: Network is not operational.
+    :cvar ERROR: Network errored.
     """
     UNKNOWN = "unknown"
     PENDING = "pending"
@@ -884,15 +884,14 @@ class Network(ObjectLifeCycleMixin, CloudResource):
 
 
 class SubnetState(object):
-
     """
     Standard states for a subnet.
 
     :cvar UNKNOWN: Subnet state unknown.
     :cvar PENDING: Subnet is being created.
     :cvar AVAILABLE: Subnet is available.
-    :cvar DOWN = Subnet is not operational.
-    :cvar ERROR = Subnet errored.
+    :cvar DOWN: Subnet is not operational.
+    :cvar ERROR: Subnet errored.
     """
     UNKNOWN = "unknown"
     PENDING = "pending"
@@ -950,7 +949,23 @@ class Subnet(ObjectLifeCycleMixin, CloudResource):
         pass
 
 
-class FloatingIP(CloudResource):
+class FloatingIpState(object):
+
+    """
+    Standard states for a floating ip.
+
+    :cvar UNKNOWN: Floating IP state unknown.
+    :cvar AVAILABLE: Floating IP is available.
+    :cvar IN_USE: Floating IP is attached to a device.
+    :cvar ERROR: Floating IP is in an error state.
+    """
+    UNKNOWN = "unknown"
+    AVAILABLE = "available"
+    IN_USE = "in_use"
+    ERROR = "error"
+
+
+class FloatingIP(ObjectLifeCycleMixin, CloudResource):
     """
     Represents a floating (i.e., static) IP address.
     """
@@ -1099,7 +1114,6 @@ class Router(CloudResource):
 
 
 class GatewayState(object):
-
     """
     Standard states for a gateway.
 
@@ -1763,7 +1777,7 @@ class VMFirewallRuleContainer(PageableObjectMixin):
             fw.rules.create(TrafficDirection.OUTBOUND, src_dest_fw=fw)
 
         You need to pass in either ``src_dest_fw`` OR ``protocol`` AND
-        ``from_port``, ``to_port``, ``cidr_ip``. In other words, either
+        ``from_port``, ``to_port``, ``cidr``. In other words, either
         you are authorizing another group or you are authorizing some
         IP-based rule.
 

+ 10 - 3
cloudbridge/cloud/interfaces/services.py

@@ -547,7 +547,6 @@ class ImageService(PageableObjectMixin, CloudService):
 
 
 class NetworkingService(CloudService):
-
     """
     Base service interface for networking.
 
@@ -586,6 +585,16 @@ class NetworkingService(CloudService):
         """
         pass
 
+    @abstractproperty
+    def floating_ips(self):
+        """
+        Provides access to all Floating IP services.
+
+        :rtype: :class:`.FloatingIPService`
+        :return: a FloatingIPService object
+        """
+        pass
+
     @abstractproperty
     def gateways(self):
         """
@@ -859,7 +868,6 @@ class FloatingIPService(PageableObjectMixin, CloudService):
 
 
 class RouterService(PageableObjectMixin, CloudService):
-
     """
     Manage networking router actions and resources.
     """
@@ -927,7 +935,6 @@ class RouterService(PageableObjectMixin, CloudService):
 
 
 class GatewayService(CloudService):
-
     """
     Manage internet gateway resources.
     """

+ 5 - 0
cloudbridge/cloud/providers/aws/resources.py

@@ -1027,6 +1027,11 @@ class AWSFloatingIP(BaseFloatingIP):
     def delete(self):
         self._ip.release()
 
+    def refresh(self):
+        fip = self._provider.networking.floating_ips.get(self.id)
+        # pylint:disable=protected-access
+        self._ip = fip._ip
+
 
 class AWSRouter(BaseRouter):
 

+ 1 - 1
cloudbridge/cloud/providers/aws/services.py

@@ -313,7 +313,7 @@ class AWSBucketService(BaseBucketService):
             return self.svc.create('create_bucket', Bucket=name,
                                    CreateBucketConfiguration={
                                        'LocationConstraint': loc_constraint
-                                    })
+                                   })
 
 
 class AWSImageService(BaseImageService):

+ 5 - 0
cloudbridge/cloud/providers/azure/__init__.py

@@ -0,0 +1,5 @@
+"""
+Exports from this provider
+"""
+
+from .provider import AzureCloudProvider  # noqa

+ 558 - 0
cloudbridge/cloud/providers/azure/azure_client.py

@@ -0,0 +1,558 @@
+import datetime
+import logging
+from io import BytesIO
+
+from azure.common.credentials import ServicePrincipalCredentials
+from azure.mgmt.compute import ComputeManagementClient
+from azure.mgmt.network import NetworkManagementClient
+from azure.mgmt.resource import ResourceManagementClient
+from azure.mgmt.resource.subscriptions import SubscriptionClient
+from azure.mgmt.storage import StorageManagementClient
+from azure.storage.blob import BlobPermissions
+from azure.storage.blob import BlockBlobService
+from azure.storage.table import TableService
+
+log = logging.getLogger(__name__)
+
+
+class AzureClient(object):
+    """
+    Azure client is the wrapper on top of azure python sdk
+    """
+    def __init__(self, config):
+        self._config = config
+        self.subscription_id = config.get('azure_subscription_id')
+        self._credentials = ServicePrincipalCredentials(
+            client_id=config.get('azure_client_id'),
+            secret=config.get('azure_secret'),
+            tenant=config.get('azure_tenant')
+        )
+
+        self._resource_client = None
+        self._storage_client = None
+        self._network_management_client = None
+        self._subscription_client = None
+        self._compute_client = None
+        self._access_key_result = None
+        self._block_blob_service = None
+        self._table_service = None
+
+        log.debug("azure subscription : %s", self.subscription_id)
+
+    @property
+    def access_key_result(self):
+        if not self._access_key_result:
+            self._access_key_result = self.storage_client.storage_accounts. \
+                list_keys(self.resource_group, self.storage_account)
+        return self._access_key_result
+
+    @property
+    def resource_group(self):
+        return self._config.get('azure_resource_group')
+
+    @property
+    def storage_account(self):
+        return self._config.get('azure_storage_account')
+
+    @property
+    def region_name(self):
+        return self._config.get('azure_region_name')
+
+    @property
+    def public_key_storage_table_name(self):
+        return self._config.get('azure_public_key_storage_table_name')
+
+    @property
+    def storage_client(self):
+        if not self._storage_client:
+            self._storage_client = \
+                StorageManagementClient(self._credentials,
+                                        self.subscription_id)
+        return self._storage_client
+
+    @property
+    def subscription_client(self):
+        if not self._subscription_client:
+            self._subscription_client = SubscriptionClient(self._credentials)
+        return self._subscription_client
+
+    @property
+    def resource_client(self):
+        if not self._resource_client:
+            self._resource_client = \
+                ResourceManagementClient(self._credentials,
+                                         self.subscription_id)
+        return self._resource_client
+
+    @property
+    def compute_client(self):
+        if not self._compute_client:
+            self._compute_client = \
+                ComputeManagementClient(self._credentials,
+                                        self.subscription_id)
+        return self._compute_client
+
+    @property
+    def network_management_client(self):
+        if not self._network_management_client:
+            self._network_management_client = NetworkManagementClient(
+                self._credentials, self.subscription_id)
+        return self._network_management_client
+
+    @property
+    def blob_service(self):
+        if not self._block_blob_service:
+            self._block_blob_service = BlockBlobService(
+                self.storage_account,
+                self.access_key_result.keys[0].value)
+        return self._block_blob_service
+
+    @property
+    def table_service(self):
+        if not self._table_service:
+            self._table_service = TableService(
+                self.storage_account,
+                self.access_key_result.keys[0].value)
+        if not self._table_service. \
+                exists(table_name=self.public_key_storage_table_name):
+            self._table_service.create_table(
+                self.public_key_storage_table_name)
+        return self._table_service
+
+    def get_resource_group(self, name):
+        return self.resource_client.resource_groups.get(name)
+
+    def create_resource_group(self, name, parameters):
+        return self.resource_client.resource_groups. \
+            create_or_update(name, parameters)
+
+    def get_storage_account(self, storage_account):
+        return self.storage_client.storage_accounts. \
+            get_properties(self.resource_group, storage_account)
+
+    def create_storage_account(self, name, params):
+        return self.storage_client.storage_accounts. \
+            create(self.resource_group, name.lower(), params).result()
+
+    def list_locations(self):
+        return self.subscription_client.subscriptions. \
+            list_locations(self.subscription_id)
+
+    def list_vm_firewall(self):
+        return self.network_management_client.network_security_groups. \
+            list(self.resource_group)
+
+    def create_vm_firewall(self, name, parameters):
+        return self.network_management_client.network_security_groups. \
+            create_or_update(self.resource_group, name,
+                             parameters).result()
+
+    def update_vm_firewall_tags(self, name, tags):
+        return self.network_management_client.network_security_groups. \
+            create_or_update(self.resource_group, name,
+                             {'tags': tags,
+                              'location': self.region_name}).result()
+
+    def create_vm_firewall_rule(self, vm_firewall,
+                                rule_name, parameters):
+        return self.network_management_client.security_rules. \
+            create_or_update(self.resource_group, vm_firewall,
+                             rule_name, parameters).result()
+
+    def delete_vm_firewall_rule(self, name, vm_firewall):
+        return self.network_management_client.security_rules. \
+            delete(self.resource_group, vm_firewall, name).result()
+
+    def get_vm_firewall(self, name):
+        return self.network_management_client.network_security_groups. \
+            get(self.resource_group, name)
+
+    def delete_vm_firewall(self, name):
+        delete_async = self.network_management_client \
+            .network_security_groups. \
+            delete(self.resource_group, name)
+        delete_async.wait()
+
+    def list_containers(self, prefix=None):
+        return self.blob_service.list_containers(prefix=prefix)
+
+    def create_container(self, container_name):
+        self.blob_service.create_container(container_name)
+        return self.blob_service.get_container_properties(container_name)
+
+    def get_container(self, container_name):
+        return self.blob_service.get_container_properties(container_name)
+
+    def delete_container(self, container_name):
+        self.blob_service.delete_container(container_name)
+
+    def list_blobs(self, container_name, prefix=None):
+        return self.blob_service.list_blobs(container_name, prefix=prefix)
+
+    def get_blob(self, container_name, blob_name):
+        return self.blob_service.get_blob_properties(container_name, blob_name)
+
+    def create_blob_from_text(self, container_name, blob_name, text):
+        self.blob_service.create_blob_from_text(container_name,
+                                                blob_name, text)
+
+    def create_blob_from_file(self, container_name, blob_name, file_path):
+        self.blob_service.create_blob_from_path(container_name,
+                                                blob_name, file_path)
+
+    def delete_blob(self, container_name, blob_name):
+        self.blob_service.delete_blob(container_name, blob_name)
+
+    def get_blob_url(self, container_name, blob_name, expiry_time):
+        expiry_date = datetime.datetime.now() + datetime.timedelta(
+            seconds=expiry_time)
+        sas = self.blob_service.generate_blob_shared_access_signature(
+            container_name, blob_name, permission=BlobPermissions.READ,
+            expiry=expiry_date)
+        return self.blob_service.make_blob_url(container_name, blob_name,
+                                               sas_token=sas)
+
+    def get_blob_content(self, container_name, blob_name):
+        out_stream = BytesIO()
+        self.blob_service.get_blob_to_stream(container_name,
+                                             blob_name, out_stream)
+        return out_stream
+
+    def create_empty_disk(self, disk_name, params):
+        return self.compute_client.disks.create_or_update(
+            self.resource_group,
+            disk_name,
+            params,
+            raw=True
+        )
+
+    def create_snapshot_disk(self, disk_name, params):
+        return self.compute_client.disks.create_or_update(
+            self.resource_group,
+            disk_name,
+            params,
+            raw=True
+        )
+
+    def list_snapshots(self):
+        return self.compute_client.snapshots. \
+            list_by_resource_group(self.resource_group)
+
+    def update_disk_tags(self, disk_name, tags):
+        return self.compute_client.disks.update(
+            self.resource_group,
+            disk_name,
+            {'tags': tags},
+            raw=True
+        )
+
+    def get_disk(self, disk_name):
+        return self.compute_client.disks. \
+            get(self.resource_group, disk_name)
+
+    def list_networks(self):
+        return self.network_management_client.virtual_networks.list(
+            self.resource_group)
+
+    def get_network(self, network_name):
+        return self.network_management_client.virtual_networks.get(
+            self.resource_group, network_name)
+
+    def create_network(self, name, params):
+        return self.network_management_client.virtual_networks. \
+            create_or_update(self.resource_group,
+                             name,
+                             parameters=params,
+                             raw=True)
+
+    def delete_network(self, network_name):
+        return self.network_management_client.virtual_networks. \
+            delete(self.resource_group, network_name).wait()
+
+    def create_floating_ip(self, public_ip_name, public_ip_parameters):
+        return self.network_management_client.public_ip_addresses. \
+            create_or_update(self.resource_group,
+                             public_ip_name,
+                             public_ip_parameters).result()
+
+    def delete_floating_ip(self, public_ip_address_name):
+        return self.network_management_client.public_ip_addresses. \
+            delete(self.resource_group,
+                   public_ip_address_name).result()
+
+    def list_floating_ips(self):
+        return self.network_management_client.public_ip_addresses.list(
+            self.resource_group)
+
+    def update_network_tags(self, network_name, tags):
+        return self.network_management_client.virtual_networks. \
+            create_or_update(self.resource_group,
+                             network_name, tags).result()
+
+    def list_disks(self):
+        return self.compute_client.disks. \
+            list_by_resource_group(self.resource_group)
+
+    def delete_disk(self, disk_name):
+        async_deletion = self.compute_client.disks. \
+            delete(self.resource_group, disk_name)
+        async_deletion.wait()
+
+    def get_snapshot(self, snapshot_name):
+        return self.compute_client.snapshots.get(self.resource_group,
+                                                 snapshot_name)
+
+    def create_snapshot(self, snapshot_name, params):
+        return self.compute_client.snapshots.create_or_update(
+            self.resource_group,
+            snapshot_name,
+            params,
+            raw=True
+        )
+
+    def delete_snapshot(self, snapshot_name):
+        async_delete = self.compute_client.snapshots. \
+            delete(self.resource_group, snapshot_name)
+        async_delete.wait()
+
+    def update_snapshot_tags(self, snapshot_name, tags):
+        return self.compute_client.snapshots.update(
+            self.resource_group,
+            snapshot_name,
+            {'tags': tags},
+            raw=True
+        )
+
+    def create_image(self, name, params):
+        return self.compute_client.images. \
+            create_or_update(self.resource_group, name,
+                             params, raw=True)
+
+    def delete_image(self, name):
+        self.compute_client.images. \
+            delete(self.resource_group, name).wait()
+
+    def list_images(self):
+        return self.compute_client.images. \
+            list_by_resource_group(self.resource_group)
+
+    def get_image(self, image_name):
+        return self.compute_client.images. \
+            get(self.resource_group, image_name)
+
+    def update_image_tags(self, name, tags):
+        return self.compute_client.images. \
+            create_or_update(self.resource_group, name,
+                             {
+                                 'tags': tags,
+                                 'location': self.region_name
+                             }).result()
+
+    def list_vm_types(self):
+        return self.compute_client.virtual_machine_sizes. \
+            list(self.region_name)
+
+    def list_subnets(self, network_name):
+        return self.network_management_client.subnets. \
+            list(self.resource_group, network_name)
+
+    def get_subnet(self, network_name, subnet_name):
+        return self.network_management_client.subnets. \
+            get(self.resource_group, network_name, subnet_name)
+
+    def create_subnet(self, network_name,
+                      subnet_name, params):
+        result_create = self.network_management_client \
+            .subnets.create_or_update(
+                self.resource_group,
+                network_name,
+                subnet_name,
+                params
+            )
+        subnet_info = result_create.result()
+
+        return subnet_info
+
+    def delete_subnet(self, network_name, subnet_name):
+        result_delete = self.network_management_client \
+            .subnets.delete(
+                self.resource_group,
+                network_name,
+                subnet_name
+            )
+        result_delete.wait()
+
+    def list_vm(self):
+        return self.compute_client.virtual_machines.list(
+            self.resource_group
+        )
+
+    def restart_vm(self, vm_name):
+        return self.compute_client.virtual_machines.restart(
+            self.resource_group,
+            vm_name
+        ).wait()
+
+    def delete_vm(self, vm_name):
+        return self.compute_client.virtual_machines.delete(
+            self.resource_group,
+            vm_name
+        ).wait()
+
+    def get_vm(self, vm_name):
+        return self.compute_client.virtual_machines.get(
+            self.resource_group,
+            vm_name,
+            expand='instanceView'
+        )
+
+    def create_vm(self, vm_name, params):
+        return self.compute_client.virtual_machines. \
+            create_or_update(self.resource_group,
+                             vm_name, params, raw=True)
+
+    def update_vm(self, vm_name, params):
+        return self.compute_client.virtual_machines. \
+            create_or_update(self.resource_group,
+                             vm_name, params, raw=True)
+
+    def deallocate_vm(self, vm_name):
+        self.compute_client. \
+            virtual_machines.deallocate(self.resource_group,
+                                        vm_name).wait()
+
+    def generalize_vm(self, vm_name):
+        self.compute_client.virtual_machines. \
+            generalize(self.resource_group, vm_name)
+
+    def start_vm(self, vm_name):
+        self.compute_client.virtual_machines. \
+            start(self.resource_group,
+                  vm_name).wait()
+
+    def update_vm_tags(self, vm_name, tags):
+        self.compute_client.virtual_machines. \
+            create_or_update(self.resource_group,
+                             vm_name, tags).result()
+
+    def delete_nic(self, nic_name):
+        self.network_management_client. \
+            network_interfaces.delete(self.resource_group,
+                                      nic_name).wait()
+
+    def get_nic(self, name):
+        return self.network_management_client. \
+            network_interfaces.get(self.resource_group, name)
+
+    def create_nic(self, nic_name, params):
+        async_nic_creation = self.network_management_client. \
+            network_interfaces.create_or_update(
+                self.resource_group,
+                nic_name,
+                params
+            )
+        nic_info = async_nic_creation.result()
+
+        return nic_info
+
+    def get_public_ip(self, name):
+        return self.network_management_client. \
+            public_ip_addresses.get(self.resource_group, name)
+
+    def delete_public_ip(self, public_ip_name):
+        self.network_management_client. \
+            public_ip_addresses.delete(self.resource_group,
+                                       public_ip_name).wait()
+
+    def create_public_key(self, entity):
+
+        return self.table_service. \
+            insert_or_replace_entity(self.public_key_storage_table_name,
+                                     entity)
+
+    def get_public_key(self, name):
+        entities = self.table_service. \
+            query_entities(self.public_key_storage_table_name,
+                           "Name eq '{0}'".format(name), num_results=1)
+
+        return entities.items[0] if len(entities.items) > 0 else None
+
+    def delete_public_key(self, entity):
+        self.table_service.delete_entity(self.public_key_storage_table_name,
+                                         entity.PartitionKey, entity.RowKey)
+
+    def list_public_keys(self, partition_key, limit=None, marker=None):
+        entities = self.table_service. \
+            query_entities(self.public_key_storage_table_name,
+                           "PartitionKey eq '{0}'".format(partition_key),
+                           marker=marker, num_results=limit)
+        return (entities.items, entities.next_marker)
+
+    def delete_route_table(self, route_table_name):
+        self.network_management_client. \
+            route_tables.delete(self.resource_group, route_table_name
+                                ).wait()
+
+    def attach_subnet_to_route_table(self, network_name,
+                                     subnet_name, route_table_id):
+
+        subnet_info = self.network_management_client.subnets.get(
+            self.resource_group,
+            network_name,
+            subnet_name
+        )
+        if subnet_info:
+            subnet_info.route_table = {
+                'id': route_table_id
+            }
+
+            result_create = self.network_management_client. \
+                subnets.create_or_update(
+                 self.resource_group,
+                 network_name,
+                 subnet_name,
+                 subnet_info)
+            subnet_info = result_create.result()
+
+        return subnet_info
+
+    def detach_subnet_to_route_table(self, network_name,
+                                     subnet_name, route_table_id):
+
+        subnet_info = self.network_management_client.subnets.get(
+            self.resource_group,
+            network_name,
+            subnet_name
+        )
+
+        if subnet_info and subnet_info.route_table.id == route_table_id:
+            subnet_info.route_table = None
+
+            result_create = self.network_management_client. \
+                subnets.create_or_update(
+                 self.resource_group,
+                 network_name,
+                 subnet_name,
+                 subnet_info)
+            subnet_info = result_create.result()
+
+        return subnet_info
+
+    def list_route_tables(self):
+        return self.network_management_client. \
+            route_tables.list(self.resource_group)
+
+    def get_route_table(self, router_id):
+        return self.network_management_client. \
+            route_tables.get(self.resource_group, router_id)
+
+    def create_route_table(self, route_table_name, params):
+        return self.network_management_client. \
+            route_tables.create_or_update(
+             self.resource_group,
+             route_table_name, params).result()
+
+    def update_route_table_tags(self, route_table_name, tags):
+        self.network_management_client.route_tables. \
+            create_or_update(self.resource_group,
+                             route_table_name, tags).result()

+ 67 - 0
cloudbridge/cloud/providers/azure/helpers.py

@@ -0,0 +1,67 @@
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+
+
def filter_by_tag(list_items, filters):
    """
    Filter a list of Azure resources on their tags.

    A resource is kept when, for at least one key in ``filters``, the
    filter value is a substring of the resource's tag value for that key
    (a missing tag compares against the empty string). Resources without
    tags are always excluded when filters are given.

    :param list_items: iterable of Azure resource objects exposing ``tags``
    :param filters: dict mapping tag name -> substring to match; a falsy
        value means "no filtering"
    :return: list of matching resources, or ``list_items`` unchanged when
        no filters are supplied
    """
    if not filters:
        return list_items
    filtered_list = []
    for obj in list_items:
        # Append each object at most once, even when several filter keys
        # match (the original appended one copy per matching key).
        if obj.tags and any(filters[key] in obj.tags.get(key, '')
                            for key in filters):
            filtered_list.append(obj)
    return filtered_list
+
+
def parse_url(template_url, original_url):
    """
    Split an Azure resource ID according to a template URL.

    In Azure all resource IDs are returned as URIs, e.g.
    ``/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
    providers/Microsoft.Compute/virtualMachines/{vmName}``.
    Each ``{placeholder}`` segment of ``template_url`` is matched
    positionally against the corresponding segment of ``original_url``.

    :param template_url: template containing ``{name}`` placeholder segments
    :param original_url: concrete resource ID to parse
    :return: dict mapping placeholder names to the matched segments
    :raises ValueError: if the two URLs differ in segment count
        (ValueError subclasses Exception, so existing callers that
        caught Exception still work)
    """
    template_url_parts = template_url.split('/')
    original_url_parts = original_url.split('/')
    if len(template_url_parts) != len(original_url_parts):
        raise ValueError('Invalid url parameter passed')
    return {key[1:-1]: value
            for key, value in zip(template_url_parts, original_url_parts)
            if key.startswith('{') and key.endswith('}')}
+
+
def gen_key_pair():
    """
    Generate an RSA key pair for VM access.

    The private key is rendered as an unencrypted PEM container
    (traditional OpenSSL format) and the public key in OpenSSH format.

    :return: tuple of (private_key_str, public_key_str)
    """
    key = rsa.generate_private_key(public_exponent=65537,
                                   key_size=2048,
                                   backend=default_backend())

    openssh_public = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH)

    pem_private = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption())

    return (pem_private.decode('utf-8'), openssh_public.decode('utf-8'))

+ 126 - 0
cloudbridge/cloud/providers/azure/provider.py

@@ -0,0 +1,126 @@
+import logging
+import os
+
+from cloudbridge.cloud.base import BaseCloudProvider
+from cloudbridge.cloud.providers.azure.azure_client import AzureClient
+from cloudbridge.cloud.providers.azure.services \
+    import AzureComputeService, AzureNetworkingService, \
+    AzureSecurityService, AzureStorageService
+
+from msrestazure.azure_exceptions import CloudError
+
+log = logging.getLogger(__name__)
+
+
class AzureCloudProvider(BaseCloudProvider):
    """
    CloudBridge provider implementation for Microsoft Azure.

    Configuration values are read from the supplied ``config`` object,
    falling back to environment variables. The underlying Azure SDK
    client wrapper is created lazily on first access of
    :attr:`azure_client`, at which point the configured resource group
    and storage account are verified (and created if missing).
    """
    PROVIDER_ID = 'azure'

    def __init__(self, config):
        super(AzureCloudProvider, self).__init__(config)

        # mandatory config values (service-principal credentials)
        self.subscription_id = self. \
            _get_config_value('azure_subscription_id',
                              os.environ.get('AZURE_SUBSCRIPTION_ID', None))
        self.client_id = self._get_config_value(
            'azure_client_id', os.environ.get('AZURE_CLIENT_ID', None))
        self.secret = self._get_config_value(
            'azure_secret', os.environ.get('AZURE_SECRET', None))
        self.tenant = self._get_config_value(
            'azure_tenant', os.environ.get('AZURE_TENANT', None))

        # optional config values
        self.region_name = self._get_config_value(
            'azure_region_name', os.environ.get('AZURE_REGION_NAME',
                                                'eastus'))
        self.resource_group = self._get_config_value(
            'azure_resource_group', os.environ.get('AZURE_RESOURCE_GROUP',
                                                   'cloudbridge'))
        # Storage account name is limited to a max length of 24 characters
        # so derive the default from the tail of the resource group name
        # to keep it unique per deployment.
        self.storage_account = self._get_config_value(
            'azure_storage_account',
            os.environ.get('AZURE_STORAGE_ACCOUNT',
                           'storageacc' + self.resource_group[-12:]))

        # Default admin user name applied to newly launched VMs.
        self.vm_default_user_name = self._get_config_value(
            'azure_vm_default_user_name', os.environ.get
            ('AZURE_VM_DEFAULT_USER_NAME', 'cbuser'))

        # Azure table storage used to persist public keys for key pairs.
        self.public_key_storage_table_name = self._get_config_value(
            'azure_public_key_storage_table_name', os.environ.get
            ('AZURE_PUBLIC_KEY_STORAGE_TABLE_NAME', 'cbcerts'))

        # Populated lazily by the azure_client property.
        self._azure_client = None

        self._security = AzureSecurityService(self)
        self._storage = AzureStorageService(self)
        self._compute = AzureComputeService(self)
        self._networking = AzureNetworkingService(self)

    @property
    def compute(self):
        return self._compute

    @property
    def networking(self):
        return self._networking

    @property
    def security(self):
        return self._security

    @property
    def storage(self):
        return self._storage

    @property
    def azure_client(self):
        """
        Lazily constructed :class:`AzureClient` wrapper.

        The first access also runs :meth:`_initialize` to ensure the
        resource group and storage account exist.
        """
        if not self._azure_client:

            # create a dict with both optional and mandatory configuration
            # values to pass to the azureclient class, rather
            # than passing the provider object and taking a dependency.

            provider_config = {
                'azure_subscription_id': self.subscription_id,
                'azure_client_id': self.client_id,
                'azure_secret': self.secret,
                'azure_tenant': self.tenant,
                'azure_region_name': self.region_name,
                'azure_resource_group': self.resource_group,
                'azure_storage_account': self.storage_account,
                'azure_public_key_storage_table_name':
                    self.public_key_storage_table_name
            }

            self._azure_client = AzureClient(provider_config)
            self._initialize()
        return self._azure_client

    def _initialize(self):
        """
        Verifying that resource group and storage account exists
        if not create one with the name provided in the
        configuration
        """
        try:
            self._azure_client.get_resource_group(self.resource_group)
        except CloudError:
            # CloudError here means the resource group does not exist yet.
            resource_group_params = {'location': self.region_name}
            self._azure_client.create_resource_group(self.resource_group,
                                                     resource_group_params)

        try:
            self._azure_client.get_storage_account(self.storage_account)
        except CloudError:
            # Likewise, create the storage account on first use.
            storage_account_params = {
                'sku': {
                    'name': 'Standard_LRS'
                },
                'kind': 'storage',
                'location': self.region_name,
            }
            self._azure_client. \
                create_storage_account(self.storage_account,
                                       storage_account_params)

+ 1768 - 0
cloudbridge/cloud/providers/azure/resources.py

@@ -0,0 +1,1768 @@
+"""
+DataTypes used by this provider
+"""
+import collections
+import logging
+import time
+
+from azure.common import AzureException
+from azure.mgmt.network.models import NetworkSecurityGroup
+
+from cloudbridge.cloud.base.resources import BaseAttachmentInfo, \
+    BaseBucket, BaseBucketContainer, BaseBucketObject, BaseFloatingIP, \
+    BaseInstance, BaseInternetGateway, BaseKeyPair, BaseLaunchConfig, \
+    BaseMachineImage, BaseNetwork, BasePlacementZone, BaseRegion, BaseRouter, \
+    BaseSnapshot, BaseSubnet, BaseVMFirewall, BaseVMFirewallRule, \
+    BaseVMFirewallRuleContainer, BaseVMType, BaseVolume, ClientPagedResultList
+from cloudbridge.cloud.interfaces import InstanceState, VolumeState
+from cloudbridge.cloud.interfaces.resources import Instance, \
+    MachineImageState, NetworkState, RouterState, \
+    SnapshotState, SubnetState, TrafficDirection
+
+from msrestazure.azure_exceptions import CloudError
+
+import pysftp
+
+from . import helpers as azure_helpers
+
+log = logging.getLogger(__name__)
+
# Azure resource ID URI templates. Used together with
# helpers.parse_url() to extract individual name components (VM name,
# disk name, etc.) from the full resource IDs the Azure APIs return.
NETWORK_INTERFACE_RESOURCE_ID = '/subscriptions/{subscriptionId}/' \
                                'resourceGroups/{resourceGroupName}' \
                                '/providers/Microsoft.Network/' \
                                'networkInterfaces/{networkInterfaceName}'
PUBLIC_IP_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups' \
                        '/{resourceGroupName}/providers/Microsoft.Network' \
                        '/publicIPAddresses/{publicIpAddressName}'
SUBNET_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups/' \
                     '{resourceGroupName}/providers/Microsoft.Network' \
                     '/virtualNetworks/{virtualNetworkName}/subnets' \
                     '/{subnetName}'
VOLUME_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups/' \
                     '{resourceGroupName}/providers/Microsoft.Compute/' \
                     'disks/{diskName}'
VM_FIREWALL_RESOURCE_ID = '/subscriptions/{subscriptionId}/' \
                          'resourceGroups/{resourceGroupName}/' \
                          'providers/Microsoft.Network/' \
                          'networkSecurityGroups/' \
                          '{networkSecurityGroupName}'
SNAPSHOT_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups/' \
                       '{resourceGroupName}/providers/Microsoft.Compute/' \
                       'snapshots/{snapshotName}'
IMAGE_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups/' \
                    '{resourceGroupName}/providers/Microsoft.Compute/' \
                    'images/{imageName}'
INSTANCE_RESOURCE_ID = '/subscriptions/{subscriptionId}/resourceGroups/' \
                       '{resourceGroupName}/providers/Microsoft.Compute/' \
                       'virtualMachines/{vmName}'

# Placeholder names inside the templates above; pass these as keys to the
# dict returned by helpers.parse_url().
NETWORK_NAME = 'virtualNetworkName'
NETWORK_INTERFACE_NAME = 'networkInterfaceName'
PUBLIC_IP_NAME = 'publicIpAddressName'
IMAGE_NAME = 'imageName'
VM_NAME = 'vmName'
VOLUME_NAME = 'diskName'
VM_FIREWALL_NAME = 'networkSecurityGroupName'
SNAPSHOT_NAME = 'snapshotName'
+
+
class AzureVMFirewall(BaseVMFirewall):
    """
    Azure implementation of a VM firewall (a network security group).

    The CloudBridge ``name`` and ``description`` are stored in resource
    tags (keys ``Name`` / ``Description``) because the Azure resource
    name itself is immutable.
    """

    def __init__(self, provider, vm_firewall):
        super(AzureVMFirewall, self).__init__(provider, vm_firewall)
        self._vm_firewall = vm_firewall
        # Normalize missing tags to a dict so tag lookups never fail.
        if not self._vm_firewall.tags:
            self._vm_firewall.tags = {}
        self._rule_container = AzureVMFirewallRuleContainer(provider, self)

    @property
    def network_id(self):
        # An Azure NSG is not bound to a single network.
        return None

    @property
    def resource_id(self):
        return self._vm_firewall.id

    @property
    def id(self):
        return self._vm_firewall.name

    @property
    def name(self):
        # Prefer the tag-based display name; fall back to the resource name.
        return self._vm_firewall.tags.get('Name', self._vm_firewall.name)

    @name.setter
    def name(self, value):
        self.assert_valid_resource_name(value)
        self._vm_firewall.tags.update(Name=value)
        self._provider.azure_client. \
            update_vm_firewall_tags(self.id,
                                    self._vm_firewall.tags)

    @property
    def description(self):
        return self._vm_firewall.tags.get('Description', None)

    @description.setter
    def description(self, value):
        self._vm_firewall.tags.update(Description=value)
        self._provider.azure_client.\
            update_vm_firewall_tags(self.id,
                                    self._vm_firewall.tags)

    @property
    def rules(self):
        return self._rule_container

    def delete(self):
        """Delete this firewall. Returns True on success, False on error."""
        try:
            self._provider.azure_client.\
                delete_vm_firewall(self.id)
            return True
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False

    def refresh(self):
        """
        Refreshes the security group with tags if required.
        """
        try:
            self._vm_firewall = self._provider.azure_client. \
                get_vm_firewall(self.id)
            if not self._vm_firewall.tags:
                self._vm_firewall.tags = {}
        except (CloudError, ValueError) as cloud_error:
            # Log str(): ValueError has no .message attribute on Python 3,
            # so the old `cloud_error.message` raised AttributeError inside
            # this handler.
            log.exception(str(cloud_error))
            # The security group no longer exists and cannot be refreshed.

    def to_json(self):
        js = super(AzureVMFirewall, self).to_json()
        json_rules = [r.to_json() for r in self.rules]
        js['rules'] = json_rules
        if js.get('network_id'):
            js.pop('network_id')  # Omit for consistency across cloud providers
        return js
+
+
class AzureVMFirewallRuleContainer(BaseVMFirewallRuleContainer):
    """Container providing access to the rules of one Azure firewall."""

    def __init__(self, provider, firewall):
        super(AzureVMFirewallRuleContainer, self).__init__(provider, firewall)

    def list(self, limit=None, marker=None):
        # Rules with priority >= 3500 (up to 4096) are assumed to be
        # cloudbridge-owned default rules, so they are hidden here.
        # pylint:disable=protected-access
        visible_rules = []
        for rule in self.firewall._vm_firewall.security_rules:
            if rule.priority < 3500:
                visible_rules.append(AzureVMFirewallRule(self.firewall, rule))
        return ClientPagedResultList(self._provider, visible_rules,
                                     limit=limit, marker=marker)

    def create(self, direction, protocol=None, from_port=None, to_port=None,
               cidr=None, src_dest_fw=None):
        if protocol and from_port and to_port:
            return self._create_rule(direction, protocol, from_port,
                                     to_port, cidr)
        if not src_dest_fw:
            return None
        # Mirror every rule of the given source/destination firewall;
        # the last created rule is returned.
        if isinstance(src_dest_fw, str):
            fw = self._provider.security.vm_firewalls.get(src_dest_fw)
        else:
            fw = src_dest_fw
        result = None
        for rule in fw.rules:
            result = self._create_rule(rule.direction, rule.protocol,
                                       rule.from_port, rule.to_port,
                                       rule.cidr)
        return result

    def _create_rule(self, direction, protocol, from_port, to_port, cidr):
        # Default to allowing traffic from anywhere when no CIDR is given.
        cidr = cidr or '0.0.0.0/0'

        # Rule name/priority are derived from the current rule count.
        # pylint:disable=protected-access
        count = len(self.firewall._vm_firewall.security_rules) + 1
        rule_name = "Rule - " + str(count)
        direction_label = ("Inbound" if direction == TrafficDirection.INBOUND
                           else "Outbound")
        parameters = {"priority": 1000 + count,
                      "protocol": protocol,
                      "source_port_range": '*',
                      "source_address_prefix": cidr,
                      "destination_port_range":
                          str(from_port) + "-" + str(to_port),
                      "destination_address_prefix": "*",
                      "access": "Allow",
                      "direction": direction_label}
        result = self._provider.azure_client. \
            create_vm_firewall_rule(self.firewall.id,
                                    rule_name, parameters)
        # Keep the cached rule list in sync with the service.
        # pylint:disable=protected-access
        self.firewall._vm_firewall.security_rules.append(result)
        return AzureVMFirewallRule(self.firewall, result)
+
+
# (from_port, to_port) pair used when parsing Azure port-range strings
# such as "80-443" (or '*' meaning all ports).
PortRange = collections.namedtuple('PortRange', ['from_port', 'to_port'])
+
+
class AzureVMFirewallRule(BaseVMFirewallRule):
    """A single security rule within an Azure network security group."""

    def __init__(self, parent_fw, rule):
        super(AzureVMFirewallRule, self).__init__(parent_fw, rule)

    @property
    def id(self):
        return self._rule.name

    @property
    def direction(self):
        if self._rule.direction == "Inbound":
            return TrafficDirection.INBOUND
        return TrafficDirection.OUTBOUND

    @property
    def name(self):
        return self._rule.name

    @property
    def protocol(self):
        return self._rule.protocol

    @property
    def from_port(self):
        return self._port_range_tuple().from_port

    @property
    def to_port(self):
        return self._port_range_tuple().to_port

    def _port_range_tuple(self):
        # '*' means "all ports" in Azure; map it to the full port range.
        port_range = self._rule.destination_port_range
        if port_range == '*':
            return PortRange(1, 65535)
        parts = port_range.split('-', 1)
        return PortRange(int(parts[0]), int(parts[1]))

    @property
    def cidr(self):
        return self._rule.source_address_prefix

    @property
    def src_dest_fw_id(self):
        return self.firewall.id

    @property
    def src_dest_fw(self):
        return self.firewall

    def delete(self):
        # NOTE(review): this passes firewall.name (the tag-based display
        # name) to delete_vm_firewall_rule — presumably the firewall
        # identifier is expected; confirm against AzureClient.
        vm_firewall = self.firewall.name
        self._provider.azure_client. \
            delete_vm_firewall_rule(self.id, vm_firewall)
        # Drop the cached copy of this rule from the parent firewall.
        # pylint:disable=protected-access
        cached_rules = self.firewall._vm_firewall.security_rules
        for index, cached in enumerate(cached_rules):
            if cached.name == self.name:
                del cached_rules[index]
                break
+
+
class AzureBucketObject(BaseBucketObject):
    """A single blob stored inside an Azure storage container."""

    def __init__(self, provider, container, key):
        super(AzureBucketObject, self).__init__(provider)
        self._container = container
        self._key = key

    @property
    def id(self):
        return self._key.name

    @property
    def name(self):
        """
        Get this object's name.
        """
        return self._key.name

    @property
    def size(self):
        """
        Get this object's size.
        """
        return self._key.properties.content_length

    @property
    def last_modified(self):
        """
        Get the date and time this object was last modified.
        """
        modified = self._key.properties.last_modified
        return modified.strftime("%Y-%m-%dT%H:%M:%S.%f")

    def iter_content(self):
        """
        Returns this object's content as an iterable.
        """
        stream = self._provider.azure_client. \
            get_blob_content(self._container.name, self._key.name)
        # Rewind so callers read from the beginning of the stream.
        if stream:
            stream.seek(0)
        return stream

    def upload(self, data):
        """
        Set the contents of this object to the data read from the source
        string.
        """
        try:
            self._provider.azure_client.create_blob_from_text(
                self._container.name, self.name, data)
        except AzureException as azure_exc:
            log.exception(azure_exc)
            return False
        return True

    def upload_from_file(self, path):
        """
        Store the contents of the file pointed by the "path" variable.
        """
        try:
            self._provider.azure_client.create_blob_from_file(
                self._container.name, self.name, path)
        except AzureException as azure_exc:
            log.exception(azure_exc)
            return False
        return True

    def delete(self):
        """
        Delete this object.

        :rtype: bool
        :return: True if successful
        """
        try:
            self._provider.azure_client.delete_blob(
                self._container.name, self.name)
        except AzureException as azure_exc:
            log.exception(azure_exc)
            return False
        return True

    def generate_url(self, expires_in=0):
        """
        Generate a URL to this object.
        """
        return self._provider.azure_client.get_blob_url(
            self._container.name, self.name, expires_in)
+
+
class AzureBucket(BaseBucket):
    """An Azure storage container exposed as a CloudBridge bucket."""

    def __init__(self, provider, bucket):
        super(AzureBucket, self).__init__(provider)
        self._bucket = bucket
        self._object_container = AzureBucketContainer(provider, self)

    @property
    def id(self):
        return self._bucket.name

    @property
    def name(self):
        """
        Get this bucket's name.
        """
        return self._bucket.name

    def delete(self, delete_contents=True):
        """
        Delete this bucket.
        """
        try:
            self._provider.azure_client.delete_container(self.name)
        except AzureException as azure_exc:
            log.exception(azure_exc)
            return False
        return True

    def exists(self, name):
        """
        Determine if an object with given name exists in this bucket.
        """
        return bool(self.get(name))

    @property
    def objects(self):
        return self._object_container
+
+
class AzureBucketContainer(BaseBucketContainer):
    """Provides access to the objects stored within one Azure bucket."""

    def __init__(self, provider, bucket):
        super(AzureBucketContainer, self).__init__(provider, bucket)

    def get(self, key):
        """
        Retrieve a given object from this bucket.
        """
        try:
            blob = self._provider.azure_client.get_blob(self.bucket.name,
                                                        key)
            return AzureBucketObject(self._provider, self.bucket, blob)
        except AzureException as azure_exc:
            # Missing blobs surface as AzureException; treat as not found.
            log.exception(azure_exc)
            return None

    def list(self, limit=None, marker=None, prefix=None):
        """
        List all objects within this bucket.

        :rtype: BucketObject
        :return: List of all available BucketObjects within this bucket.
        """
        blobs = self._provider.azure_client.list_blobs(
            self.bucket.name, prefix=prefix)
        objects = [AzureBucketObject(self._provider, self.bucket, blob)
                   for blob in blobs]
        return ClientPagedResultList(self._provider, objects,
                                     limit=limit, marker=marker)

    def find(self, name, limit=None, marker=None):
        # Exact-name match over a full listing of the bucket.
        matches = [obj for obj in self if obj.name == name]
        return ClientPagedResultList(self._provider, matches,
                                     limit=limit, marker=marker)

    def create(self, name):
        # An object is created by uploading an empty blob, then re-fetched.
        self._provider.azure_client.create_blob_from_text(
            self.bucket.name, name, '')
        return self.get(name)
+
+
class AzureVolume(BaseVolume):
    """
    Azure implementation of a block-storage volume (a managed disk).

    ``name`` and ``description`` are stored in the disk's tags because
    Azure disk names are immutable.
    """

    # Maps Azure provisioning/attachment states to CloudBridge states.
    VOLUME_STATE_MAP = {
        'InProgress': VolumeState.CREATING,
        'Creating': VolumeState.CREATING,
        'Unattached': VolumeState.AVAILABLE,
        'Attached': VolumeState.IN_USE,
        'Deleting': VolumeState.CONFIGURING,
        'Updating': VolumeState.CONFIGURING,
        'Deleted': VolumeState.DELETED,
        'Failed': VolumeState.ERROR,
        'Canceled': VolumeState.ERROR
    }

    def __init__(self, provider, volume):
        super(AzureVolume, self).__init__(provider)
        self._volume = volume
        self._description = None
        self._state = 'unknown'
        self._update_state()
        # Normalize missing tags to a dict so tag lookups never fail.
        if not self._volume.tags:
            self._volume.tags = {}

    def _update_state(self):
        # Azure reports no explicit attach state; derive it from
        # provisioning_state plus managed_by (the owning VM's resource ID).
        if not self._volume.provisioning_state == 'Succeeded':
            self._state = self._volume.provisioning_state
        elif self._volume.managed_by:
            self._state = 'Attached'
        else:
            self._state = 'Unattached'

    @property
    def id(self):
        return self._volume.name

    @property
    def resource_id(self):
        return self._volume.id

    @property
    def tags(self):
        return self._volume.tags

    @property
    def name(self):
        """
        Get the volume name.

        .. note:: an instance must have a (case sensitive) tag ``Name``
        """
        return self._volume.tags.get('Name', self._volume.name)

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """
        Set the volume name.
        """
        self.assert_valid_resource_name(value)
        self._volume.tags.update(Name=value)
        self._provider.azure_client. \
            update_disk_tags(self.id,
                             self._volume.tags)

    @property
    def description(self):
        return self._volume.tags.get('Description', None)

    @description.setter
    def description(self, value):
        self._volume.tags.update(Description=value)
        self._provider.azure_client. \
            update_disk_tags(self.id,
                             self._volume.tags)

    @property
    def size(self):
        return self._volume.disk_size_gb

    @property
    def create_time(self):
        return self._volume.time_created.strftime("%Y-%m-%dT%H:%M:%S.%f")

    @property
    def zone_id(self):
        return self._volume.location

    @property
    def source(self):
        """Return the source snapshot this disk was created from, if any."""
        if self._volume.creation_data.source_uri:
            url_params = azure_helpers.\
                parse_url(SNAPSHOT_RESOURCE_ID,
                          self._volume.creation_data.source_uri)
            return self._provider.storage.snapshots. \
                get(url_params.get(SNAPSHOT_NAME))
        return None

    @property
    def attachments(self):
        """
        Azure does not have option to specify the device name
        while attaching disk to VM. It is automatically populated
        and is not returned. As a result this method ignores
        the device name parameter and passes None
        to the BaseAttachmentInfo
        :return:
        """
        if self._volume.managed_by:
            url_params = azure_helpers.parse_url(INSTANCE_RESOURCE_ID,
                                                 self._volume.managed_by)
            return BaseAttachmentInfo(self,
                                      url_params.get(VM_NAME),
                                      None)
        else:
            return None

    def attach(self, instance, device=None):
        """
        Attach this volume to an instance.
        """
        try:
            instance_id = instance.id if isinstance(
                instance,
                Instance) else instance
            vm = self._provider.azure_client.get_vm(instance_id)

            # The next free LUN is simply the current data-disk count.
            vm.storage_profile.data_disks.append({
                'lun': len(vm.storage_profile.data_disks),
                'name': self.id,
                'create_option': 'attach',
                'managed_disk': {
                    'id': self.resource_id
                }
            })
            self._provider.azure_client.update_vm(instance_id, vm)
            return True
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False

    def detach(self, force=False):
        """
        Detach this volume from an instance.
        """
        # Scan all VMs for a data disk referencing this volume. Iterate
        # over a copy of the disk list: removing from a list while
        # iterating it skips the following element.
        for vm in self._provider.azure_client.list_vm():
            for item in list(vm.storage_profile.data_disks):
                if item.managed_disk and \
                        item.managed_disk.id == self.resource_id:
                    vm.storage_profile.data_disks.remove(item)
                    self._provider.azure_client.update_vm(vm.name, vm)
        return True

    def create_snapshot(self, name, description=None):
        """
        Create a snapshot of this Volume.
        """
        return self._provider.storage.snapshots.create(name, self)

    def delete(self):
        """
        Delete this volume.
        """
        try:
            self._provider.azure_client. \
                delete_disk(self.id)
            return True
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False

    @property
    def state(self):
        return AzureVolume.VOLUME_STATE_MAP.get(
            self._state, VolumeState.UNKNOWN)

    def refresh(self):
        """
        Refreshes the state of this volume by re-querying the cloud provider
        for its latest state.
        """
        try:
            self._volume = self._provider.azure_client. \
                get_disk(self.id)
            self._update_state()
        except (CloudError, ValueError) as cloud_error:
            # Log str(): ValueError has no .message attribute on Python 3,
            # so the old `cloud_error.message` raised AttributeError here.
            log.exception(str(cloud_error))
            # The volume no longer exists and cannot be refreshed.
            # set the state to unknown
            self._state = 'unknown'
+
+
class AzureSnapshot(BaseSnapshot):
    """
    Azure implementation of a volume snapshot.

    ``name`` and ``description`` are stored in the snapshot's tags
    because Azure snapshot names are immutable.
    """

    # Maps Azure provisioning states to CloudBridge snapshot states.
    SNAPSHOT_STATE_MAP = {
        'InProgress': SnapshotState.PENDING,
        'Succeeded': SnapshotState.AVAILABLE,
        'Failed': SnapshotState.ERROR,
        'Canceled': SnapshotState.ERROR,
        'Updating': SnapshotState.CONFIGURING,
        'Deleting': SnapshotState.CONFIGURING,
        'Deleted': SnapshotState.UNKNOWN
    }

    def __init__(self, provider, snapshot):
        super(AzureSnapshot, self).__init__(provider)
        self._snapshot = snapshot
        self._description = None
        self._state = self._snapshot.provisioning_state
        # Normalize missing tags to a dict so tag lookups never fail.
        if not self._snapshot.tags:
            self._snapshot.tags = {}

    @property
    def id(self):
        return self._snapshot.name

    @property
    def resource_id(self):
        return self._snapshot.id

    @property
    def name(self):
        """
        Get the snapshot name.

        .. note:: an instance must have a (case sensitive) tag ``Name``
        """
        return self._snapshot.tags.get('Name', self._snapshot.name)

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """
        Set the snapshot name.
        """
        self.assert_valid_resource_name(value)
        self._snapshot.tags.update(Name=value)
        self._provider.azure_client. \
            update_snapshot_tags(self.id,
                                 self._snapshot.tags)

    @property
    def description(self):
        return self._snapshot.tags.get('Description', None)

    @description.setter
    def description(self, value):
        self._snapshot.tags.update(Description=value)
        self._provider.azure_client. \
            update_snapshot_tags(self.id,
                                 self._snapshot.tags)

    @property
    def size(self):
        return self._snapshot.disk_size_gb

    @property
    def volume_id(self):
        # Extract the source disk name out of the full Azure resource ID.
        url_params = azure_helpers.\
            parse_url(VOLUME_RESOURCE_ID,
                      self._snapshot.creation_data.source_resource_id)
        return url_params.get(VOLUME_NAME)

    @property
    def create_time(self):
        return self._snapshot.time_created.strftime("%Y-%m-%dT%H:%M:%S.%f")

    @property
    def state(self):
        return AzureSnapshot.SNAPSHOT_STATE_MAP.get(
            self._state, SnapshotState.UNKNOWN)

    def refresh(self):
        """
        Refreshes the state of this snapshot by re-querying the cloud provider
        for its latest state.
        """
        try:
            self._snapshot = self._provider.azure_client. \
                get_snapshot(self.id)
            self._state = self._snapshot.provisioning_state
        except (CloudError, ValueError) as cloud_error:
            # Log str(): ValueError has no .message attribute on Python 3,
            # so the old `cloud_error.message` raised AttributeError here.
            log.exception(str(cloud_error))
            # The snapshot no longer exists and cannot be refreshed.
            # set the state to unknown
            self._state = 'unknown'

    def delete(self):
        """
        Delete this snapshot.
        """
        try:
            self._provider.azure_client.delete_snapshot(self.id)
            return True
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False

    def create_volume(self, placement=None,
                      size=None, volume_type=None, iops=None):
        """
        Create a new Volume from this Snapshot.

        NOTE(review): the ``size``, ``volume_type`` and ``iops`` arguments
        are currently ignored; the new volume always uses this snapshot's
        size — confirm whether that is intended.
        """
        return self._provider.storage.volumes. \
            create(self.id, self.size,
                   zone=placement, snapshot=self)
+
+
class AzureMachineImage(BaseMachineImage):
    # Map Azure provisioning states onto CloudBridge image states.
    IMAGE_STATE_MAP = {
        'InProgress': MachineImageState.PENDING,
        'Succeeded': MachineImageState.AVAILABLE,
        'Failed': MachineImageState.ERROR
    }

    def __init__(self, provider, image):
        super(AzureMachineImage, self).__init__(provider)
        self._image = image
        self._state = image.provisioning_state
        # Tags may come back as None from the SDK; normalize to a dict.
        if not self._image.tags:
            self._image.tags = {}

    @property
    def id(self):
        """
        Get the image identifier.

        :rtype: ``str``
        :return: ID for this image as returned by the cloud middleware.
        """
        return self._image.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this image."""
        return self._image.id

    @property
    def name(self):
        """
        Get the image name, preferring the (case sensitive) ``Name`` tag.

        :rtype: ``str``
        :return: Name for this image as returned by the cloud middleware.
        """
        return self._image.tags.get('Name', self._image.name)

    @name.setter
    def name(self, value):
        """Set the image name via the ``Name`` tag."""
        self.assert_valid_resource_name(value)
        self._image.tags.update(Name=value)
        self._provider.azure_client.update_image_tags(
            self.id, self._image.tags)

    @property
    def description(self):
        """
        Get the image description, stored in the ``Description`` tag.

        :rtype: ``str``
        :return: Description for this image, or ``None`` if unset.
        """
        return self._image.tags.get('Description')

    @description.setter
    def description(self, value):
        """Set the image description via the ``Description`` tag."""
        self._image.tags.update(Description=value)
        self._provider.azure_client.update_image_tags(
            self.id, self._image.tags)

    @property
    def min_disk(self):
        """
        Return the minimum disk size (in GB) required to boot this image.

        The Azure Compute API does not always return this value, in
        which case 0 is reported.

        :rtype: ``int``
        :return: The minimum disk size needed by this image.
        """
        return self._image.storage_profile.os_disk.disk_size_gb or 0

    def delete(self):
        """Delete this image."""
        self._provider.azure_client.delete_image(self.id)

    @property
    def state(self):
        """Azure provisioning state mapped to a ``MachineImageState``."""
        return AzureMachineImage.IMAGE_STATE_MAP.get(
            self._state, MachineImageState.UNKNOWN)

    def refresh(self):
        """
        Re-query the cloud provider for this image's latest state.

        If the image no longer exists, the state becomes ``"unknown"``.
        """
        try:
            image = self._provider.azure_client.get_image(self.id)
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            # Image no longer exists.
            self._state = "unknown"
        else:
            self._image = image
            self._state = image.provisioning_state
+
+
class AzureNetwork(BaseNetwork):
    # Map Azure provisioning states onto CloudBridge network states.
    NETWORK_STATE_MAP = {
        'InProgress': NetworkState.PENDING,
        'Succeeded': NetworkState.AVAILABLE,
    }

    def __init__(self, provider, network):
        super(AzureNetwork, self).__init__(provider)
        self._network = network
        self._state = network.provisioning_state
        # Tags may come back as None from the SDK; normalize to a dict.
        if not self._network.tags:
            self._network.tags = {}

    @property
    def id(self):
        """Network identifier (the Azure network name)."""
        return self._network.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this network."""
        return self._network.id

    @property
    def name(self):
        """
        Get the network name.

        .. note:: the network must have a (case sensitive) tag ``Name``
        """
        return self._network.tags.get('Name', self._network.name)

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """Set the network name via the ``Name`` tag."""
        self.assert_valid_resource_name(value)
        self._network.tags.update(Name=value)
        self._provider.azure_client.update_network_tags(
            self.id, self._network)

    @property
    def external(self):
        """
        All Azure VPC networks can be connected to the Internet, so this
        is always ``True``.
        """
        return True

    @property
    def state(self):
        """Azure provisioning state mapped to a ``NetworkState`` value."""
        return AzureNetwork.NETWORK_STATE_MAP.get(
            self._state, NetworkState.UNKNOWN)

    def refresh(self):
        """
        Re-query the cloud provider for this network's latest state.

        If the network no longer exists, the state becomes ``'unknown'``.
        """
        try:
            network = self._provider.azure_client.get_network(self.id)
        except (CloudError, ValueError) as cloud_error:
            log.exception(cloud_error.message)
            # The network no longer exists and cannot be refreshed.
            self._state = 'unknown'
        else:
            self._network = network
            self._state = network.provisioning_state

    @property
    def cidr_block(self):
        """First address prefix of this network's address space."""
        return self._network.address_space.address_prefixes[0]

    def delete(self):
        """
        Delete this network.

        :return: ``True`` on success, ``False`` otherwise.
        """
        try:
            self._provider.azure_client.delete_network(self.id)
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False
        return True

    @property
    def subnets(self):
        """List all the subnets in this network."""
        return self._provider.networking.subnets.list(network=self.id)

    def create_subnet(self, cidr_block, name=None, zone=None):
        """
        Create a subnet within this network.

        :param cidr_block: CIDR block for the new subnet.
        :param name: Optional subnet name.
        :param zone: Ignored by this provider.
        """
        return self._provider.networking.subnets.create(
            network=self.id, cidr_block=cidr_block, name=name)
+
+
class AzureFloatingIP(BaseFloatingIP):

    def __init__(self, provider, floating_ip):
        super(AzureFloatingIP, self).__init__(provider)
        self._ip = floating_ip

    @property
    def id(self):
        """Floating IP identifier (the Azure public-ip name)."""
        return self._ip.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this public IP."""
        return self._ip.id

    @property
    def public_ip(self):
        """The public IP address string."""
        return self._ip.ip_address

    @property
    def private_ip(self):
        """Private address of the attached ip configuration, if any."""
        config = self._ip.ip_configuration
        return config.private_ip_address if config else None

    @property
    def in_use(self):
        """Whether this IP is attached to an ip configuration."""
        return bool(self._ip.ip_configuration)

    def delete(self):
        """
        Delete this floating ip.

        :return: ``True`` on success, ``False`` otherwise.
        """
        try:
            self._provider.azure_client.delete_floating_ip(self.id)
        except CloudError as cloud_error:
            log.exception(cloud_error.message)
            return False
        return True

    def refresh(self):
        """Re-fetch this floating IP's state from the provider."""
        fip = self._provider.networking.floating_ips.get(self.id)
        # pylint:disable=protected-access
        self._ip = fip._ip
+
+
class AzureRegion(BaseRegion):
    def __init__(self, provider, azure_region):
        super(AzureRegion, self).__init__(provider)
        self._azure_region = azure_region

    @property
    def id(self):
        """Region identifier (the Azure region name)."""
        return self._azure_region.name

    @property
    def name(self):
        """Region name (identical to the id on Azure)."""
        return self._azure_region.name

    @property
    def zones(self):
        """
        Access information about placement zones within this region.

        Azure does not expose zones here, so the region name serves as
        both the zone id and the zone name.
        """
        region_name = self._azure_region.name
        return [AzurePlacementZone(self._provider, region_name, region_name)]
+
+
class AzurePlacementZone(BasePlacementZone):
    """
    Azure provides only limited zone support, so region information is
    surfaced through the zone interface.
    """
    def __init__(self, provider, zone, region):
        super(AzurePlacementZone, self).__init__(provider)
        self._azure_zone = zone
        self._azure_region = region

    @property
    def id(self):
        """
        Get the zone id.

        :rtype: ``str``
        :return: ID for this zone as returned by the cloud middleware.
        """
        return self._azure_zone

    @property
    def name(self):
        """
        Get the zone name.

        :rtype: ``str``
        :return: Name for this zone as returned by the cloud middleware.
        """
        return self._azure_region

    @property
    def region_name(self):
        """
        Get the region that this zone belongs to.

        :rtype: ``str``
        :return: Name of this zone's region as returned by the
            cloud middleware.
        """
        return self._azure_region
+
+
class AzureSubnet(BaseSubnet):
    # Map Azure provisioning states onto CloudBridge subnet states.
    _SUBNET_STATE_MAP = {
        'InProgress': SubnetState.PENDING,
        'Succeeded': SubnetState.AVAILABLE,
    }

    def __init__(self, provider, subnet):
        super(AzureSubnet, self).__init__(provider)
        self._subnet = subnet
        self._state = self._subnet.provisioning_state
        # The subnet's resource id embeds the parent network name;
        # parse it out once and cache the network object.
        self._url_params = azure_helpers\
            .parse_url(SUBNET_RESOURCE_ID, subnet.id)
        self._network = self._provider.azure_client.\
            get_network(self._url_params.get(NETWORK_NAME))

    @property
    def id(self):
        """Composite identifier: ``<network name>|$|<subnet name>``."""
        return self.network_id + '|$|' + self._subnet.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this subnet."""
        return self._subnet.id

    @property
    def name(self):
        """
        Get the subnet name.
        """
        return self._subnet.name

    @property
    def zone(self):
        """The (region-derived) placement zone of the parent network."""
        region = self._provider.\
            compute.regions.get(self._network.location)
        return region.zones[0]

    @property
    def cidr_block(self):
        """Address prefix (CIDR) of this subnet."""
        return self._subnet.address_prefix

    @property
    def network_id(self):
        """Name of the network this subnet belongs to."""
        return self._url_params.get(NETWORK_NAME)

    def delete(self):
        """
        Delete the subnet.

        :return: ``True`` on success, ``False`` otherwise.
        """
        try:
            subnet_id_parts = self.id.split('|$|')
            self._provider.azure_client. \
                delete_subnet(subnet_id_parts[0], subnet_id_parts[1])
            return True
        except CloudError as cloudError:
            log.exception(cloudError.message)
            return False

    @property
    def state(self):
        # Fall back to SubnetState.UNKNOWN (was NetworkState.UNKNOWN).
        return self._SUBNET_STATE_MAP.get(
            self._state, SubnetState.UNKNOWN)

    def refresh(self):
        """
        Refreshes the state of this subnet by re-querying the cloud
        provider for the parent network's latest state.
        """
        try:
            # self.id is the composite '<network>|$|<subnet>' value, so
            # the network must be looked up by its own name.
            self._network = self._provider.azure_client. \
                get_network(self.network_id)
            self._state = self._network.provisioning_state
        except (CloudError, ValueError) as cloudError:
            log.exception(cloudError.message)
            # The network no longer exists and cannot be refreshed.
            # set the state to unknown
            self._state = 'unknown'
+
+
class AzureInstance(BaseInstance):

    # Map Azure provisioning/power states onto CloudBridge instance states.
    INSTANCE_STATE_MAP = {
        'InProgress': InstanceState.PENDING,
        'Creating': InstanceState.PENDING,
        'VM running': InstanceState.RUNNING,
        'Updating': InstanceState.CONFIGURING,
        'Deleted': InstanceState.DELETED,
        'Stopping': InstanceState.CONFIGURING,
        'Deleting': InstanceState.CONFIGURING,
        'Stopped': InstanceState.STOPPED,
        'Canceled': InstanceState.ERROR,
        'Failed': InstanceState.ERROR,
        'VM stopped': InstanceState.STOPPED,
        'VM deallocated': InstanceState.STOPPED,
        'VM deallocating': InstanceState.CONFIGURING,
        'VM stopping': InstanceState.CONFIGURING,
        'VM starting': InstanceState.CONFIGURING
    }

    def __init__(self, provider, vm_instance):
        super(AzureInstance, self).__init__(provider)
        self._vm = vm_instance
        self._update_state()
        self._get_network_attributes()
        # Tags may come back as None from the SDK; normalize to a dict.
        if not self._vm.tags:
            self._vm.tags = {}

    def _get_network_attributes(self):
        """
        Identify the public/private IP addresses and security groups
        associated with this VM's network interfaces.
        """
        self._private_ips = []
        self._public_ips = []
        self._vm_firewall_ids = []
        self._public_ip_ids = []
        self._nic_ids = []
        for nic in self._vm.network_profile.network_interfaces:
            nic_params = azure_helpers.\
                parse_url(NETWORK_INTERFACE_RESOURCE_ID, nic.id)
            nic_name = nic_params.get(NETWORK_INTERFACE_NAME)
            self._nic_ids.append(nic_name)
            nic = self._provider.azure_client.get_nic(nic_name)
            if nic.network_security_group:
                fw_params = azure_helpers. \
                    parse_url(VM_FIREWALL_RESOURCE_ID,
                              nic.network_security_group.id)
                self._vm_firewall_ids.\
                    append(fw_params.get(VM_FIREWALL_NAME))
            if nic.ip_configurations:
                for ip_config in nic.ip_configurations:
                    self._private_ips.append(ip_config.private_ip_address)
                    if ip_config.public_ip_address:
                        url_params = azure_helpers.\
                            parse_url(PUBLIC_IP_RESOURCE_ID,
                                      ip_config.public_ip_address.id)
                        public_ip_name = url_params.get(PUBLIC_IP_NAME)
                        public_ip = self._provider.azure_client.\
                            get_public_ip(public_ip_name)
                        self._public_ip_ids.append(public_ip_name)
                        self._public_ips.append(public_ip.ip_address)

    @property
    def id(self):
        """
        Get the instance identifier.
        """
        return self._vm.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this VM."""
        return self._vm.id

    @property
    def name(self):
        """
        Get the instance name.

        .. note:: an instance must have a (case sensitive) tag ``Name``
        """
        return self._vm.tags.get('Name', self._vm.name)

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """
        Set the instance name.
        """
        self.assert_valid_resource_name(value)
        self._vm.tags.update(Name=value)
        self._provider.azure_client. \
            update_vm_tags(self.id, self._vm)

    @property
    def public_ips(self):
        """
        Get all the public IP addresses for this instance.
        """
        return self._public_ips

    @property
    def private_ips(self):
        """
        Get all the private IP addresses for this instance.
        """
        return self._private_ips

    @property
    def vm_type_id(self):
        """
        Get the instance type name.
        """
        return self._vm.hardware_profile.vm_size

    @property
    def vm_type(self):
        """
        Get the instance type.
        """
        return self._provider.compute.vm_types.find(
            name=self.vm_type_id)[0]

    def reboot(self):
        """
        Reboot this instance (using the cloud middleware API).
        """
        self._provider.azure_client.restart_vm(self.id)

    def delete(self):
        """
        Permanently terminate this instance.

        After deleting the VM, the network interfaces and public IP
        addresses associated with it are deleted too, along with the OS
        disk and any data disk whose 'delete_on_terminate' tag is 'True'.
        """
        self._provider.azure_client.deallocate_vm(self.id)
        self._provider.azure_client.delete_vm(self.id)
        for nic_id in self._nic_ids:
            self._provider.azure_client.delete_nic(nic_id)
        for public_ip_id in self._public_ip_ids:
            self._provider.azure_client.delete_public_ip(public_ip_id)
        for data_disk in self._vm.storage_profile.data_disks:
            if data_disk.managed_disk:
                disk_params = azure_helpers.\
                    parse_url(VOLUME_RESOURCE_ID,
                              data_disk.managed_disk.id)
                disk = self._provider.azure_client.\
                    get_disk(disk_params.get(VOLUME_NAME))
                if disk and disk.tags \
                        and disk.tags.get('delete_on_terminate',
                                          'False') == 'True':
                    self._provider.azure_client.\
                        delete_disk(disk_params.get(VOLUME_NAME))
        if self._vm.storage_profile.os_disk.managed_disk:
            disk_params = azure_helpers. \
                parse_url(VOLUME_RESOURCE_ID,
                          self._vm.storage_profile.os_disk.managed_disk.id)
            self._provider.azure_client. \
                delete_disk(disk_params.get(VOLUME_NAME))

    @property
    def image_id(self):
        """
        Get the image ID for this instance.
        """
        image_ref_id = self._vm.storage_profile.image_reference.id
        if image_ref_id:
            url_params = azure_helpers.parse_url(IMAGE_RESOURCE_ID,
                                                 image_ref_id)
            return url_params.get(IMAGE_NAME)
        else:
            return None

    @property
    def zone_id(self):
        """
        Get the placement zone id where this instance is running.
        """
        return self._vm.location

    @property
    def vm_firewalls(self):
        """VM firewall objects for all firewall ids attached via NICs."""
        return [self._provider.security.vm_firewalls.get(group_id)
                for group_id in self._vm_firewall_ids]

    @property
    def vm_firewall_ids(self):
        """Ids of the VM firewalls attached to this instance's NICs."""
        return self._vm_firewall_ids

    @property
    def key_pair_name(self):
        """
        Get the name of the key pair associated with this instance.
        """
        return self._vm.tags.get('Key_Pair')

    def create_image(self, name, private_key_path=None):
        """
        Create a new image based on this instance.
        Documentation for create image available at
        https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image  # noqa
        In azure, need to deprovision the VM before capturing.
        To deprovision, login to VM and execute waagent deprovision command.
        To do this programmatically, using pysftp to ssh into the VM
        and executing deprovision command.
        To SSH into the VM programmatically, need pass private key file path,
        so we have modified the Cloud Bridge interface to pass
        the private key file path
        """

        self.assert_valid_resource_name(name)

        if not self._state == 'VM generalized':
            if not self._state == 'VM running':
                self._provider.azure_client.start_vm(self.id)
                time.sleep(10)  # Some time is required
                self._get_network_attributes()

            # if private_key_path:
            self._deprovision(private_key_path)
            self._provider.azure_client.deallocate_vm(self.id)
            self._provider.azure_client.generalize_vm(self.id)

        create_params = {
            'location': self._provider.region_name,
            'source_virtual_machine': {
                'id': self.resource_id
            },
            'tags': {'Name': name}
        }
        self._provider.azure_client.\
            create_image(name, create_params)
        image = self._provider.azure_client.\
            get_image(name)

        return AzureMachineImage(self._provider, image)

    def _deprovision(self, private_key_path):
        # SSH into the VM and run the waagent deprovision command; host
        # key checking is disabled since the VM is freshly provisioned.
        cnopts = pysftp.CnOpts()
        cnopts.hostkeys = None
        if private_key_path:
            with pysftp.\
                    Connection(self.public_ips[0],
                               username=self._provider.vm_default_user_name,
                               cnopts=cnopts,
                               private_key=private_key_path) as sftp:
                sftp.execute('sudo waagent -deprovision -force')
                sftp.close()

    def add_floating_ip(self, ip_address):
        """
        Attaches public ip to the instance.

        :param ip_address: the floating IP address to attach
        :return: ``True`` if the address was attached, ``False`` otherwise
        """
        try:
            ip_addresses = [ip for ip in
                            self._provider.azure_client.list_floating_ips()
                            if ip.ip_address and ip.ip_address == ip_address]
            if len(ip_addresses) > 0:
                # Add an elastic IP address to this instance's first NIC.
                nic = self._provider.azure_client.get_nic(self._nic_ids[0])

                nic.ip_configurations[0].public_ip_address = {
                    'id': ip_addresses[0].id
                }
                self._provider.azure_client.create_nic(self._nic_ids[0], nic)
                return True
            return False
        except CloudError as cloudError:
            log.exception(cloudError.message)
            return False

    def remove_floating_ip(self, ip_address=None):
        """
        Remove a public IP address from this instance.
        """
        try:
            nic = self._provider.azure_client.get_nic(self._nic_ids[0])
            nic.ip_configurations[0].public_ip_address = None
            self._provider.azure_client.create_nic(self._nic_ids[0], nic)
            return True
        except CloudError as cloudError:
            log.exception(cloudError.message)
            return False

    def add_vm_firewall(self, fw):
        '''
        :param fw: a VMFirewall object or its string id
        :return: None

        This method adds the security group to VM instance.
        In Azure, security group added to Network interface.
        Azure supports to add only one security group to
        network interface, we are adding the provided security group
        if not associated any security group to NIC
        else replacing the existing security group.
        '''
        # Fixed: was 'self._provicer', which raised AttributeError
        # whenever fw was passed as a string id.
        fw = (self._provider.security.vm_firewalls.get(fw)
              if isinstance(fw, str) else fw)
        nic = self._provider.azure_client.get_nic(self._nic_ids[0])
        if not nic.network_security_group:
            nic.network_security_group = NetworkSecurityGroup()
            nic.network_security_group.id = fw.resource_id
        else:
            fw_url_params = azure_helpers.\
                parse_url(VM_FIREWALL_RESOURCE_ID,
                          nic.network_security_group.id)
            existing_fw = self._provider.security.\
                vm_firewalls.get(fw_url_params.get(VM_FIREWALL_NAME))

            new_fw = self._provider.security.vm_firewalls.\
                create('{0}-{1}'.format(fw.name, existing_fw.name),
                       'Merged security groups {0} and {1}'.
                       format(fw.name, existing_fw.name))
            new_fw.add_rule(src_dest_fw=fw)
            new_fw.add_rule(src_dest_fw=existing_fw)
            nic.network_security_group.id = new_fw.resource_id

        self._provider.azure_client.create_nic(self._nic_ids[0], nic)

    def remove_vm_firewall(self, fw):

        '''
        :param fw: a VMFirewall object or its string id
        :return: None

        This method removes the security group to VM instance.
        In Azure, security group added to Network interface.
        Azure supports to add only one security group to
        network interface, we are removing the provided security group
        if it associated to NIC
        else we are ignoring.
        '''

        nic = self._provider.azure_client.get_nic(self._nic_ids[0])
        # Fixed: was 'self._provicer', which raised AttributeError
        # whenever fw was passed as a string id.
        fw = (self._provider.security.vm_firewalls.get(fw)
              if isinstance(fw, str) else fw)
        if nic.network_security_group and \
                nic.network_security_group.id == fw.resource_id:
            nic.network_security_group = None
            self._provider.azure_client.create_nic(self._nic_ids[0], nic)

    def _update_state(self):
        """
        Azure python sdk list operation does not return the current
        status of the instance. We have to explicitly call the get method
        for each instance to get the instance status (instance_view).
        This is a limitation of the azure rest api.
        """
        if not self._vm.instance_view:
            self.refresh()
        if self._vm.instance_view and len(
                self._vm.instance_view.statuses) > 1:
            self._state = \
                self._vm.instance_view.statuses[1].display_status
        else:
            self._state = \
                self._vm.provisioning_state

    @property
    def state(self):
        return AzureInstance.INSTANCE_STATE_MAP.get(
            self._state, InstanceState.UNKNOWN)

    def refresh(self):
        """
        Refreshes the state of this instance by re-querying the cloud provider
        for its latest state.
        """
        try:
            self._vm = self._provider.azure_client.get_vm(self.id)
            if not self._vm.tags:
                self._vm.tags = {}
            self._update_state()
            self._get_network_attributes()
        except (CloudError, ValueError) as cloudError:
            log.exception(cloudError.message)
            # The instance no longer exists and cannot be refreshed.
            # set the state to unknown
            self._state = 'unknown'
+
+
class AzureLaunchConfig(BaseLaunchConfig):
    # Launch-configuration holder for Azure; all behavior (block device
    # mappings, etc.) is inherited from BaseLaunchConfig.

    def __init__(self, provider):
        super(AzureLaunchConfig, self).__init__(provider)
+
+
class AzureVMType(BaseVMType):

    def __init__(self, provider, vm_type):
        super(AzureVMType, self).__init__(provider)
        self._vm_type = vm_type

    @property
    def id(self):
        """VM type identifier (the Azure size name)."""
        return self._vm_type.name

    @property
    def name(self):
        """VM type name (identical to the id)."""
        return self._vm_type.name

    @property
    def family(self):
        """
        The Azure python sdk does not return family details, so this is
        always reported as 'Unknown'.
        """
        return "Unknown"

    @property
    def vcpus(self):
        """Number of virtual CPU cores."""
        return self._vm_type.number_of_cores

    @property
    def ram(self):
        """Memory size in MB."""
        return self._vm_type.memory_in_mb

    @property
    def size_root_disk(self):
        """OS disk size, converted from MB to GB."""
        return self._vm_type.os_disk_size_in_mb / 1024

    @property
    def size_ephemeral_disks(self):
        """Resource (ephemeral) disk size, converted from MB to GB."""
        return self._vm_type.resource_disk_size_in_mb / 1024

    @property
    def num_ephemeral_disks(self):
        """
        Azure adds one ephemeral disk by default and more cannot be
        attached explicitly, so zero is reported.
        """
        return 0

    @property
    def extra_data(self):
        """Provider-specific extras: the maximum data disk count."""
        return {'max_data_disk_count': self._vm_type.max_data_disk_count}
+
+
class AzureKeyPair(BaseKeyPair):

    def __init__(self, provider, key_pair):
        super(AzureKeyPair, self).__init__(provider, key_pair)
        self._material = None

    @property
    def id(self):
        """Key pair identifier (the stored key record's Name field)."""
        return self._key_pair.Name

    @property
    def name(self):
        """Key pair name (identical to the id)."""
        return self._key_pair.Name

    @property
    def material(self):
        """
        Unencrypted private key.

        :rtype: str
        :return: Unencrypted private key or ``None`` if not available.

        """
        return self._material

    @material.setter
    def material(self, value):
        """Store the private key material locally."""
        self._material = value

    def delete(self):
        """
        Delete this key pair.

        :return: ``True`` on success, ``False`` otherwise.
        """
        try:
            self._provider.azure_client.delete_public_key(self._key_pair)
        except CloudError:
            return False
        return True
+
+
class AzureRouter(BaseRouter):
    def __init__(self, provider, route_table):
        super(AzureRouter, self).__init__(provider)
        self._route_table = route_table
        # Tags may come back as None from the SDK; normalize to a dict.
        if not self._route_table.tags:
            self._route_table.tags = {}

    @property
    def id(self):
        """Router identifier (the Azure route table name)."""
        return self._route_table.name

    @property
    def resource_id(self):
        """Fully qualified Azure resource id of this route table."""
        return self._route_table.id

    @property
    def name(self):
        """
        Get the router name.

        .. note:: the router must have a (case sensitive) tag ``Name``
        """
        return self._route_table.tags.get('Name', self._route_table.name)

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """
        Set the router name.
        """
        self.assert_valid_resource_name(value)
        self._route_table.tags.update(Name=value)
        self._provider.azure_client. \
            update_route_table_tags(self._route_table.name,
                                    self._route_table)

    def refresh(self):
        """Re-fetch this route table from the provider."""
        self._route_table = self._provider.azure_client. \
            get_route_table(self._route_table.name)

    @property
    def state(self):
        self.refresh()  # Explicitly refresh the local object
        if self._route_table.subnets:
            return RouterState.ATTACHED
        return RouterState.DETACHED

    @property
    def network_id(self):
        return None

    def delete(self):
        # Use the resource name (self.id), not the 'Name' tag (self.name):
        # the tag value may differ from the actual route table name.
        self._provider.azure_client. \
            delete_route_table(self.id)

    def attach_subnet(self, subnet):
        """Associate *subnet* with this route table."""
        network_name, subnet_name = self._split_subnet_id(subnet)
        self._provider.azure_client. \
            attach_subnet_to_route_table(network_name,
                                         subnet_name,
                                         self.resource_id)
        self.refresh()

    def detach_subnet(self, subnet):
        """Disassociate *subnet* from this route table."""
        network_name, subnet_name = self._split_subnet_id(subnet)
        self._provider.azure_client. \
            detach_subnet_to_route_table(network_name,
                                         subnet_name,
                                         self.resource_id)
        self.refresh()

    @staticmethod
    def _split_subnet_id(subnet):
        # Azure subnet ids are composite '<network>|$|<subnet>' strings.
        # Fail loudly on malformed ids (was a dead `if ...: pass` check).
        subnet_id_parts = subnet.id.split('|$|')
        if len(subnet_id_parts) != 2:
            raise ValueError("Invalid subnet id: %s" % subnet.id)
        return subnet_id_parts[0], subnet_id_parts[1]

    def attach_gateway(self, gateway):
        # Azure has no explicit internet gateway resource; nothing to do.
        pass

    def detach_gateway(self, gateway):
        # Azure has no explicit internet gateway resource; nothing to do.
        pass
+
+
class AzureInternetGateway(BaseInternetGateway):
    """
    Minimal gateway stand-in: Azure has no explicit internet gateway
    resource, so most operations are no-ops on local state.
    """

    def __init__(self, provider, gateway):
        super(AzureInternetGateway, self).__init__(provider)
        self._gateway = gateway
        self._name = None
        self._network_id = None
        self._state = ''

    @property
    def id(self):
        """Gateway identifier (the locally stored name)."""
        return self._name

    @property
    def name(self):
        """
        Get the gateway name.

        .. note:: the gateway must have a (case sensitive) tag ``Name``
        """
        return self._name

    @name.setter
    # pylint:disable=arguments-differ
    def name(self, value):
        """Set the gateway name after validating it."""
        self.assert_valid_resource_name(value)
        self._name = value

    def refresh(self):
        # Nothing to refresh; the gateway is a purely local construct.
        pass

    @property
    def state(self):
        """Locally tracked state string (empty by default)."""
        return self._state

    @property
    def network_id(self):
        return None

    def delete(self):
        # Nothing to delete on the provider side.
        pass

+ 1096 - 0
cloudbridge/cloud/providers/azure/services.py

@@ -0,0 +1,1096 @@
+import base64
+import logging
+import uuid
+
+from azure.common import AzureException
+
+from cloudbridge.cloud.base.resources import ClientPagedResultList, \
+    ServerPagedResultList
+from cloudbridge.cloud.base.services import BaseBucketService, \
+    BaseComputeService, BaseFloatingIPService, BaseGatewayService, \
+    BaseImageService, BaseInstanceService, BaseKeyPairService, \
+    BaseNetworkService, BaseNetworkingService, BaseRegionService, \
+    BaseRouterService, BaseSecurityService, BaseSnapshotService, \
+    BaseStorageService, BaseSubnetService, BaseVMFirewallService, \
+    BaseVMTypeService, BaseVolumeService
+from cloudbridge.cloud.interfaces import InvalidConfigurationException
+from cloudbridge.cloud.interfaces.resources import MachineImage, \
+    Network, PlacementZone, Snapshot, Subnet, VMFirewall, VMType, Volume
+
+from msrestazure.azure_exceptions import CloudError
+
+from . import helpers as azure_helpers
+from .resources import AzureBucket, AzureFloatingIP, \
+    AzureInstance, AzureInternetGateway, AzureKeyPair, \
+    AzureLaunchConfig, AzureMachineImage, AzureNetwork, \
+    AzureRegion, AzureRouter, AzureSnapshot, AzureSubnet, \
+    AzureVMFirewall, AzureVMType, AzureVolume
+
+log = logging.getLogger(__name__)
+
+
+class AzureSecurityService(BaseSecurityService):
+    def __init__(self, provider):
+        super(AzureSecurityService, self).__init__(provider)
+
+        # Initialize provider services
+        self._key_pairs = AzureKeyPairService(provider)
+        self._vm_firewalls = AzureVMFirewallService(provider)
+
+    @property
+    def key_pairs(self):
+        return self._key_pairs
+
+    @property
+    def vm_firewalls(self):
+        return self._vm_firewalls
+
+
+class AzureVMFirewallService(BaseVMFirewallService):
+    """Service for managing Azure VM firewalls (network security groups)."""
+
+    def __init__(self, provider):
+        super(AzureVMFirewallService, self).__init__(provider)
+
+    def get(self, fw_id):
+        """
+        Return the firewall with the given id, or ``None`` if it does
+        not exist.
+        """
+        try:
+            fws = self.provider.azure_client.get_vm_firewall(fw_id)
+            return AzureVMFirewall(self.provider, fws)
+
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def list(self, limit=None, marker=None):
+        """List all VM firewalls, paged client-side."""
+        fws = [AzureVMFirewall(self.provider, fw)
+               for fw in self.provider.azure_client.list_vm_firewall()]
+        return ClientPagedResultList(self.provider, fws, limit, marker)
+
+    def create(self, name, description, network_id=None):
+        """
+        Create a new VM firewall tagged with ``name`` (and optionally
+        ``description``), then neutralize Azure's built-in default rules.
+
+        ``network_id`` is accepted for interface compatibility but is
+        not used by this implementation.
+        """
+        AzureVMFirewall.assert_valid_resource_name(name)
+        parameters = {"location": self.provider.region_name,
+                      'tags': {'Name': name}}
+
+        if description:
+            parameters['tags'].update(Description=description)
+
+        fw = self.provider.azure_client.create_vm_firewall(name, parameters)
+
+        # Add default rules to negate azure default rules.
+        # See: https://github.com/gvlproject/cloudbridge/issues/106
+        # pylint:disable=protected-access
+        for rule in fw.default_security_rules:
+            rule_name = "cb-override-" + rule.name
+            # Transpose the default rules' priorities (65000 and up) into
+            # the allowed custom-rule range by subtracting 61440, since
+            # custom rules may only use priorities up to 4096.
+            rule.priority = rule.priority - 61440
+            rule.access = "Deny"
+            self._provider.azure_client.create_vm_firewall_rule(
+                fw.name, rule_name, rule)
+
+        # Add a new custom rule allowing all outbound traffic to the internet
+        parameters = {"priority": 3000,
+                      "protocol": "*",
+                      "source_port_range": "*",
+                      "source_address_prefix": "*",
+                      "destination_port_range": "*",
+                      "destination_address_prefix": "Internet",
+                      "access": "Allow",
+                      "direction": "Outbound"}
+        result = self._provider.azure_client.create_vm_firewall_rule(
+            fw.name, "cb-default-internet-outbound", parameters)
+        fw.security_rules.append(result)
+
+        cb_fw = AzureVMFirewall(self.provider, fw)
+        return cb_fw
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Search for firewalls whose ``Name`` tag matches ``name``.
+        """
+        filters = {'Name': name}
+        fws = [AzureVMFirewall(self.provider, vm_firewall)
+               for vm_firewall in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_vm_firewall(), filters)]
+
+        return ClientPagedResultList(self.provider, fws,
+                                     limit=limit, marker=marker)
+
+    def delete(self, group_id):
+        """
+        Delete the firewall with the given id.
+
+        :return: ``True`` on success, ``False`` if the resource could
+            not be deleted.
+        """
+        try:
+            self.provider.azure_client.delete_vm_firewall(group_id)
+            return True
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return False
+
+
+class AzureKeyPairService(BaseKeyPairService):
+    """
+    Key pair service backed by an Azure table.
+
+    All key pair entities are stored under this single fixed partition
+    key (see :meth:`create` and :meth:`list`).
+    """
+    PARTITION_KEY = '00000000-0000-0000-0000-000000000000'
+
+    def __init__(self, provider):
+        super(AzureKeyPairService, self).__init__(provider)
+
+    def get(self, key_pair_id):
+        """
+        Return the key pair with the given id, or ``None`` if not found
+        or on an Azure error.
+        """
+        try:
+            key_pair = self.provider.azure_client.\
+                get_public_key(key_pair_id)
+
+            if key_pair:
+                return AzureKeyPair(self.provider, key_pair)
+            return None
+        except AzureException as error:
+            log.exception(error)
+            return None
+
+    def list(self, limit=None, marker=None):
+        """
+        List stored key pairs, paged server-side via a resume marker.
+        """
+        key_pairs, resume_marker = self.provider.azure_client.list_public_keys(
+            AzureKeyPairService.PARTITION_KEY,  marker=marker,
+            limit=limit or self.provider.config.default_result_limit)
+        results = [AzureKeyPair(self.provider, key_pair)
+                   for key_pair in key_pairs]
+        return ServerPagedResultList(is_truncated=resume_marker,
+                                     marker=resume_marker,
+                                     supports_total=False,
+                                     data=results)
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Find a key pair by name (exact lookup; at most one result).
+        """
+        key_pair = self.get(name)
+        return ClientPagedResultList(self.provider,
+                                     [key_pair] if key_pair else [],
+                                     limit, marker)
+
+    def create(self, name):
+        """
+        Generate and store a new key pair.
+
+        The private key is returned only on the in-memory object (as
+        ``material``); it is never persisted.
+
+        :raises Exception: if a key pair with this name already exists.
+        """
+        AzureKeyPair.assert_valid_resource_name(name)
+
+        key_pair = self.get(name)
+
+        if key_pair:
+            raise Exception(
+                'Keypair already exists with name {0}'.format(name))
+
+        private_key_str, public_key_str = azure_helpers.gen_key_pair()
+
+        entity = {
+                  'PartitionKey': AzureKeyPairService.PARTITION_KEY,
+                  'RowKey': str(uuid.uuid4()),
+                  'Name': name,
+                  'Key': public_key_str
+                 }
+
+        self.provider.azure_client.create_public_key(entity)
+
+        key_pair = self.get(name)
+
+        key_pair.material = private_key_str
+
+        return key_pair
+
+
+class AzureBucketService(BaseBucketService):
+    def __init__(self, provider):
+        super(AzureBucketService, self).__init__(provider)
+
+    def get(self, bucket_id):
+        """
+        Returns a bucket given its ID. Returns ``None`` if the bucket
+        does not exist.
+        """
+        try:
+            bucket = self.provider.azure_client.get_container(bucket_id)
+            return AzureBucket(self.provider, bucket)
+
+        except AzureException as error:
+            log.exception(error)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Searches for a bucket by a given list of attributes.
+        """
+        buckets = [AzureBucket(self.provider, bucket)
+                   for bucket in
+                   self.provider.azure_client.list_containers(prefix=name)]
+        return ClientPagedResultList(self.provider, buckets,
+                                     limit=limit, marker=marker)
+
+    def list(self, limit=None, marker=None):
+        """
+        List all containers.
+        """
+        buckets = [AzureBucket(self.provider, bucket)
+                   for bucket in self.provider.azure_client.list_containers()]
+        return ClientPagedResultList(self.provider, buckets,
+                                     limit=limit, marker=marker)
+
+    def create(self, name, location=None):
+        """
+        Create a new bucket.
+        """
+        AzureBucket.assert_valid_resource_name(name)
+        bucket = self.provider.azure_client.create_container(name.lower())
+        return AzureBucket(self.provider, bucket)
+
+
+class AzureStorageService(BaseStorageService):
+    def __init__(self, provider):
+        super(AzureStorageService, self).__init__(provider)
+
+        # Initialize provider services
+        self._volume_svc = AzureVolumeService(self.provider)
+        self._snapshot_svc = AzureSnapshotService(self.provider)
+        self._bucket_svc = AzureBucketService(self.provider)
+
+    @property
+    def volumes(self):
+        return self._volume_svc
+
+    @property
+    def snapshots(self):
+        return self._snapshot_svc
+
+    @property
+    def buckets(self):
+        return self._bucket_svc
+
+
+class AzureVolumeService(BaseVolumeService):
+    """Service for managing Azure managed disks as volumes."""
+
+    def __init__(self, provider):
+        super(AzureVolumeService, self).__init__(provider)
+
+    def get(self, volume_id):
+        """
+        Returns a volume given its id, or ``None`` if not found.
+        """
+        try:
+            volume = self.provider.azure_client.get_disk(volume_id)
+            return AzureVolume(self.provider, volume)
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Searches for volumes whose ``Name`` tag matches ``name``.
+        """
+        filters = {'Name': name}
+        cb_vols = [AzureVolume(self.provider, volume)
+                   for volume in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_disks(), filters)]
+        return ClientPagedResultList(self.provider, cb_vols,
+                                     limit=limit, marker=marker)
+
+    def list(self, limit=None, marker=None):
+        """
+        List all volumes.
+        """
+        azure_vols = self.provider.azure_client.list_disks()
+        cb_vols = [AzureVolume(self.provider, vol) for vol in azure_vols]
+        return ClientPagedResultList(self.provider, cb_vols,
+                                     limit=limit, marker=marker)
+
+    def create(self, name, size, zone=None, snapshot=None, description=None):
+        """
+        Creates a new volume, either empty or as a copy of ``snapshot``.
+
+        :param size: volume size in GB; note the snapshot branch does
+            not pass ``disk_size_gb`` to Azure.
+        """
+        AzureVolume.assert_valid_resource_name(name)
+        zone_id = zone.id if isinstance(zone, PlacementZone) else zone
+        # NOTE(review): isinstance(snapshot, str) will not match py2
+        # unicode strings — confirm whether py27 callers pass unicode.
+        snapshot = (self.provider.storage.snapshots.get(snapshot)
+                    if snapshot and isinstance(snapshot, str) else snapshot)
+        disk_name = "{0}-{1}".format(name, uuid.uuid4().hex[:6])
+        tags = {'Name': name}
+        if description:
+            tags.update(Description=description)
+        if snapshot:
+            params = {
+                # NOTE(review): this branch falls back to
+                # azure_client.region_name while the empty-disk branch
+                # below uses provider.region_name — confirm both agree.
+                'location':
+                    zone_id or self.provider.azure_client.region_name,
+                'creation_data': {
+                    'create_option': 'copy',
+                    'source_uri': snapshot.resource_id
+                },
+                'tags': tags
+            }
+
+            self.provider.azure_client.create_snapshot_disk(disk_name, params)
+
+        else:
+            params = {
+                'location':
+                    zone_id or self.provider.region_name,
+                'disk_size_gb': size,
+                'creation_data': {
+                    'create_option': 'empty'
+                },
+                'tags': tags}
+
+            self.provider.azure_client.create_empty_disk(disk_name, params)
+
+        azure_vol = self.provider.azure_client.get_disk(disk_name)
+        cb_vol = AzureVolume(self.provider, azure_vol)
+
+        return cb_vol
+
+
+class AzureSnapshotService(BaseSnapshotService):
+    """Service for managing Azure disk snapshots."""
+
+    def __init__(self, provider):
+        super(AzureSnapshotService, self).__init__(provider)
+
+    def get(self, ss_id):
+        """
+        Returns a snapshot given its id, or ``None`` if not found.
+        """
+        try:
+            snapshot = self.provider.azure_client.get_snapshot(ss_id)
+            return AzureSnapshot(self.provider, snapshot)
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Searches for snapshots whose ``Name`` tag matches ``name``.
+        """
+        filters = {'Name': name}
+        cb_snapshots = [AzureSnapshot(self.provider, snapshot)
+                        for snapshot in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_snapshots(), filters)]
+        return ClientPagedResultList(self.provider, cb_snapshots,
+                                     limit=limit, marker=marker)
+
+    def list(self, limit=None, marker=None):
+        """
+        List all snapshots.
+        """
+        snaps = [AzureSnapshot(self.provider, obj)
+                 for obj in
+                 self.provider.azure_client.list_snapshots()]
+        return ClientPagedResultList(self.provider, snaps, limit, marker)
+
+    def create(self, name, volume, description=None):
+        """
+        Creates a new snapshot of a given volume.
+
+        A random suffix is appended to ``name`` to form the Azure
+        resource name; the human-readable name is kept in the ``Name``
+        tag.
+        """
+        AzureSnapshot.assert_valid_resource_name(name)
+        volume = (self.provider.storage.volumes.get(volume)
+                  if isinstance(volume, str) else volume)
+
+        tags = {'Name': name}
+        snapshot_name = "{0}-{1}".format(name, uuid.uuid4().hex[:6])
+
+        if description:
+            tags.update(Description=description)
+
+        params = {
+            'location': self.provider.azure_client.region_name,
+            'creation_data': {
+                # NOTE(review): 'Copy' here vs lowercase 'copy' in
+                # AzureVolumeService.create — confirm the create_option
+                # value is case-insensitive on the Azure side.
+                'create_option': 'Copy',
+                'source_uri': volume.resource_id
+            },
+            'disk_size_gb': volume.size,
+            'tags': tags
+        }
+
+        self.provider.azure_client. \
+            create_snapshot(snapshot_name, params)
+        azure_snap = self.provider.azure_client.get_snapshot(snapshot_name)
+        cb_snap = AzureSnapshot(self.provider, azure_snap)
+
+        return cb_snap
+
+
+class AzureComputeService(BaseComputeService):
+    def __init__(self, provider):
+        super(AzureComputeService, self).__init__(provider)
+        self._vm_type_svc = AzureVMTypeService(self.provider)
+        self._instance_svc = AzureInstanceService(self.provider)
+        self._region_svc = AzureRegionService(self.provider)
+        self._images_svc = AzureImageService(self.provider)
+
+    @property
+    def images(self):
+        return self._images_svc
+
+    @property
+    def vm_types(self):
+        return self._vm_type_svc
+
+    @property
+    def instances(self):
+        return self._instance_svc
+
+    @property
+    def regions(self):
+        return self._region_svc
+
+
+class AzureInstanceService(BaseInstanceService):
+    def __init__(self, provider):
+        super(AzureInstanceService, self).__init__(provider)
+
+    def create(self, name, image, vm_type, subnet=None, zone=None,
+               key_pair=None, vm_firewalls=None, user_data=None,
+               launch_config=None, **kwargs):
+
+        instance_name = name.replace("_", "-") if name \
+            else "{0} - {1}".format("cb", uuid.uuid4())
+
+        AzureInstance.assert_valid_resource_name(instance_name)
+
+        # Key_pair is mandatory in azure and it should not be None.
+        if key_pair:
+            key_pair = (self.provider.security.key_pairs.get(key_pair)
+                        if isinstance(key_pair, str) else key_pair)
+        else:
+            raise Exception("Can not create instance in azure "
+                            "without public key. Keypair required")
+
+        image = (self.provider.compute.images.get(image)
+                 if isinstance(image, str) else image)
+        if not isinstance(image, AzureMachineImage):
+            raise Exception("Provided image %s is not a valid azure image"
+                            % image)
+
+        instance_size = vm_type.id if \
+            isinstance(vm_type, VMType) else vm_type
+
+        if not subnet:
+            subnet = self.provider.networking.subnets.get_or_create_default()
+        else:
+            subnet = (self.provider.networking.subnets.get(subnet)
+                      if isinstance(subnet, str) else subnet)
+
+        zone_id = zone.id if isinstance(zone, PlacementZone) else zone
+
+        subnet_id, zone_id, vm_firewall_id = \
+            self._resolve_launch_options(instance_name,
+                                         subnet, zone_id, vm_firewalls)
+
+        if launch_config:
+            disks, root_disk_size = \
+                self._process_block_device_mappings(launch_config,
+                                                    name, zone_id)
+        else:
+            disks = None
+            root_disk_size = None
+
+        nic_params = {
+                'location': self._provider.region_name,
+                'ip_configurations': [{
+                    'name': instance_name + '_ip_config',
+                    'private_ip_allocation_method': 'Dynamic',
+                    'subnet': {
+                        'id': subnet_id
+                    }
+                }]
+            }
+
+        if vm_firewall_id:
+            nic_params['network_security_group'] = {
+                'id': vm_firewall_id
+            }
+        nic_info = self.provider.azure_client.create_nic(
+            instance_name + '_nic',
+            nic_params
+        )
+        # #! indicates shell script
+        ud = '#cloud-config\n' + user_data \
+            if user_data and not user_data.startswith('#!')\
+            and not user_data.startswith('#cloud-config') else user_data
+
+        params = {
+            'location': zone_id or self._provider.region_name,
+            'os_profile': {
+                'admin_username': self.provider.vm_default_user_name,
+                'computer_name': instance_name,
+                'linux_configuration': {
+                             "disable_password_authentication": True,
+                             "ssh": {
+                                 "public_keys": [{
+                                      "path":
+                                      "/home/{}/.ssh/authorized_keys".format(
+                                          self.provider.vm_default_user_name),
+                                      "key_data": key_pair._key_pair.Key
+                                     }]
+                                   }
+                           }
+            },
+            'hardware_profile': {
+                'vm_size': instance_size
+            },
+            'network_profile': {
+                'network_interfaces': [{
+                    'id': nic_info.id
+                }]
+            },
+            'storage_profile': {
+                'image_reference': {
+                    'id': image.resource_id
+                },
+                "os_disk": {
+                    "name": instance_name + '_os_disk',
+                    "create_option": "fromImage"
+                },
+                'data_disks': disks
+            },
+            'tags': {'Name': name}
+        }
+
+        if key_pair:
+            params['tags'].update(Key_Pair=key_pair.name)
+
+        if root_disk_size:
+            params['storage_profile']['os_disk']['disk_size_gb'] = \
+                root_disk_size
+
+        if user_data:
+            custom_data = base64.b64encode(bytes(ud, 'utf-8'))
+            params['os_profile']['custom_data'] = str(custom_data, 'utf-8')
+
+        self.provider.azure_client.create_vm(instance_name, params)
+        vm = self._provider.azure_client.get_vm(instance_name)
+        return AzureInstance(self.provider, vm)
+
+    def _resolve_launch_options(self, name, subnet=None, zone_id=None,
+                                vm_firewalls=None):
+        if subnet:
+            # subnet's zone takes precedence
+            zone_id = subnet.zone.id
+        vm_firewall_id = None
+
+        if isinstance(vm_firewalls, list) and len(vm_firewalls) > 0:
+
+            if isinstance(vm_firewalls[0], VMFirewall):
+                vm_firewalls_ids = [fw.id for fw in vm_firewalls]
+                vm_firewall_id = vm_firewalls[0].resource_id
+            else:
+                vm_firewalls_ids = vm_firewalls
+                vm_firewall = self.provider.security.\
+                    vm_firewalls.get(vm_firewalls[0])
+                vm_firewall_id = vm_firewall.resource_id
+
+            if len(vm_firewalls) > 1:
+                new_fw = self.provider.security.vm_firewalls.\
+                    create('{0}-fw'.format(name), 'Merge vm firewall {0}'.
+                           format(','.join(vm_firewalls_ids)))
+
+                for fw in vm_firewalls:
+                    new_fw.add_rule(src_dest_fw=fw)
+
+                vm_firewall_id = new_fw.resource_id
+
+        return subnet.resource_id, zone_id, vm_firewall_id
+
+    def _process_block_device_mappings(self, launch_config,
+                                       vm_name, zone=None):
+        """
+        Processes block device mapping information
+        and returns a Data disk dictionary list. If new volumes
+        are requested (source is None and destination is VOLUME), they will be
+        created and the relevant volume ids included in the mapping.
+        """
+        disks = []
+        volumes_count = 0
+        root_disk_size = None
+
+        def attach_volume(volume, delete_on_terminate):
+            disks.append({
+                'lun': volumes_count,
+                'name': volume.id,
+                'create_option': 'attach',
+                'managed_disk': {
+                    'id': volume.resource_id
+                }
+            })
+            delete_on_terminate = delete_on_terminate or False
+            volume.tags.update(delete_on_terminate=str(delete_on_terminate))
+            # In azure, there is no option to specify terminate disks
+            # (similar to AWS delete_on_terminate) on VM delete.
+            # This method uses the azure tags functionality to store
+            # the  delete_on_terminate option when the virtual machine
+            # is deleted, we parse the tags and delete accordingly
+            self.provider.azure_client.\
+                update_disk_tags(volume.id, volume.tags)
+
+        for device in launch_config.block_devices:
+            if device.is_volume:
+                if not device.is_root:
+                    # In azure, os disk automatically created,
+                    # we are ignoring the root disk, if specified
+                    if isinstance(device.source, Snapshot):
+                        snapshot_vol = device.source.create_volume()
+                        attach_volume(snapshot_vol,
+                                      device.delete_on_terminate)
+                    elif isinstance(device.source, Volume):
+                        attach_volume(device.source,
+                                      device.delete_on_terminate)
+                    elif isinstance(device.source, MachineImage):
+                        # Not supported
+                        pass
+                    else:
+                        # source is None, but destination is volume, therefore
+                        # create a blank volume. If the Zone is None, this
+                        # could fail since the volume and instance may
+                        # be created in two different zones.
+                        if not zone:
+                            raise InvalidConfigurationException(
+                                "A zone must be specified when "
+                                "launching with a"
+                                " new blank volume block device mapping.")
+                        vol_name = \
+                            "{0}_{1}_disk".format(vm_name,
+                                                  uuid.uuid4().hex[:6])
+                        new_vol = self.provider.storage.volumes.create(
+                            vol_name,
+                            device.size,
+                            zone)
+                        attach_volume(new_vol, device.delete_on_terminate)
+                    volumes_count += 1
+                else:
+                    root_disk_size = device.size
+
+            else:  # device is ephemeral
+                # in azure we cannot add the ephemeral disks explicitly
+                pass
+
+        return disks, root_disk_size
+
+    def create_launch_config(self):
+        return AzureLaunchConfig(self.provider)
+
+    def list(self, limit=None, marker=None):
+        """
+        List all instances.
+        """
+        instances = [AzureInstance(self.provider, inst)
+                     for inst in self.provider.azure_client.list_vm()]
+        return ClientPagedResultList(self.provider, instances,
+                                     limit=limit, marker=marker)
+
+    def get(self, instance_id):
+        """
+        Returns an instance given its id. Returns None
+        if the object does not exist.
+        """
+        try:
+            vm = self.provider.azure_client.get_vm(instance_id)
+            return AzureInstance(self.provider, vm)
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Searches for an instance by a given list of attributes.
+
+        :rtype: ``object`` of :class:`.Instance`
+        :return: an Instance object
+        """
+        filtr = {'Name': name}
+        instances = [AzureInstance(self.provider, inst)
+                     for inst in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_vm(), filtr)]
+        return ClientPagedResultList(self.provider, instances,
+                                     limit=limit, marker=marker)
+
+
+class AzureImageService(BaseImageService):
+    def __init__(self, provider):
+        super(AzureImageService, self).__init__(provider)
+
+    def get(self, image_id):
+        """
+        Returns an Image given its id
+        """
+        try:
+            image = self.provider.azure_client.get_image(image_id)
+            return AzureMachineImage(self.provider, image)
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+
+        """
+         Searches for a image by a given list of attributes.
+        """
+        filters = {'Name': name}
+        cb_images = [AzureMachineImage(self.provider, image)
+                     for image in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_images(), filters)]
+        return ClientPagedResultList(self.provider, cb_images,
+                                     limit=limit, marker=marker)
+
+    def list(self, limit=None, marker=None):
+        """
+        List all images.
+        """
+        azure_images = self.provider.azure_client.list_images()
+        cb_images = [AzureMachineImage(self.provider, img)
+                     for img in azure_images]
+        return ClientPagedResultList(self.provider, cb_images,
+                                     limit=limit, marker=marker)
+
+
+class AzureVMTypeService(BaseVMTypeService):
+
+    def __init__(self, provider):
+        super(AzureVMTypeService, self).__init__(provider)
+
+    @property
+    def instance_data(self):
+        """
+        Fetch info about the available instances.
+        """
+        r = self.provider.azure_client.list_vm_types()
+        return r
+
+    def list(self, limit=None, marker=None):
+        vm_types = [AzureVMType(self.provider, vm_type)
+                    for vm_type in self.instance_data]
+        return ClientPagedResultList(self.provider, vm_types,
+                                     limit=limit, marker=marker)
+
+
+class AzureNetworkingService(BaseNetworkingService):
+    def __init__(self, provider):
+        super(AzureNetworkingService, self).__init__(provider)
+        self._network_service = AzureNetworkService(self.provider)
+        self._subnet_service = AzureSubnetService(self.provider)
+        self._fip_service = AzureFloatingIPService(self.provider)
+        self._router_service = AzureRouterService(self.provider)
+        self._gateway_service = AzureGatewayService(self.provider)
+
+    @property
+    def networks(self):
+        return self._network_service
+
+    @property
+    def subnets(self):
+        return self._subnet_service
+
+    @property
+    def floating_ips(self):
+        return self._fip_service
+
+    @property
+    def routers(self):
+        return self._router_service
+
+    @property
+    def gateways(self):
+        return self._gateway_service
+
+
+class AzureNetworkService(BaseNetworkService):
+    """Service for managing Azure virtual networks."""
+
+    def __init__(self, provider):
+        super(AzureNetworkService, self).__init__(provider)
+
+    def get(self, network_id):
+        """
+        Return the network with the given id, or ``None`` if not found.
+        """
+        try:
+            network = self.provider.azure_client.get_network(network_id)
+            return AzureNetwork(self.provider, network)
+
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def list(self, limit=None, marker=None):
+        """
+        List all networks.
+        """
+        networks = [AzureNetwork(self.provider, network)
+                    for network in self.provider.azure_client.list_networks()]
+        return ClientPagedResultList(self.provider, networks,
+                                     limit=limit, marker=marker)
+
+    def find(self, name, limit=None, marker=None):
+        """
+        Search for networks whose ``Name`` tag matches ``name``.
+        """
+        filters = {'Name': name}
+        networks = [AzureNetwork(self.provider, network)
+                    for network in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_networks(), filters)]
+        return ClientPagedResultList(self.provider, networks,
+                                     limit=limit, marker=marker)
+
+    def create(self, name, cidr_block):
+        """
+        Create a network spanning the given ``cidr_block``.
+
+        The Azure resource name is either the default network name or,
+        when ``name`` is given, that name plus a random suffix; the
+        human-readable name is stored in the ``Name`` tag.
+        """
+        network_name = AzureNetwork.CB_DEFAULT_NETWORK_NAME
+        if name:
+            network_name = "{0}-{1}".format(name, uuid.uuid4().hex[:6])
+
+        AzureNetwork.assert_valid_resource_name(network_name)
+
+        params = {
+            'location': self.provider.azure_client.region_name,
+            'address_space': {
+                'address_prefixes': [cidr_block]
+            },
+            'tags': {'Name': name or AzureNetwork.CB_DEFAULT_NETWORK_NAME}
+        }
+        self.provider.azure_client.create_network(network_name, params)
+        network = self.provider.azure_client.get_network(network_name)
+        cb_network = AzureNetwork(self.provider, network)
+        return cb_network
+
+    def delete(self, network_id):
+        """
+        Delete an existing network.
+
+        :return: ``True`` on success, ``False`` if the network could not
+            be deleted.
+        """
+        try:
+            self.provider.azure_client.delete_network(network_id)
+            return True
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return False
+
+
+class AzureFloatingIPService(BaseFloatingIPService):
+
+    def __init__(self, provider):
+        super(AzureFloatingIPService, self).__init__(provider)
+
+    def get(self, floating_ip):
+        log.debug("Getting Azure Floating IP Service with the id: %s",
+                  floating_ip)
+        fip = [fip for fip in self.list() if fip.id == floating_ip]
+        return fip[0] if fip else None
+
+    def list(self, limit=None, marker=None):
+        floating_ips = [AzureFloatingIP(self.provider, floating_ip)
+                        for floating_ip in self.provider.azure_client.
+                        list_floating_ips()]
+        return ClientPagedResultList(self.provider, floating_ips,
+                                     limit=limit, marker=marker)
+
+    def create(self):
+        public_ip_address_name = "{0}-{1}".format(
+            'public_ip', uuid.uuid4().hex[:6])
+        public_ip_parameters = {
+            'location': self.provider.azure_client.region_name,
+            'public_ip_allocation_method': 'Static'
+        }
+
+        floating_ip = self.provider.azure_client.\
+            create_floating_ip(public_ip_address_name, public_ip_parameters)
+        return AzureFloatingIP(self.provider, floating_ip)
+
+
+class AzureRegionService(BaseRegionService):
+    def __init__(self, provider):
+        super(AzureRegionService, self).__init__(provider)
+
+    def get(self, region_id):
+        region = None
+        for azureRegion in self.provider.azure_client.list_locations():
+            if azureRegion.name == region_id:
+                region = AzureRegion(self.provider, azureRegion)
+                break
+        return region
+
+    def list(self, limit=None, marker=None):
+        regions = [AzureRegion(self.provider, region)
+                   for region in self.provider.azure_client.list_locations()]
+        return ClientPagedResultList(self.provider, regions,
+                                     limit=limit, marker=marker)
+
+    @property
+    def current(self):
+        return self.get(self.provider.region_name)
+
+
+class AzureSubnetService(BaseSubnetService):
+
+    def __init__(self, provider):
+        super(AzureSubnetService, self).__init__(provider)
+
+    def get(self, subnet_id):
+        """
+         Azure does not provide an api to get the subnet directly by id.
+         It also requires the network id.
+         To make it consistent across the providers, the following code
+         gets the specific subnet from the subnet list.
+
+        :param subnet_id:
+        :return:
+        """
+        try:
+            subnet_id_parts = subnet_id.split('|$|')
+            if (len(subnet_id_parts) != 2):
+                return None
+            azure_subnet = self.provider.azure_client.\
+                get_subnet(subnet_id_parts[0], subnet_id_parts[1])
+            return AzureSubnet(self.provider,
+                               azure_subnet) if azure_subnet else None
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def list(self, network=None, limit=None, marker=None):
+        """
+        List subnets
+        """
+        return ClientPagedResultList(self.provider,
+                                     self._list_subnets(network),
+                                     limit=limit, marker=marker)
+
+    def _list_subnets(self, network=None):
+        result_list = []
+        if network:
+            network_id = network.id \
+                if isinstance(network, Network) else network
+            result_list = self.provider.azure_client.list_subnets(network_id)
+        else:
+            for net in self.provider.azure_client.list_networks():
+                result_list.extend(self.provider.azure_client.list_subnets(
+                    net.name
+                ))
+        subnets = [AzureSubnet(self.provider, subnet)
+                   for subnet in result_list]
+
+        return subnets
+
+    def create(self, network, cidr_block, name=None, **kwargs):
+        """
+        Create subnet
+        """
+        AzureSubnet.assert_valid_resource_name(name)
+        network_id = network.id \
+            if isinstance(network, Network) else network
+
+        if not name:
+            subnet_name = AzureSubnet.CB_DEFAULT_SUBNET_NAME
+        else:
+            subnet_name = name
+
+        subnet_info = self.provider.azure_client\
+            .create_subnet(
+                            network_id,
+                            subnet_name,
+                            {
+                                'address_prefix': cidr_block
+                            }
+                          )
+
+        return AzureSubnet(self.provider, subnet_info)
+
+    def get_or_create_default(self, zone=None):
+        default_cdir = '10.0.1.0/24'
+        network = None
+        subnet = None
+
+        # No provider-default Subnet exists, look for a library-default one
+        try:
+            subnet = self.provider.azure_client.get_subnet(
+                AzureNetwork.CB_DEFAULT_NETWORK_NAME,
+                AzureSubnet.CB_DEFAULT_SUBNET_NAME
+            )
+        except CloudError:
+            # Azure raises the cloud error if the resource not available
+            pass
+
+        if subnet:
+            return AzureSubnet(self.provider, subnet)
+
+        # No provider-default Subnet exists, try to create it (net + subnets)
+        default_net_name = AzureNetwork.CB_DEFAULT_NETWORK_NAME
+        try:
+            network = self.provider.azure_client \
+                .get_network(default_net_name)
+        except CloudError:
+            # Azure raises the cloud error if the resource not available
+            pass
+
+        if not network:
+            network = self.provider.networking.networks.create(
+                name=default_net_name, cidr_block='10.0.0.0/16')
+
+        subnet = self.provider.azure_client.create_subnet(
+            network.id,
+            AzureSubnet.CB_DEFAULT_SUBNET_NAME,
+            {'address_prefix': default_cdir}
+        )
+
+        return AzureSubnet(self.provider, subnet)
+
+    def delete(self, subnet):
+        try:
+            # Azure does not provide an api to delete the subnet by id
+            # alone; it also requires the network id, which is encoded
+            # in the composite subnet id ('<network>|$|<subnet>').
+
+            subnet_id = subnet.id if isinstance(subnet, Subnet) else subnet
+            subnet_id_parts = subnet_id.split('|$|')
+            self.provider.azure_client.\
+                delete_subnet(subnet_id_parts[0], subnet_id_parts[1])
+            return True
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return False
+
+
+class AzureRouterService(BaseRouterService):
+    def __init__(self, provider):
+        super(AzureRouterService, self).__init__(provider)
+
+    def get(self, router_id):
+        try:
+            route = self.provider.azure_client.get_route_table(router_id)
+            return AzureRouter(self.provider, route)
+
+        except CloudError as cloudError:
+            # Azure raises the cloud error if the resource not available
+            log.exception(cloudError.message)
+            return None
+
+    def find(self, name, limit=None, marker=None):
+        filters = {'Name': name}
+        routes = [AzureRouter(self.provider, route)
+                  for route in azure_helpers.filter_by_tag(
+                self.provider.azure_client.list_route_tables(), filters)]
+
+        return ClientPagedResultList(self.provider, routes,
+                                     limit=limit, marker=marker)
+
+    def list(self, limit=None, marker=None):
+        routes = [AzureRouter(self.provider, route)
+                  for route in
+                  self.provider.azure_client.list_route_tables()]
+        return ClientPagedResultList(self.provider,
+                                     routes,
+                                     limit=limit, marker=marker)
+
+    def create(self, name, network):
+        AzureRouter.assert_valid_resource_name(name)
+        parameters = {"location": self.provider.region_name,
+                      'tags': {'Name': name}}
+        route = self.provider.azure_client. \
+            create_route_table(name, parameters)
+        return AzureRouter(self.provider, route)
+
+
+class AzureGatewayService(BaseGatewayService):
+    def __init__(self, provider):
+        super(AzureGatewayService, self).__init__(provider)
+        # Singleton returned by the list method
+        self.gateway_singleton = AzureInternetGateway(self.provider, None)
+
+    def get_or_create_inet_gateway(self, name):
+        AzureInternetGateway.assert_valid_resource_name(name)
+        gateway = AzureInternetGateway(self.provider, None)
+        gateway.name = name
+        return gateway
+
+    def list(self, limit=None, marker=None):
+        return [self.gateway_singleton]
+
+    def delete(self, gateway):
+        pass

+ 5 - 0
cloudbridge/cloud/providers/openstack/resources.py

@@ -879,6 +879,11 @@ class OpenStackFloatingIP(BaseFloatingIP):
     def delete(self):
         self._ip.delete(self._provider.os_conn.session)
 
+    def refresh(self):
+        fip = self._provider.networking.floating_ips.get(self.id)
+        # pylint:disable=protected-access
+        self._ip = fip._ip
+
 
 class OpenStackRouter(BaseRouter):
 

+ 14 - 0
docs/getting_started.rst

@@ -66,6 +66,20 @@ OpenStack (with Keystone authentication v3):
                                                       config)
     image_id = '97755049-ee4f-4515-b92f-ca00991ee99a'  # Ubuntu 14.04 @ Jetstream
 
+Azure:
+
+.. code-block:: python
+
+    from cloudbridge.cloud.factory import CloudProviderFactory, ProviderList
+
+    config = {'azure_subscription_id': 'REPLACE WITH ACTUAL VALUE',
+              'azure_client_id': 'REPLACE WITH ACTUAL VALUE',
+              'azure_secret': 'REPLACE WITH ACTUAL VALUE',
+              'azure_tenant': 'REPLACE WITH ACTUAL VALUE'}
+    provider = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
+    image_id = 'ami-2d39803a'  # Ubuntu 14.04 (HVM)
+
+
 List some resources
 -------------------
 Once you have a reference to a provider, explore the cloud platform:

+ 1 - 1
docs/topics/object_storage.rst

@@ -65,6 +65,6 @@ Once a provider is obtained, you can access the container as usual:
 
 .. code-block:: python
 
-    bucket = provider.object_store.get(container)
+    bucket = provider.storage.buckets.get(container)
     obj = bucket.create_object('my_object.txt')
     obj.upload_from_file(source)

+ 0 - 1
docs/topics/overview.rst

@@ -14,4 +14,3 @@ Introductions to all the key parts of CloudBridge you'll need to know:
     Using block storage <block_storage.rst>
     Using object storage <object_storage.rst>
     Troubleshooting <troubleshooting.rst>
-

+ 24 - 1
docs/topics/setup.rst

@@ -33,6 +33,17 @@ OS_PROJECT_NAME      OS_STORAGE_URL
 OS_REGION_NAME       OS_AUTH_TOKEN
 ===================  ==================
 
+**Azure**
+
+======================  ==================
+Mandatory variables     Optional Variables
+======================  ==================
+AZURE_SUBSCRIPTION_ID   AZURE_REGION_NAME
+AZURE_CLIENT_ID         AZURE_RESOURCE_GROUP
+AZURE_SECRET            AZURE_STORAGE_ACCOUNT_NAME
+AZURE_TENANT            AZURE_VM_DEFAULT_USER_NAME
+                        AZURE_PUBLIC_KEY_STORAGE_TABLE_NAME
+======================  ==================
 
 Once the environment variables are set, you can create a connection as follows:
 
@@ -57,6 +68,18 @@ will override environment values.
               'aws_secret_key' : '<your_secret_key>'}
     provider = CloudProviderFactory().create_provider(ProviderList.AWS, config)
 
+
+    ## For Azure
+    config = {'azure_subscription_id': '<your_subscription_id>',
+              'azure_client_id': '<your_client_id>',
+              'azure_secret': '<your_secret>',
+              'azure_tenant': '<your_tenant>',
+              'azure_resource_group': '<your resource group>'}
+    provider = CloudProviderFactory().create_provider(ProviderList.AZURE, config)
+
+For Azure, create service principal credentials using the following link:
+https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal#check-azure-subscription-permissions
+
 Some optional configuration values can only be provided through the config
 dictionary. These are listed below for each provider.
 
@@ -100,7 +123,7 @@ Providing access credentials in a file
 CloudBridge can also read credentials from a file on your local file system.
 The file should be placed in one of two locations: ``/etc/cloudbridge.ini`` or
 ``~/.cloudbridge``. Each set of credentials should be delineated with the
-provider ID (e.g., ``openstack``, ``aws``) with the necessary credentials
+provider ID (e.g., ``openstack``, ``aws``, ``azure``) with the necessary credentials
 being supplied in YAML format. Note that only one set of credentials per
 cloud provider type can be supplied (i.e., via this method, it is not possible
 to provide credentials for two different OpenStack clouds).

+ 1 - 1
docs/topics/testing.rst

@@ -43,7 +43,7 @@ Specific environment and infrastructure
 If you’d like to run the tests on a specific environment only, say Python 2.7,
 against a specific infrastructure, say aws, use a command like this:
 ``tox -e py27-aws``. The available provider names are listed in the
-`ProviderList`_ class (e.g., ``aws`` or ``openstack``).  
+`ProviderList`_ class (e.g., ``aws`` or ``openstack``).
 
 Specific test cases
 ~~~~~~~~~~~~~~~~~~~~

+ 10 - 1
setup.py

@@ -24,6 +24,15 @@ REQS_BASE = [
     'retrying>=1.3.3'
 ]
 REQS_AWS = ['boto3']
+REQS_AZURE = ['msrest>=0.4.7',
+              'msrestazure>=0.4.7',
+              'azure-common>=1.1.5',
+              'azure-mgmt-resource>=1.0.0rc1',
+              'azure-mgmt-compute>=1.0.0rc1',
+              'azure-mgmt-network>=1.0.0rc1',
+              'azure-mgmt-storage>=1.0.0rc1',
+              'azure-storage>=0.34.0',
+              'pysftp>=0.2.9']
 REQS_OPENSTACK = [
     'openstacksdk',
     'python-novaclient>=7.0.0',
@@ -33,7 +42,7 @@ REQS_OPENSTACK = [
     'python-neutronclient>=6.0.0',
     'python-keystoneclient>=3.13.0'
 ]
-REQS_FULL = REQS_BASE + REQS_AWS + REQS_OPENSTACK
+REQS_FULL = REQS_BASE + REQS_AWS + REQS_AZURE + REQS_OPENSTACK
 # httpretty is required with/for moto 1.0.0 or AWS tests fail
 REQS_DEV = ([
     'tox>=2.1.1',

+ 22 - 2
test/helpers/__init__.py

@@ -88,6 +88,14 @@ TEST_DATA_CONFIG = {
                                 '842b949c-ea76-48df-998d-8a41f2626243'),
         "vm_type": os.environ.get('CB_VM_TYPE_OS', 'm1.tiny'),
         "placement": os.environ.get('CB_PLACEMENT_OS', 'zone-r1'),
+    },
+    "AzureCloudProvider": {
+        "placement":
+            os.environ.get('CB_PLACEMENT_AZURE', 'eastus'),
+        "image":
+            os.environ.get('CB_IMAGE_AZURE', 'cb-test-image'),
+        "vm_type":
+            os.environ.get('CB_VM_TYPE_AZURE', 'Basic_A0'),
     }
 }
 
@@ -97,6 +105,8 @@ def get_provider_test_data(provider, key):
         return TEST_DATA_CONFIG.get("AWSCloudProvider").get(key)
     elif "OpenStackCloudProvider" in provider.name:
         return TEST_DATA_CONFIG.get("OpenStackCloudProvider").get(key)
+    elif "AzureCloudProvider" in provider.name:
+        return TEST_DATA_CONFIG.get("AzureCloudProvider").get(key)
     return None
 
 
@@ -125,17 +135,27 @@ def delete_test_network(network):
 def create_test_instance(
         provider, instance_name, subnet, launch_config=None,
         key_pair=None, vm_firewalls=None, user_data=None):
-    return provider.compute.instances.create(
+
+    kp = None
+    if not key_pair:
+        kp = provider.security.key_pairs.create(name=instance_name)
+
+    instance = provider.compute.instances.create(
         instance_name,
         get_provider_test_data(provider, 'image'),
         get_provider_test_data(provider, 'vm_type'),
         subnet=subnet,
         zone=get_provider_test_data(provider, 'placement'),
-        key_pair=key_pair,
+        key_pair=key_pair or kp,
         vm_firewalls=vm_firewalls,
         launch_config=launch_config,
         user_data=user_data)
 
+    if kp:
+        kp.delete()
+
+    return instance
+
 
 def get_test_instance(provider, name, key_pair=None, vm_firewalls=None,
                       subnet=None, user_data=None):

+ 9 - 10
test/helpers/standard_interface_tests.py

@@ -164,30 +164,29 @@ def check_standard_behaviour(test, service, obj):
     test.assertTrue(
         obj == objs_list[0] == objs_iter[0] == objs_find[0] == obj_get,
         "Objects returned by list: {0}, iter: {1}, find: {2} and get: {3} "
-        " are not as expected: {4}" .format(objs_list[0].id, objs_iter[0].id,
-                                            objs_find[0].id, obj_get.id,
-                                            obj.id))
+        " are not as expected: {4}".format(objs_list[0].id, objs_iter[0].id,
+                                           objs_find[0].id, obj_get.id,
+                                           obj.id))
 
     test.assertTrue(
         obj.id == objs_list[0].id == objs_iter[0].id ==
         objs_find[0].id == obj_get.id,
         "Object Ids returned by list: {0}, iter: {1}, find: {2} and get: {3} "
-        " are not as expected: {4}" .format(objs_list[0].id, objs_iter[0].id,
-                                            objs_find[0].id, obj_get.id,
-                                            obj.id))
+        " are not as expected: {4}".format(objs_list[0].id, objs_iter[0].id,
+                                           objs_find[0].id, obj_get.id,
+                                           obj.id))
 
     test.assertTrue(
         obj.name == objs_list[0].name == objs_iter[0].name ==
         objs_find[0].name == obj_get.name,
         "Names returned by list: {0}, iter: {1}, find: {2} and get: {3} "
-        " are not as expected: {4}" .format(objs_list[0].id, objs_iter[0].id,
-                                            objs_find[0].id, obj_get.id,
-                                            obj.id))
+        " are not as expected: {4}".format(objs_list[0].id, objs_iter[0].id,
+                                           objs_find[0].id, obj_get.id,
+                                           obj.id))
 
 
 def check_create(test, service, iface, name_prefix,
                  create_func, cleanup_func):
-
     # check create with invalid name
     with test.assertRaises(InvalidNameException):
         # spaces should raise an exception

+ 3 - 2
test/test_block_store_service.py

@@ -113,8 +113,9 @@ class CloudBlockStoreServiceTestCase(ProviderTestBase):
                 self.assertEqual(test_vol.attachments.volume, test_vol)
                 self.assertEqual(test_vol.attachments.instance_id,
                                  test_instance.id)
-                self.assertEqual(test_vol.attachments.device,
-                                 "/dev/sda2")
+                if not self.provider.PROVIDER_ID == 'azure':
+                    self.assertEqual(test_vol.attachments.device,
+                                     "/dev/sda2")
                 test_vol.detach()
                 test_vol.name = 'newvolname1'
                 test_vol.wait_for(

+ 10 - 2
test/test_compute_service.py

@@ -181,7 +181,7 @@ class CloudComputeServiceTestCase(ProviderTestBase):
         lc.add_volume_device(
             is_root=True,
             source=img,
-            size=img.min_disk if img and img.min_disk else 2,
+            size=img.min_disk if img and img.min_disk else 30,
             delete_on_terminate=True)
 
         # Attempting to add more than one root volume should raise an
@@ -261,7 +261,7 @@ class CloudComputeServiceTestCase(ProviderTestBase):
                 lc.add_volume_device(
                     is_root=True,
                     source=img,
-                    size=img.min_disk if img and img.min_disk else 2,
+                    size=img.min_disk if img and img.min_disk else 30,
                     delete_on_terminate=True)
 
                 # Add all available ephemeral devices
@@ -348,6 +348,10 @@ class CloudComputeServiceTestCase(ProviderTestBase):
                 router.attach_gateway(gateway)
                 # check whether adding an elastic ip works
                 fip = self.provider.networking.floating_ips.create()
+                self.assertFalse(
+                    fip.in_use,
+                    "Newly created floating IP address should not be in use.")
+
                 with helpers.cleanup_action(lambda: fip.delete()):
                     with helpers.cleanup_action(
                             lambda: test_inst.remove_floating_ip(fip)):
@@ -356,6 +360,10 @@ class CloudComputeServiceTestCase(ProviderTestBase):
                         # On Devstack, FloatingIP is listed under private_ips.
                         self.assertIn(fip.public_ip, test_inst.public_ips +
                                       test_inst.private_ips)
+                        fip.refresh()
+                        self.assertTrue(
+                            fip.in_use,
+                            "Attached floating IP address should be in use.")
                     test_inst.refresh()
                     self.assertNotIn(
                         fip.public_ip,

+ 2 - 0
test/test_interface.py

@@ -57,6 +57,8 @@ class CloudInterfaceTestCase(ProviderTestBase):
         elif self.provider.PROVIDER_ID == 'openstack':
             cloned_config['os_username'] = "cb_dummy"
             cloned_config['os_password'] = "cb_dummy"
+        elif self.provider.PROVIDER_ID == 'azure':
+            cloned_config['azure_subscription_id'] = "cb_dummy"
 
         with self.assertRaises(ProviderConnectionException):
             cloned_provider = CloudProviderFactory().create_provider(

+ 1 - 1
test/test_network_service.py

@@ -76,7 +76,7 @@ class CloudNetworkServiceTestCase(ProviderTestBase):
 
         def create_subnet(name):
             return self.provider.networking.subnets.create(
-                network=net, cidr_block="10.0.0.1/24", name=name)
+                network=net, cidr_block="10.0.0.0/24", name=name)
 
         def cleanup_subnet(subnet):
             self.provider.networking.subnets.delete(subnet=subnet)

+ 1 - 0
test/test_object_store_service.py

@@ -26,6 +26,7 @@ class CloudObjectStoreServiceTestCase(ProviderTestBase):
         Create a new bucket, check whether the expected values are set,
         and delete it.
         """
+
         def create_bucket(name):
             return self.provider.storage.buckets.create(name)
 

+ 6 - 4
tox.ini

@@ -8,21 +8,23 @@
 # Alternatively, to run mock tests only, run tox as follows:
 # CB_USE_MOCK_PROVIDERS=True tox -e py27-aws
 #
-# Simply running tox -e py27-aws also works, because the default is to use
+# Simply running tox -e py27-aws also works because the default is to use
 # mock providers.
 
 [tox]
-envlist = {py27,py36,pypy}-{aws,openstack}
+envlist = {py27,py36,pypy}-{aws,azure,openstack}
 
 [testenv]
-commands = flake8 cloudbridge test setup.py 
-    {envpython} -m coverage run --branch --source=cloudbridge --omit=cloudbridge/cloud/interfaces/* setup.py test {posargs}
+commands = flake8 cloudbridge test setup.py
+           {envpython} -m coverage run --branch --source=cloudbridge --omit=cloudbridge/cloud/interfaces/* setup.py test {posargs}
 setenv =
     aws: CB_TEST_PROVIDER=aws
+    azure: CB_TEST_PROVIDER=azure
     openstack: CB_TEST_PROVIDER=openstack
 passenv =
     CB_USE_MOCK_PROVIDERS
     aws: CB_IMAGE_AWS CB_INSTANCE_TYPE_AWS CB_PLACEMENT_AWS AWS_ACCESS_KEY AWS_SECRET_KEY
+    azure: AZURE_SUBSCRIPTION_ID AZURE_CLIENT_ID AZURE_SECRET AZURE_TENANT AZURE_REGION_NAME AZURE_RESOURCE_GROUP AZURE_STORAGE_ACCOUNT AZURE_VM_DEFAULT_USER_NAME AZURE_PUBLIC_KEY_STORAGE_TABLE_NAME
     openstack:  CB_IMAGE_OS CB_INSTANCE_TYPE_OS CB_PLACEMENT_OS OS_AUTH_URL OS_PASSWORD OS_PROJECT_NAME OS_TENANT_NAME OS_USERNAME OS_REGION_NAME OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_NAME NOVA_SERVICE_NAME
 deps =
     -rrequirements.txt