1#    (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
2#    All Rights Reserved.
3#
4#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5#    not use this file except in compliance with the License. You may obtain
6#    a copy of the License at
7#
8#         http://www.apache.org/licenses/LICENSE-2.0
9#
10#    Unless required by applicable law or agreed to in writing, software
11#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13#    License for the specific language governing permissions and limitations
14#    under the License.
15#
16"""HPE LeftHand SAN ISCSI REST Proxy.
17
18Volume driver for HPE LeftHand Storage array.
19This driver requires 11.5 or greater firmware on the LeftHand array, using
20the 2.0 or greater version of the hpelefthandclient.
21
22You will need to install the python hpelefthandclient module.
23sudo pip install python-lefthandclient
24
25Set the following in the cinder.conf file to enable the
26LeftHand iSCSI REST Driver along with the required flags:
27
28volume_driver=cinder.volume.drivers.hpe.hpe_lefthand_iscsi.
29    HPELeftHandISCSIDriver
30
31It also requires the setting of hpelefthand_api_url, hpelefthand_username,
32hpelefthand_password for credentials to talk to the REST service on the
33LeftHand array.
34
35"""
36
37from oslo_config import cfg
38from oslo_log import log as logging
39from oslo_serialization import jsonutils as json
40from oslo_utils import excutils
41from oslo_utils import importutils
42from oslo_utils import units
43
44from cinder import context
45from cinder import coordination
46from cinder import exception
47from cinder.i18n import _
48from cinder import interface
49from cinder.objects import fields
50from cinder import utils as cinder_utils
51from cinder.volume import configuration
52from cinder.volume import driver
53from cinder.volume.drivers.san import san
54from cinder.volume import utils
55from cinder.volume import volume_types
56
57import math
58import re
59import six
60
61LOG = logging.getLogger(__name__)
62
63hpelefthandclient = importutils.try_import("hpelefthandclient")
64if hpelefthandclient:
65    from hpelefthandclient import client as hpe_lh_client
66    from hpelefthandclient import exceptions as hpeexceptions
67
68hpelefthand_opts = [
69    cfg.URIOpt('hpelefthand_api_url',
70               default=None,
71               help="HPE LeftHand WSAPI Server Url like "
72                    "https://<LeftHand ip>:8081/lhos",
73               deprecated_name='hplefthand_api_url'),
74    cfg.StrOpt('hpelefthand_username',
75               default=None,
76               help="HPE LeftHand Super user username",
77               deprecated_name='hplefthand_username'),
78    cfg.StrOpt('hpelefthand_password',
79               default=None,
80               help="HPE LeftHand Super user password",
81               secret=True,
82               deprecated_name='hplefthand_password'),
83    cfg.StrOpt('hpelefthand_clustername',
84               default=None,
85               help="HPE LeftHand cluster name",
86               deprecated_name='hplefthand_clustername'),
87    cfg.BoolOpt('hpelefthand_iscsi_chap_enabled',
88                default=False,
89                help='Configure CHAP authentication for iSCSI connections '
90                '(Default: Disabled)',
91                deprecated_name='hplefthand_iscsi_chap_enabled'),
92    cfg.BoolOpt('hpelefthand_debug',
93                default=False,
94                help="Enable HTTP debugging to LeftHand",
95                deprecated_name='hplefthand_debug'),
96    cfg.PortOpt('hpelefthand_ssh_port',
97                default=16022,
98                help="Port number of SSH service."),
99
100]
101
102CONF = cfg.CONF
103CONF.register_opts(hpelefthand_opts, group=configuration.SHARED_CONF_GROUP)
104
105MIN_API_VERSION = "1.1"
106MIN_CLIENT_VERSION = '2.1.0'
107
108# map the extra spec key to the REST client option key
109extra_specs_key_map = {
110    'hpelh:provisioning': 'isThinProvisioned',
111    'hpelh:ao': 'isAdaptiveOptimizationEnabled',
112    'hpelh:data_pl': 'dataProtectionLevel',
113    'hplh:provisioning': 'isThinProvisioned',
114    'hplh:ao': 'isAdaptiveOptimizationEnabled',
115    'hplh:data_pl': 'dataProtectionLevel',
116}
117
118# map the extra spec value to the REST client option value
119extra_specs_value_map = {
120    'isThinProvisioned': {'thin': True, 'full': False},
121    'isAdaptiveOptimizationEnabled': {'true': True, 'false': False},
122    'dataProtectionLevel': {
123        'r-0': 0, 'r-5': 1, 'r-10-2': 2, 'r-10-3': 3, 'r-10-4': 4, 'r-6': 5}
124}
125
126extra_specs_default_key_value_map = {
127    'hpelh:provisioning': 'thin',
128    'hpelh:ao': 'true',
129    'hpelh:data_pl': 'r-0'
130}
131
132
133@interface.volumedriver
134class HPELeftHandISCSIDriver(driver.ISCSIDriver):
135    """Executes REST commands relating to HPE/LeftHand SAN ISCSI volumes.
136
137    Version history:
138
139    .. code-block:: none
140
141        1.0.0 - Initial REST iSCSI proxy
142        1.0.1 - Added support for retype
143        1.0.2 - Added support for volume migrate
144        1.0.3 - Fixed bug #1285829, HP LeftHand backend assisted migration
145                should check for snapshots
146        1.0.4 - Fixed bug #1285925, LeftHand AO volume create performance
147                improvement
148        1.0.5 - Fixed bug #1311350, Live-migration of an instance when
149                attached to a volume was causing an error.
150        1.0.6 - Removing locks bug #1395953
151        1.0.7 - Fixed bug #1353137, Server was not removed from the HP
152                Lefthand backend after the last volume was detached.
153        1.0.8 - Fixed bug #1418201, A cloned volume fails to attach.
154        1.0.9 - Adding support for manage/unmanage.
155        1.0.10 - Add stats for goodness_function and filter_function
156        1.0.11 - Add over subscription support
157        1.0.12 - Adds consistency group support
158        1.0.13 - Added update_migrated_volume #1493546
159        1.0.14 - Removed the old CLIQ based driver
160        2.0.0 - Rebranded HP to HPE
161        2.0.1 - Remove db access for consistency groups
162        2.0.2 - Adds v2 managed replication support
163        2.0.3 - Adds v2 unmanaged replication support
164        2.0.4 - Add manage/unmanage snapshot support
165        2.0.5 - Changed minimum client version to be 2.1.0
166        2.0.6 - Update replication to version 2.1
167        2.0.7 - Fixed bug #1554746, Create clone volume with new size.
168        2.0.8 - Add defaults for creating a replication client, bug #1556331
169        2.0.9 - Fix terminate connection on failover
170        2.0.10 - Add entry point tracing
171        2.0.11 - Fix extend volume if larger than snapshot bug #1560654
172        2.0.12 - add CG capability to generic volume groups.
173        2.0.13 - Fix cloning operation related to provisioning, bug #1688243
174        2.0.14 - Fixed bug #1710072, Volume doesn't show expected parameters
175                 after Retype
176        2.0.15 - Fixed bug #1710098, Managed volume, does not pick up the extra
177                 specs/capabilities of the selected volume type.
178        2.0.16 - Handled concurrent attachment requests. bug #1779654
179    """
180
181    VERSION = "2.0.16"
182
183    CI_WIKI_NAME = "HPE_Storage_CI"
184
185    device_stats = {}
186
187    # v2 replication constants
188    EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period"
189    EXTRA_SPEC_REP_RETENTION_COUNT = "replication:retention_count"
190    EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT = (
191        "replication:remote_retention_count")
192    MIN_REP_SYNC_PERIOD = 1800
193    DEFAULT_RETENTION_COUNT = 5
194    MAX_RETENTION_COUNT = 50
195    DEFAULT_REMOTE_RETENTION_COUNT = 5
196    MAX_REMOTE_RETENTION_COUNT = 50
197    REP_SNAPSHOT_SUFFIX = "_SS"
198    REP_SCHEDULE_SUFFIX = "_SCHED"
199    FAILBACK_VALUE = 'default'
200
201    def __init__(self, *args, **kwargs):
202        super(HPELeftHandISCSIDriver, self).__init__(*args, **kwargs)
203        self.configuration.append_config_values(hpelefthand_opts)
204        self.configuration.append_config_values(san.san_opts)
205        if not self.configuration.hpelefthand_api_url:
206            raise exception.NotFound(_("HPELeftHand url not found"))
207
208        # blank is the only invalid character for cluster names
209        # so we need to use it as a separator
210        self.DRIVER_LOCATION = self.__class__.__name__ + ' %(cluster)s %(vip)s'
211        self._client_conf = {}
212        self._replication_targets = []
213        self._replication_enabled = False
214        self._active_backend_id = kwargs.get('active_backend_id', None)
215
216    def _login(self, timeout=None):
217        conf = self._get_lefthand_config()
218        if conf:
219            self._client_conf['hpelefthand_username'] = (
220                conf['hpelefthand_username'])
221            self._client_conf['hpelefthand_password'] = (
222                conf['hpelefthand_password'])
223            self._client_conf['hpelefthand_clustername'] = (
224                conf['hpelefthand_clustername'])
225            self._client_conf['hpelefthand_api_url'] = (
226                conf['hpelefthand_api_url'])
227            self._client_conf['hpelefthand_ssh_port'] = (
228                conf['hpelefthand_ssh_port'])
229            self._client_conf['hpelefthand_iscsi_chap_enabled'] = (
230                conf['hpelefthand_iscsi_chap_enabled'])
231            self._client_conf['ssh_conn_timeout'] = conf['ssh_conn_timeout']
232            self._client_conf['san_private_key'] = conf['san_private_key']
233        else:
234            self._client_conf['hpelefthand_username'] = (
235                self.configuration.hpelefthand_username)
236            self._client_conf['hpelefthand_password'] = (
237                self.configuration.hpelefthand_password)
238            self._client_conf['hpelefthand_clustername'] = (
239                self.configuration.hpelefthand_clustername)
240            self._client_conf['hpelefthand_api_url'] = (
241                self.configuration.hpelefthand_api_url)
242            self._client_conf['hpelefthand_ssh_port'] = (
243                self.configuration.hpelefthand_ssh_port)
244            self._client_conf['hpelefthand_iscsi_chap_enabled'] = (
245                self.configuration.hpelefthand_iscsi_chap_enabled)
246            self._client_conf['ssh_conn_timeout'] = (
247                self.configuration.ssh_conn_timeout)
248            self._client_conf['san_private_key'] = (
249                self.configuration.san_private_key)
250
251        client = self._create_client(timeout=timeout)
252        try:
253            if self.configuration.hpelefthand_debug:
254                client.debug_rest(True)
255
256            client.login(
257                self._client_conf['hpelefthand_username'],
258                self._client_conf['hpelefthand_password'])
259
260            cluster_info = client.getClusterByName(
261                self._client_conf['hpelefthand_clustername'])
262            self.cluster_id = cluster_info['id']
263            virtual_ips = cluster_info['virtualIPAddresses']
264            self.cluster_vip = virtual_ips[0]['ipV4Address']
265
266            # Extract IP address from API URL
267            ssh_ip = self._extract_ip_from_url(
268                self._client_conf['hpelefthand_api_url'])
269            known_hosts_file = CONF.ssh_hosts_key_file
270            policy = "AutoAddPolicy"
271            if CONF.strict_ssh_host_key_policy:
272                policy = "RejectPolicy"
273            client.setSSHOptions(
274                ssh_ip,
275                self._client_conf['hpelefthand_username'],
276                self._client_conf['hpelefthand_password'],
277                port=self._client_conf['hpelefthand_ssh_port'],
278                conn_timeout=self._client_conf['ssh_conn_timeout'],
279                privatekey=self._client_conf['san_private_key'],
280                missing_key_policy=policy,
281                known_hosts_file=known_hosts_file)
282
283            return client
284        except hpeexceptions.HTTPNotFound:
285            raise exception.DriverNotInitialized(
286                _('LeftHand cluster not found'))
287        except Exception as ex:
288            raise exception.DriverNotInitialized(ex)
289
290    def _logout(self, client):
291        if client is not None:
292            client.logout()
293
294    def _create_client(self, timeout=None):
295        # Timeout is only supported in version 2.0.1 and greater of the
296        # python-lefthandclient.
297        hpelefthand_api_url = self._client_conf['hpelefthand_api_url']
298        client = hpe_lh_client.HPELeftHandClient(
299            hpelefthand_api_url, timeout=timeout)
300        return client
301
302    def _create_replication_client(self, remote_array):
303        cl = hpe_lh_client.HPELeftHandClient(
304            remote_array['hpelefthand_api_url'])
305        try:
306            cl.login(
307                remote_array['hpelefthand_username'],
308                remote_array['hpelefthand_password'])
309
310            ssh_conn_timeout = remote_array.get('ssh_conn_timeout', 30)
311            san_private_key = remote_array.get('san_private_key', '')
312
313            # Extract IP address from API URL
314            ssh_ip = self._extract_ip_from_url(
315                remote_array['hpelefthand_api_url'])
316            known_hosts_file = CONF.ssh_hosts_key_file
317            policy = "AutoAddPolicy"
318            if CONF.strict_ssh_host_key_policy:
319                policy = "RejectPolicy"
320            cl.setSSHOptions(
321                ssh_ip,
322                remote_array['hpelefthand_username'],
323                remote_array['hpelefthand_password'],
324                port=remote_array['hpelefthand_ssh_port'],
325                conn_timeout=ssh_conn_timeout,
326                privatekey=san_private_key,
327                missing_key_policy=policy,
328                known_hosts_file=known_hosts_file)
329
330            return cl
331        except hpeexceptions.HTTPNotFound:
332            raise exception.DriverNotInitialized(
333                _('LeftHand cluster not found'))
334        except Exception as ex:
335            raise exception.DriverNotInitialized(ex)
336
337    def _destroy_replication_client(self, client):
338        if client is not None:
339            client.logout()
340
341    def _extract_ip_from_url(self, url):
342        result = re.search("://(.*):", url)
343        ip = result.group(1)
344        return ip
345
346    def do_setup(self, context):
347        """Set up LeftHand client."""
348        if not hpelefthandclient:
349            # Checks if client was successfully imported
350            ex_msg = _("HPELeftHand client is not installed. Please"
351                       " install using 'pip install "
352                       "python-lefthandclient'.")
353            LOG.error(ex_msg)
354            raise exception.VolumeDriverException(ex_msg)
355
356        if hpelefthandclient.version < MIN_CLIENT_VERSION:
357            ex_msg = (_("Invalid hpelefthandclient version found ("
358                        "%(found)s). Version %(minimum)s or greater "
359                        "required. Run 'pip install --upgrade "
360                        "python-lefthandclient' to upgrade the "
361                        "hpelefthandclient.")
362                      % {'found': hpelefthandclient.version,
363                         'minimum': MIN_CLIENT_VERSION})
364            LOG.error(ex_msg)
365            raise exception.InvalidInput(reason=ex_msg)
366
367        self._do_replication_setup()
368
369    def check_for_setup_error(self):
370        """Checks for incorrect LeftHand API being used on backend."""
371        client = self._login()
372        try:
373            self.api_version = client.getApiVersion()
374
375            LOG.info("HPELeftHand API version %s", self.api_version)
376
377            if self.api_version < MIN_API_VERSION:
378                LOG.warning("HPELeftHand API is version %(current)s. "
379                            "A minimum version of %(min)s is needed for "
380                            "manage/unmanage support.",
381                            {'current': self.api_version,
382                             'min': MIN_API_VERSION})
383        finally:
384            self._logout(client)
385
386    def check_replication_flags(self, options, required_flags):
387        for flag in required_flags:
388            if not options.get(flag, None):
389                msg = _('%s is not set and is required for the replication '
390                        'device to be valid.') % flag
391                LOG.error(msg)
392                raise exception.InvalidInput(reason=msg)
393
394    def get_version_string(self):
395        return (_('REST %(proxy_ver)s hpelefthandclient %(rest_ver)s') % {
396            'proxy_ver': self.VERSION,
397            'rest_ver': hpelefthandclient.get_version_string()})
398
399    @cinder_utils.trace
400    def create_volume(self, volume):
401        """Creates a volume."""
402        client = self._login()
403        try:
404            # get the extra specs of interest from this volume's volume type
405            volume_extra_specs = self._get_volume_extra_specs(volume)
406            extra_specs = self._get_lh_extra_specs(
407                volume_extra_specs,
408                extra_specs_key_map.keys())
409
410            # map the extra specs key/value pairs to key/value pairs
411            # used as optional configuration values by the LeftHand backend
412            optional = self._map_extra_specs(extra_specs)
413
414            # if provisioning is not set, default to thin
415            if 'isThinProvisioned' not in optional:
416                optional['isThinProvisioned'] = True
417
418            # AdaptiveOptimization defaults to 'true' if you don't specify the
419            # value on a create, and that is the most efficient way to create
420            # a volume. If you pass in 'false' or 'true' for AO, it will result
421            # in an update operation following the create operation to set this
422            # value, so it is best to not specify the value and let it default
423            # to 'true'.
424            if optional.get('isAdaptiveOptimizationEnabled'):
425                del optional['isAdaptiveOptimizationEnabled']
426
427            clusterName = self._client_conf['hpelefthand_clustername']
428            optional['clusterName'] = clusterName
429
430            volume_info = client.createVolume(
431                volume['name'], self.cluster_id,
432                volume['size'] * units.Gi,
433                optional)
434
435            model_update = self._update_provider(volume_info)
436
437            # v2 replication check
438            if self._volume_of_replicated_type(volume) and (
439               self._do_volume_replication_setup(volume, client, optional)):
440                model_update['replication_status'] = 'enabled'
441                model_update['replication_driver_data'] = (json.dumps(
442                    {'location': self._client_conf['hpelefthand_api_url']}))
443
444            return model_update
445        except Exception as ex:
446            raise exception.VolumeBackendAPIException(data=ex)
447        finally:
448            self._logout(client)
449
450    @cinder_utils.trace
451    def delete_volume(self, volume):
452        """Deletes a volume."""
453        client = self._login()
454        # v2 replication check
455        # If the volume type is replication enabled, we want to call our own
456        # method of deconstructing the volume and its dependencies
457        if self._volume_of_replicated_type(volume):
458            self._do_volume_replication_destroy(volume, client)
459            return
460
461        try:
462            volume_info = client.getVolumeByName(volume['name'])
463            client.deleteVolume(volume_info['id'])
464        except hpeexceptions.HTTPNotFound:
465            LOG.error("Volume did not exist. It will not be deleted")
466        except Exception as ex:
467            raise exception.VolumeBackendAPIException(ex)
468        finally:
469            self._logout(client)
470
471    @cinder_utils.trace
472    def extend_volume(self, volume, new_size):
473        """Extend the size of an existing volume."""
474        client = self._login()
475        try:
476            volume_info = client.getVolumeByName(volume['name'])
477
478            # convert GB to bytes
479            options = {'size': int(new_size) * units.Gi}
480            client.modifyVolume(volume_info['id'], options)
481        except Exception as ex:
482            raise exception.VolumeBackendAPIException(ex)
483        finally:
484            self._logout(client)
485
486    @cinder_utils.trace
487    def create_group(self, context, group):
488        """Creates a group."""
489        LOG.debug("Creating group.")
490        if not utils.is_group_a_cg_snapshot_type(group):
491            raise NotImplementedError()
492        for vol_type_id in group.volume_type_ids:
493            replication_type = self._volume_of_replicated_type(
494                None, vol_type_id)
495            if replication_type:
496                # An unsupported configuration
497                LOG.error('Unable to create group: create group with '
498                          'replication volume type is not supported.')
499                model_update = {'status': fields.GroupStatus.ERROR}
500                return model_update
501
502        return {'status': fields.GroupStatus.AVAILABLE}
503
504    @cinder_utils.trace
505    def create_group_from_src(self, context, group, volumes,
506                              group_snapshot=None, snapshots=None,
507                              source_group=None, source_vols=None):
508        """Creates a group from a source"""
509        msg = _("Creating a group from a source is not "
510                "supported when consistent_group_snapshot_enabled to true.")
511        if not utils.is_group_a_cg_snapshot_type(group):
512            raise NotImplementedError()
513        else:
514            raise exception.VolumeBackendAPIException(data=msg)
515
516    @cinder_utils.trace
517    def delete_group(self, context, group, volumes):
518        """Deletes a group."""
519        if not utils.is_group_a_cg_snapshot_type(group):
520            raise NotImplementedError()
521
522        volume_model_updates = []
523        for volume in volumes:
524            volume_update = {'id': volume.id}
525            try:
526                self.delete_volume(volume)
527                volume_update['status'] = 'deleted'
528            except Exception as ex:
529                LOG.error("There was an error deleting volume %(id)s: "
530                          "%(error)s.",
531                          {'id': volume.id,
532                           'error': ex})
533                volume_update['status'] = 'error'
534            volume_model_updates.append(volume_update)
535
536        model_update = {'status': group.status}
537
538        return model_update, volume_model_updates
539
540    @cinder_utils.trace
541    def update_group(self, context, group, add_volumes=None,
542                     remove_volumes=None):
543        """Updates a group.
544
545        Because the backend has no concept of volume grouping, cinder will
546        maintain all volume/group relationships. Because of this
547        functionality, there is no need to make any client calls; instead
548        simply returning out of this function allows cinder to properly
549        add/remove volumes from the group.
550        """
551        LOG.debug("Updating group.")
552        if not utils.is_group_a_cg_snapshot_type(group):
553            raise NotImplementedError()
554
555        return None, None, None
556
557    @cinder_utils.trace
558    def create_group_snapshot(self, context, group_snapshot, snapshots):
559        """Creates a group snapshot."""
560        if not utils.is_group_a_cg_snapshot_type(group_snapshot):
561            raise NotImplementedError()
562        client = self._login()
563        try:
564            snap_set = []
565            snapshot_base_name = "snapshot-" + group_snapshot.id
566            snapshot_model_updates = []
567            for i, snapshot in enumerate(snapshots):
568                volume = snapshot.volume
569                volume_name = volume['name']
570                try:
571                    volume_info = client.getVolumeByName(volume_name)
572                except Exception as ex:
573                    error = six.text_type(ex)
574                    LOG.error("Could not find volume with name %(name)s. "
575                              "Error: %(error)s",
576                              {'name': volume_name,
577                               'error': error})
578                    raise exception.VolumeBackendAPIException(data=error)
579
580                volume_id = volume_info['id']
581                snapshot_name = snapshot_base_name + "-" + six.text_type(i)
582                snap_set_member = {'volumeName': volume_name,
583                                   'volumeId': volume_id,
584                                   'snapshotName': snapshot_name}
585                snap_set.append(snap_set_member)
586                snapshot_update = {'id': snapshot['id'],
587                                   'status': fields.SnapshotStatus.AVAILABLE}
588                snapshot_model_updates.append(snapshot_update)
589
590            source_volume_id = snap_set[0]['volumeId']
591            optional = {'inheritAccess': True}
592            description = group_snapshot.description
593            if description:
594                optional['description'] = description
595
596            try:
597                client.createSnapshotSet(source_volume_id, snap_set, optional)
598            except Exception as ex:
599                error = six.text_type(ex)
600                LOG.error("Could not create snapshot set. Error: '%s'",
601                          error)
602                raise exception.VolumeBackendAPIException(
603                    data=error)
604
605        except Exception as ex:
606            raise exception.VolumeBackendAPIException(data=six.text_type(ex))
607        finally:
608            self._logout(client)
609
610        model_update = {'status': 'available'}
611
612        return model_update, snapshot_model_updates
613
614    @cinder_utils.trace
615    def delete_group_snapshot(self, context, group_snapshot, snapshots):
616        """Deletes a group snapshot."""
617        if not utils.is_group_a_cg_snapshot_type(group_snapshot):
618            raise NotImplementedError()
619        client = self._login()
620        snap_name_base = "snapshot-" + group_snapshot.id
621
622        snapshot_model_updates = []
623        for i, snapshot in enumerate(snapshots):
624            snapshot_update = {'id': snapshot['id']}
625            try:
626                snap_name = snap_name_base + "-" + six.text_type(i)
627                snap_info = client.getSnapshotByName(snap_name)
628                client.deleteSnapshot(snap_info['id'])
629                snapshot_update['status'] = fields.SnapshotStatus.DELETED
630            except hpeexceptions.HTTPServerError as ex:
631                in_use_msg = ('cannot be deleted because it is a clone '
632                              'point')
633                if in_use_msg in ex.get_description():
634                    LOG.error("The snapshot cannot be deleted because "
635                              "it is a clone point.")
636                snapshot_update['status'] = fields.SnapshotStatus.ERROR
637            except Exception as ex:
638                LOG.error("There was an error deleting snapshot %(id)s: "
639                          "%(error)s.",
640                          {'id': snapshot['id'],
641                           'error': six.text_type(ex)})
642                snapshot_update['status'] = fields.SnapshotStatus.ERROR
643            snapshot_model_updates.append(snapshot_update)
644
645        self._logout(client)
646
647        model_update = {'status': group_snapshot.status}
648
649        return model_update, snapshot_model_updates
650
651    @cinder_utils.trace
652    def create_snapshot(self, snapshot):
653        """Creates a snapshot."""
654        client = self._login()
655        try:
656            volume_info = client.getVolumeByName(snapshot['volume_name'])
657
658            option = {'inheritAccess': True}
659            client.createSnapshot(snapshot['name'],
660                                  volume_info['id'],
661                                  option)
662        except Exception as ex:
663            raise exception.VolumeBackendAPIException(ex)
664        finally:
665            self._logout(client)
666
667    @cinder_utils.trace
668    def delete_snapshot(self, snapshot):
669        """Deletes a snapshot."""
670        client = self._login()
671        try:
672            snap_info = client.getSnapshotByName(snapshot['name'])
673            client.deleteSnapshot(snap_info['id'])
674        except hpeexceptions.HTTPNotFound:
675            LOG.error("Snapshot did not exist. It will not be deleted")
676        except hpeexceptions.HTTPServerError as ex:
677            in_use_msg = 'cannot be deleted because it is a clone point'
678            if in_use_msg in ex.get_description():
679                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
680
681            raise exception.VolumeBackendAPIException(ex)
682
683        except Exception as ex:
684            raise exception.VolumeBackendAPIException(ex)
685        finally:
686            self._logout(client)
687
688    @cinder_utils.trace
689    def get_volume_stats(self, refresh=False):
690        """Gets volume stats."""
691        client = self._login()
692        try:
693            if refresh:
694                self._update_backend_status(client)
695
696            return self.device_stats
697        finally:
698            self._logout(client)
699
700    def _update_backend_status(self, client):
701        data = {}
702        backend_name = self.configuration.safe_get('volume_backend_name')
703        data['driver_version'] = self.VERSION
704        data['volume_backend_name'] = backend_name or self.__class__.__name__
705        data['reserved_percentage'] = (
706            self.configuration.safe_get('reserved_percentage'))
707        data['storage_protocol'] = 'iSCSI'
708        data['vendor_name'] = 'Hewlett Packard Enterprise'
709        data['location_info'] = (self.DRIVER_LOCATION % {
710            'cluster': self._client_conf['hpelefthand_clustername'],
711            'vip': self.cluster_vip})
712        data['thin_provisioning_support'] = True
713        data['thick_provisioning_support'] = True
714        data['max_over_subscription_ratio'] = (
715            self.configuration.safe_get('max_over_subscription_ratio'))
716
717        cluster_info = client.getCluster(self.cluster_id)
718
719        total_capacity = cluster_info['spaceTotal']
720        free_capacity = cluster_info['spaceAvailable']
721
722        # convert to GB
723        data['total_capacity_gb'] = int(total_capacity) / units.Gi
724        data['free_capacity_gb'] = int(free_capacity) / units.Gi
725
726        # Collect some stats
727        capacity_utilization = (
728            (float(total_capacity - free_capacity) /
729             float(total_capacity)) * 100)
730        # Don't have a better way to get the total number volumes
731        # so try to limit the size of data for now. Once new lefthand API is
732        # available, replace this call.
733        total_volumes = 0
734        provisioned_size = 0
735        volumes = client.getVolumes(
736            cluster=self._client_conf['hpelefthand_clustername'],
737            fields=['members[id]', 'members[clusterName]', 'members[size]'])
738        if volumes:
739            total_volumes = volumes['total']
740            provisioned_size = sum(
741                members['size'] for members in volumes['members'])
742        data['provisioned_capacity_gb'] = int(provisioned_size) / units.Gi
743        data['capacity_utilization'] = capacity_utilization
744        data['total_volumes'] = total_volumes
745        data['filter_function'] = self.get_filter_function()
746        data['goodness_function'] = self.get_goodness_function()
747        data['consistent_group_snapshot_enabled'] = True
748        data['replication_enabled'] = self._replication_enabled
749        data['replication_type'] = ['periodic']
750        data['replication_count'] = len(self._replication_targets)
751        data['replication_targets'] = self._get_replication_targets()
752
753        self.device_stats = data
754
755    @cinder_utils.trace
756    def initialize_connection(self, volume, connector):
757        """Assigns the volume to a server.
758
759        Assign any created volume to a compute node/host so that it can be
760        used from that host. HPE VSA requires a volume to be assigned
761        to a server.
762        """
763        client = self._login()
764        try:
765            server_info = self._create_server(connector, client)
766            volume_info = client.getVolumeByName(volume['name'])
767
768            access_already_enabled = False
769            if volume_info['iscsiSessions'] is not None:
770                # Extract the server id for each session to check if the
771                # new server already has access permissions enabled.
772                for session in volume_info['iscsiSessions']:
773                    server_id = int(session['server']['uri'].split('/')[3])
774                    if server_id == server_info['id']:
775                        access_already_enabled = True
776                        break
777
778            if not access_already_enabled:
779                client.addServerAccess(
780                    volume_info['id'],
781                    server_info['id'])
782
783            iscsi_properties = self._get_iscsi_properties(volume)
784
785            if ('chapAuthenticationRequired' in server_info and
786                    server_info['chapAuthenticationRequired']):
787                iscsi_properties['auth_method'] = 'CHAP'
788                iscsi_properties['auth_username'] = connector['initiator']
789                iscsi_properties['auth_password'] = (
790                    server_info['chapTargetSecret'])
791
792            return {'driver_volume_type': 'iscsi', 'data': iscsi_properties}
793        except Exception as ex:
794            raise exception.VolumeBackendAPIException(ex)
795        finally:
796            self._logout(client)
797
798    @cinder_utils.trace
799    def terminate_connection(self, volume, connector, **kwargs):
800        """Unassign the volume from the host."""
801        client = self._login()
802        try:
803            volume_info = client.getVolumeByName(volume['name'])
804            server_info = client.getServerByName(connector['host'])
805            volume_list = client.findServerVolumes(server_info['name'])
806
807            removeServer = True
808            for entry in volume_list:
809                if entry['id'] != volume_info['id']:
810                    removeServer = False
811                    break
812
813            client.removeServerAccess(
814                volume_info['id'],
815                server_info['id'])
816
817            if removeServer:
818                client.deleteServer(server_info['id'])
819        except hpeexceptions.HTTPNotFound as ex:
820            # If a host is failed-over, we want to allow the detach to
821            # to 'succeed' when it cannot find the host. We can simply
822            # return out of the terminate connection in order for things
823            # to be updated correctly.
824            if self._active_backend_id:
825                LOG.warning("Because the host is currently in a "
826                            "failed-over state, the volume will not "
827                            "be properly detached from the primary "
828                            "array. The detach will be considered a "
829                            "success as far as Cinder is concerned. "
830                            "The volume can now be attached to the "
831                            "secondary target.")
832                return
833            else:
834                raise exception.VolumeBackendAPIException(ex)
835        except Exception as ex:
836            raise exception.VolumeBackendAPIException(ex)
837        finally:
838            self._logout(client)
839
840    @cinder_utils.trace
841    def create_volume_from_snapshot(self, volume, snapshot):
842        """Creates a volume from a snapshot."""
843        client = self._login()
844        try:
845            snap_info = client.getSnapshotByName(snapshot['name'])
846            volume_info = client.cloneSnapshot(
847                volume['name'],
848                snap_info['id'])
849
850            # Extend volume
851            if volume['size'] > snapshot['volume_size']:
852                LOG.debug("Resize the new volume to %s.", volume['size'])
853                self.extend_volume(volume, volume['size'])
854
855            model_update = self._update_provider(volume_info)
856
857            # v2 replication check
858            if self._volume_of_replicated_type(volume) and (
859               self._do_volume_replication_setup(volume, client)):
860                model_update['replication_status'] = 'enabled'
861                model_update['replication_driver_data'] = (json.dumps(
862                    {'location': self._client_conf['hpelefthand_api_url']}))
863
864            return model_update
865        except Exception as ex:
866            raise exception.VolumeBackendAPIException(ex)
867        finally:
868            self._logout(client)
869
870    @cinder_utils.trace
871    def create_cloned_volume(self, volume, src_vref):
872        client = self._login()
873        try:
874            volume_info = client.getVolumeByName(src_vref['name'])
875            clone_info = client.cloneVolume(volume['name'], volume_info['id'])
876
877            # Extend volume
878            if volume['size'] > src_vref['size']:
879                LOG.debug("Resize the new volume to %s.", volume['size'])
880                self.extend_volume(volume, volume['size'])
881            # TODO(kushal) : we will use volume.volume_types when we re-write
882            # the design for unit tests to use objects instead of dicts.
883            # Get the extra specs of interest from this volume's volume type
884            volume_extra_specs = self._get_volume_extra_specs(src_vref)
885            extra_specs = self._get_lh_extra_specs(
886                volume_extra_specs,
887                extra_specs_key_map.keys())
888
889            # Check provisioning type of source volume. If it's full then need
890            # to change provisioning of clone volume to full as lefthand
891            # creates clone volume only with thin provisioning type.
892            if extra_specs.get('hpelh:provisioning') == 'full':
893                options = {'isThinProvisioned': False}
894                clone_volume_info = client.getVolumeByName(volume['name'])
895                client.modifyVolume(clone_volume_info['id'], options)
896
897            model_update = self._update_provider(clone_info)
898
899            # v2 replication check
900            if self._volume_of_replicated_type(volume) and (
901               self._do_volume_replication_setup(volume, client)):
902                model_update['replication_status'] = 'enabled'
903                model_update['replication_driver_data'] = (json.dumps(
904                    {'location': self._client_conf['hpelefthand_api_url']}))
905
906            return model_update
907        except Exception as ex:
908            raise exception.VolumeBackendAPIException(ex)
909        finally:
910            self._logout(client)
911
912    def _get_volume_extra_specs(self, volume):
913        """Get extra specs from a volume."""
914        extra_specs = {}
915        type_id = volume.get('volume_type_id', None)
916        if type_id is not None:
917            ctxt = context.get_admin_context()
918            volume_type = volume_types.get_volume_type(ctxt, type_id)
919            extra_specs = volume_type.get('extra_specs')
920        return extra_specs
921
922    def _get_lh_extra_specs(self, extra_specs, valid_keys):
923        """Get LeftHand extra_specs (valid_keys only)."""
924        extra_specs_of_interest = {}
925        for key, value in extra_specs.items():
926            if key in valid_keys:
927                prefix = key.split(":")
928                if prefix[0] == "hplh":
929                    LOG.warning("The 'hplh' prefix is deprecated. Use "
930                                "'hpelh' instead.")
931                extra_specs_of_interest[key] = value
932        return extra_specs_of_interest
933
934    def _map_extra_specs(self, extra_specs):
935        """Map the extra spec key/values to LeftHand key/values."""
936        client_options = {}
937        for key, value in extra_specs.items():
938            # map extra spec key to lh client option key
939            client_key = extra_specs_key_map[key]
940            # map extra spect value to lh client option value
941            try:
942                value_map = extra_specs_value_map[client_key]
943                # an invalid value will throw KeyError
944                client_value = value_map[value]
945                client_options[client_key] = client_value
946            except KeyError:
947                LOG.error("'%(value)s' is an invalid value "
948                          "for extra spec '%(key)s'",
949                          {'value': value, 'key': key})
950        return client_options
951
952    def _update_provider(self, volume_info, cluster_vip=None):
953        if not cluster_vip:
954            cluster_vip = self.cluster_vip
955        # TODO(justinsb): Is this always 1? Does it matter?
956        cluster_interface = '1'
957        iscsi_portal = cluster_vip + ":3260," + cluster_interface
958
959        return {'provider_location': (
960            "%s %s %s" % (iscsi_portal, volume_info['iscsiIqn'], 0))}
961
962    @coordination.synchronized('VSA-{connector[host]}')
963    def _create_server(self, connector, client):
964        server_info = None
965        chap_enabled = self._client_conf.get('hpelefthand_iscsi_chap_enabled')
966        try:
967            server_info = client.getServerByName(connector['host'])
968            chap_secret = server_info['chapTargetSecret']
969            if not chap_enabled and chap_secret:
970                LOG.warning('CHAP secret exists for host %s but CHAP is '
971                            'disabled', connector['host'])
972            if chap_enabled and chap_secret is None:
973                LOG.warning('CHAP is enabled, but server secret not '
974                            'configured on server %s', connector['host'])
975            return server_info
976        except hpeexceptions.HTTPNotFound:
977            # server does not exist, so create one
978            pass
979
980        optional = None
981        if chap_enabled:
982            chap_secret = utils.generate_password()
983            optional = {'chapName': connector['initiator'],
984                        'chapTargetSecret': chap_secret,
985                        'chapAuthenticationRequired': True
986                        }
987
988        server_info = client.createServer(connector['host'],
989                                          connector['initiator'],
990                                          optional)
991        return server_info
992
993    def create_export(self, context, volume, connector):
994        pass
995
996    def ensure_export(self, context, volume):
997        pass
998
999    def remove_export(self, context, volume):
1000        pass
1001
1002    @cinder_utils.trace
1003    def retype(self, ctxt, volume, new_type, diff, host):
1004        """Convert the volume to be of the new type.
1005
1006        Returns a boolean indicating whether the retype occurred.
1007
1008        :param ctxt: Context
1009        :param volume: A dictionary describing the volume to retype
1010        :param new_type: A dictionary describing the volume type to convert to
1011        :param diff: A dictionary with the difference between the two types
1012        :param host: A dictionary describing the host, where
1013                     host['host'] is its name, and host['capabilities'] is a
1014                     dictionary of its reported capabilities.
1015        """
1016        LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
1017                  'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
1018                                                   'new_type': new_type,
1019                                                   'diff': diff,
1020                                                   'host': host})
1021        client = self._login()
1022        try:
1023            volume_info = client.getVolumeByName(volume['name'])
1024
1025            # pick out the LH extra specs
1026            new_extra_specs = dict(new_type).get('extra_specs')
1027
1028            # in the absence of LH capability in diff,
1029            # True should be return as retype is not needed
1030            if not list(filter((lambda key: extra_specs_key_map.get(key)),
1031                               diff['extra_specs'].keys())):
1032                return True
1033
1034            # add capability of LH, which are absent in new type,
1035            # so default value gets set for those capability
1036            for key, value in extra_specs_default_key_value_map.items():
1037                if key not in new_extra_specs.keys():
1038                    new_extra_specs[key] = value
1039
1040            lh_extra_specs = self._get_lh_extra_specs(
1041                new_extra_specs,
1042                extra_specs_key_map.keys())
1043
1044            LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs})
1045
1046            # only set the ones that have changed
1047            changed_extra_specs = {}
1048            for key, value in lh_extra_specs.items():
1049                try:
1050                    (old, new) = diff['extra_specs'][key]
1051                    if old != new:
1052                        changed_extra_specs[key] = value
1053                except KeyError:
1054                    changed_extra_specs[key] = value
1055
1056            # map extra specs to LeftHand options
1057            options = self._map_extra_specs(changed_extra_specs)
1058            if len(options) > 0:
1059                client.modifyVolume(volume_info['id'], options)
1060            return True
1061        except hpeexceptions.HTTPNotFound:
1062            raise exception.VolumeNotFound(volume_id=volume['id'])
1063        except Exception as ex:
1064            LOG.warning("%s", ex)
1065        finally:
1066            self._logout(client)
1067
1068        return False
1069
1070    @cinder_utils.trace
1071    def migrate_volume(self, ctxt, volume, host):
1072        """Migrate the volume to the specified host.
1073
1074        Backend assisted volume migration will occur if and only if;
1075
1076        1. Same LeftHand backend
1077        2. Volume cannot be attached
1078        3. Volumes with snapshots cannot be migrated
1079        4. Source and Destination clusters must be in the same management group
1080
1081        Volume re-type is not supported.
1082
1083        Returns a boolean indicating whether the migration occurred, as well as
1084        model_update.
1085
1086        :param ctxt: Context
1087        :param volume: A dictionary describing the volume to migrate
1088        :param host: A dictionary describing the host to migrate to, where
1089                     host['host'] is its name, and host['capabilities'] is a
1090                     dictionary of its reported capabilities.
1091        """
1092        false_ret = (False, None)
1093        if 'location_info' not in host['capabilities']:
1094            return false_ret
1095
1096        host_location = host['capabilities']['location_info']
1097        (driver, cluster, vip) = host_location.split(' ')
1098        client = self._login()
1099        LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
1100                  'cluster=%(cluster)s', {
1101                      'id': volume['id'],
1102                      'host': host,
1103                      'cluster': self._client_conf['hpelefthand_clustername']})
1104        try:
1105            # get the cluster info, if it exists and compare
1106            cluster_info = client.getClusterByName(cluster)
1107            LOG.debug('Cluster info: %s', cluster_info)
1108            virtual_ips = cluster_info['virtualIPAddresses']
1109
1110            if driver != self.__class__.__name__:
1111                LOG.info("Cannot provide backend assisted migration for "
1112                         "volume: %s because volume is from a different "
1113                         "backend.", volume['name'])
1114                return false_ret
1115            if vip != virtual_ips[0]['ipV4Address']:
1116                LOG.info("Cannot provide backend assisted migration for "
1117                         "volume: %s because cluster exists in different "
1118                         "management group.", volume['name'])
1119                return false_ret
1120
1121        except hpeexceptions.HTTPNotFound:
1122            LOG.info("Cannot provide backend assisted migration for "
1123                     "volume: %s because cluster exists in different "
1124                     "management group.", volume['name'])
1125            return false_ret
1126        finally:
1127            self._logout(client)
1128
1129        client = self._login()
1130        try:
1131            volume_info = client.getVolumeByName(volume['name'])
1132            LOG.debug('Volume info: %s', volume_info)
1133
1134            # can't migrate if server is attached
1135            if volume_info['iscsiSessions'] is not None:
1136                LOG.info("Cannot provide backend assisted migration "
1137                         "for volume: %s because the volume has been "
1138                         "exported.", volume['name'])
1139                return false_ret
1140
1141            # can't migrate if volume has snapshots
1142            snap_info = client.getVolume(
1143                volume_info['id'],
1144                'fields=snapshots,snapshots[resource[members[name]]]')
1145            LOG.debug('Snapshot info: %s', snap_info)
1146            if snap_info['snapshots']['resource'] is not None:
1147                LOG.info("Cannot provide backend assisted migration "
1148                         "for volume: %s because the volume has "
1149                         "snapshots.", volume['name'])
1150                return false_ret
1151
1152            options = {'clusterName': cluster}
1153            client.modifyVolume(volume_info['id'], options)
1154        except hpeexceptions.HTTPNotFound:
1155            LOG.info("Cannot provide backend assisted migration for "
1156                     "volume: %s because volume does not exist in this "
1157                     "management group.", volume['name'])
1158            return false_ret
1159        except hpeexceptions.HTTPServerError as ex:
1160            LOG.error("Exception: %s", ex)
1161            return false_ret
1162        finally:
1163            self._logout(client)
1164
1165        return (True, None)
1166
1167    @cinder_utils.trace
1168    def update_migrated_volume(self, context, volume, new_volume,
1169                               original_volume_status):
1170        """Rename the new (temp) volume to it's original name.
1171
1172
1173        This method tries to rename the new volume to it's original
1174        name after the migration has completed.
1175
1176        """
1177        LOG.debug("Update volume name for %(id)s.", {'id': new_volume['id']})
1178        name_id = None
1179        provider_location = None
1180        if original_volume_status == 'available':
1181            # volume isn't attached and can be updated
1182            original_name = CONF.volume_name_template % volume['id']
1183            current_name = CONF.volume_name_template % new_volume['id']
1184            client = self._login()
1185            try:
1186                volume_info = client.getVolumeByName(current_name)
1187                volumeMods = {'name': original_name}
1188                client.modifyVolume(volume_info['id'], volumeMods)
1189                LOG.info("Volume name changed from %(tmp)s to %(orig)s.",
1190                         {'tmp': current_name, 'orig': original_name})
1191            except Exception as e:
1192                LOG.error("Changing the volume name from %(tmp)s to "
1193                          "%(orig)s failed because %(reason)s.",
1194                          {'tmp': current_name, 'orig': original_name,
1195                           'reason': e})
1196                name_id = new_volume['_name_id'] or new_volume['id']
1197                provider_location = new_volume['provider_location']
1198            finally:
1199                self._logout(client)
1200        else:
1201            # the backend can't change the name.
1202            name_id = new_volume['_name_id'] or new_volume['id']
1203            provider_location = new_volume['provider_location']
1204
1205        return {'_name_id': name_id, 'provider_location': provider_location}
1206
1207    @cinder_utils.trace
1208    def manage_existing(self, volume, existing_ref):
1209        """Manage an existing LeftHand volume.
1210
1211        existing_ref is a dictionary of the form:
1212        {'source-name': <name of the virtual volume>}
1213        """
1214        # Check API Version
1215        self._check_api_version()
1216
1217        target_vol_name = self._get_existing_volume_ref_name(existing_ref)
1218
1219        # Check for the existence of the virtual volume.
1220        client = self._login()
1221        try:
1222            volume_info = client.getVolumeByName(target_vol_name)
1223        except hpeexceptions.HTTPNotFound:
1224            err = (_("Virtual volume '%s' doesn't exist on array.") %
1225                   target_vol_name)
1226            LOG.error(err)
1227            raise exception.InvalidInput(reason=err)
1228        finally:
1229            self._logout(client)
1230
1231        # Generate the new volume information based on the new ID.
1232        new_vol_name = 'volume-' + volume['id']
1233
1234        volume_type = None
1235        if volume['volume_type_id']:
1236            try:
1237                volume_type = self._get_volume_type(volume['volume_type_id'])
1238            except Exception:
1239                reason = (_("Volume type ID '%s' is invalid.") %
1240                          volume['volume_type_id'])
1241                raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
1242
1243        new_vals = {"name": new_vol_name}
1244
1245        client = self._login()
1246        try:
1247            # Update the existing volume with the new name.
1248            client.modifyVolume(volume_info['id'], new_vals)
1249        finally:
1250            self._logout(client)
1251
1252        LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.",
1253                 {'ref': existing_ref['source-name'], 'new': new_vol_name})
1254
1255        display_name = None
1256        if volume['display_name']:
1257            display_name = volume['display_name']
1258
1259        if volume_type:
1260            LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.",
1261                     {'disp': display_name, 'new': new_vol_name})
1262
1263            # Creates a diff as it needed for retype operation.
1264            diff = {}
1265            diff['extra_specs'] = {key: (None, value) for key, value
1266                                   in volume_type['extra_specs'].items()}
1267            try:
1268                self.retype(None,
1269                            volume,
1270                            volume_type,
1271                            diff,
1272                            volume['host'])
1273                LOG.info("Virtual volume %(disp)s successfully retyped to "
1274                         "%(new_type)s.",
1275                         {'disp': display_name,
1276                          'new_type': volume_type.get('name')})
1277            except Exception:
1278                with excutils.save_and_reraise_exception():
1279                    LOG.warning("Failed to manage virtual volume %(disp)s "
1280                                "due to error during retype.",
1281                                {'disp': display_name})
1282                    # Try to undo the rename and clear the new comment.
1283                    client = self._login()
1284                    try:
1285                        client.modifyVolume(
1286                            volume_info['id'],
1287                            {'name': target_vol_name})
1288                    finally:
1289                        self._logout(client)
1290
1291        updates = {'display_name': display_name}
1292
1293        LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.",
1294                 {'disp': display_name, 'new': new_vol_name})
1295
1296        # Return display name to update the name displayed in the GUI and
1297        # any model updates from retype.
1298        return updates
1299
1300    @cinder_utils.trace
1301    def manage_existing_snapshot(self, snapshot, existing_ref):
1302        """Manage an existing LeftHand snapshot.
1303
1304        existing_ref is a dictionary of the form:
1305        {'source-name': <name of the snapshot>}
1306        """
1307        # Check API Version
1308        self._check_api_version()
1309
1310        # Potential parent volume for the snapshot
1311        volume = snapshot['volume']
1312
1313        if volume.get('replication_status') == 'failed-over':
1314            err = (_("Managing of snapshots to failed-over volumes is "
1315                     "not allowed."))
1316            raise exception.InvalidInput(reason=err)
1317
1318        target_snap_name = self._get_existing_volume_ref_name(existing_ref)
1319
1320        # Check for the existence of the virtual volume.
1321        client = self._login()
1322        try:
1323            updates = self._manage_snapshot(client,
1324                                            volume,
1325                                            snapshot,
1326                                            target_snap_name,
1327                                            existing_ref)
1328        finally:
1329            self._logout(client)
1330
1331        # Return display name to update the name displayed in the GUI and
1332        # any model updates from retype.
1333        return updates
1334
1335    def _manage_snapshot(self, client, volume, snapshot, target_snap_name,
1336                         existing_ref):
1337        # Check for the existence of the virtual volume.
1338        try:
1339            snapshot_info = client.getSnapshotByName(target_snap_name)
1340        except hpeexceptions.HTTPNotFound:
1341            err = (_("Snapshot '%s' doesn't exist on array.") %
1342                   target_snap_name)
1343            LOG.error(err)
1344            raise exception.InvalidInput(reason=err)
1345
1346        # Make sure the snapshot is being associated with the correct volume.
1347        try:
1348            parent_vol = client.getSnapshotParentVolume(target_snap_name)
1349        except hpeexceptions.HTTPNotFound:
1350            err = (_("Could not find the parent volume for Snapshot '%s' on "
1351                     "array.") % target_snap_name)
1352            LOG.error(err)
1353            raise exception.InvalidInput(reason=err)
1354
1355        parent_vol_name = 'volume-' + snapshot['volume_id']
1356        if parent_vol_name != parent_vol['name']:
1357            err = (_("The provided snapshot '%s' is not a snapshot of "
1358                     "the provided volume.") % target_snap_name)
1359            LOG.error(err)
1360            raise exception.InvalidInput(reason=err)
1361
1362        # Generate the new snapshot information based on the new ID.
1363        new_snap_name = 'snapshot-' + snapshot['id']
1364
1365        new_vals = {"name": new_snap_name}
1366
1367        try:
1368            # Update the existing snapshot with the new name.
1369            client.modifySnapshot(snapshot_info['id'], new_vals)
1370        except hpeexceptions.HTTPServerError:
1371            err = (_("An error occurred while attempting to modify "
1372                     "Snapshot '%s'.") % snapshot_info['id'])
1373            LOG.error(err)
1374
1375        LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.",
1376                 {'ref': existing_ref['source-name'], 'new': new_snap_name})
1377
1378        display_name = None
1379        if snapshot['display_name']:
1380            display_name = snapshot['display_name']
1381
1382        updates = {'display_name': display_name}
1383
1384        LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.",
1385                 {'disp': display_name, 'new': new_snap_name})
1386
1387        return updates
1388
1389    @cinder_utils.trace
1390    def manage_existing_get_size(self, volume, existing_ref):
1391        """Return size of volume to be managed by manage_existing.
1392
1393        existing_ref is a dictionary of the form:
1394        {'source-name': <name of the virtual volume>}
1395        """
1396        # Check API version.
1397        self._check_api_version()
1398
1399        target_vol_name = self._get_existing_volume_ref_name(existing_ref)
1400
1401        # Make sure the reference is not in use.
1402        if re.match('volume-*|snapshot-*', target_vol_name):
1403            reason = _("Reference must be the volume name of an unmanaged "
1404                       "virtual volume.")
1405            raise exception.ManageExistingInvalidReference(
1406                existing_ref=target_vol_name,
1407                reason=reason)
1408
1409        # Check for the existence of the virtual volume.
1410        client = self._login()
1411        try:
1412            volume_info = client.getVolumeByName(target_vol_name)
1413        except hpeexceptions.HTTPNotFound:
1414            err = (_("Virtual volume '%s' doesn't exist on array.") %
1415                   target_vol_name)
1416            LOG.error(err)
1417            raise exception.InvalidInput(reason=err)
1418        finally:
1419            self._logout(client)
1420
1421        return int(math.ceil(float(volume_info['size']) / units.Gi))
1422
1423    @cinder_utils.trace
1424    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
1425        """Return size of volume to be managed by manage_existing.
1426
1427        existing_ref is a dictionary of the form:
1428        {'source-name': <name of the virtual volume>}
1429        """
1430        # Check API version.
1431        self._check_api_version()
1432
1433        target_snap_name = self._get_existing_volume_ref_name(existing_ref)
1434
1435        # Make sure the reference is not in use.
1436        if re.match('volume-*|snapshot-*|unm-*', target_snap_name):
1437            reason = _("Reference must be the name of an unmanaged "
1438                       "snapshot.")
1439            raise exception.ManageExistingInvalidReference(
1440                existing_ref=target_snap_name,
1441                reason=reason)
1442
1443        # Check for the existence of the virtual volume.
1444        client = self._login()
1445        try:
1446            snapshot_info = client.getSnapshotByName(target_snap_name)
1447        except hpeexceptions.HTTPNotFound:
1448            err = (_("Snapshot '%s' doesn't exist on array.") %
1449                   target_snap_name)
1450            LOG.error(err)
1451            raise exception.InvalidInput(reason=err)
1452        finally:
1453            self._logout(client)
1454
1455        return int(math.ceil(float(snapshot_info['size']) / units.Gi))
1456
1457    @cinder_utils.trace
1458    def unmanage(self, volume):
1459        """Removes the specified volume from Cinder management."""
1460        # Check API version.
1461        self._check_api_version()
1462
1463        # Rename the volume's name to unm-* format so that it can be
1464        # easily found later.
1465        client = self._login()
1466        try:
1467            volume_info = client.getVolumeByName(volume['name'])
1468            new_vol_name = 'unm-' + six.text_type(volume['id'])
1469            options = {'name': new_vol_name}
1470            client.modifyVolume(volume_info['id'], options)
1471        finally:
1472            self._logout(client)
1473
1474        LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. "
1475                 "Volume renamed to '%(new)s'.",
1476                 {'disp': volume['display_name'],
1477                  'vol': volume['name'],
1478                  'new': new_vol_name})
1479
1480    @cinder_utils.trace
1481    def unmanage_snapshot(self, snapshot):
1482        """Removes the specified snapshot from Cinder management."""
1483        # Check API version.
1484        self._check_api_version()
1485
1486        # Potential parent volume for the snapshot
1487        volume = snapshot['volume']
1488
1489        if volume.get('replication_status') == 'failed-over':
1490            err = (_("Unmanaging of snapshots from 'failed-over' volumes is "
1491                     "not allowed."))
1492            LOG.error(err)
1493            # TODO(leeantho) Change this exception to Invalid when the volume
1494            # manager supports handling that.
1495            raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
1496
1497        # Rename the snapshots's name to ums-* format so that it can be
1498        # easily found later.
1499        client = self._login()
1500        try:
1501            snapshot_info = client.getSnapshotByName(snapshot['name'])
1502            new_snap_name = 'ums-' + six.text_type(snapshot['id'])
1503            options = {'name': new_snap_name}
1504            client.modifySnapshot(snapshot_info['id'], options)
1505            LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. "
1506                     "Snapshot renamed to '%(new)s'.",
1507                     {'disp': snapshot['display_name'],
1508                      'vol': snapshot['name'],
1509                      'new': new_snap_name})
1510        finally:
1511            self._logout(client)
1512
1513    def _get_existing_volume_ref_name(self, existing_ref):
1514        """Returns the volume name of an existing reference.
1515
1516        Checks if an existing volume reference has a source-name element.
1517        If source-name is not present an error will be thrown.
1518        """
1519        if 'source-name' not in existing_ref:
1520            reason = _("Reference must contain source-name.")
1521            raise exception.ManageExistingInvalidReference(
1522                existing_ref=existing_ref,
1523                reason=reason)
1524
1525        return existing_ref['source-name']
1526
1527    def _check_api_version(self):
1528        """Checks that the API version is correct."""
1529        if (self.api_version < MIN_API_VERSION):
1530            ex_msg = (_('Invalid HPELeftHand API version found: %(found)s. '
1531                        'Version %(minimum)s or greater required for '
1532                        'manage/unmanage support.')
1533                      % {'found': self.api_version,
1534                         'minimum': MIN_API_VERSION})
1535            LOG.error(ex_msg)
1536            raise exception.InvalidInput(reason=ex_msg)
1537
1538    def _get_volume_type(self, type_id):
1539        ctxt = context.get_admin_context()
1540        return volume_types.get_volume_type(ctxt, type_id)
1541
1542    # v2 replication methods
1543    @cinder_utils.trace
1544    def failover_host(self, context, volumes, secondary_id=None, groups=None):
1545        """Force failover to a secondary replication target."""
1546        if secondary_id and secondary_id == self.FAILBACK_VALUE:
1547            volume_update_list = self._replication_failback(volumes)
1548            target_id = None
1549        else:
1550            failover_target = None
1551            for target in self._replication_targets:
1552                if target['backend_id'] == secondary_id:
1553                    failover_target = target
1554                    break
1555            if not failover_target:
1556                msg = _("A valid secondary target MUST be specified in order "
1557                        "to failover.")
1558                LOG.error(msg)
1559                raise exception.InvalidReplicationTarget(reason=msg)
1560
1561            target_id = failover_target['backend_id']
1562            volume_update_list = []
1563            for volume in volumes:
1564                if self._volume_of_replicated_type(volume):
1565                    # Try and stop the remote snapshot schedule. If the primary
1566                    # array is down, we will continue with the failover.
1567                    client = None
1568                    try:
1569                        client = self._login(timeout=30)
1570                        name = volume['name'] + self.REP_SCHEDULE_SUFFIX + (
1571                            "_Pri")
1572                        client.stopRemoteSnapshotSchedule(name)
1573                    except Exception:
1574                        LOG.warning("The primary array is currently "
1575                                    "offline, remote copy has been "
1576                                    "automatically paused.")
1577                    finally:
1578                        self._logout(client)
1579
1580                    # Update provider location to the new array.
1581                    cl = None
1582                    try:
1583                        cl = self._create_replication_client(failover_target)
1584                        # Stop snapshot schedule
1585                        try:
1586                            name = volume['name'] + (
1587                                self.REP_SCHEDULE_SUFFIX + "_Rmt")
1588                            cl.stopRemoteSnapshotSchedule(name)
1589                        except Exception:
1590                            pass
1591                        # Make the volume primary so it can be attached after a
1592                        # fail-over.
1593                        cl.makeVolumePrimary(volume['name'])
1594
1595                        # Update the provider info for a proper fail-over.
1596                        volume_info = cl.getVolumeByName(volume['name'])
1597                        prov_location = self._update_provider(
1598                            volume_info,
1599                            cluster_vip=failover_target['cluster_vip'])
1600                        volume_update_list.append(
1601                            {'volume_id': volume['id'],
1602                             'updates': {'replication_status': 'failed-over',
1603                                         'provider_location':
1604                                         prov_location['provider_location']}})
1605                    except Exception as ex:
1606                        LOG.error("There was a problem with the failover "
1607                                  "(%(error)s) and it was unsuccessful. "
1608                                  "Volume '%(volume)s will not be available "
1609                                  "on the failed over target.",
1610                                  {'error': six.text_type(ex),
1611                                   'volume': volume['id']})
1612                        volume_update_list.append(
1613                            {'volume_id': volume['id'],
1614                             'updates': {'replication_status': 'error'}})
1615                    finally:
1616                        self._destroy_replication_client(cl)
1617                else:
1618                    # If the volume is not of replicated type, we need to
1619                    # force the status into error state so a user knows they
1620                    # do not have access to the volume.
1621                    volume_update_list.append(
1622                        {'volume_id': volume['id'],
1623                         'updates': {'status': 'error'}})
1624
1625            self._active_backend_id = target_id
1626
1627        return target_id, volume_update_list, []
1628
1629    def _do_replication_setup(self):
1630        default_san_ssh_port = self.configuration.hpelefthand_ssh_port
1631        default_ssh_conn_timeout = self.configuration.ssh_conn_timeout
1632        default_san_private_key = self.configuration.san_private_key
1633
1634        replication_targets = []
1635        replication_devices = self.configuration.replication_device
1636        if replication_devices:
1637            # We do not want to fail if we cannot log into the client here
1638            # as a failover can still occur, so we need out replication
1639            # devices to exist.
1640            for dev in replication_devices:
1641                remote_array = dict(dev.items())
1642                # Override and set defaults for certain entries
1643                remote_array['managed_backend_name'] = (
1644                    dev.get('managed_backend_name'))
1645                remote_array['hpelefthand_ssh_port'] = (
1646                    dev.get('hpelefthand_ssh_port', default_san_ssh_port))
1647                remote_array['ssh_conn_timeout'] = (
1648                    dev.get('ssh_conn_timeout', default_ssh_conn_timeout))
1649                remote_array['san_private_key'] = (
1650                    dev.get('san_private_key', default_san_private_key))
1651                # Format hpe3par_iscsi_chap_enabled as a bool
1652                remote_array['hpelefthand_iscsi_chap_enabled'] = (
1653                    dev.get('hpelefthand_iscsi_chap_enabled') == 'True')
1654                remote_array['cluster_id'] = None
1655                remote_array['cluster_vip'] = None
1656                array_name = remote_array['backend_id']
1657
1658                # Make sure we can log into the array, that it has been
1659                # correctly configured, and its API version meets the
1660                # minimum requirement.
1661                cl = None
1662                try:
1663                    cl = self._create_replication_client(remote_array)
1664                    api_version = cl.getApiVersion()
1665                    cluster_info = cl.getClusterByName(
1666                        remote_array['hpelefthand_clustername'])
1667                    remote_array['cluster_id'] = cluster_info['id']
1668                    virtual_ips = cluster_info['virtualIPAddresses']
1669                    remote_array['cluster_vip'] = virtual_ips[0]['ipV4Address']
1670
1671                    if api_version < MIN_API_VERSION:
1672                        LOG.warning("The secondary array must have an API "
1673                                    "version of %(min_ver)s or higher. "
1674                                    "Array '%(target)s' is on %(target_ver)s, "
1675                                    "therefore it will not be added as a "
1676                                    "valid replication target.",
1677                                    {'min_ver': MIN_API_VERSION,
1678                                     'target': array_name,
1679                                     'target_ver': api_version})
1680                    elif not self._is_valid_replication_array(remote_array):
1681                        LOG.warning("'%s' is not a valid replication array. "
1682                                    "In order to be valid, backend_id, "
1683                                    "hpelefthand_api_url, "
1684                                    "hpelefthand_username, "
1685                                    "hpelefthand_password, and "
1686                                    "hpelefthand_clustername, "
1687                                    "must be specified. If the target is "
1688                                    "managed, managed_backend_name must be "
1689                                    "set as well.", array_name)
1690                    else:
1691                        replication_targets.append(remote_array)
1692                except Exception:
1693                    LOG.error("Could not log in to LeftHand array (%s) with "
1694                              "the provided credentials.", array_name)
1695                finally:
1696                    self._destroy_replication_client(cl)
1697
1698            self._replication_targets = replication_targets
1699            if self._is_replication_configured_correct():
1700                self._replication_enabled = True
1701
1702    def _replication_failback(self, volumes):
1703        array_config = {'hpelefthand_api_url':
1704                        self.configuration.hpelefthand_api_url,
1705                        'hpelefthand_username':
1706                        self.configuration.hpelefthand_username,
1707                        'hpelefthand_password':
1708                        self.configuration.hpelefthand_password,
1709                        'hpelefthand_ssh_port':
1710                        self.configuration.hpelefthand_ssh_port}
1711
1712        # Make sure the proper steps on the backend have been completed before
1713        # we allow a failback.
1714        if not self._is_host_ready_for_failback(volumes, array_config):
1715            msg = _("The host is not ready to be failed back. Please "
1716                    "resynchronize the volumes and resume replication on the "
1717                    "LeftHand backends.")
1718            LOG.error(msg)
1719            raise exception.InvalidReplicationTarget(reason=msg)
1720
1721        cl = None
1722        volume_update_list = []
1723        for volume in volumes:
1724            if self._volume_of_replicated_type(volume):
1725                try:
1726                    cl = self._create_replication_client(array_config)
1727                    # Update the provider info for a proper fail-back.
1728                    volume_info = cl.getVolumeByName(volume['name'])
1729                    cluster_info = cl.getClusterByName(
1730                        self.configuration.hpelefthand_clustername)
1731                    virtual_ips = cluster_info['virtualIPAddresses']
1732                    cluster_vip = virtual_ips[0]['ipV4Address']
1733                    provider_location = self._update_provider(
1734                        volume_info, cluster_vip=cluster_vip)
1735                    volume_update_list.append(
1736                        {'volume_id': volume['id'],
1737                         'updates': {'replication_status': 'available',
1738                                     'provider_location':
1739                                     provider_location['provider_location']}})
1740                except Exception as ex:
1741                    # The secondary array was not able to execute the fail-back
1742                    # properly. The replication status is now in an unknown
1743                    # state, so we will treat it as an error.
1744                    LOG.error("There was a problem with the failover "
1745                              "(%(error)s) and it was unsuccessful. "
1746                              "Volume '%(volume)s will not be available "
1747                              "on the failed over target.",
1748                              {'error': ex,
1749                               'volume': volume['id']})
1750                    volume_update_list.append(
1751                        {'volume_id': volume['id'],
1752                         'updates': {'replication_status': 'error'}})
1753                finally:
1754                    self._destroy_replication_client(cl)
1755            else:
1756                # Upon failing back, we can move the non-replicated volumes
1757                # back into available state.
1758                volume_update_list.append(
1759                    {'volume_id': volume['id'],
1760                     'updates': {'status': 'available'}})
1761
1762        return volume_update_list
1763
1764    def _is_host_ready_for_failback(self, volumes, array_config):
1765        """Checks to make sure the volumes have been synchronized
1766
1767        This entails ensuring the remote snapshot schedule has been resumed
1768        on the backends and the secondary volume's data has been copied back
1769        to the primary.
1770        """
1771        is_ready = True
1772        cl = None
1773        try:
1774            for volume in volumes:
1775                if self._volume_of_replicated_type(volume):
1776                    schedule_name = volume['name'] + (
1777                        self.REP_SCHEDULE_SUFFIX + "_Pri")
1778                    cl = self._create_replication_client(array_config)
1779                    schedule = cl.getRemoteSnapshotSchedule(schedule_name)
1780                    schedule = ''.join(schedule)
1781                    # We need to check the status of the schedule to make sure
1782                    # it is not paused.
1783                    result = re.search(r".*paused\s+(\w+)", schedule)
1784                    is_schedule_active = result.group(1) == 'false'
1785
1786                    volume_info = cl.getVolumeByName(volume['name'])
1787                    if not volume_info['isPrimary'] or not is_schedule_active:
1788                        is_ready = False
1789                        break
1790        except Exception as ex:
1791            LOG.error("There was a problem when trying to determine if "
1792                      "the volume can be failed-back: %s", ex)
1793            is_ready = False
1794        finally:
1795            self._destroy_replication_client(cl)
1796
1797        return is_ready
1798
1799    def _get_replication_targets(self):
1800        replication_targets = []
1801        for target in self._replication_targets:
1802            replication_targets.append(target['backend_id'])
1803
1804        return replication_targets
1805
1806    def _is_valid_replication_array(self, target):
1807        required_flags = ['hpelefthand_api_url', 'hpelefthand_username',
1808                          'hpelefthand_password', 'backend_id',
1809                          'hpelefthand_clustername']
1810        try:
1811            self.check_replication_flags(target, required_flags)
1812            return True
1813        except Exception:
1814            return False
1815
1816    def _is_replication_configured_correct(self):
1817        rep_flag = True
1818        # Make sure there is at least one replication target.
1819        if len(self._replication_targets) < 1:
1820            LOG.error("There must be at least one valid replication "
1821                      "device configured.")
1822            rep_flag = False
1823        return rep_flag
1824
1825    def _volume_of_replicated_type(self, volume, vol_type_id=None):
1826        # TODO(kushal) : we will use volume.volume_types when we re-write
1827        # the design for unit tests to use objects instead of dicts.
1828        replicated_type = False
1829        volume_type_id = vol_type_id if vol_type_id else volume.get(
1830            'volume_type_id')
1831        if volume_type_id:
1832            volume_type = self._get_volume_type(volume_type_id)
1833
1834            extra_specs = volume_type.get('extra_specs')
1835            if extra_specs and 'replication_enabled' in extra_specs:
1836                rep_val = extra_specs['replication_enabled']
1837                replicated_type = (rep_val == "<is> True")
1838
1839        return replicated_type
1840
1841    def _does_snapshot_schedule_exist(self, schedule_name, client):
1842        try:
1843            exists = client.doesRemoteSnapshotScheduleExist(schedule_name)
1844        except Exception:
1845            exists = False
1846        return exists
1847
1848    def _get_lefthand_config(self):
1849        conf = None
1850        for target in self._replication_targets:
1851            if target['backend_id'] == self._active_backend_id:
1852                conf = target
1853                break
1854
1855        return conf
1856
1857    def _do_volume_replication_setup(self, volume, client, optional=None):
1858        """This function will do or ensure the following:
1859
1860        -Create volume on main array (already done in create_volume)
1861        -Create volume on secondary array
1862        -Make volume remote on secondary array
1863        -Create the snapshot schedule
1864
1865        If anything here fails, we will need to clean everything up in
1866        reverse order, including the original volume.
1867        """
1868        schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX
1869        # If there is already a snapshot schedule, the volume is setup
1870        # for replication on the backend. Start the schedule and return
1871        # success.
1872        if self._does_snapshot_schedule_exist(schedule_name + "_Pri", client):
1873            try:
1874                client.startRemoteSnapshotSchedule(schedule_name + "_Pri")
1875            except Exception:
1876                pass
1877            return True
1878
1879        # Grab the extra_spec entries for replication and make sure they
1880        # are set correctly.
1881        volume_type = self._get_volume_type(volume["volume_type_id"])
1882        extra_specs = volume_type.get("extra_specs")
1883
1884        # Get and check replication sync period
1885        replication_sync_period = extra_specs.get(
1886            self.EXTRA_SPEC_REP_SYNC_PERIOD)
1887        if replication_sync_period:
1888            replication_sync_period = int(replication_sync_period)
1889            if replication_sync_period < self.MIN_REP_SYNC_PERIOD:
1890                msg = (_("The replication sync period must be at least %s "
1891                         "seconds.") % self.MIN_REP_SYNC_PERIOD)
1892                LOG.error(msg)
1893                raise exception.VolumeBackendAPIException(data=msg)
1894        else:
1895            # If there is no extra_spec value for replication sync period, we
1896            # will default it to the required minimum and log a warning.
1897            replication_sync_period = self.MIN_REP_SYNC_PERIOD
1898            LOG.warning("There was no extra_spec value for %(spec_name)s, "
1899                        "so the default value of %(def_val)s will be "
1900                        "used. To overwrite this, set this value in the "
1901                        "volume type extra_specs.",
1902                        {'spec_name': self.EXTRA_SPEC_REP_SYNC_PERIOD,
1903                         'def_val': self.MIN_REP_SYNC_PERIOD})
1904
1905        # Get and check retention count
1906        retention_count = extra_specs.get(
1907            self.EXTRA_SPEC_REP_RETENTION_COUNT)
1908        if retention_count:
1909            retention_count = int(retention_count)
1910            if retention_count > self.MAX_RETENTION_COUNT:
1911                msg = (_("The retention count must be %s or less.") %
1912                       self.MAX_RETENTION_COUNT)
1913                LOG.error(msg)
1914                raise exception.VolumeBackendAPIException(data=msg)
1915        else:
1916            # If there is no extra_spec value for retention count, we
1917            # will default it and log a warning.
1918            retention_count = self.DEFAULT_RETENTION_COUNT
1919            LOG.warning("There was no extra_spec value for %(spec_name)s, "
1920                        "so the default value of %(def_val)s will be "
1921                        "used. To overwrite this, set this value in the "
1922                        "volume type extra_specs.",
1923                        {'spec_name': self.EXTRA_SPEC_REP_RETENTION_COUNT,
1924                         'def_val': self.DEFAULT_RETENTION_COUNT})
1925
1926        # Get and checkout remote retention count
1927        remote_retention_count = extra_specs.get(
1928            self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT)
1929        if remote_retention_count:
1930            remote_retention_count = int(remote_retention_count)
1931            if remote_retention_count > self.MAX_REMOTE_RETENTION_COUNT:
1932                msg = (_("The remote retention count must be %s or less.") %
1933                       self.MAX_REMOTE_RETENTION_COUNT)
1934                LOG.error(msg)
1935                raise exception.VolumeBackendAPIException(data=msg)
1936        else:
1937            # If there is no extra_spec value for remote retention count, we
1938            # will default it and log a warning.
1939            remote_retention_count = self.DEFAULT_REMOTE_RETENTION_COUNT
1940            spec_name = self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT
1941            LOG.warning("There was no extra_spec value for %(spec_name)s, "
1942                        "so the default value of %(def_val)s will be "
1943                        "used. To overwrite this, set this value in the "
1944                        "volume type extra_specs.",
1945                        {'spec_name': spec_name,
1946                         'def_val': self.DEFAULT_REMOTE_RETENTION_COUNT})
1947
1948        cl = None
1949        try:
1950            # Create volume on secondary system
1951            for remote_target in self._replication_targets:
1952                cl = self._create_replication_client(remote_target)
1953
1954                if optional:
1955                    optional['clusterName'] = (
1956                        remote_target['hpelefthand_clustername'])
1957                cl.createVolume(volume['name'],
1958                                remote_target['cluster_id'],
1959                                volume['size'] * units.Gi,
1960                                optional)
1961
1962                # Make secondary volume a remote volume
1963                # NOTE: The snapshot created when making a volume remote is
1964                # not managed by cinder. This snapshot will be removed when
1965                # _do_volume_replication_destroy is called.
1966                snap_name = volume['name'] + self.REP_SNAPSHOT_SUFFIX
1967                cl.makeVolumeRemote(volume['name'], snap_name)
1968
1969                # A remote IP address is needed from the cluster in order to
1970                # create the snapshot schedule.
1971                remote_ip = cl.getIPFromCluster(
1972                    remote_target['hpelefthand_clustername'])
1973
1974                # Destroy remote client
1975                self._destroy_replication_client(cl)
1976
1977                # Create remote snapshot schedule on the primary system.
1978                # We want to start the remote snapshot schedule instantly; a
1979                # date in the past will do that. We will use the Linux epoch
1980                # date formatted to ISO 8601 (YYYY-MM-DDTHH:MM:SSZ).
1981                start_date = "1970-01-01T00:00:00Z"
1982                remote_vol_name = volume['name']
1983
1984                client.createRemoteSnapshotSchedule(
1985                    volume['name'],
1986                    schedule_name,
1987                    replication_sync_period,
1988                    start_date,
1989                    retention_count,
1990                    remote_target['hpelefthand_clustername'],
1991                    remote_retention_count,
1992                    remote_vol_name,
1993                    remote_ip,
1994                    remote_target['hpelefthand_username'],
1995                    remote_target['hpelefthand_password'])
1996
1997            return True
1998        except Exception as ex:
1999            # Destroy the replication client that was created
2000            self._destroy_replication_client(cl)
2001            # Deconstruct what we tried to create
2002            self._do_volume_replication_destroy(volume, client)
2003            msg = (_("There was an error setting up a remote schedule "
2004                     "on the LeftHand arrays: ('%s'). The volume will not be "
2005                     "recognized as replication type.") %
2006                   six.text_type(ex))
2007            LOG.error(msg)
2008            raise exception.VolumeBackendAPIException(data=msg)
2009
2010    def _do_volume_replication_destroy(self, volume, client):
2011        """This will remove all dependencies of a replicated volume
2012
2013        It should be used when deleting a replication enabled volume
2014        or if setting up a remote copy group fails. It will try and do the
2015        following:
2016        -Delete the snapshot schedule
2017        -Delete volume and snapshots on secondary array
2018        -Delete volume and snapshots on primary array
2019        """
2020        # Delete snapshot schedule
2021        try:
2022            schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX
2023            client.deleteRemoteSnapshotSchedule(schedule_name)
2024        except Exception:
2025            pass
2026
2027        # Delete volume on secondary array(s)
2028        remote_vol_name = volume['name']
2029        for remote_target in self._replication_targets:
2030            try:
2031                cl = self._create_replication_client(remote_target)
2032                volume_info = cl.getVolumeByName(remote_vol_name)
2033                cl.deleteVolume(volume_info['id'])
2034            except Exception:
2035                pass
2036            finally:
2037                # Destroy the replication client that was created
2038                self._destroy_replication_client(cl)
2039
2040        # Delete volume on primary array
2041        try:
2042            volume_info = client.getVolumeByName(volume['name'])
2043            client.deleteVolume(volume_info['id'])
2044        except Exception:
2045            pass
2046