1# Copyright 2015 IBM Corp.
2# All Rights Reserved.
3#
4#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5#    not use this file except in compliance with the License. You may obtain
6#    a copy of the License at
7#
8#         http://www.apache.org/licenses/LICENSE-2.0
9#
10#    Unless required by applicable law or agreed to in writing, software
11#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13#    License for the specific language governing permissions and limitations
14#    under the License.
15#
16
17import math
18import paramiko
19import random
20import re
21import time
22import unicodedata
23
24from eventlet import greenthread
25from oslo_concurrency import processutils
26from oslo_config import cfg
27from oslo_log import log as logging
28from oslo_serialization import jsonutils as json
29from oslo_service import loopingcall
30from oslo_utils import encodeutils
31from oslo_utils import excutils
32from oslo_utils import strutils
33from oslo_utils import units
34from retrying import retry
35import six
36
37from cinder import context
38from cinder import exception
39from cinder.i18n import _
40from cinder import objects
41from cinder.objects import fields
42from cinder import ssh_utils
43from cinder import utils as cinder_utils
44from cinder.volume import configuration
45from cinder.volume import driver
46from cinder.volume.drivers.ibm.storwize_svc import (
47    replication as storwize_rep)
48from cinder.volume.drivers.ibm.storwize_svc import storwize_const
49from cinder.volume.drivers.san import san
50from cinder.volume import qos_specs
51from cinder.volume import utils
52from cinder.volume import volume_types
53
54
55INTERVAL_1_SEC = 1
56DEFAULT_TIMEOUT = 15
57LOG = logging.getLogger(__name__)
58
59storwize_svc_opts = [
60    cfg.ListOpt('storwize_svc_volpool_name',
61                default=['volpool'],
62                help='Comma separated list of storage system storage '
63                     'pools for volumes.'),
64    cfg.IntOpt('storwize_svc_vol_rsize',
65               default=2,
66               min=-1, max=100,
67               help='Storage system space-efficiency parameter for volumes '
68                    '(percentage)'),
69    cfg.IntOpt('storwize_svc_vol_warning',
70               default=0,
71               min=-1, max=100,
72               help='Storage system threshold for volume capacity warnings '
73                    '(percentage)'),
74    cfg.BoolOpt('storwize_svc_vol_autoexpand',
75                default=True,
76                help='Storage system autoexpand parameter for volumes '
77                     '(True/False)'),
78    cfg.IntOpt('storwize_svc_vol_grainsize',
79               default=256,
80               help='Storage system grain size parameter for volumes '
81                    '(32/64/128/256)'),
82    cfg.BoolOpt('storwize_svc_vol_compression',
83                default=False,
84                help='Storage system compression option for volumes'),
85    cfg.BoolOpt('storwize_svc_vol_easytier',
86                default=True,
87                help='Enable Easy Tier for volumes'),
88    cfg.StrOpt('storwize_svc_vol_iogrp',
89               default='0',
90               help='The I/O group in which to allocate volumes. It can be a '
91               'comma-separated list in which case the driver will select an '
92               'io_group based on least number of volumes associated with the '
93               'io_group.'),
94    cfg.IntOpt('storwize_svc_flashcopy_timeout',
95               default=120,
96               min=1, max=600,
97               help='Maximum number of seconds to wait for FlashCopy to be '
98                    'prepared.'),
99    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
100                default=True,
101                help='This option no longer has any affect. It is deprecated '
102                     'and will be removed in the next release.',
103                deprecated_for_removal=True),
104    cfg.BoolOpt('storwize_svc_allow_tenant_qos',
105                default=False,
106                help='Allow tenants to specify QOS on create'),
107    cfg.StrOpt('storwize_svc_stretched_cluster_partner',
108               default=None,
109               help='If operating in stretched cluster mode, specify the '
110                    'name of the pool in which mirrored copies are stored.'
111                    'Example: "pool2"'),
112    cfg.StrOpt('storwize_san_secondary_ip',
113               default=None,
114               help='Specifies secondary management IP or hostname to be '
115                    'used if san_ip is invalid or becomes inaccessible.'),
116    cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
117                default=False,
118                help='Specifies that the volume not be formatted during '
119                     'creation.'),
120    cfg.IntOpt('storwize_svc_flashcopy_rate',
121               default=50,
122               min=1, max=100,
123               help='Specifies the Storwize FlashCopy copy rate to be used '
124               'when creating a full volume copy. The default is rate '
125               'is 50, and the valid rates are 1-100.'),
126    cfg.StrOpt('storwize_svc_mirror_pool',
127               default=None,
128               help='Specifies the name of the pool in which mirrored copy '
129                    'is stored. Example: "pool2"'),
130    cfg.StrOpt('storwize_peer_pool',
131               default=None,
132               help='Specifies the name of the peer pool for hyperswap '
133                    'volume, the peer pool must exist on the other site.'),
134    cfg.DictOpt('storwize_preferred_host_site',
135                default={},
136                help='Specifies the site information for host. '
137                     'One WWPN or multi WWPNs used in the host can be '
138                     'specified. For example: '
139                     'storwize_preferred_host_site=site1:wwpn1,'
140                     'site2:wwpn2&wwpn3 or '
141                     'storwize_preferred_host_site=site1:iqn1,site2:iqn2'),
142    cfg.IntOpt('cycle_period_seconds',
143               default=300,
144               min=60, max=86400,
145               help='This defines an optional cycle period that applies to '
146               'Global Mirror relationships with a cycling mode of multi. '
147               'A Global Mirror relationship using the multi cycling_mode '
148               'performs a complete cycle at most once each period. '
149               'The default is 300 seconds, and the valid seconds '
150               'are 60-86400.'),
151]
152
153CONF = cfg.CONF
154CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP)
155
156
157class StorwizeSSH(object):
158    """SSH interface to IBM Storwize family and SVC storage systems."""
159    def __init__(self, run_ssh):
160        self._ssh = run_ssh
161
162    def _run_ssh(self, ssh_cmd):
163        try:
164            return self._ssh(ssh_cmd)
165        except processutils.ProcessExecutionError as e:
166            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
167                     'stdout: %(out)s\n stderr: %(err)s.') %
168                   {'cmd': ssh_cmd,
169                    'out': e.stdout,
170                    'err': e.stderr})
171            LOG.error(msg)
172            raise exception.VolumeBackendAPIException(data=msg)
173
174    def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
175        """Run an SSH command and return parsed output."""
176        raw = self._run_ssh(ssh_cmd)
177        return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
178                           with_header=with_header)
179
180    def run_ssh_assert_no_output(self, ssh_cmd, log_cmd=None):
181        """Run an SSH command and assert no output returned."""
182        out, err = self._run_ssh(ssh_cmd)
183        if len(out.strip()) != 0:
184            if not log_cmd:
185                log_cmd = ' '.join(ssh_cmd)
186            msg = (_('Expected no output from CLI command %(cmd)s, '
187                     'got %(out)s.') % {'cmd': log_cmd, 'out': out})
188            LOG.error(msg)
189            raise exception.VolumeBackendAPIException(data=msg)
190
191    def run_ssh_check_created(self, ssh_cmd):
192        """Run an SSH command and return the ID of the created object."""
193        out, err = self._run_ssh(ssh_cmd)
194        try:
195            match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
196            return match_obj.group(1)
197        except (AttributeError, IndexError):
198            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
199                     'stdout: %(out)s\n stderr: %(err)s.') %
200                   {'cmd': ssh_cmd,
201                    'out': out,
202                    'err': err})
203            LOG.error(msg)
204            raise exception.VolumeBackendAPIException(data=msg)
205
206    def lsnode(self, node_id=None):
207        with_header = True
208        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
209        if node_id:
210            with_header = False
211            ssh_cmd.append(node_id)
212        return self.run_ssh_info(ssh_cmd, with_header=with_header)
213
214    def lslicense(self):
215        ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
216        return self.run_ssh_info(ssh_cmd)[0]
217
218    def lsguicapabilities(self):
219        ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!']
220        return self.run_ssh_info(ssh_cmd)[0]
221
222    def lssystem(self):
223        ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
224        return self.run_ssh_info(ssh_cmd)[0]
225
226    def lsmdiskgrp(self, pool):
227        ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
228                   '"%s"' % pool]
229        try:
230            return self.run_ssh_info(ssh_cmd)[0]
231        except exception.VolumeBackendAPIException as ex:
232            LOG.warning("Failed to get pool %(pool)s info. "
233                        "Exception: %(ex)s.", {'pool': pool,
234                                               'ex': ex})
235            return None
236
237    def lsiogrp(self):
238        ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
239        return self.run_ssh_info(ssh_cmd, with_header=True)
240
241    def lsportip(self):
242        ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
243        return self.run_ssh_info(ssh_cmd, with_header=True)
244
245    @staticmethod
246    def _create_port_arg(port_type, port_name):
247        if port_type == 'initiator':
248            port = ['-iscsiname']
249        else:
250            port = ['-hbawwpn']
251        port.append(port_name)
252        return port
253
254    def mkhost(self, host_name, port_type, port_name, site=None):
255        port = self._create_port_arg(port_type, port_name)
256        ssh_cmd = ['svctask', 'mkhost', '-force'] + port
257        if site:
258            ssh_cmd += ['-site', '"%s"' % site]
259        ssh_cmd += ['-name', '"%s"' % host_name]
260        return self.run_ssh_check_created(ssh_cmd)
261
262    def addhostport(self, host, port_type, port_name):
263        port = self._create_port_arg(port_type, port_name)
264        ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
265        self.run_ssh_assert_no_output(ssh_cmd)
266
267    def lshost(self, host=None):
268        with_header = True
269        ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
270        if host:
271            with_header = False
272            ssh_cmd.append('"%s"' % host)
273        return self.run_ssh_info(ssh_cmd, with_header=with_header)
274
275    def add_chap_secret(self, secret, host):
276        ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
277        log_cmd = 'svctask chhost -chapsecret *** %s' % host
278        self.run_ssh_assert_no_output(ssh_cmd, log_cmd)
279
280    def chhost(self, host, site):
281        ssh_cmd = ['svctask', 'chhost', '-site', '"%s"' % site, '"%s"' % host]
282        self.run_ssh_assert_no_output(ssh_cmd)
283
284    def lsiscsiauth(self):
285        ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
286        return self.run_ssh_info(ssh_cmd, with_header=True)
287
288    def lsfabric(self, wwpn=None, host=None):
289        ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
290        if wwpn:
291            ssh_cmd.extend(['-wwpn', wwpn])
292        elif host:
293            ssh_cmd.extend(['-host', '"%s"' % host])
294        else:
295            msg = (_('Must pass wwpn or host to lsfabric.'))
296            LOG.error(msg)
297            raise exception.VolumeDriverException(message=msg)
298        return self.run_ssh_info(ssh_cmd, with_header=True)
299
300    def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
301        """Map vdisk to host.
302
303        If vdisk already mapped and multihostmap is True, use the force flag.
304        """
305        ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host,
306                   '-scsi', lun, '"%s"' % vdisk]
307
308        if multihostmap:
309            ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
310        self.run_ssh_check_created(ssh_cmd)
311
312    def mkrcrelationship(self, master, aux, system, asyncmirror,
313                         cyclingmode=False):
314        ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master,
315                   '-aux', aux, '-cluster', system]
316        if asyncmirror:
317            ssh_cmd.append('-global')
318        if cyclingmode:
319            ssh_cmd.extend(['-cyclingmode', 'multi'])
320        return self.run_ssh_check_created(ssh_cmd)
321
322    def rmrcrelationship(self, relationship, force=False):
323        ssh_cmd = ['svctask', 'rmrcrelationship']
324        if force:
325            ssh_cmd += ['-force']
326        ssh_cmd += [relationship]
327        self.run_ssh_assert_no_output(ssh_cmd)
328
329    def switchrelationship(self, relationship, aux=True):
330        primary = 'aux' if aux else 'master'
331        ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
332                   primary, relationship]
333        self.run_ssh_assert_no_output(ssh_cmd)
334
335    def startrcrelationship(self, rc_rel, primary=None):
336        ssh_cmd = ['svctask', 'startrcrelationship', '-force']
337        if primary:
338            ssh_cmd.extend(['-primary', primary])
339        ssh_cmd.append(rc_rel)
340        self.run_ssh_assert_no_output(ssh_cmd)
341
342    def ch_rcrelationship_cycleperiod(self, relationship,
343                                      cycle_period_seconds):
344        # Note: Can only change one attribute at a time,
345        # so define two ch_rcrelationship_xxx here
346        if cycle_period_seconds:
347            ssh_cmd = ['svctask', 'chrcrelationship']
348            ssh_cmd.extend(['-cycleperiodseconds',
349                            six.text_type(cycle_period_seconds)])
350            ssh_cmd.append(relationship)
351            self.run_ssh_assert_no_output(ssh_cmd)
352
353    def ch_rcrelationship_changevolume(self, relationship,
354                                       changevolume, master):
355        # Note: Can only change one attribute at a time,
356        # so define two ch_rcrelationship_xxx here
357        if changevolume:
358            ssh_cmd = ['svctask', 'chrcrelationship']
359            if master:
360                ssh_cmd.extend(['-masterchange', changevolume])
361            else:
362                ssh_cmd.extend(['-auxchange', changevolume])
363            ssh_cmd.append(relationship)
364            self.run_ssh_assert_no_output(ssh_cmd)
365
366    def stoprcrelationship(self, relationship, access=False):
367        ssh_cmd = ['svctask', 'stoprcrelationship']
368        if access:
369            ssh_cmd.append('-access')
370        ssh_cmd.append(relationship)
371        self.run_ssh_assert_no_output(ssh_cmd)
372
373    def lsrcrelationship(self, rc_rel):
374        ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel]
375        return self.run_ssh_info(ssh_cmd)
376
377    # replication cg
378    def chrcrelationship(self, relationship, rccg=None):
379        ssh_cmd = ['svctask', 'chrcrelationship']
380        if rccg:
381            ssh_cmd.extend(['-consistgrp', rccg])
382        else:
383            ssh_cmd.extend(['-noconsistgrp'])
384        ssh_cmd.append(relationship)
385        self.run_ssh_assert_no_output(ssh_cmd)
386
387    def lsrcconsistgrp(self, rccg):
388        ssh_cmd = ['svcinfo', 'lsrcconsistgrp', '-delim', '!', rccg]
389        try:
390            return self.run_ssh_info(ssh_cmd)[0]
391        except exception.VolumeBackendAPIException as ex:
392            LOG.warning("Failed to get rcconsistgrp %(rccg)s info. "
393                        "Exception: %(ex)s.", {'rccg': rccg,
394                                               'ex': ex})
395            return None
396
397    def mkrcconsistgrp(self, rccg, system):
398        ssh_cmd = ['svctask', 'mkrcconsistgrp', '-name', rccg,
399                   '-cluster', system]
400        return self.run_ssh_check_created(ssh_cmd)
401
402    def rmrcconsistgrp(self, rccg, force=True):
403        ssh_cmd = ['svctask', 'rmrcconsistgrp']
404        if force:
405            ssh_cmd += ['-force']
406        ssh_cmd += ['"%s"' % rccg]
407        return self.run_ssh_assert_no_output(ssh_cmd)
408
409    def startrcconsistgrp(self, rccg, primary=None):
410        ssh_cmd = ['svctask', 'startrcconsistgrp', '-force']
411        if primary:
412            ssh_cmd.extend(['-primary', primary])
413        ssh_cmd.append(rccg)
414        self.run_ssh_assert_no_output(ssh_cmd)
415
416    def stoprcconsistgrp(self, rccg, access=False):
417        ssh_cmd = ['svctask', 'stoprcconsistgrp']
418        if access:
419            ssh_cmd.append('-access')
420        ssh_cmd.append(rccg)
421        self.run_ssh_assert_no_output(ssh_cmd)
422
423    def switchrcconsistgrp(self, rccg, aux=True):
424        primary = 'aux' if aux else 'master'
425        ssh_cmd = ['svctask', 'switchrcconsistgrp', '-primary',
426                   primary, rccg]
427        self.run_ssh_assert_no_output(ssh_cmd)
428
429    def lspartnership(self, system_name):
430        key_value = 'name=%s' % system_name
431        ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue',
432                   key_value, '-delim', '!']
433        return self.run_ssh_info(ssh_cmd, with_header=True)
434
435    def lspartnershipcandidate(self):
436        ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!']
437        return self.run_ssh_info(ssh_cmd, with_header=True)
438
439    def mkippartnership(self, ip_v4, bandwith=1000, backgroundcopyrate=50):
440        ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4',
441                   '-clusterip', ip_v4, '-linkbandwidthmbits',
442                   six.text_type(bandwith),
443                   '-backgroundcopyrate', six.text_type(backgroundcopyrate)]
444        return self.run_ssh_assert_no_output(ssh_cmd)
445
446    def mkfcpartnership(self, system_name, bandwith=1000,
447                        backgroundcopyrate=50):
448        ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits',
449                   six.text_type(bandwith),
450                   '-backgroundcopyrate', six.text_type(backgroundcopyrate),
451                   system_name]
452        return self.run_ssh_assert_no_output(ssh_cmd)
453
454    def chpartnership(self, partnership_id, start=True):
455        action = '-start' if start else '-stop'
456        ssh_cmd = ['svctask', 'chpartnership', action, partnership_id]
457        return self.run_ssh_assert_no_output(ssh_cmd)
458
459    def rmvdiskhostmap(self, host, vdisk):
460        ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host,
461                   '"%s"' % vdisk]
462        self.run_ssh_assert_no_output(ssh_cmd)
463
464    def lsvdiskhostmap(self, vdisk):
465        ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk]
466        return self.run_ssh_info(ssh_cmd, with_header=True)
467
468    def lshostvdiskmap(self, host):
469        ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
470        return self.run_ssh_info(ssh_cmd, with_header=True)
471
472    def get_vdiskhostmapid(self, vdisk, host):
473        resp = self.lsvdiskhostmap(vdisk)
474        for mapping_info in resp:
475            if mapping_info['host_name'] == host:
476                lun_id = mapping_info['SCSI_id']
477                return lun_id
478        return None
479
480    def rmhost(self, host):
481        ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
482        self.run_ssh_assert_no_output(ssh_cmd)
483
484    def mkvdisk(self, name, size, units, pool, opts, params):
485        ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp',
486                   '"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
487                   '-size', size, '-unit', units] + params
488        try:
489            return self.run_ssh_check_created(ssh_cmd)
490        except Exception as ex:
491            if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg:
492                vdisk = self.lsvdisk(name)
493                if vdisk:
494                    LOG.warning('CMMVC6372W The virtualized storage '
495                                'capacity that the cluster is using is '
496                                'approaching the virtualized storage '
497                                'capacity that is licensed.')
498                    return vdisk['id']
499            with excutils.save_and_reraise_exception():
500                LOG.exception('Failed to create vdisk %(vol)s.',
501                              {'vol': name})
502
503    def rmvdisk(self, vdisk, force=True):
504        ssh_cmd = ['svctask', 'rmvdisk']
505        if force:
506            ssh_cmd += ['-force']
507        ssh_cmd += ['"%s"' % vdisk]
508        self.run_ssh_assert_no_output(ssh_cmd)
509
510    def lsvdisk(self, vdisk):
511        """Return vdisk attributes or None if it doesn't exist."""
512        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
513                   '"%s"' % vdisk]
514        out, err = self._ssh(ssh_cmd, check_exit_code=False)
515        if not err:
516            return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
517                               with_header=False)[0]
518        if 'CMMVC5754E' in err:
519            return None
520        msg = (_('CLI Exception output:\n command: %(cmd)s\n '
521                 'stdout: %(out)s\n stderr: %(err)s.') %
522               {'cmd': ssh_cmd,
523                'out': out,
524                'err': err})
525        LOG.error(msg)
526        raise exception.VolumeBackendAPIException(data=msg)
527
528    def lsvdisks_from_filter(self, filter_name, value):
529        """Performs an lsvdisk command, filtering the results as specified.
530
531        Returns an iterable for all matching vdisks.
532        """
533        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
534                   '-filtervalue', '%s=%s' % (filter_name, value)]
535        return self.run_ssh_info(ssh_cmd, with_header=True)
536
537    def chvdisk(self, vdisk, params):
538        ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk]
539        self.run_ssh_assert_no_output(ssh_cmd)
540
541    def movevdisk(self, vdisk, iogrp):
542        ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk]
543        self.run_ssh_assert_no_output(ssh_cmd)
544
545    def expandvdisksize(self, vdisk, amount):
546        ssh_cmd = (
547            ['svctask', 'expandvdisksize', '-size', six.text_type(amount),
548             '-unit', 'gb', '"%s"' % vdisk])
549        self.run_ssh_assert_no_output(ssh_cmd)
550
551    def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None):
552        ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target',
553                   '"%s"' % target, '-autodelete']
554        if not full_copy:
555            ssh_cmd.extend(['-copyrate', '0'])
556        else:
557            ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)])
558        if consistgrp:
559            ssh_cmd.extend(['-consistgrp', consistgrp])
560        out, err = self._ssh(ssh_cmd, check_exit_code=False)
561        if 'successfully created' not in out:
562            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
563                     'stdout: %(out)s\n stderr: %(err)s.') %
564                   {'cmd': ssh_cmd,
565                    'out': out,
566                    'err': err})
567            LOG.error(msg)
568            raise exception.VolumeBackendAPIException(data=msg)
569        try:
570            match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
571                                  'successfully created', out)
572            fc_map_id = match_obj.group(1)
573        except (AttributeError, IndexError):
574            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
575                     'stdout: %(out)s\n stderr: %(err)s.') %
576                   {'cmd': ssh_cmd,
577                    'out': out,
578                    'err': err})
579            LOG.error(msg)
580            raise exception.VolumeBackendAPIException(data=msg)
581        return fc_map_id
582
583    def prestartfcmap(self, fc_map_id, restore=False):
584        ssh_cmd = ['svctask', 'prestartfcmap']
585        if restore:
586            ssh_cmd.append('-restore')
587        ssh_cmd.append(fc_map_id)
588        self.run_ssh_assert_no_output(ssh_cmd)
589
590    def startfcmap(self, fc_map_id, restore=False):
591        ssh_cmd = ['svctask', 'startfcmap']
592        if restore:
593            ssh_cmd.append('-restore')
594        ssh_cmd.append(fc_map_id)
595        self.run_ssh_assert_no_output(ssh_cmd)
596
597    def prestartfcconsistgrp(self, fc_consist_group):
598        ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
599        self.run_ssh_assert_no_output(ssh_cmd)
600
601    def startfcconsistgrp(self, fc_consist_group):
602        ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
603        self.run_ssh_assert_no_output(ssh_cmd)
604
605    def stopfcconsistgrp(self, fc_consist_group):
606        ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
607        self.run_ssh_assert_no_output(ssh_cmd)
608
609    def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
610        ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
611                   '-autodelete', autodel, fc_map_id]
612        self.run_ssh_assert_no_output(ssh_cmd)
613
614    def stopfcmap(self, fc_map_id):
615        ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
616        self.run_ssh_assert_no_output(ssh_cmd)
617
618    def rmfcmap(self, fc_map_id):
619        ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
620        self.run_ssh_assert_no_output(ssh_cmd)
621
622    def lsvdiskfcmappings(self, vdisk):
623        ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!',
624                   '"%s"' % vdisk]
625        return self.run_ssh_info(ssh_cmd, with_header=True)
626
627    def lsfcmap(self, fc_map_id):
628        ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
629                   'id=%s' % fc_map_id, '-delim', '!']
630        return self.run_ssh_info(ssh_cmd, with_header=True)
631
632    def lsfcconsistgrp(self, fc_consistgrp):
633        ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
634        out, err = self._ssh(ssh_cmd)
635        return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
636                           with_header=False)
637
638    def mkfcconsistgrp(self, fc_consist_group):
639        ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
640        return self.run_ssh_check_created(ssh_cmd)
641
642    def rmfcconsistgrp(self, fc_consist_group):
643        ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
644        return self.run_ssh_assert_no_output(ssh_cmd)
645
646    def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete):
647        ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
648                   '"%s"' % dest_pool])
649        if auto_delete:
650            ssh_cmd += ['-autodelete']
651        ssh_cmd += ['"%s"' % vdisk]
652        return self.run_ssh_check_created(ssh_cmd)
653
654    def lsvdiskcopy(self, vdisk, copy_id=None):
655        ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
656        with_header = True
657        if copy_id:
658            ssh_cmd += ['-copy', copy_id]
659            with_header = False
660        ssh_cmd += ['"%s"' % vdisk]
661        return self.run_ssh_info(ssh_cmd, with_header=with_header)
662
663    def lsvdisksyncprogress(self, vdisk, copy_id):
664        ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
665                   '-copy', copy_id, '"%s"' % vdisk]
666        return self.run_ssh_info(ssh_cmd, with_header=True)[0]
667
668    def rmvdiskcopy(self, vdisk, copy_id):
669        ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk]
670        self.run_ssh_assert_no_output(ssh_cmd)
671
672    def addvdiskaccess(self, vdisk, iogrp):
673        ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp,
674                   '"%s"' % vdisk]
675        self.run_ssh_assert_no_output(ssh_cmd)
676
677    def rmvdiskaccess(self, vdisk, iogrp):
678        ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk]
679        self.run_ssh_assert_no_output(ssh_cmd)
680
681    def lsportfc(self, node_id):
682        ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!',
683                   '-filtervalue', 'node_id=%s' % node_id]
684        return self.run_ssh_info(ssh_cmd, with_header=True)
685
686    def lstargetportfc(self, current_node_id=None, host_io_permitted=None):
687        ssh_cmd = ['svcinfo', 'lstargetportfc', '-delim', '!']
688        if current_node_id and host_io_permitted:
689            ssh_cmd += ['-filtervalue', '%s:%s' % (
690                'current_node_id=%s' % current_node_id,
691                'host_io_permitted=%s' % host_io_permitted)]
692        elif current_node_id:
693            ssh_cmd += ['-filtervalue', 'current_node_id=%s' % current_node_id]
694        return self.run_ssh_info(ssh_cmd, with_header=True)
695
696    def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
697        ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy',
698                   copy_id, '-vdisk', vdisk]
699        self.run_ssh_assert_no_output(ssh_cmd)
700
701    def mkvolume(self, name, size, units, pool, params):
702        ssh_cmd = ['svctask', 'mkvolume', '-name', name, '-pool',
703                   '"%s"' % pool, '-size', size, '-unit', units] + params
704        return self.run_ssh_check_created(ssh_cmd)
705
706    def rmvolume(self, volume, force=True):
707        ssh_cmd = ['svctask', 'rmvolume']
708        if force:
709            ssh_cmd += ['-removehostmappings', '-removefcmaps',
710                        '-removercrelationships']
711        ssh_cmd += ['"%s"' % volume]
712        self.run_ssh_assert_no_output(ssh_cmd)
713
714    def addvolumecopy(self, name, pool, params):
715        ssh_cmd = ['svctask', 'addvolumecopy', '-pool',
716                   '"%s"' % pool] + params + ['"%s"' % name]
717        self.run_ssh_assert_no_output(ssh_cmd)
718
719    def rmvolumecopy(self, name, pool):
720        ssh_cmd = ['svctask', 'rmvolumecopy', '-pool',
721                   '"%s"' % pool, '"%s"' % name]
722        self.run_ssh_assert_no_output(ssh_cmd)
723
724
725class StorwizeHelpers(object):
726
727    # All the supported QoS key are saved in this dict. When a new
728    # key is going to add, three values MUST be set:
729    # 'default': to indicate the value, when the parameter is disabled.
730    # 'param': to indicate the corresponding parameter in the command.
731    # 'type': to indicate the type of this value.
732    WAIT_TIME = 5
733    svc_qos_keys = {'IOThrottling': {'default': '0',
734                                     'param': 'rate',
735                                     'type': int}}
736
737    def __init__(self, run_ssh):
738        self.ssh = StorwizeSSH(run_ssh)
739        self.check_fcmapping_interval = 3
740
741    @staticmethod
742    def handle_keyerror(cmd, out):
743        msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
744               % {'out': out, 'cmd': cmd})
745        raise exception.VolumeBackendAPIException(data=msg)
746
747    def compression_enabled(self):
748        """Return whether or not compression is enabled for this system."""
749        resp = self.ssh.lslicense()
750        keys = ['license_compression_enclosures',
751                'license_compression_capacity']
752        for key in keys:
753            if resp.get(key, '0') != '0':
754                return True
755
756        # lslicense is not used for V9000 compression check
757        # compression_enclosures and compression_capacity are
758        # always 0. V9000 uses license_scheme 9846 as an
759        # indicator and can always do compression
760        try:
761            resp = self.ssh.lsguicapabilities()
762            if resp.get('license_scheme', '0') == '9846':
763                return True
764        except exception.VolumeBackendAPIException:
765            LOG.exception("Failed to fetch licensing scheme.")
766        return False
767
768    def replication_licensed(self):
769        """Return whether or not replication is enabled for this system."""
770        # Uses product_key as an indicator to check
771        # whether replication is supported in storage.
772        try:
773            resp = self.ssh.lsguicapabilities()
774            product_key = resp.get('product_key', '0')
775            if product_key in storwize_const.REP_CAP_DEVS:
776                return True
777        except exception.VolumeBackendAPIException as war:
778            LOG.warning("Failed to run lsguicapability. Exception: %s.", war)
779        return False
780
781    def get_system_info(self):
782        """Return system's name, ID, and code level."""
783        resp = self.ssh.lssystem()
784        level = resp['code_level']
785        match_obj = re.search('([0-9].){3}[0-9]', level)
786        if match_obj is None:
787            msg = _('Failed to get code level (%s).') % level
788            raise exception.VolumeBackendAPIException(data=msg)
789        code_level = match_obj.group().split('.')
790        return {'code_level': tuple([int(x) for x in code_level]),
791                'topology': resp['topology'],
792                'system_name': resp['name'],
793                'system_id': resp['id']}
794
795    def get_pool_attrs(self, pool):
796        """Return attributes for the specified pool."""
797        return self.ssh.lsmdiskgrp(pool)
798
799    def is_pool_defined(self, pool_name):
800        """Check if vdisk is defined."""
801        attrs = self.get_pool_attrs(pool_name)
802        return attrs is not None
803
804    def get_available_io_groups(self):
805        """Return list of available IO groups."""
806        iogrps = []
807        resp = self.ssh.lsiogrp()
808        for iogrp in resp:
809            try:
810                if int(iogrp['node_count']) > 0:
811                    iogrps.append(int(iogrp['id']))
812            except KeyError:
813                self.handle_keyerror('lsiogrp', iogrp)
814            except ValueError:
815                msg = (_('Expected integer for node_count, '
816                         'svcinfo lsiogrp returned: %(node)s.') %
817                       {'node': iogrp['node_count']})
818                raise exception.VolumeBackendAPIException(data=msg)
819        return iogrps
820
821    def get_vdisk_count_by_io_group(self):
822        res = {}
823        resp = self.ssh.lsiogrp()
824        for iogrp in resp:
825            try:
826                if int(iogrp['node_count']) > 0:
827                    res[int(iogrp['id'])] = int(iogrp['vdisk_count'])
828            except KeyError:
829                self.handle_keyerror('lsiogrp', iogrp)
830            except ValueError:
831                msg = (_('Expected integer for node_count, '
832                         'svcinfo lsiogrp returned: %(node)s') %
833                       {'node': iogrp['node_count']})
834                raise exception.VolumeBackendAPIException(data=msg)
835        return res
836
837    def select_io_group(self, state, opts, pool):
838        selected_iog = 0
839        iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
840        if len(iog_list) == 0:
841            raise exception.InvalidInput(
842                reason=_('Given I/O group(s) %(iogrp)s not valid; available '
843                         'I/O groups are %(avail)s.')
844                % {'iogrp': opts['iogrp'],
845                   'avail': state['available_iogrps']})
846
847        site_iogrp = []
848        pool_data = self.get_pool_attrs(pool)
849        if pool_data is None:
850            msg = (_('Failed getting details for pool %s.') % pool)
851            LOG.error(msg)
852            raise exception.InvalidConfigurationValue(message=msg)
853        if 'site_id' in pool_data and pool_data['site_id']:
854            for node in state['storage_nodes'].values():
855                if pool_data['site_id'] == node['site_id']:
856                    site_iogrp.append(node['IO_group'])
857            site_iogrp = list(map(int, site_iogrp))
858            iog_list = list(set(site_iogrp).intersection(iog_list))
859            if len(iog_list) == 0:
860                raise exception.InvalidInput(
861                    reason=_('The storage system topology is hyperswap or '
862                             'stretched, The site_id of pool %(pool)s is '
863                             '%(site_id)s, the available I/O groups on this '
864                             'site is %(site_iogrp)s, but the given I/O'
865                             ' group(s) is %(iogrp)s.')
866                    % {'pool': pool, 'site_id': pool_data['site_id'],
867                       'site_iogrp': site_iogrp, 'iogrp': opts['iogrp']})
868
869        iog_vdc = self.get_vdisk_count_by_io_group()
870        LOG.debug("IO group current balance %s", iog_vdc)
871        min_vdisk_count = iog_vdc[iog_list[0]]
872        selected_iog = iog_list[0]
873        for iog in iog_list:
874            if iog_vdc[iog] < min_vdisk_count:
875                min_vdisk_count = iog_vdc[iog]
876                selected_iog = iog
877        LOG.debug("Selected io_group is %d", selected_iog)
878        return selected_iog
879
880    def get_volume_io_group(self, vol_name):
881        vdisk = self.ssh.lsvdisk(vol_name)
882        if vdisk:
883            resp = self.ssh.lsiogrp()
884            for iogrp in resp:
885                if iogrp['name'] == vdisk['IO_group_name']:
886                    return int(iogrp['id'])
887        return None
888
889    def get_node_info(self):
890        """Return dictionary containing information on system's nodes."""
891        nodes = {}
892        resp = self.ssh.lsnode()
893        for node_data in resp:
894            try:
895                if node_data['status'] != 'online':
896                    continue
897                node = {}
898                node['id'] = node_data['id']
899                node['name'] = node_data['name']
900                node['IO_group'] = node_data['IO_group_id']
901                node['iscsi_name'] = node_data['iscsi_name']
902                node['WWNN'] = node_data['WWNN']
903                node['status'] = node_data['status']
904                node['WWPN'] = []
905                node['ipv4'] = []
906                node['ipv6'] = []
907                node['enabled_protocols'] = []
908                nodes[node['id']] = node
909                node['site_id'] = (node_data['site_id']
910                                   if 'site_id' in node_data else None)
911            except KeyError:
912                self.handle_keyerror('lsnode', node_data)
913        return nodes
914
915    def add_iscsi_ip_addrs(self, storage_nodes):
916        """Add iSCSI IP addresses to system node information."""
917        resp = self.ssh.lsportip()
918        for ip_data in resp:
919            try:
920                state = ip_data['state']
921                if ip_data['node_id'] in storage_nodes and (
922                        state == 'configured' or state == 'online'):
923                    node = storage_nodes[ip_data['node_id']]
924                    if len(ip_data['IP_address']):
925                        node['ipv4'].append(ip_data['IP_address'])
926                    if len(ip_data['IP_address_6']):
927                        node['ipv6'].append(ip_data['IP_address_6'])
928            except KeyError:
929                self.handle_keyerror('lsportip', ip_data)
930
931    def add_fc_wwpns(self, storage_nodes, code_level):
932        """Add FC WWPNs to system node information."""
933        for key in storage_nodes:
934            node = storage_nodes[key]
935            wwpns = set(node['WWPN'])
936            # The Storwize/svc release 7.7.0.0 introduced NPIV feature.
937            # The virtual wwpns will be included in cli lstargetportfc
938            if code_level < (7, 7, 0, 0):
939                resp = self.ssh.lsportfc(node_id=node['id'])
940                for port_info in resp:
941                    if (port_info['type'] == 'fc' and
942                            port_info['status'] == 'active'):
943                        wwpns.add(port_info['WWPN'])
944            else:
945                npiv_wwpns = self.get_npiv_wwpns(node_id=node['id'])
946                wwpns.update(npiv_wwpns)
947            node['WWPN'] = list(wwpns)
948            LOG.info('WWPN on node %(node)s: %(wwpn)s.',
949                     {'node': node['id'], 'wwpn': node['WWPN']})
950
951    def get_npiv_wwpns(self, node_id=None, host_io=None):
952        wwpns = set()
953        # In the response of lstargetportfc, the host_io_permitted
954        # indicates whether the port can be used for host I/O
955        resp = self.ssh.lstargetportfc(current_node_id=node_id,
956                                       host_io_permitted=host_io)
957        for port_info in resp:
958            wwpns.add(port_info['WWPN'])
959        return list(wwpns)
960
961    def add_chap_secret_to_host(self, host_name):
962        """Generate and store a randomly-generated CHAP secret for the host."""
963        chap_secret = utils.generate_password()
964        self.ssh.add_chap_secret(chap_secret, host_name)
965        return chap_secret
966
967    def get_chap_secret_for_host(self, host_name):
968        """Generate and store a randomly-generated CHAP secret for the host."""
969        resp = self.ssh.lsiscsiauth()
970        host_found = False
971        for host_data in resp:
972            try:
973                if host_data['name'] == host_name:
974                    host_found = True
975                    if host_data['iscsi_auth_method'] == 'chap':
976                        return host_data['iscsi_chap_secret']
977            except KeyError:
978                self.handle_keyerror('lsiscsiauth', host_data)
979        if not host_found:
980            msg = _('Failed to find host %s.') % host_name
981            raise exception.VolumeBackendAPIException(data=msg)
982        return None
983
984    def get_conn_fc_wwpns(self, host):
985        wwpns = set()
986        resp = self.ssh.lsfabric(host=host)
987        for wwpn in resp.select('local_wwpn'):
988            if wwpn is not None:
989                wwpns.add(wwpn)
990        return list(wwpns)
991
992    def get_host_from_connector(self, connector, volume_name=None,
993                                iscsi=False):
994        """Return the Storwize host described by the connector."""
995        LOG.debug('Enter: get_host_from_connector: %s.', connector)
996
997        # If we have FC information, we have a faster lookup option
998        host_name = None
999        if 'wwpns' in connector and not iscsi:
1000            for wwpn in connector['wwpns']:
1001                resp = self.ssh.lsfabric(wwpn=wwpn)
1002                for wwpn_info in resp:
1003                    try:
1004                        if (wwpn_info['remote_wwpn'] and
1005                                wwpn_info['name'] and
1006                                wwpn_info['remote_wwpn'].lower() ==
1007                                wwpn.lower()):
1008                            host_name = wwpn_info['name']
1009                            break
1010                    except KeyError:
1011                        self.handle_keyerror('lsfabric', wwpn_info)
1012                if host_name:
1013                    break
1014        if host_name:
1015            LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
1016            return host_name
1017
1018        def update_host_list(host, host_list):
1019            idx = host_list.index(host)
1020            del host_list[idx]
1021            host_list.insert(0, host)
1022
1023        # That didn't work, so try exhaustive search
1024        hosts_info = self.ssh.lshost()
1025        host_list = list(hosts_info.select('name'))
1026        # If we have a "real" connector, we might be able to find the
1027        # host entry with fewer queries if we move the host entries
1028        # that contain the connector's host property value to the front
1029        # of the list
1030        if 'host' in connector:
1031            # order host_list such that the host entries that
1032            # contain the connector's host name are at the
1033            # beginning of the list
1034            for host in host_list:
1035                if re.search(connector['host'], host):
1036                    update_host_list(host, host_list)
1037        # If we have a volume name we have a potential fast path
1038        # for finding the matching host for that volume.
1039        # Add the host_names that have mappings for our volume to the
1040        # head of the list of host names to search them first
1041        if volume_name:
1042            hosts_map_info = self.ssh.lsvdiskhostmap(volume_name)
1043            hosts_map_info_list = list(hosts_map_info.select('host_name'))
1044            # remove the fast path host names from the end of the list
1045            # and move to the front so they are only searched for once.
1046            for host in hosts_map_info_list:
1047                update_host_list(host, host_list)
1048        found = False
1049        for name in host_list:
1050            try:
1051                resp = self.ssh.lshost(host=name)
1052            except exception.VolumeBackendAPIException as ex:
1053                LOG.debug("Exception message: %s", ex.msg)
1054                if 'CMMVC5754E' in ex.msg:
1055                    LOG.debug("CMMVC5754E found in CLI exception.")
1056                    # CMMVC5754E: The specified object does not exist
1057                    # The host has been deleted while walking the list.
1058                    # This is a result of a host change on the SVC that
1059                    # is out of band to this request.
1060                    continue
1061                # unexpected error so reraise it
1062                with excutils.save_and_reraise_exception():
1063                    pass
1064            if iscsi:
1065                if 'initiator' in connector:
1066                    for iscsi_name in resp.select('iscsi_name'):
1067                        if iscsi_name == connector['initiator']:
1068                            host_name = name
1069                            found = True
1070                            break
1071            elif 'wwpns' in connector and len(connector['wwpns']):
1072                connector_wwpns = [str(x).lower() for x in connector['wwpns']]
1073                for wwpn in resp.select('WWPN'):
1074                    if wwpn and wwpn.lower() in connector_wwpns:
1075                        host_name = name
1076                        found = True
1077                        break
1078            if found:
1079                break
1080
1081        LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
1082        return host_name
1083
1084    def create_host(self, connector, iscsi=False, site=None):
1085        """Create a new host on the storage system.
1086
1087        We create a host name and associate it with the given connection
1088        information.  The host name will be a cleaned up version of the given
1089        host name (at most 55 characters), plus a random 8-character suffix to
1090        avoid collisions. The total length should be at most 63 characters.
1091        """
1092        LOG.debug('Enter: create_host: host %s.', connector['host'])
1093
1094        # Before we start, make sure host name is a string and that we have at
1095        # least one port.
1096        host_name = connector['host']
1097        if not isinstance(host_name, six.string_types):
1098            msg = _('create_host: Host name is not unicode or string.')
1099            LOG.error(msg)
1100            raise exception.VolumeDriverException(message=msg)
1101
1102        ports = []
1103        if iscsi:
1104            if 'initiator' in connector:
1105                ports.append(['initiator', '%s' % connector['initiator']])
1106            else:
1107                msg = _('create_host: No initiators supplied.')
1108        else:
1109            if 'wwpns' in connector:
1110                for wwpn in connector['wwpns']:
1111                    ports.append(['wwpn', '%s' % wwpn])
1112            else:
1113                msg = _('create_host: No wwpns supplied.')
1114        if not len(ports):
1115            LOG.error(msg)
1116            raise exception.VolumeDriverException(message=msg)
1117
1118        # Build a host name for the Storwize host - first clean up the name
1119        if isinstance(host_name, six.text_type):
1120            host_name = unicodedata.normalize('NFKD', host_name).encode(
1121                'ascii', 'replace').decode('ascii')
1122
1123        for num in range(0, 128):
1124            ch = str(chr(num))
1125            if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
1126                host_name = host_name.replace(ch, '-')
1127
1128        # Storwize doesn't like hostname that doesn't starts with letter or _.
1129        if not re.match('^[A-Za-z]', host_name):
1130            host_name = '_' + host_name
1131
1132        # Add a random 8-character suffix to avoid collisions
1133        rand_id = str(random.randint(0, 99999999)).zfill(8)
1134        host_name = '%s-%s' % (host_name[:55], rand_id)
1135
1136        # Create a host with one port
1137        port = ports.pop(0)
1138        # Host site_id is necessary for hyperswap volume.
1139        self.ssh.mkhost(host_name, port[0], port[1], site)
1140
1141        # Add any additional ports to the host
1142        for port in ports:
1143            self.ssh.addhostport(host_name, port[0], port[1])
1144
1145        LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.',
1146                  {'host': connector['host'], 'host_name': host_name})
1147        return host_name
1148
1149    def update_host(self, host_name, site_name):
1150        self.ssh.chhost(host_name, site=site_name)
1151
1152    def delete_host(self, host_name):
1153        self.ssh.rmhost(host_name)
1154
1155    def _get_unused_lun_id(self, host_name):
1156        luns_used = []
1157        result_lun = '-1'
1158        resp = self.ssh.lshostvdiskmap(host_name)
1159        for mapping_info in resp:
1160            luns_used.append(int(mapping_info['SCSI_id']))
1161
1162        luns_used.sort()
1163        result_lun = str(len(luns_used))
1164        for index, n in enumerate(luns_used):
1165            if n > index:
1166                result_lun = str(index)
1167                break
1168
1169        return result_lun
1170
1171    @cinder_utils.trace
1172    def map_vol_to_host(self, volume_name, host_name, multihostmap):
1173        """Create a mapping between a volume to a host."""
1174
1175        # Check if this volume is already mapped to this host
1176        result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name)
1177        if result_lun:
1178            LOG.debug('volume %(volume_name)s is already mapped to the host '
1179                      '%(host_name)s.',
1180                      {'volume_name': volume_name, 'host_name': host_name})
1181            return int(result_lun)
1182
1183        def _retry_on_exception(e):
1184            if hasattr(e, 'msg') and 'CMMVC5879E' in e.msg:
1185                return True
1186            return False
1187
1188        @retry(retry_on_exception=_retry_on_exception,
1189               stop_max_attempt_number=3,
1190               wait_random_min=1,
1191               wait_random_max=10)
1192        def make_vdisk_host_map():
1193            try:
1194                result_lun = self._get_unused_lun_id(host_name)
1195                self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
1196                                        multihostmap)
1197                return int(result_lun)
1198            except Exception as ex:
1199                if (not multihostmap and hasattr(ex, 'msg') and
1200                        'CMMVC6071E' in ex.msg):
1201                    LOG.warning('storwize_svc_multihostmap_enabled is set '
1202                                'to False, not allowing multi host mapping.')
1203                    raise exception.VolumeDriverException(
1204                        message=_('CMMVC6071E The VDisk-to-host mapping was '
1205                                  'not created because the VDisk is already '
1206                                  'mapped to a host.'))
1207                with excutils.save_and_reraise_exception():
1208                    LOG.error('Error mapping VDisk-to-host.')
1209
1210        return make_vdisk_host_map()
1211
1212    def unmap_vol_from_host(self, volume_name, host_name):
1213        """Unmap the volume and delete the host if it has no more mappings."""
1214
1215        LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from '
1216                  'host %(host_name)s.',
1217                  {'volume_name': volume_name, 'host_name': host_name})
1218
1219        # Check if the mapping exists
1220        resp = self.ssh.lsvdiskhostmap(volume_name)
1221        if not len(resp):
1222            LOG.warning('unmap_vol_from_host: No mapping of volume '
1223                        '%(vol_name)s to any host found.',
1224                        {'vol_name': volume_name})
1225            return host_name
1226        if host_name is None:
1227            if len(resp) > 1:
1228                LOG.warning('unmap_vol_from_host: Multiple mappings of '
1229                            'volume %(vol_name)s found, no host '
1230                            'specified.', {'vol_name': volume_name})
1231                return
1232            else:
1233                host_name = resp[0]['host_name']
1234        else:
1235            found = False
1236            for h in resp.select('host_name'):
1237                if h == host_name:
1238                    found = True
1239            if not found:
1240                LOG.warning('unmap_vol_from_host: No mapping of volume '
1241                            '%(vol_name)s to host %(host)s found.',
1242                            {'vol_name': volume_name, 'host': host_name})
1243                return host_name
1244        # We now know that the mapping exists
1245        self.ssh.rmvdiskhostmap(host_name, volume_name)
1246
1247        LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from '
1248                  'host %(host_name)s.',
1249                  {'volume_name': volume_name, 'host_name': host_name})
1250        return host_name
1251
1252    def check_host_mapped_vols(self, host_name):
1253        return self.ssh.lshostvdiskmap(host_name)
1254
1255    def check_vol_mapped_to_host(self, vol_name, host_name):
1256        resp = self.ssh.lsvdiskhostmap(vol_name)
1257        for mapping_info in resp:
1258            if mapping_info['host_name'] == host_name:
1259                return True
1260        return False
1261
1262    @staticmethod
1263    def build_default_opts(config):
1264        # Ignore capitalization
1265
1266        cluster_partner = config.storwize_svc_stretched_cluster_partner
1267        opt = {'rsize': config.storwize_svc_vol_rsize,
1268               'warning': config.storwize_svc_vol_warning,
1269               'autoexpand': config.storwize_svc_vol_autoexpand,
1270               'grainsize': config.storwize_svc_vol_grainsize,
1271               'compression': config.storwize_svc_vol_compression,
1272               'easytier': config.storwize_svc_vol_easytier,
1273               'iogrp': config.storwize_svc_vol_iogrp,
1274               'qos': None,
1275               'stretched_cluster': cluster_partner,
1276               'replication': False,
1277               'nofmtdisk': config.storwize_svc_vol_nofmtdisk,
1278               'mirror_pool': config.storwize_svc_mirror_pool,
1279               'volume_topology': None,
1280               'peer_pool': config.storwize_peer_pool,
1281               'cycle_period_seconds': config.cycle_period_seconds}
1282        return opt
1283
1284    @staticmethod
1285    def check_vdisk_opts(state, opts):
1286        # Check that grainsize is 32/64/128/256
1287        if opts['grainsize'] not in [32, 64, 128, 256]:
1288            raise exception.InvalidInput(
1289                reason=_('Illegal value specified for '
1290                         'storwize_svc_vol_grainsize: set to either '
1291                         '32, 64, 128, or 256.'))
1292
1293        # Check that compression is supported
1294        if opts['compression'] and not state['compression_enabled']:
1295            raise exception.InvalidInput(
1296                reason=_('System does not support compression.'))
1297
1298        # Check that rsize is set if compression is set
1299        if opts['compression'] and opts['rsize'] == -1:
1300            raise exception.InvalidInput(
1301                reason=_('If compression is set to True, rsize must '
1302                         'also be set (not equal to -1).'))
1303
1304        # Check cycle_period_seconds are in 60-86400
1305        if opts['cycle_period_seconds'] not in range(60, 86401):
1306            raise exception.InvalidInput(
1307                reason=_('cycle_period_seconds should be integer '
1308                         'between 60 and 86400.'))
1309
1310        iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
1311
1312        if len(iogs) == 0:
1313            raise exception.InvalidInput(
1314                reason=_('Given I/O group(s) %(iogrp)s not valid; available '
1315                         'I/O groups are %(avail)s.')
1316                % {'iogrp': opts['iogrp'],
1317                   'avail': state['available_iogrps']})
1318
1319        if opts['nofmtdisk'] and opts['rsize'] != -1:
1320            raise exception.InvalidInput(
1321                reason=_('If nofmtdisk is set to True, rsize must '
1322                         'also be set to -1.'))
1323
1324    @staticmethod
1325    def _get_valid_requested_io_groups(state, opts):
1326        given_iogs = str(opts['iogrp'])
1327        iog_list = given_iogs.split(',')
1328        # convert to int
1329        iog_list = list(map(int, iog_list))
1330        LOG.debug("Requested iogroups %s", iog_list)
1331        LOG.debug("Available iogroups %s", state['available_iogrps'])
1332        filtiog = set(iog_list).intersection(state['available_iogrps'])
1333        iog_list = list(filtiog)
1334        LOG.debug("Filtered (valid) requested iogroups %s", iog_list)
1335        return iog_list
1336
1337    def _get_opts_from_specs(self, opts, specs):
1338        qos = {}
1339        for k, value in specs.items():
1340            # Get the scope, if using scope format
1341            key_split = k.split(':')
1342            if len(key_split) == 1:
1343                scope = None
1344                key = key_split[0]
1345            else:
1346                scope = key_split[0]
1347                key = key_split[1]
1348
1349            # We generally do not look at capabilities in the driver, but
1350            # replication is a special case where the user asks for
1351            # a volume to be replicated, and we want both the scheduler and
1352            # the driver to act on the value.
1353            if ((not scope or scope == 'capabilities') and
1354               key == 'replication'):
1355                scope = None
1356                key = 'replication'
1357                words = value.split()
1358                if not (words and len(words) == 2 and words[0] == '<is>'):
1359                    LOG.error('Replication must be specified as '
1360                              '\'<is> True\' or \'<is> False\'.')
1361                del words[0]
1362                value = words[0]
1363
1364            # Add the QoS.
1365            if scope and scope == 'qos':
1366                if key in self.svc_qos_keys.keys():
1367                    try:
1368                        type_fn = self.svc_qos_keys[key]['type']
1369                        value = type_fn(value)
1370                        qos[key] = value
1371                    except ValueError:
1372                        continue
1373
1374            # Any keys that the driver should look at should have the
1375            # 'drivers' scope.
1376            if scope and scope != 'drivers':
1377                continue
1378
1379            if key in opts:
1380                this_type = type(opts[key]).__name__
1381                if this_type == 'int':
1382                    value = int(value)
1383                elif this_type == 'bool':
1384                    value = strutils.bool_from_string(value)
1385                opts[key] = value
1386        if len(qos) != 0:
1387            opts['qos'] = qos
1388        return opts
1389
1390    def _get_qos_from_volume_metadata(self, volume_metadata):
1391        """Return the QoS information from the volume metadata."""
1392        qos = {}
1393        for i in volume_metadata:
1394            k = i.get('key', None)
1395            value = i.get('value', None)
1396            key_split = k.split(':')
1397            if len(key_split) == 1:
1398                scope = None
1399                key = key_split[0]
1400            else:
1401                scope = key_split[0]
1402                key = key_split[1]
1403            # Add the QoS.
1404            if scope and scope == 'qos':
1405                if key in self.svc_qos_keys.keys():
1406                    try:
1407                        type_fn = self.svc_qos_keys[key]['type']
1408                        value = type_fn(value)
1409                        qos[key] = value
1410                    except ValueError:
1411                        continue
1412        return qos
1413
1414    def _wait_for_a_condition(self, testmethod, timeout=None,
1415                              interval=INTERVAL_1_SEC,
1416                              raise_exception=False):
1417        start_time = time.time()
1418        if timeout is None:
1419            timeout = DEFAULT_TIMEOUT
1420
1421        def _inner():
1422            try:
1423                testValue = testmethod()
1424            except Exception as ex:
1425                if raise_exception:
1426                    LOG.exception("_wait_for_a_condition: %s"
1427                                  " execution failed.",
1428                                  testmethod.__name__)
1429                    raise exception.VolumeBackendAPIException(data=ex)
1430                else:
1431                    testValue = False
1432                    LOG.debug('Helper.'
1433                              '_wait_for_condition: %(method_name)s '
1434                              'execution failed for %(exception)s.',
1435                              {'method_name': testmethod.__name__,
1436                               'exception': ex.message})
1437            if testValue:
1438                raise loopingcall.LoopingCallDone()
1439
1440            if int(time.time()) - start_time > timeout:
1441                msg = (_('CommandLineHelper._wait_for_condition: %s timeout.')
1442                       % testmethod.__name__)
1443                LOG.error(msg)
1444                raise exception.VolumeBackendAPIException(data=msg)
1445
1446        timer = loopingcall.FixedIntervalLoopingCall(_inner)
1447        timer.start(interval=interval).wait()
1448
1449    def get_vdisk_params(self, config, state, type_id,
1450                         volume_type=None, volume_metadata=None):
1451        """Return the parameters for creating the vdisk.
1452
1453        Takes volume type and defaults from config options into account.
1454        """
1455        opts = self.build_default_opts(config)
1456        ctxt = context.get_admin_context()
1457        if volume_type is None and type_id is not None:
1458            volume_type = volume_types.get_volume_type(ctxt, type_id)
1459        if volume_type:
1460            qos_specs_id = volume_type.get('qos_specs_id')
1461            specs = dict(volume_type).get('extra_specs')
1462
1463            # NOTE(vhou): We prefer the qos_specs association
1464            # and over-ride any existing
1465            # extra-specs settings if present
1466            if qos_specs_id is not None:
1467                kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
1468                # Merge the qos_specs into extra_specs and qos_specs has higher
1469                # priority than extra_specs if they have different values for
1470                # the same key.
1471                specs.update(kvs)
1472            opts = self._get_opts_from_specs(opts, specs)
1473        if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
1474                and volume_metadata):
1475            qos = self._get_qos_from_volume_metadata(volume_metadata)
1476            if len(qos) != 0:
1477                opts['qos'] = qos
1478
1479        self.check_vdisk_opts(state, opts)
1480        return opts
1481
1482    @staticmethod
1483    def _get_vdisk_create_params(opts, add_copies=False):
1484        easytier = 'on' if opts['easytier'] else 'off'
1485        if opts['rsize'] == -1:
1486            params = []
1487            if opts['nofmtdisk']:
1488                params.append('-nofmtdisk')
1489        else:
1490            params = ['-rsize', '%s%%' % str(opts['rsize']),
1491                      '-autoexpand', '-warning',
1492                      '%s%%' % str(opts['warning'])]
1493            if not opts['autoexpand']:
1494                params.remove('-autoexpand')
1495
1496            if opts['compression']:
1497                params.append('-compressed')
1498            else:
1499                params.extend(['-grainsize', str(opts['grainsize'])])
1500
1501        if add_copies and opts['mirror_pool']:
1502            params.extend(['-copies', '2'])
1503
1504        params.extend(['-easytier', easytier])
1505        return params
1506
1507    def create_vdisk(self, name, size, units, pool, opts):
1508        LOG.debug('Enter: create_vdisk: vdisk %s.', name)
1509        mdiskgrp = pool
1510        if opts['mirror_pool']:
1511            if not self.is_pool_defined(opts['mirror_pool']):
1512                raise exception.InvalidInput(
1513                    reason=_('The pool %s in which mirrored copy is stored '
1514                             'is invalid') % opts['mirror_pool'])
1515            # The syntax of pool SVC expects is pool:mirror_pool in
1516            # mdiskgrp for mirror volume
1517            mdiskgrp = '%s:%s' % (pool, opts['mirror_pool'])
1518        params = self._get_vdisk_create_params(
1519            opts, add_copies=True if opts['mirror_pool'] else False)
1520        self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params)
1521        LOG.debug('Leave: _create_vdisk: volume %s.', name)
1522
1523    def _get_hyperswap_volume_create_params(self, opts):
1524        # Storwize/svc use cli command mkvolume to create hyperswap volume.
1525        # You must specify -thin with grainsize.
1526        # You must specify either -thin or -compressed with warning.
1527        params = []
1528        LOG.debug('The I/O groups of a hyperswap volume will be selected by '
1529                  'storage.')
1530        if opts['rsize'] != -1:
1531            params.extend(['-buffersize', '%s%%' % str(opts['rsize']),
1532                           '-warning',
1533                           '%s%%' % six.text_type(opts['warning'])])
1534            if not opts['autoexpand']:
1535                params.append('-noautoexpand')
1536            if opts['compression']:
1537                params.append('-compressed')
1538            else:
1539                params.append('-thin')
1540                params.extend(['-grainsize', six.text_type(opts['grainsize'])])
1541        return params
1542
1543    def create_hyperswap_volume(self, vol_name, size, units, pool, opts):
1544        vol_name = '"%s"' % vol_name
1545        params = self._get_hyperswap_volume_create_params(opts)
1546        self.ssh.mkvolume(vol_name, six.text_type(size), units, pool, params)
1547
1548    def convert_volume_to_hyperswap(self, vol_name, opts, state):
1549        vol_name = '%s' % vol_name
1550        if not self.is_system_topology_hyperswap(state):
1551            reason = _('Convert volume to hyperswap failed, the system is '
1552                       'below release 7.6.0.0 or it is not hyperswap '
1553                       'topology.')
1554            raise exception.VolumeDriverException(reason=reason)
1555        else:
1556            attr = self.get_vdisk_attributes(vol_name)
1557            if attr is None:
1558                msg = (_('convert_volume_to_hyperswap: Failed to get '
1559                         'attributes for volume %s.') % vol_name)
1560                LOG.error(msg)
1561                raise exception.VolumeDriverException(message=msg)
1562            pool = attr['mdisk_grp_name']
1563            self.check_hyperswap_pool(pool, opts['peer_pool'])
1564            hyper_pool = '%s' % opts['peer_pool']
1565            params = self._get_hyperswap_volume_create_params(opts)
1566            self.ssh.addvolumecopy(vol_name, hyper_pool, params)
1567
1568    def convert_hyperswap_volume_to_normal(self, vol_name, peer_pool):
1569        vol_name = '%s' % vol_name
1570        hyper_pool = '%s' % peer_pool
1571        self.ssh.rmvolumecopy(vol_name, hyper_pool)
1572
1573    def delete_hyperswap_volume(self, volume, force):
1574        """Ensures that vdisk is not part of FC mapping and deletes it."""
1575        if not self.is_vdisk_defined(volume):
1576            LOG.warning('Tried to delete non-existent volume %s.', volume)
1577            return
1578        self.ensure_vdisk_no_fc_mappings(volume, allow_snaps=True,
1579                                         allow_fctgt = True)
1580        self.ssh.rmvolume(volume, force=force)
1581
1582    def get_vdisk_attributes(self, vdisk):
1583        attrs = self.ssh.lsvdisk(vdisk)
1584        return attrs
1585
1586    def is_vdisk_defined(self, vdisk_name):
1587        """Check if vdisk is defined."""
1588        attrs = self.get_vdisk_attributes(vdisk_name)
1589        return attrs is not None
1590
1591    def find_vdisk_copy_id(self, vdisk, pool):
1592        resp = self.ssh.lsvdiskcopy(vdisk)
1593        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
1594            if mdisk_grp == pool:
1595                return copy_id
1596        msg = _('Failed to find a vdisk copy in the expected pool.')
1597        LOG.error(msg)
1598        raise exception.VolumeDriverException(message=msg)
1599
1600    def get_vdisk_copy_attrs(self, vdisk, copy_id):
1601        return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
1602
1603    def get_vdisk_copies(self, vdisk):
1604        copies = {'primary': None,
1605                  'secondary': None}
1606
1607        resp = self.ssh.lsvdiskcopy(vdisk)
1608        for copy_id, status, sync, primary, mdisk_grp in (
1609            resp.select('copy_id', 'status', 'sync',
1610                        'primary', 'mdisk_grp_name')):
1611            copy = {'copy_id': copy_id,
1612                    'status': status,
1613                    'sync': sync,
1614                    'primary': primary,
1615                    'mdisk_grp_name': mdisk_grp,
1616                    'sync_progress': None}
1617            if copy['sync'] != 'yes':
1618                progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
1619                copy['sync_progress'] = progress_info['progress']
1620            if copy['primary'] == 'yes':
1621                copies['primary'] = copy
1622            else:
1623                copies['secondary'] = copy
1624        return copies
1625
1626    def _prepare_fc_map(self, fc_map_id, timeout, restore):
1627        self.ssh.prestartfcmap(fc_map_id, restore)
1628        mapping_ready = False
1629        max_retries = (timeout // self.WAIT_TIME) + 1
1630        for try_number in range(1, max_retries):
1631            mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
1632            if (mapping_attrs is None or
1633                    'status' not in mapping_attrs):
1634                break
1635            if mapping_attrs['status'] == 'prepared':
1636                mapping_ready = True
1637                break
1638            elif mapping_attrs['status'] == 'stopped':
1639                self.ssh.prestartfcmap(fc_map_id, restore)
1640            elif mapping_attrs['status'] != 'preparing':
1641                msg = (_('Unexecpted mapping status %(status)s for mapping '
1642                         '%(id)s. Attributes: %(attr)s.')
1643                       % {'status': mapping_attrs['status'],
1644                          'id': fc_map_id,
1645                          'attr': mapping_attrs})
1646                LOG.error(msg)
1647                raise exception.VolumeBackendAPIException(data=msg)
1648            greenthread.sleep(self.WAIT_TIME)
1649
1650        if not mapping_ready:
1651            msg = (_('Mapping %(id)s prepare failed to complete within the '
1652                     'allotted %(to)d seconds timeout. Terminating.')
1653                   % {'id': fc_map_id,
1654                      'to': timeout})
1655            LOG.error(msg)
1656            raise exception.VolumeDriverException(message=msg)
1657
1658    def start_fc_consistgrp(self, fc_consistgrp):
1659        self.ssh.startfcconsistgrp(fc_consistgrp)
1660
1661    def create_fc_consistgrp(self, fc_consistgrp):
1662        self.ssh.mkfcconsistgrp(fc_consistgrp)
1663
1664    def delete_fc_consistgrp(self, fc_consistgrp):
1665        self.ssh.rmfcconsistgrp(fc_consistgrp)
1666
1667    def stop_fc_consistgrp(self, fc_consistgrp):
1668        self.ssh.stopfcconsistgrp(fc_consistgrp)
1669
1670    def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
1671                                 config, timeout):
1672        model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
1673        snapshots_model_update = []
1674        try:
1675            for snapshot in snapshots:
1676                opts = self.get_vdisk_params(config, state,
1677                                             snapshot['volume_type_id'])
1678                volume = snapshot.volume
1679                if not volume:
1680                    msg = (_("Can't get volume from snapshot: %(id)s")
1681                           % {"id": snapshot.id})
1682                    LOG.error(msg)
1683                    raise exception.VolumeBackendAPIException(data=msg)
1684                pool = utils.extract_host(volume.host, 'pool')
1685                self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
1686                                                    snapshot['name'],
1687                                                    fc_consistgrp,
1688                                                    config, opts, False,
1689                                                    pool=pool)
1690
1691            self.prepare_fc_consistgrp(fc_consistgrp, timeout)
1692            self.start_fc_consistgrp(fc_consistgrp)
1693            # There is CG limitation that could not create more than 128 CGs.
1694            # After start CG, we delete CG to avoid CG limitation.
1695            # Cinder general will maintain the CG and snapshots relationship.
1696            self.delete_fc_consistgrp(fc_consistgrp)
1697        except exception.VolumeBackendAPIException as err:
1698            model_update['status'] = fields.GroupSnapshotStatus.ERROR
1699            # Release cg
1700            self.delete_fc_consistgrp(fc_consistgrp)
1701            LOG.error("Failed to create CGSnapshot. "
1702                      "Exception: %s.", err)
1703
1704        for snapshot in snapshots:
1705            snapshots_model_update.append(
1706                {'id': snapshot['id'],
1707                 'status': model_update['status'],
1708                 'replication_status': fields.ReplicationStatus.NOT_CAPABLE})
1709        return model_update, snapshots_model_update
1710
1711    def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
1712        """Delete flashcopy maps and consistent group."""
1713        model_update = {'status': fields.GroupSnapshotStatus.DELETED}
1714        snapshots_model_update = []
1715
1716        try:
1717            for snapshot in snapshots:
1718                self.delete_vdisk(snapshot['name'], True)
1719        except exception.VolumeBackendAPIException as err:
1720            model_update['status'] = (
1721                fields.GroupSnapshotStatus.ERROR_DELETING)
1722            LOG.error("Failed to delete the snapshot %(snap)s of "
1723                      "CGSnapshot. Exception: %(exception)s.",
1724                      {'snap': snapshot['name'], 'exception': err})
1725
1726        for snapshot in snapshots:
1727            snapshots_model_update.append(
1728                {'id': snapshot['id'],
1729                 'status': model_update['status']})
1730
1731        return model_update, snapshots_model_update
1732
1733    def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
1734        """Prepare FC Consistency Group."""
1735        self.ssh.prestartfcconsistgrp(fc_consistgrp)
1736
1737        def prepare_fc_consistgrp_success():
1738            mapping_ready = False
1739            mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
1740            if (mapping_attrs is None or
1741                    'status' not in mapping_attrs):
1742                pass
1743            if mapping_attrs['status'] == 'prepared':
1744                mapping_ready = True
1745            elif mapping_attrs['status'] == 'stopped':
1746                self.ssh.prestartfcconsistgrp(fc_consistgrp)
1747            elif mapping_attrs['status'] != 'preparing':
1748                msg = (_('Unexpected mapping status %(status)s for mapping'
1749                         '%(id)s. Attributes: %(attr)s.') %
1750                       {'status': mapping_attrs['status'],
1751                        'id': fc_consistgrp,
1752                        'attr': mapping_attrs})
1753                LOG.error(msg)
1754                raise exception.VolumeBackendAPIException(data=msg)
1755            return mapping_ready
1756        self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
1757
1758    def create_cg_from_source(self, group, fc_consistgrp,
1759                              sources, targets, state,
1760                              config, timeout):
1761        """Create consistence group from source"""
1762        LOG.debug('Enter: create_cg_from_source: cg %(cg)s'
1763                  ' source %(source)s, target %(target)s',
1764                  {'cg': fc_consistgrp, 'source': sources, 'target': targets})
1765        model_update = {'status': fields.GroupStatus.AVAILABLE}
1766        ctxt = context.get_admin_context()
1767        try:
1768            for source, target in zip(sources, targets):
1769                opts = self.get_vdisk_params(config, state,
1770                                             source['volume_type_id'])
1771                pool = utils.extract_host(target['host'], 'pool')
1772                self.create_flashcopy_to_consistgrp(source['name'],
1773                                                    target['name'],
1774                                                    fc_consistgrp,
1775                                                    config, opts,
1776                                                    True, pool=pool)
1777            self.prepare_fc_consistgrp(fc_consistgrp, timeout)
1778            self.start_fc_consistgrp(fc_consistgrp)
1779            self.delete_fc_consistgrp(fc_consistgrp)
1780            volumes_model_update = self._get_volume_model_updates(
1781                ctxt, targets, group['id'], model_update['status'])
1782        except exception.VolumeBackendAPIException as err:
1783            model_update['status'] = fields.GroupStatus.ERROR
1784            volumes_model_update = self._get_volume_model_updates(
1785                ctxt, targets, group['id'], model_update['status'])
1786            with excutils.save_and_reraise_exception():
1787                # Release cg
1788                self.delete_fc_consistgrp(fc_consistgrp)
1789                LOG.error("Failed to create CG from CGsnapshot. "
1790                          "Exception: %s", err)
1791            return model_update, volumes_model_update
1792
1793        LOG.debug('Leave: create_cg_from_source.')
1794        return model_update, volumes_model_update
1795
1796    def _get_volume_model_updates(self, ctxt, volumes, cgId,
1797                                  status='available'):
1798        """Update the volume model's status and return it."""
1799        volume_model_updates = []
1800        LOG.info("Updating status for CG: %(id)s.",
1801                 {'id': cgId})
1802        if volumes:
1803            for volume in volumes:
1804                volume_model_updates.append({
1805                    'id': volume['id'],
1806                    'status': status,
1807                    'replication_status':
1808                        fields.ReplicationStatus.NOT_CAPABLE})
1809        else:
1810            LOG.info("No volume found for CG: %(cg)s.",
1811                     {'cg': cgId})
1812        return volume_model_updates
1813
1814    def run_flashcopy(self, source, target, timeout, copy_rate,
1815                      full_copy=True, restore=False):
1816        """Create a FlashCopy mapping from the source to the target."""
1817        LOG.debug('Enter: run_flashcopy: execute FlashCopy from source '
1818                  '%(source)s to target %(target)s.',
1819                  {'source': source, 'target': target})
1820
1821        fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate)
1822        self._prepare_fc_map(fc_map_id, timeout, restore)
1823        self.ssh.startfcmap(fc_map_id, restore)
1824
1825        LOG.debug('Leave: run_flashcopy: FlashCopy started from '
1826                  '%(source)s to %(target)s.',
1827                  {'source': source, 'target': target})
1828
1829    def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
1830                                       config, opts, full_copy=False,
1831                                       pool=None):
1832        """Create a FlashCopy mapping and add to consistent group."""
1833        LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
1834                  ' from source %(source)s to target %(target)s'
1835                  'Then add the flashcopy to %(cg)s.',
1836                  {'source': source, 'target': target, 'cg': consistgrp})
1837
1838        src_attrs = self.get_vdisk_attributes(source)
1839        if src_attrs is None:
1840            msg = (_('create_copy: Source vdisk %(src)s '
1841                     'does not exist.') % {'src': source})
1842            LOG.error(msg)
1843            raise exception.VolumeDriverException(message=msg)
1844
1845        src_size = src_attrs['capacity']
1846        # In case we need to use a specific pool
1847        if not pool:
1848            pool = src_attrs['mdisk_grp_name']
1849        opts['iogrp'] = src_attrs['IO_group_id']
1850        self.create_vdisk(target, src_size, 'b', pool, opts)
1851
1852        self.ssh.mkfcmap(source, target, full_copy,
1853                         config.storwize_svc_flashcopy_rate,
1854                         consistgrp=consistgrp)
1855
1856        LOG.debug('Leave: create_flashcopy_to_consistgrp: '
1857                  'FlashCopy started from  %(source)s to %(target)s.',
1858                  {'source': source, 'target': target})
1859
1860    def _get_vdisk_fc_mappings(self, vdisk):
1861        """Return FlashCopy mappings that this vdisk is associated with."""
1862        mapping_ids = []
1863        resp = self.ssh.lsvdiskfcmappings(vdisk)
1864        for id in resp.select('id'):
1865            mapping_ids.append(id)
1866        return mapping_ids
1867
1868    def _get_flashcopy_mapping_attributes(self, fc_map_id):
1869        try:
1870            resp = self.ssh.lsfcmap(fc_map_id)
1871            return resp[0] if len(resp) else None
1872        except exception.VolumeBackendAPIException as ex:
1873            LOG.warning("Failed to get fcmap %(fcmap)s info. "
1874                        "Exception: %(ex)s.", {'fcmap': fc_map_id,
1875                                               'ex': ex})
1876            return None
1877
1878    def _get_flashcopy_consistgrp_attr(self, fc_map_id):
1879        resp = self.ssh.lsfcconsistgrp(fc_map_id)
1880        if not len(resp):
1881            return None
1882        return resp[0]
1883
1884    def _check_vdisk_fc_mappings(self, name,
1885                                 allow_snaps=True, allow_fctgt=False):
1886        """FlashCopy mapping check helper."""
1887        LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name)
1888        mapping_ids = self._get_vdisk_fc_mappings(name)
1889        wait_for_copy = False
1890        for map_id in mapping_ids:
1891            attrs = self._get_flashcopy_mapping_attributes(map_id)
1892            # We should ignore GMCV flash copies
1893            # Hyperswap flash copies are also ignored.
1894            if not attrs or 'yes' == attrs['rc_controlled']:
1895                continue
1896            source = attrs['source_vdisk_name']
1897            target = attrs['target_vdisk_name']
1898            copy_rate = attrs['copy_rate']
1899            status = attrs['status']
1900
1901            if allow_fctgt and target == name and status == 'copying':
1902                self.ssh.stopfcmap(map_id)
1903                attrs = self._get_flashcopy_mapping_attributes(map_id)
1904                if attrs:
1905                    status = attrs['status']
1906                else:
1907                    continue
1908
1909            if copy_rate == '0':
1910                if source == name:
1911                    # Vdisk with snapshots. Return False if snapshot
1912                    # not allowed.
1913                    if not allow_snaps:
1914                        raise loopingcall.LoopingCallDone(retvalue=False)
1915                    self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
1916                    wait_for_copy = True
1917                else:
1918                    # A snapshot
1919                    if target != name:
1920                        msg = (_('Vdisk %(name)s not involved in '
1921                                 'mapping %(src)s -> %(tgt)s.') %
1922                               {'name': name, 'src': source, 'tgt': target})
1923                        LOG.error(msg)
1924                        raise exception.VolumeDriverException(message=msg)
1925                    if status in ['copying', 'prepared']:
1926                        self.ssh.stopfcmap(map_id)
1927                        # Need to wait for the fcmap to change to
1928                        # stopped state before remove fcmap
1929                        wait_for_copy = True
1930                    elif status in ['stopping', 'preparing']:
1931                        wait_for_copy = True
1932                    else:
1933                        self.ssh.rmfcmap(map_id)
1934            # Case 4: Copy in progress - wait and will autodelete
1935            else:
1936                if status == 'prepared':
1937                    self.ssh.stopfcmap(map_id)
1938                    self.ssh.rmfcmap(map_id)
1939                elif status in ['idle_or_copied', 'stopped']:
1940                    # Prepare failed or stopped
1941                    self.ssh.rmfcmap(map_id)
1942                else:
1943                    wait_for_copy = True
1944        if not wait_for_copy or not len(mapping_ids):
1945            raise loopingcall.LoopingCallDone(retvalue=True)
1946
1947    def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True,
1948                                    allow_fctgt=False):
1949        """Ensure vdisk has no flashcopy mappings."""
1950        timer = loopingcall.FixedIntervalLoopingCall(
1951            self._check_vdisk_fc_mappings, name,
1952            allow_snaps, allow_fctgt)
1953        # Create a timer greenthread. The default volume service heart
1954        # beat is every 10 seconds. The flashcopy usually takes hours
1955        # before it finishes. Don't set the sleep interval shorter
1956        # than the heartbeat. Otherwise volume service heartbeat
1957        # will not be serviced.
1958        LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.',
1959                  name)
1960        ret = timer.start(interval=self.check_fcmapping_interval).wait()
1961        timer.stop()
1962        return ret
1963
1964    def start_relationship(self, volume_name, primary=None):
1965        vol_attrs = self.get_vdisk_attributes(volume_name)
1966        if vol_attrs['RC_name']:
1967            self.ssh.startrcrelationship(vol_attrs['RC_name'], primary)
1968
1969    def stop_relationship(self, volume_name, access=False):
1970        vol_attrs = self.get_vdisk_attributes(volume_name)
1971        if vol_attrs['RC_name']:
1972            self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access)
1973
1974    def create_relationship(self, master, aux, system, asyncmirror,
1975                            cyclingmode=False, masterchange=None,
1976                            cycle_period_seconds=None):
1977        try:
1978            rc_id = self.ssh.mkrcrelationship(master, aux, system,
1979                                              asyncmirror, cyclingmode)
1980        except exception.VolumeBackendAPIException as e:
1981            # CMMVC5959E is the code in Stowize storage, meaning that
1982            # there is a relationship that already has this name on the
1983            # master cluster.
1984            if 'CMMVC5959E' not in e:
1985                # If there is no relation between the primary and the
1986                # secondary back-end storage, the exception is raised.
1987                raise
1988        if rc_id:
1989            # We need setup master and aux change volumes for gmcv
1990            # before we can start remote relationship
1991            # aux change volume must be set on target site
1992            if cycle_period_seconds:
1993                self.change_relationship_cycleperiod(master,
1994                                                     cycle_period_seconds)
1995            if masterchange:
1996                self.change_relationship_changevolume(master,
1997                                                      masterchange, True)
1998            else:
1999                self.start_relationship(master)
2000
2001    def change_relationship_changevolume(self, volume_name,
2002                                         change_volume, master):
2003        vol_attrs = self.get_vdisk_attributes(volume_name)
2004        if vol_attrs['RC_name'] and change_volume:
2005            self.ssh.ch_rcrelationship_changevolume(vol_attrs['RC_name'],
2006                                                    change_volume, master)
2007
2008    def change_relationship_cycleperiod(self, volume_name,
2009                                        cycle_period_seconds):
2010        vol_attrs = self.get_vdisk_attributes(volume_name)
2011        if vol_attrs['RC_name'] and cycle_period_seconds:
2012            self.ssh.ch_rcrelationship_cycleperiod(vol_attrs['RC_name'],
2013                                                   cycle_period_seconds)
2014
2015    def delete_relationship(self, volume_name):
2016        vol_attrs = self.get_vdisk_attributes(volume_name)
2017        if vol_attrs['RC_name']:
2018            self.ssh.rmrcrelationship(vol_attrs['RC_name'], True)
2019
2020    def get_relationship_info(self, volume_name):
2021        vol_attrs = self.get_vdisk_attributes(volume_name)
2022        if not vol_attrs or not vol_attrs['RC_name']:
2023            LOG.info("Unable to get remote copy information for "
2024                     "volume %s", volume_name)
2025            return None
2026
2027        relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
2028        return relationship[0] if len(relationship) > 0 else None
2029
2030    def delete_rc_volume(self, volume_name, target_vol=False):
2031        vol_name = volume_name
2032        if target_vol:
2033            vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume_name
2034
2035        try:
2036            rel_info = self.get_relationship_info(vol_name)
2037            if rel_info:
2038                self.delete_relationship(vol_name)
2039            # Delete change volume
2040            self.delete_vdisk(
2041                storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name, False)
2042            self.delete_vdisk(vol_name, False)
2043        except Exception as e:
2044            msg = (_('Unable to delete the volume for '
2045                     'volume %(vol)s. Exception: %(err)s.'),
2046                   {'vol': vol_name, 'err': e})
2047            LOG.exception(msg)
2048            raise exception.VolumeDriverException(message=msg)
2049
2050    def switch_relationship(self, relationship, aux=True):
2051        self.ssh.switchrelationship(relationship, aux)
2052
2053    # replication cg
2054    def chrcrelationship(self, relationship, rccg=None):
2055        rels = self.ssh.lsrcrelationship(relationship)[0]
2056        if rccg and rels['consistency_group_name'] == rccg:
2057            LOG.info('relationship %(rel)s is aleady added to group %(grp)s.',
2058                     {'rel': relationship, 'grp': rccg})
2059            return
2060        if not rccg and rels['consistency_group_name'] == '':
2061            LOG.info('relationship %(rel)s is aleady removed from group',
2062                     {'rel': relationship})
2063            return
2064        self.ssh.chrcrelationship(relationship, rccg)
2065
2066    def get_rccg(self, rccg):
2067        return self.ssh.lsrcconsistgrp(rccg)
2068
2069    def create_rccg(self, rccg, system):
2070        self.ssh.mkrcconsistgrp(rccg, system)
2071
2072    def delete_rccg(self, rccg):
2073        if self.ssh.lsrcconsistgrp(rccg):
2074            self.ssh.rmrcconsistgrp(rccg)
2075
2076    def start_rccg(self, rccg, primary=None):
2077        self.ssh.startrcconsistgrp(rccg, primary)
2078
2079    def stop_rccg(self, rccg, access=False):
2080        self.ssh.stoprcconsistgrp(rccg, access)
2081
2082    def switch_rccg(self, rccg, aux=True):
2083        self.ssh.switchrcconsistgrp(rccg, aux)
2084
2085    def get_rccg_info(self, volume_name):
2086        vol_attrs = self.get_vdisk_attributes(volume_name)
2087        if not vol_attrs or not vol_attrs['RC_name']:
2088            LOG.warning("Unable to get remote copy information for "
2089                        "volume %s", volume_name)
2090            return None
2091
2092        rcrel = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
2093        if len(rcrel) > 0 and rcrel[0]['consistency_group_name']:
2094            return self.ssh.lsrcconsistgrp(rcrel[0]['consistency_group_name'])
2095        else:
2096            return None
2097
2098    def get_partnership_info(self, system_name):
2099        partnership = self.ssh.lspartnership(system_name)
2100        return partnership[0] if len(partnership) > 0 else None
2101
2102    def get_partnershipcandidate_info(self, system_name):
2103        candidates = self.ssh.lspartnershipcandidate()
2104        for candidate in candidates:
2105            if system_name == candidate['name']:
2106                return candidate
2107        return None
2108
2109    def mkippartnership(self, ip_v4, bandwith=1000, copyrate=50):
2110        self.ssh.mkippartnership(ip_v4, bandwith, copyrate)
2111
2112    def mkfcpartnership(self, system_name, bandwith=1000, copyrate=50):
2113        self.ssh.mkfcpartnership(system_name, bandwith, copyrate)
2114
2115    def chpartnership(self, partnership_id):
2116        self.ssh.chpartnership(partnership_id)
2117
2118    def delete_vdisk(self, vdisk, force):
2119        """Ensures that vdisk is not part of FC mapping and deletes it."""
2120        LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk)
2121        if not self.is_vdisk_defined(vdisk):
2122            LOG.info('Tried to delete non-existent vdisk %s.', vdisk)
2123            return
2124        self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True,
2125                                         allow_fctgt=True)
2126        self.ssh.rmvdisk(vdisk, force=force)
2127        LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk)
2128
2129    def create_copy(self, src, tgt, src_id, config, opts,
2130                    full_copy, pool=None):
2131        """Create a new snapshot using FlashCopy."""
2132        LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.',
2133                  {'tgt': tgt, 'src': src})
2134
2135        src_attrs = self.get_vdisk_attributes(src)
2136        if src_attrs is None:
2137            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
2138                     'does not exist.') % {'src': src, 'src_id': src_id})
2139            LOG.error(msg)
2140            raise exception.VolumeDriverException(message=msg)
2141
2142        src_size = src_attrs['capacity']
2143        # In case we need to use a specific pool
2144        if not pool:
2145            pool = src_attrs['mdisk_grp_name']
2146
2147        opts['iogrp'] = src_attrs['IO_group_id']
2148        self.create_vdisk(tgt, src_size, 'b', pool, opts)
2149        timeout = config.storwize_svc_flashcopy_timeout
2150        try:
2151            self.run_flashcopy(src, tgt, timeout,
2152                               config.storwize_svc_flashcopy_rate,
2153                               full_copy=full_copy)
2154        except Exception:
2155            with excutils.save_and_reraise_exception():
2156                self.delete_vdisk(tgt, True)
2157
2158        LOG.debug('Leave: _create_copy: snapshot %(tgt)s from '
2159                  'vdisk %(src)s.',
2160                  {'tgt': tgt, 'src': src})
2161
2162    def extend_vdisk(self, vdisk, amount):
2163        self.ssh.expandvdisksize(vdisk, amount)
2164
2165    def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config,
2166                       auto_delete=False):
2167        """Add a vdisk copy in the given pool."""
2168        resp = self.ssh.lsvdiskcopy(vdisk)
2169        if len(resp) > 1:
2170            msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
2171                     'Adding another copy would exceed the limit of '
2172                     '2 copies.') % vdisk)
2173            raise exception.VolumeDriverException(message=msg)
2174        orig_copy_id = resp[0].get("copy_id", None)
2175
2176        if orig_copy_id is None:
2177            msg = (_('add_vdisk_copy started without a vdisk copy in the '
2178                     'expected pool.'))
2179            LOG.error(msg)
2180            raise exception.VolumeDriverException(message=msg)
2181
2182        if volume_type is None:
2183            opts = self.get_vdisk_params(config, state, None)
2184        else:
2185            opts = self.get_vdisk_params(config, state, volume_type['id'],
2186                                         volume_type=volume_type)
2187        params = self._get_vdisk_create_params(opts)
2188        try:
2189            new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params,
2190                                                auto_delete)
2191        except exception.VolumeBackendAPIException as e:
2192            msg = (_('Unable to add vdiskcopy for volume %(vol)s. '
2193                     'Exception: %(err)s.'),
2194                   {'vol': vdisk, 'err': e})
2195            LOG.exception(msg)
2196            raise exception.VolumeDriverException(message=msg)
2197        return (orig_copy_id, new_copy_id)
2198
2199    def is_vdisk_copy_synced(self, vdisk, copy_id):
2200        sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
2201        if sync == 'yes':
2202            return True
2203        return False
2204
2205    def rm_vdisk_copy(self, vdisk, copy_id):
2206        self.ssh.rmvdiskcopy(vdisk, copy_id)
2207
2208    def lsvdiskcopy(self, vdisk, copy_id=None):
2209        return self.ssh.lsvdiskcopy(vdisk, copy_id)
2210
2211    @staticmethod
2212    def can_migrate_to_host(host, state):
2213        if 'location_info' not in host['capabilities']:
2214            return None
2215        info = host['capabilities']['location_info']
2216        try:
2217            (dest_type, dest_id, dest_pool) = info.split(':')
2218        except ValueError:
2219            return None
2220        if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
2221            return None
2222        return dest_pool
2223
2224    def add_vdisk_qos(self, vdisk, qos):
2225        """Add the QoS configuration to the volume."""
2226        for key, value in qos.items():
2227            if key in self.svc_qos_keys.keys():
2228                param = self.svc_qos_keys[key]['param']
2229                self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
2230
2231    def update_vdisk_qos(self, vdisk, qos):
2232        """Update all the QoS in terms of a key and value.
2233
2234        svc_qos_keys saves all the supported QoS parameters. Going through
2235        this dict, we set the new values to all the parameters. If QoS is
2236        available in the QoS configuration, the value is taken from it;
2237        if not, the value will be set to default.
2238        """
2239        for key, value in self.svc_qos_keys.items():
2240            param = value['param']
2241            if key in qos.keys():
2242                # If the value is set in QoS, take the value from
2243                # the QoS configuration.
2244                v = qos[key]
2245            else:
2246                # If not, set the value to default.
2247                v = value['default']
2248            self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
2249
2250    def disable_vdisk_qos(self, vdisk, qos):
2251        """Disable the QoS."""
2252        for key, value in qos.items():
2253            if key in self.svc_qos_keys.keys():
2254                param = self.svc_qos_keys[key]['param']
2255                # Take the default value.
2256                value = self.svc_qos_keys[key]['default']
2257                self.ssh.chvdisk(vdisk, ['-' + param, value])
2258
2259    def change_vdisk_options(self, vdisk, changes, opts, state):
2260        change_value = {'warning': '', 'easytier': '', 'autoexpand': ''}
2261        if 'warning' in opts:
2262            change_value['warning'] = '%s%%' % str(opts['warning'])
2263        if 'easytier' in opts:
2264            change_value['easytier'] = 'on' if opts['easytier'] else 'off'
2265        if 'autoexpand' in opts:
2266            change_value['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
2267
2268        for key in changes:
2269            self.ssh.chvdisk(vdisk, ['-' + key, change_value[key]])
2270
2271    def change_vdisk_iogrp(self, vdisk, state, iogrp):
2272        if state['code_level'] < (6, 4, 0, 0):
2273            LOG.debug('Ignore change IO group as storage code level is '
2274                      '%(code_level)s, below the required 6.4.0.0.',
2275                      {'code_level': state['code_level']})
2276        else:
2277            self.ssh.movevdisk(vdisk, str(iogrp[0]))
2278            self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
2279            self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
2280
2281    def vdisk_by_uid(self, vdisk_uid):
2282        """Returns the properties of the vdisk with the specified UID.
2283
2284        Returns None if no such disk exists.
2285        """
2286
2287        vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
2288
2289        if len(vdisks) == 0:
2290            return None
2291
2292        if len(vdisks) != 1:
2293            msg = (_('Expected single vdisk returned from lsvdisk when '
2294                     'filtering on vdisk_UID.  %(count)s were returned.') %
2295                   {'count': len(vdisks)})
2296            LOG.error(msg)
2297            raise exception.VolumeBackendAPIException(data=msg)
2298
2299        vdisk = vdisks.result[0]
2300
2301        return self.ssh.lsvdisk(vdisk['name'])
2302
2303    def is_vdisk_in_use(self, vdisk):
2304        """Returns True if the specified vdisk is mapped to at least 1 host."""
2305        resp = self.ssh.lsvdiskhostmap(vdisk)
2306        return len(resp) != 0
2307
2308    def rename_vdisk(self, vdisk, new_name):
2309        self.ssh.chvdisk(vdisk, ['-name', new_name])
2310
2311    def change_vdisk_primary_copy(self, vdisk, copy_id):
2312        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
2313
2314    def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
2315        self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
2316
2317    def is_system_topology_hyperswap(self, state):
2318        """Returns True if the system version higher than 7.5 and the system
2319
2320        topology is hyperswap.
2321        """
2322        if state['code_level'] < (7, 6, 0, 0):
2323            LOG.debug('Hyperswap failure as the storage'
2324                      'code_level is %(code_level)s, below '
2325                      'the required 7.6.0.0.',
2326                      {'code_level': state['code_level']})
2327        else:
2328            if state['topology'] == 'hyperswap':
2329                return True
2330            else:
2331                LOG.debug('Hyperswap failure as the storage system '
2332                          'topology is not hyperswap.')
2333        return False
2334
2335    def check_hyperswap_pool(self, pool, peer_pool):
2336        # Check the hyperswap pools.
2337        if not peer_pool:
2338            raise exception.InvalidInput(
2339                reason=_('The peer pool is necessary for hyperswap volume, '
2340                         'please configure the peer pool.'))
2341        pool_attr = self.get_pool_attrs(pool)
2342        peer_pool_attr = self.get_pool_attrs(peer_pool)
2343        if not peer_pool_attr:
2344            raise exception.InvalidInput(
2345                reason=_('The hyperswap peer pool %s '
2346                         'is invalid.') % peer_pool)
2347
2348        if not pool_attr['site_id'] or not peer_pool_attr['site_id']:
2349            raise exception.InvalidInput(
2350                reason=_('The site_id of pools is necessary for hyperswap '
2351                         'volume, but there is no site_id in the pool or '
2352                         'peer pool.'))
2353
2354        if pool_attr['site_id'] == peer_pool_attr['site_id']:
2355            raise exception.InvalidInput(
2356                reason=_('The hyperswap volume must be configured in two '
2357                         'independent sites, the pool %(pool)s is on the '
2358                         'same site as peer_pool %(peer_pool)s. ') %
2359                {'pool': pool, 'peer_pool': peer_pool})
2360
2361    def is_volume_hyperswap(self, vol_name):
2362        """Returns True if the volume rcrelationship is activeactive."""
2363        is_hyper_volume = False
2364        vol_attrs = self.get_vdisk_attributes(vol_name)
2365        if vol_attrs and vol_attrs['RC_name']:
2366            relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
2367            if relationship[0]['copy_type'] == 'activeactive':
2368                is_hyper_volume = True
2369        return is_hyper_volume
2370
2371
2372class CLIResponse(object):
2373    """Parse SVC CLI output and generate iterable."""
2374
2375    def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
2376        super(CLIResponse, self).__init__()
2377        if ssh_cmd:
2378            self.ssh_cmd = ' '.join(ssh_cmd)
2379        else:
2380            self.ssh_cmd = 'None'
2381        self.raw = raw
2382        self.delim = delim
2383        self.with_header = with_header
2384        self.result = self._parse()
2385
2386    def select(self, *keys):
2387        for a in self.result:
2388            vs = []
2389            for k in keys:
2390                v = a.get(k, None)
2391                if isinstance(v, six.string_types) or v is None:
2392                    v = [v]
2393                if isinstance(v, list):
2394                    vs.append(v)
2395            for item in zip(*vs):
2396                if len(item) == 1:
2397                    yield item[0]
2398                else:
2399                    yield item
2400
2401    def __getitem__(self, key):
2402        try:
2403            return self.result[key]
2404        except KeyError:
2405            msg = (_('Did not find the expected key %(key)s in %(fun)s: '
2406                     '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
2407                                    'raw': self.raw})
2408            raise exception.VolumeBackendAPIException(data=msg)
2409
2410    def __iter__(self):
2411        for a in self.result:
2412            yield a
2413
2414    def __len__(self):
2415        return len(self.result)
2416
2417    def _parse(self):
2418        def get_reader(content, delim):
2419            for line in content.lstrip().splitlines():
2420                line = line.strip()
2421                if line:
2422                    yield line.split(delim)
2423                else:
2424                    yield []
2425
2426        if isinstance(self.raw, six.string_types):
2427            stdout, stderr = self.raw, ''
2428        else:
2429            stdout, stderr = self.raw
2430        reader = get_reader(stdout, self.delim)
2431        result = []
2432
2433        if self.with_header:
2434            hds = tuple()
2435            for row in reader:
2436                hds = row
2437                break
2438            for row in reader:
2439                cur = dict()
2440                if len(hds) != len(row):
2441                    msg = (_('Unexpected CLI response: header/row mismatch. '
2442                             'header: %(header)s, row: %(row)s.')
2443                           % {'header': hds,
2444                              'row': row})
2445                    raise exception.VolumeBackendAPIException(data=msg)
2446                for k, v in zip(hds, row):
2447                    CLIResponse.append_dict(cur, k, v)
2448                result.append(cur)
2449        else:
2450            cur = dict()
2451            for row in reader:
2452                if row:
2453                    CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
2454                elif cur:  # start new section
2455                    result.append(cur)
2456                    cur = dict()
2457            if cur:
2458                result.append(cur)
2459        return result
2460
2461    @staticmethod
2462    def append_dict(dict_, key, value):
2463        key, value = key.strip(), value.strip()
2464        obj = dict_.get(key, None)
2465        if obj is None:
2466            dict_[key] = value
2467        elif isinstance(obj, list):
2468            obj.append(value)
2469            dict_[key] = obj
2470        else:
2471            dict_[key] = [obj, value]
2472        return dict_
2473
2474
2475class StorwizeSVCCommonDriver(san.SanDriver,
2476                              driver.ManageableVD,
2477                              driver.MigrateVD,
2478                              driver.CloneableImageVD):
2479    """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
2480
2481    Version history:
2482
2483    .. code-block:: none
2484
2485        1.0 - Initial driver
2486        1.1 - FC support, create_cloned_volume, volume type support,
2487              get_volume_stats, minor bug fixes
2488        1.2.0 - Added retype
2489        1.2.1 - Code refactor, improved exception handling
2490        1.2.2 - Fix bug #1274123 (races in host-related functions)
2491        1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
2492                to lsfabric, clear unused data from connections, ensure
2493                matching WWPNs by comparing lower case
2494        1.2.4 - Fix bug #1278035 (async migration/retype)
2495        1.2.5 - Added support for manage_existing (unmanage is inherited)
2496        1.2.6 - Added QoS support in terms of I/O throttling rate
2497        1.3.1 - Added support for volume replication
2498        1.3.2 - Added support for consistency group
2499        1.3.3 - Update driver to use ABC metaclasses
2500        2.0 - Code refactor, split init file and placed shared methods
2501              for FC and iSCSI within the StorwizeSVCCommonDriver class
2502        2.1 - Added replication V2 support to the global/metro mirror
2503              mode
2504        2.1.1 - Update replication to version 2.1
2505    """
2506
2507    VERSION = "2.1.1"
2508    VDISKCOPYOPS_INTERVAL = 600
2509    DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0
2510
2511    def __init__(self, *args, **kwargs):
2512        super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
2513        self.configuration.append_config_values(storwize_svc_opts)
2514        self._backend_name = self.configuration.safe_get('volume_backend_name')
2515        self.active_ip = self.configuration.san_ip
2516        self.inactive_ip = self.configuration.storwize_san_secondary_ip
2517        self._master_backend_helpers = StorwizeHelpers(self._run_ssh)
2518        self._aux_backend_helpers = None
2519        self._helpers = self._master_backend_helpers
2520        self._vdiskcopyops = {}
2521        self._vdiskcopyops_loop = None
2522        self.protocol = None
2523        self._master_state = {'storage_nodes': {},
2524                              'enabled_protocols': set(),
2525                              'compression_enabled': False,
2526                              'available_iogrps': [],
2527                              'system_name': None,
2528                              'system_id': None,
2529                              'code_level': None,
2530                              }
2531        self._state = self._master_state
2532        self._aux_state = {'storage_nodes': {},
2533                           'enabled_protocols': set(),
2534                           'compression_enabled': False,
2535                           'available_iogrps': [],
2536                           'system_name': None,
2537                           'system_id': None,
2538                           'code_level': None,
2539                           }
2540        self._active_backend_id = kwargs.get('active_backend_id')
2541
2542        # This dictionary is used to map each replication target to certain
2543        # replication manager object.
2544        self.replica_manager = {}
2545
2546        # One driver can be configured with only one replication target
2547        # to failover.
2548        self._replica_target = {}
2549
2550        # This boolean is used to indicate whether replication is supported
2551        # by this storage.
2552        self._replica_enabled = False
2553
2554        # This list is used to save the supported replication modes.
2555        self._supported_replica_types = []
2556
2557        # This is used to save the available pools in failed-over status
2558        self._secondary_pools = None
2559
2560        # Storwize has the limitation that can not burst more than 3 new ssh
2561        # connections within 1 second. So slow down the initialization.
2562        time.sleep(1)
2563
2564    def do_setup(self, ctxt):
2565        """Check that we have all configuration details from the storage."""
2566        LOG.debug('enter: do_setup')
2567
2568        # v2.1 replication setup
2569        self._get_storwize_config()
2570
2571        # Validate that the pool exists
2572        self._validate_pools_exist()
2573
2574        # Build the list of in-progress vdisk copy operations
2575        if ctxt is None:
2576            admin_context = context.get_admin_context()
2577        else:
2578            admin_context = ctxt.elevated()
2579        volumes = objects.VolumeList.get_all_by_host(admin_context, self.host)
2580
2581        for volume in volumes:
2582            metadata = volume.admin_metadata
2583            curr_ops = metadata.get('vdiskcopyops', None)
2584            if curr_ops:
2585                ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
2586                self._vdiskcopyops[volume['id']] = ops
2587
2588        # if vdiskcopy exists in database, start the looping call
2589        if len(self._vdiskcopyops) >= 1:
2590            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
2591                self._check_volume_copy_ops)
2592            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
2593        LOG.debug('leave: do_setup')
2594
2595    def _update_storwize_state(self, state, helper):
2596        # Get storage system name, id, and code level
2597        state.update(helper.get_system_info())
2598
2599        # Check if compression is supported
2600        state['compression_enabled'] = helper.compression_enabled()
2601
2602        # Get the available I/O groups
2603        state['available_iogrps'] = helper.get_available_io_groups()
2604
2605        # Get the iSCSI and FC names of the Storwize/SVC nodes
2606        state['storage_nodes'] = helper.get_node_info()
2607
2608        # Add the iSCSI IP addresses and WWPNs to the storage node info
2609        helper.add_iscsi_ip_addrs(state['storage_nodes'])
2610        helper.add_fc_wwpns(state['storage_nodes'], state['code_level'])
2611
2612        # For each node, check what connection modes it supports.  Delete any
2613        # nodes that do not support any types (may be partially configured).
2614        to_delete = []
2615        for k, node in state['storage_nodes'].items():
2616            if ((len(node['ipv4']) or len(node['ipv6']))
2617                    and len(node['iscsi_name'])):
2618                node['enabled_protocols'].append('iSCSI')
2619                state['enabled_protocols'].add('iSCSI')
2620            if len(node['WWPN']):
2621                node['enabled_protocols'].append('FC')
2622                state['enabled_protocols'].add('FC')
2623            if not len(node['enabled_protocols']):
2624                to_delete.append(k)
2625        for delkey in to_delete:
2626            del state['storage_nodes'][delkey]
2627
2628    def _get_backend_pools(self):
2629        if not self._active_backend_id:
2630            return self.configuration.storwize_svc_volpool_name
2631        elif not self._secondary_pools:
2632            self._secondary_pools = [self._replica_target.get('pool_name')]
2633        return self._secondary_pools
2634
2635    def _validate_pools_exist(self):
2636        # Validate that the pool exists
2637        pools = self._get_backend_pools()
2638        for pool in pools:
2639            if not self._helpers.is_pool_defined(pool):
2640                reason = (_('Failed getting details for pool %s.') % pool)
2641                raise exception.InvalidInput(reason=reason)
2642
2643    def check_for_setup_error(self):
2644        """Ensure that the flags are set properly."""
2645        LOG.debug('enter: check_for_setup_error')
2646
2647        # Check that we have the system ID information
2648        if self._state['system_name'] is None:
2649            exception_msg = (_('Unable to determine system name.'))
2650            raise exception.VolumeBackendAPIException(data=exception_msg)
2651        if self._state['system_id'] is None:
2652            exception_msg = (_('Unable to determine system id.'))
2653            raise exception.VolumeBackendAPIException(data=exception_msg)
2654
2655        # Make sure we have at least one node configured
2656        if not len(self._state['storage_nodes']):
2657            msg = _('do_setup: No configured nodes.')
2658            LOG.error(msg)
2659            raise exception.VolumeDriverException(message=msg)
2660
2661        if self.protocol not in self._state['enabled_protocols']:
2662            # TODO(mc_nair): improve this error message by looking at
2663            # self._state['enabled_protocols'] to tell user what driver to use
2664            raise exception.InvalidInput(
2665                reason=_('The storage device does not support %(prot)s. '
2666                         'Please configure the device to support %(prot)s or '
2667                         'switch to a driver using a different protocol.')
2668                % {'prot': self.protocol})
2669
2670        required_flags = ['san_ip', 'san_ssh_port', 'san_login',
2671                          'storwize_svc_volpool_name']
2672        for flag in required_flags:
2673            if not self.configuration.safe_get(flag):
2674                raise exception.InvalidInput(reason=_('%s is not set.') % flag)
2675
2676        # Ensure that either password or keyfile were set
2677        if not (self.configuration.san_password or
2678                self.configuration.san_private_key):
2679            raise exception.InvalidInput(
2680                reason=_('Password or SSH private key is required for '
2681                         'authentication: set either san_password or '
2682                         'san_private_key option.'))
2683
2684        opts = self._helpers.build_default_opts(self.configuration)
2685        self._helpers.check_vdisk_opts(self._state, opts)
2686
2687        LOG.debug('leave: check_for_setup_error')
2688
2689    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
2690        cinder_utils.check_ssh_injection(cmd_list)
2691        command = ' '.join(cmd_list)
2692        if not self.sshpool:
2693            try:
2694                self.sshpool = self._set_up_sshpool(self.active_ip)
2695            except paramiko.SSHException:
2696                LOG.warning('Unable to use san_ip to create SSHPool. Now '
2697                            'attempting to use storwize_san_secondary_ip '
2698                            'to create SSHPool.')
2699                if self._toggle_ip():
2700                    self.sshpool = self._set_up_sshpool(self.active_ip)
2701                else:
2702                    LOG.warning('Unable to create SSHPool using san_ip '
2703                                'and not able to use '
2704                                'storwize_san_secondary_ip since it is '
2705                                'not configured.')
2706                    raise
2707        try:
2708            return self._ssh_execute(self.sshpool, command,
2709                                     check_exit_code, attempts)
2710
2711        except Exception:
2712            # Need to check if creating an SSHPool storwize_san_secondary_ip
2713            # before raising an error.
2714            try:
2715                if self._toggle_ip():
2716                    LOG.warning("Unable to execute SSH command with "
2717                                "%(inactive)s. Attempting to execute SSH "
2718                                "command with %(active)s.",
2719                                {'inactive': self.inactive_ip,
2720                                 'active': self.active_ip})
2721                    self.sshpool = self._set_up_sshpool(self.active_ip)
2722                    return self._ssh_execute(self.sshpool, command,
2723                                             check_exit_code, attempts)
2724                else:
2725                    LOG.warning('Not able to use '
2726                                'storwize_san_secondary_ip since it is '
2727                                'not configured.')
2728                    raise
2729            except Exception:
2730                with excutils.save_and_reraise_exception():
2731                    LOG.error("Error running SSH command: %s",
2732                              command)
2733
2734    def _set_up_sshpool(self, ip):
2735        password = self.configuration.san_password
2736        privatekey = self.configuration.san_private_key
2737        min_size = self.configuration.ssh_min_pool_conn
2738        max_size = self.configuration.ssh_max_pool_conn
2739        sshpool = ssh_utils.SSHPool(
2740            ip,
2741            self.configuration.san_ssh_port,
2742            self.configuration.ssh_conn_timeout,
2743            self.configuration.san_login,
2744            password=password,
2745            privatekey=privatekey,
2746            min_size=min_size,
2747            max_size=max_size)
2748
2749        return sshpool
2750
2751    def _ssh_execute(self, sshpool, command,
2752                     check_exit_code = True, attempts=1):
2753        try:
2754            with sshpool.item() as ssh:
2755                while attempts > 0:
2756                    attempts -= 1
2757                    try:
2758                        return processutils.ssh_execute(
2759                            ssh,
2760                            command,
2761                            check_exit_code=check_exit_code)
2762                    except Exception as e:
2763                        LOG.error('Error has occurred: %s', e)
2764                        last_exception = e
2765                        greenthread.sleep(self.DEFAULT_GR_SLEEP)
2766                    try:
2767                        std_err = last_exception.stderr
2768                        if std_err is not None and not self._is_ascii(std_err):
2769                            std_err = encodeutils.safe_decode(std_err,
2770                                                              errors='ignore')
2771                            LOG.error("The stderr has non-ascii characters. "
2772                                      "Please check the error code.\n"
2773                                      "Stderr: %s", std_err)
2774                            std_err = std_err.split()[0]
2775                        raise processutils.ProcessExecutionError(
2776                            exit_code=last_exception.exit_code,
2777                            stdout=last_exception.stdout,
2778                            stderr=std_err,
2779                            cmd=last_exception.cmd)
2780                    except AttributeError:
2781                        raise processutils.ProcessExecutionError(
2782                            exit_code=-1,
2783                            stdout="",
2784                            stderr="Error running SSH command",
2785                            cmd=command)
2786
2787        except Exception:
2788            with excutils.save_and_reraise_exception():
2789                LOG.error("Error running SSH command: %s", command)
2790
2791    def _is_ascii(self, value):
2792        try:
2793            return all(ord(c) < 128 for c in value)
2794        except TypeError:
2795            return False
2796
2797    def _toggle_ip(self):
2798        # Change active_ip if storwize_san_secondary_ip is set.
2799        if self.configuration.storwize_san_secondary_ip is None:
2800            return False
2801
2802        self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip
2803        LOG.info('Toggle active_ip from %(old)s to %(new)s.',
2804                 {'old': self.inactive_ip,
2805                  'new': self.active_ip})
2806        return True
2807
2808    def ensure_export(self, ctxt, volume):
2809        """Check that the volume exists on the storage.
2810
2811        The system does not "export" volumes as a Linux iSCSI target does,
2812        and therefore we just check that the volume exists on the storage.
2813        """
2814        vol_name = self._get_target_vol(volume)
2815        volume_defined = self._helpers.is_vdisk_defined(vol_name)
2816
2817        if not volume_defined:
2818            LOG.error('ensure_export: Volume %s not found on storage.',
2819                      volume['name'])
2820
2821    def create_export(self, ctxt, volume, connector):
2822        model_update = None
2823        return model_update
2824
2825    def remove_export(self, ctxt, volume):
2826        pass
2827
2828    def create_export_snapshot(self, ctxt, snapshot, connector):
2829        model_update = None
2830        return model_update
2831
2832    def remove_export_snapshot(self, ctxt, snapshot):
2833        pass
2834
2835    def _get_vdisk_params(self, type_id, volume_type=None,
2836                          volume_metadata=None):
2837        return self._helpers.get_vdisk_params(self.configuration,
2838                                              self._state, type_id,
2839                                              volume_type=volume_type,
2840                                              volume_metadata=volume_metadata)
2841
2842    def _check_if_group_type_cg_snapshot(self, volume):
2843        if (volume.group_id and
2844                not utils.is_group_a_cg_snapshot_type(volume.group)):
2845            msg = _('Create volume with a replication or hyperswap '
2846                    'group_id is not supported. Please add volume to '
2847                    'group after volume creation.')
2848            LOG.error(msg)
2849            raise exception.VolumeDriverException(reason=msg)
2850
2851    def create_volume(self, volume):
2852        LOG.debug('enter: create_volume: volume %s', volume['name'])
2853        # Create a replication or hyperswap volume with group_id is not
2854        # allowed.
2855        self._check_if_group_type_cg_snapshot(volume)
2856        opts = self._get_vdisk_params(volume['volume_type_id'],
2857                                      volume_metadata=
2858                                      volume.get('volume_metadata'))
2859        ctxt = context.get_admin_context()
2860        rep_type = self._get_volume_replicated_type(ctxt, volume)
2861
2862        pool = utils.extract_host(volume['host'], 'pool')
2863        model_update = None
2864
2865        if opts['volume_topology'] == 'hyperswap':
2866            LOG.debug('Volume %s to be created is a hyperswap volume.',
2867                      volume.name)
2868            if not self._helpers.is_system_topology_hyperswap(self._state):
2869                reason = _('Create hyperswap volume failed, the system is '
2870                           'below release 7.6.0.0 or it is not hyperswap '
2871                           'topology.')
2872                raise exception.InvalidInput(reason=reason)
2873            if opts['mirror_pool'] or rep_type:
2874                reason = _('Create hyperswap volume with streched cluster or '
2875                           'replication enabled is not supported.')
2876                raise exception.InvalidInput(reason=reason)
2877            if not opts['easytier']:
2878                raise exception.InvalidInput(
2879                    reason=_('The default easytier of hyperswap volume is '
2880                             'on, it does not support easytier off.'))
2881            self._helpers.check_hyperswap_pool(pool, opts['peer_pool'])
2882            hyperpool = '%s:%s' % (pool, opts['peer_pool'])
2883            self._helpers.create_hyperswap_volume(volume.name,
2884                                                  volume.size, 'gb',
2885                                                  hyperpool, opts)
2886        else:
2887            if opts['mirror_pool'] and rep_type:
2888                reason = _('Create mirror volume with replication enabled is '
2889                           'not supported.')
2890                raise exception.InvalidInput(reason=reason)
2891            opts['iogrp'] = self._helpers.select_io_group(self._state,
2892                                                          opts, pool)
2893            self._helpers.create_vdisk(volume['name'], str(volume['size']),
2894                                       'gb', pool, opts)
2895        if opts['qos']:
2896            self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
2897
2898        model_update = {'replication_status':
2899                        fields.ReplicationStatus.NOT_CAPABLE}
2900
2901        if rep_type:
2902            replica_obj = self._get_replica_obj(rep_type)
2903            replica_obj.volume_replication_setup(ctxt, volume)
2904            model_update = {'replication_status':
2905                            fields.ReplicationStatus.ENABLED}
2906
2907        LOG.debug('leave: create_volume:\n volume: %(vol)s\n '
2908                  'model_update %(model_update)s',
2909                  {'vol': volume['name'],
2910                   'model_update': model_update})
2911        return model_update
2912
2913    def delete_volume(self, volume):
2914        LOG.debug('enter: delete_volume: volume %s', volume['name'])
2915        ctxt = context.get_admin_context()
2916
2917        hyper_volume = self._helpers.is_volume_hyperswap(volume.name)
2918        if hyper_volume:
2919            LOG.debug('Volume %s to be deleted is a hyperswap '
2920                      'volume.', volume.name)
2921            self._helpers.delete_hyperswap_volume(volume.name, False)
2922            return
2923
2924        rep_type = self._get_volume_replicated_type(ctxt, volume)
2925        if rep_type:
2926            if self._aux_backend_helpers:
2927                self._aux_backend_helpers.delete_rc_volume(volume['name'],
2928                                                           target_vol=True)
2929            if not self._active_backend_id:
2930                self._master_backend_helpers.delete_rc_volume(volume['name'])
2931            else:
2932                # If it's in fail over state, also try to delete the volume
2933                # in master backend
2934                try:
2935                    self._master_backend_helpers.delete_rc_volume(
2936                        volume['name'])
2937                except Exception as ex:
2938                    LOG.error('Failed to get delete volume %(volume)s in '
2939                              'master backend. Exception: %(err)s.',
2940                              {'volume': volume['name'],
2941                               'err': ex})
2942        else:
2943            if self._active_backend_id:
2944                msg = (_('Error: delete non-replicate volume in failover mode'
2945                         ' is not allowed.'))
2946                LOG.error(msg)
2947                raise exception.VolumeDriverException(message=msg)
2948            else:
2949                self._helpers.delete_vdisk(volume['name'], False)
2950
2951        if volume['id'] in self._vdiskcopyops:
2952            del self._vdiskcopyops[volume['id']]
2953
2954            if not len(self._vdiskcopyops):
2955                self._vdiskcopyops_loop.stop()
2956                self._vdiskcopyops_loop = None
2957        LOG.debug('leave: delete_volume: volume %s', volume['name'])
2958
2959    def create_snapshot(self, snapshot):
2960        ctxt = context.get_admin_context()
2961        try:
2962            # TODO(zhaochy): change to use snapshot.volume
2963            source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
2964        except Exception:
2965            msg = (_('create_snapshot: get source volume failed.'))
2966            LOG.error(msg)
2967            raise exception.VolumeDriverException(message=msg)
2968
2969        rep_type = self._get_volume_replicated_type(
2970            ctxt, None, source_vol['volume_type_id'])
2971        if rep_type == storwize_const.GMCV:
2972            # GMCV volume will have problem to failback
2973            # when it has flash copy relationship besides change volumes
2974            msg = _('create_snapshot: Create snapshot to '
2975                    'gmcv replication volume is not allowed.')
2976            LOG.error(msg)
2977            raise exception.VolumeDriverException(message=msg)
2978
2979        pool = utils.extract_host(source_vol['host'], 'pool')
2980        opts = self._get_vdisk_params(source_vol['volume_type_id'])
2981
2982        if opts['volume_topology'] == 'hyperswap':
2983            msg = _('create_snapshot: Create snapshot to a '
2984                    'hyperswap volume is not allowed.')
2985            LOG.error(msg)
2986            raise exception.VolumeDriverException(message=msg)
2987
2988        self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
2989                                  snapshot['volume_id'], self.configuration,
2990                                  opts, False, pool=pool)
2991
2992    def delete_snapshot(self, snapshot):
2993        self._helpers.delete_vdisk(snapshot['name'], False)
2994
2995    def create_volume_from_snapshot(self, volume, snapshot):
2996        # Create volume from snapshot with a replication or hyperswap group_id
2997        # is not allowed.
2998        self._check_if_group_type_cg_snapshot(volume)
2999        opts = self._get_vdisk_params(volume['volume_type_id'],
3000                                      volume_metadata=
3001                                      volume.get('volume_metadata'))
3002        pool = utils.extract_host(volume['host'], 'pool')
3003        self._helpers.create_copy(snapshot['name'], volume['name'],
3004                                  snapshot['id'], self.configuration,
3005                                  opts, True, pool=pool)
3006        # The volume size is equal to the snapshot size in most
3007        # of the cases. But in some scenario, the volume size
3008        # may be bigger than the source volume size.
3009        # SVC does not support flashcopy between two volumes
3010        # with two different size. So use the snapshot size to
3011        # create volume first and then extend the volume to-
3012        # the target size.
3013        if volume['size'] > snapshot['volume_size']:
3014            # extend the new created target volume to expected size.
3015            self._extend_volume_op(volume, volume['size'],
3016                                   snapshot['volume_size'])
3017        if opts['qos']:
3018            self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
3019
3020        ctxt = context.get_admin_context()
3021        model_update = {'replication_status':
3022                        fields.ReplicationStatus.NOT_CAPABLE}
3023        rep_type = self._get_volume_replicated_type(ctxt, volume)
3024
3025        if rep_type:
3026            self._validate_replication_enabled()
3027            replica_obj = self._get_replica_obj(rep_type)
3028            replica_obj.volume_replication_setup(ctxt, volume)
3029            model_update = {'replication_status':
3030                            fields.ReplicationStatus.ENABLED}
3031        return model_update
3032
3033    def create_cloned_volume(self, tgt_volume, src_volume):
3034        """Creates a clone of the specified volume."""
3035        # Create a cloned volume with a replication or hyperswap group_id is
3036        # not allowed.
3037        self._check_if_group_type_cg_snapshot(tgt_volume)
3038        opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
3039                                      volume_metadata=
3040                                      tgt_volume.get('volume_metadata'))
3041        pool = utils.extract_host(tgt_volume['host'], 'pool')
3042        self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
3043                                  src_volume['id'], self.configuration,
3044                                  opts, True, pool=pool)
3045
3046        # The source volume size is equal to target volume size
3047        # in most of the cases. But in some scenarios, the target
3048        # volume size may be bigger than the source volume size.
3049        # SVC does not support flashcopy between two volumes
3050        # with two different sizes. So use source volume size to
3051        # create target volume first and then extend target
3052        # volume to original size.
3053        if tgt_volume['size'] > src_volume['size']:
3054            # extend the new created target volume to expected size.
3055            self._extend_volume_op(tgt_volume, tgt_volume['size'],
3056                                   src_volume['size'])
3057
3058        if opts['qos']:
3059            self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
3060
3061        if opts['volume_topology'] == 'hyperswap':
3062            LOG.debug('The source volume %s to be cloned is a hyperswap '
3063                      'volume.', src_volume.name)
3064            # Ensures the vdisk is not part of FC mapping.
3065            # Otherwize convert it to hyperswap volume will be failed.
3066            self._helpers.ensure_vdisk_no_fc_mappings(tgt_volume['name'],
3067                                                      allow_snaps=True,
3068                                                      allow_fctgt=False)
3069
3070            self._helpers.convert_volume_to_hyperswap(tgt_volume['name'],
3071                                                      opts,
3072                                                      self._state)
3073
3074        ctxt = context.get_admin_context()
3075        model_update = {'replication_status':
3076                        fields.ReplicationStatus.NOT_CAPABLE}
3077        rep_type = self._get_volume_replicated_type(ctxt, tgt_volume)
3078
3079        if rep_type:
3080            self._validate_replication_enabled()
3081            replica_obj = self._get_replica_obj(rep_type)
3082            replica_obj.volume_replication_setup(ctxt, tgt_volume)
3083            model_update = {'replication_status':
3084                            fields.ReplicationStatus.ENABLED}
3085        return model_update
3086
3087    def extend_volume(self, volume, new_size):
3088        self._extend_volume_op(volume, new_size)
3089
3090    def _extend_volume_op(self, volume, new_size, old_size=None):
3091        LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
3092        volume_name = self._get_target_vol(volume)
3093        if self._helpers.is_volume_hyperswap(volume_name):
3094            msg = _('_extend_volume_op: Extending a hyperswap volume is '
3095                    'not supported.')
3096            LOG.error(msg)
3097            raise exception.InvalidInput(message=msg)
3098
3099        ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name,
3100                                                        allow_snaps=False)
3101        if not ret:
3102            msg = (_('_extend_volume_op: Extending a volume with snapshots is '
3103                     'not supported.'))
3104            LOG.error(msg)
3105            raise exception.VolumeDriverException(message=msg)
3106
3107        if old_size is None:
3108            old_size = volume.size
3109        extend_amt = int(new_size) - old_size
3110
3111        rel_info = self._helpers.get_relationship_info(volume_name)
3112        if rel_info:
3113            LOG.warning('_extend_volume_op: Extending a volume with '
3114                        'remote copy is not recommended.')
3115            try:
3116                rep_type = rel_info['copy_type']
3117                cyclingmode = rel_info['cycling_mode']
3118                self._master_backend_helpers.delete_relationship(
3119                    volume.name)
3120                tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
3121                           volume.name)
3122                self._master_backend_helpers.extend_vdisk(volume.name,
3123                                                          extend_amt)
3124                self._aux_backend_helpers.extend_vdisk(tgt_vol, extend_amt)
3125                tgt_sys = self._aux_backend_helpers.get_system_info()
3126                if storwize_const.GMCV_MULTI == cyclingmode:
3127                    tgt_change_vol = (
3128                        storwize_const.REPLICA_CHG_VOL_PREFIX +
3129                        tgt_vol)
3130                    source_change_vol = (
3131                        storwize_const.REPLICA_CHG_VOL_PREFIX +
3132                        volume.name)
3133                    self._master_backend_helpers.extend_vdisk(
3134                        source_change_vol, extend_amt)
3135                    self._aux_backend_helpers.extend_vdisk(
3136                        tgt_change_vol, extend_amt)
3137                    src_change_opts = self._get_vdisk_params(
3138                        volume.volume_type_id)
3139                    cycle_period_seconds = src_change_opts.get(
3140                        'cycle_period_seconds')
3141                    self._master_backend_helpers.create_relationship(
3142                        volume.name, tgt_vol, tgt_sys.get('system_name'),
3143                        True, True, source_change_vol, cycle_period_seconds)
3144                    self._aux_backend_helpers.change_relationship_changevolume(
3145                        tgt_vol, tgt_change_vol, False)
3146                    self._master_backend_helpers.start_relationship(
3147                        volume.name)
3148                else:
3149                    self._master_backend_helpers.create_relationship(
3150                        volume.name, tgt_vol, tgt_sys.get('system_name'),
3151                        True if storwize_const.GLOBAL == rep_type else False)
3152            except Exception as e:
3153                msg = (_('Failed to extend a volume with remote copy '
3154                         '%(volume)s. Exception: '
3155                         '%(err)s.') % {'volume': volume.id,
3156                                        'err': e})
3157                LOG.error(msg)
3158                raise exception.VolumeDriverException(message=msg)
3159        else:
3160            self._helpers.extend_vdisk(volume_name, extend_amt)
3161        LOG.debug('leave: _extend_volume_op: volume %s', volume.id)
3162
3163    def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
3164        return self._helpers.add_vdisk_copy(volume, dest_pool,
3165                                            vol_type, self._state,
3166                                            self.configuration,
3167                                            auto_delete=auto_delete)
3168
3169    def _add_vdisk_copy_op(self, ctxt, volume, new_op):
3170        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
3171                                                     volume['id'])
3172        curr_ops = metadata.get('vdiskcopyops', None)
3173        if curr_ops:
3174            curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
3175            new_ops_list = curr_ops_list.append(new_op)
3176        else:
3177            new_ops_list = [new_op]
3178        new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
3179        self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
3180                                             {'vdiskcopyops': new_ops_str},
3181                                             False)
3182        if volume['id'] in self._vdiskcopyops:
3183            self._vdiskcopyops[volume['id']].append(new_op)
3184        else:
3185            self._vdiskcopyops[volume['id']] = [new_op]
3186
3187        # We added the first copy operation, so start the looping call
3188        if len(self._vdiskcopyops) == 1:
3189            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
3190                self._check_volume_copy_ops)
3191            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
3192
3193    def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
3194        try:
3195            self._vdiskcopyops[volume['id']].remove((orig_copy_id,
3196                                                     new_copy_id))
3197            if not len(self._vdiskcopyops[volume['id']]):
3198                del self._vdiskcopyops[volume['id']]
3199            if not len(self._vdiskcopyops):
3200                self._vdiskcopyops_loop.stop()
3201                self._vdiskcopyops_loop = None
3202        except KeyError:
3203            LOG.error('_rm_vdisk_copy_op: Volume %s does not have any '
3204                      'registered vdisk copy operations.', volume['id'])
3205            return
3206        except ValueError:
3207            LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have '
3208                      'the specified vdisk copy operation: orig=%(orig)s '
3209                      'new=%(new)s.',
3210                      {'vol': volume['id'], 'orig': orig_copy_id,
3211                       'new': new_copy_id})
3212            return
3213
3214        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
3215                                                     volume['id'])
3216        curr_ops = metadata.get('vdiskcopyops', None)
3217        if not curr_ops:
3218            LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not '
3219                      'have any registered vdisk copy operations.',
3220                      volume['id'])
3221            return
3222        curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
3223        try:
3224            curr_ops_list.remove((orig_copy_id, new_copy_id))
3225        except ValueError:
3226            LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
3227                      'not have the specified vdisk copy operation: '
3228                      'orig=%(orig)s new=%(new)s.',
3229                      {'vol': volume['id'], 'orig': orig_copy_id,
3230                       'new': new_copy_id})
3231            return
3232
3233        if len(curr_ops_list):
3234            new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
3235            self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
3236                                                 {'vdiskcopyops': new_ops_str},
3237                                                 False)
3238        else:
3239            self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
3240                                                 'vdiskcopyops')
3241
3242    def _check_volume_copy_ops(self):
3243        LOG.debug("Enter: update volume copy status.")
3244        ctxt = context.get_admin_context()
3245        copy_items = list(self._vdiskcopyops.items())
3246        for vol_id, copy_ops in copy_items:
3247            try:
3248                volume = self.db.volume_get(ctxt, vol_id)
3249            except Exception:
3250                LOG.warning('Volume %s does not exist.', vol_id)
3251                del self._vdiskcopyops[vol_id]
3252                if not len(self._vdiskcopyops):
3253                    self._vdiskcopyops_loop.stop()
3254                    self._vdiskcopyops_loop = None
3255                continue
3256
3257            for copy_op in copy_ops:
3258                try:
3259                    synced = self._helpers.is_vdisk_copy_synced(volume['name'],
3260                                                                copy_op[1])
3261                except Exception:
3262                    LOG.info('_check_volume_copy_ops: Volume %(vol)s does '
3263                             'not have the specified vdisk copy '
3264                             'operation: orig=%(orig)s new=%(new)s.',
3265                             {'vol': volume['id'], 'orig': copy_op[0],
3266                              'new': copy_op[1]})
3267                else:
3268                    if synced:
3269                        self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
3270                        self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
3271                                               copy_op[1])
3272        LOG.debug("Exit: update volume copy status.")
3273
3274    # #### V2.1 replication methods #### #
3275    @cinder_utils.trace
3276    def failover_host(self, context, volumes, secondary_id=None, groups=None):
3277        if not self._replica_enabled:
3278            msg = _("Replication is not properly enabled on backend.")
3279            LOG.error(msg)
3280            raise exception.UnableToFailOver(reason=msg)
3281
3282        if storwize_const.FAILBACK_VALUE == secondary_id:
3283            # In this case the administrator would like to fail back.
3284            secondary_id, volumes_update, groups_update = self._host_failback(
3285                context, volumes, groups)
3286        elif (secondary_id == self._replica_target['backend_id']
3287                or secondary_id is None):
3288            # In this case the administrator would like to fail over.
3289            secondary_id, volumes_update, groups_update = self._host_failover(
3290                context, volumes, groups)
3291        else:
3292            msg = (_("Invalid secondary id %s.") % secondary_id)
3293            LOG.error(msg)
3294            raise exception.InvalidReplicationTarget(reason=msg)
3295
3296        return secondary_id, volumes_update, groups_update
3297
3298    def _host_failback(self, ctxt, volumes, groups):
3299        """Fail back all the volume on the secondary backend."""
3300        volumes_update = []
3301        groups_update = []
3302        if not self._active_backend_id:
3303            LOG.info("Host has been failed back. doesn't need "
3304                     "to fail back again")
3305            return None, volumes_update, groups_update
3306
3307        try:
3308            self._master_backend_helpers.get_system_info()
3309        except Exception:
3310            msg = (_("Unable to failback due to primary is not reachable."))
3311            LOG.error(msg)
3312            raise exception.UnableToFailOver(reason=msg)
3313
3314        bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
3315
3316        # start synchronize from aux volume to master volume
3317        self._sync_with_aux(ctxt, rep_volumes)
3318        self._sync_replica_groups(ctxt, groups)
3319        self._wait_replica_ready(ctxt, rep_volumes)
3320        self._wait_replica_groups_ready(ctxt, groups)
3321
3322        rep_volumes_update = self._failback_replica_volumes(ctxt,
3323                                                            rep_volumes)
3324        volumes_update.extend(rep_volumes_update)
3325
3326        rep_vols_in_grp_update, groups_update = self._failback_replica_groups(
3327            ctxt, groups)
3328        volumes_update.extend(rep_vols_in_grp_update)
3329
3330        bypass_volumes_update = self._bypass_volume_process(bypass_volumes)
3331        volumes_update.extend(bypass_volumes_update)
3332
3333        self._helpers = self._master_backend_helpers
3334        self._active_backend_id = None
3335        self._state = self._master_state
3336
3337        self._update_volume_stats()
3338        return storwize_const.FAILBACK_VALUE, volumes_update, groups_update
3339
3340    def _failback_replica_volumes(self, ctxt, rep_volumes):
3341        LOG.debug('enter: _failback_replica_volumes')
3342        volumes_update = []
3343
3344        for volume in rep_volumes:
3345            rep_type = self._get_volume_replicated_type(ctxt, volume)
3346            replica_obj = self._get_replica_obj(rep_type)
3347            tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
3348            rep_info = self._helpers.get_relationship_info(tgt_volume)
3349            if not rep_info:
3350                volumes_update.append(
3351                    {'volume_id': volume['id'],
3352                     'updates':
3353                         {'replication_status':
3354                          fields.ReplicationStatus.ERROR,
3355                          'status': 'error'}})
3356                LOG.error('_failback_replica_volumes:no rc-releationship '
3357                          'is established between master: %(master)s and '
3358                          'aux %(aux)s. Please re-establish the '
3359                          'relationship and synchronize the volumes on '
3360                          'backend storage.',
3361                          {'master': volume['name'], 'aux': tgt_volume})
3362                continue
3363            LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
3364                      '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s'
3365                      'primary=%(primary)s',
3366                      {'vol': volume['name'],
3367                       'master_vol': rep_info['master_vdisk_name'],
3368                       'aux_vol': rep_info['aux_vdisk_name'],
3369                       'state': rep_info['state'],
3370                       'primary': rep_info['primary']})
3371            if volume.status == 'in-use':
3372                LOG.warning('_failback_replica_volumes: failback in-use '
3373                            'volume: %(volume)s is not recommended.',
3374                            {'volume': volume.name})
3375            try:
3376                replica_obj.replication_failback(volume)
3377                model_updates = {
3378                    'replication_status': fields.ReplicationStatus.ENABLED}
3379                volumes_update.append(
3380                    {'volume_id': volume['id'],
3381                     'updates': model_updates})
3382            except exception.VolumeDriverException:
3383                LOG.error('Unable to fail back volume %(volume_id)s',
3384                          {'volume_id': volume.id})
3385                volumes_update.append(
3386                    {'volume_id': volume['id'],
3387                     'updates': {'replication_status':
3388                                 fields.ReplicationStatus.ERROR,
3389                                 'status': 'error'}})
3390        LOG.debug('leave: _failback_replica_volumes '
3391                  'volumes_update=%(volumes_update)s',
3392                  {'volumes_update': volumes_update})
3393        return volumes_update
3394
3395    def _bypass_volume_process(self, bypass_vols):
3396        volumes_update = []
3397        for vol in bypass_vols:
3398            if vol.replication_driver_data:
3399                rep_data = json.loads(vol.replication_driver_data)
3400                update_status = rep_data['previous_status']
3401                rep_data = ''
3402            else:
3403                update_status = 'error'
3404                rep_data = json.dumps({'previous_status': vol.status})
3405
3406            volumes_update.append(
3407                {'volume_id': vol.id,
3408                 'updates': {'status': update_status,
3409                             'replication_driver_data': rep_data}})
3410
3411        return volumes_update
3412
3413    def _failback_replica_groups(self, ctxt, groups):
3414        volumes_update = []
3415        groups_update = []
3416        for grp in groups:
3417            try:
3418                grp_rep_status = self._rep_grp_failback(
3419                    ctxt, grp, sync_grp=False)['replication_status']
3420            except Exception as ex:
3421                LOG.error('Fail to failback group %(grp)s during host '
3422                          'failback due to error: %(error)s',
3423                          {'grp': grp.id, 'error': ex})
3424                grp_rep_status = fields.ReplicationStatus.ERROR
3425
3426            # Update all the volumes' status in that group
3427            for vol in grp.volumes:
3428                vol_update = {'volume_id': vol.id,
3429                              'updates':
3430                                  {'replication_status': grp_rep_status,
3431                                   'status': (
3432                                       vol.status if grp_rep_status ==
3433                                       fields.ReplicationStatus.ENABLED
3434                                       else 'error')}}
3435                volumes_update.append(vol_update)
3436            grp_status = (fields.GroupStatus.AVAILABLE
3437                          if grp_rep_status ==
3438                          fields.ReplicationStatus.ENABLED
3439                          else fields.GroupStatus.ERROR)
3440            grp_update = {'group_id': grp.id,
3441                          'updates': {'replication_status': grp_rep_status,
3442                                      'status': grp_status}}
3443            groups_update.append(grp_update)
3444        return volumes_update, groups_update
3445
3446    def _sync_with_aux(self, ctxt, volumes):
3447        LOG.debug('enter: _sync_with_aux ')
3448        try:
3449            rep_mgr = self._get_replica_mgr()
3450            rep_mgr.establish_target_partnership()
3451        except Exception as ex:
3452            LOG.warning('Fail to establish partnership in backend. '
3453                        'error=%(ex)s', {'error': ex})
3454        for volume in volumes:
3455            tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
3456            rep_info = self._helpers.get_relationship_info(tgt_volume)
3457            if not rep_info:
3458                LOG.error('_sync_with_aux: no rc-releationship is '
3459                          'established between master: %(master)s and aux '
3460                          '%(aux)s. Please re-establish the relationship '
3461                          'and synchronize the volumes on backend '
3462                          'storage.', {'master': volume['name'],
3463                                       'aux': tgt_volume})
3464                continue
3465            LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol='
3466                      '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
3467                      'primary=%(primary)s',
3468                      {'volume': volume['name'],
3469                       'master_vol': rep_info['master_vdisk_name'],
3470                       'aux_vol': rep_info['aux_vdisk_name'],
3471                       'state': rep_info['state'],
3472                       'primary': rep_info['primary']})
3473            try:
3474                if (rep_info['state'] not in
3475                        [storwize_const.REP_CONSIS_SYNC,
3476                         storwize_const.REP_CONSIS_COPYING]):
3477                    if rep_info['primary'] == 'master':
3478                        self._helpers.start_relationship(tgt_volume)
3479                    else:
3480                        self._helpers.start_relationship(tgt_volume,
3481                                                         primary='aux')
3482            except Exception as ex:
3483                LOG.warning('Fail to copy data from aux to master. master:'
3484                            ' %(master)s and aux %(aux)s. Please '
3485                            're-establish the relationship and synchronize'
3486                            ' the volumes on backend storage. error='
3487                            '%(ex)s', {'master': volume['name'],
3488                                       'aux': tgt_volume,
3489                                       'error': ex})
3490        LOG.debug('leave: _sync_with_aux.')
3491
3492    def _wait_replica_ready(self, ctxt, volumes):
3493        for volume in volumes:
3494            tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
3495            try:
3496                self._wait_replica_vol_ready(ctxt, tgt_volume)
3497            except Exception as ex:
3498                LOG.error('_wait_replica_ready: wait for volume:%(volume)s'
3499                          ' remote copy synchronization failed due to '
3500                          'error:%(err)s.', {'volume': tgt_volume,
3501                                             'err': ex})
3502
3503    def _wait_replica_vol_ready(self, ctxt, volume):
3504        LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s',
3505                  {'volume': volume})
3506
3507        def _replica_vol_ready():
3508            rep_info = self._helpers.get_relationship_info(volume)
3509            if not rep_info:
3510                msg = (_('_wait_replica_vol_ready: no rc-releationship'
3511                         'is established for volume:%(volume)s. Please '
3512                         're-establish the rc-relationship and '
3513                         'synchronize the volumes on backend storage.'),
3514                       {'volume': volume})
3515                LOG.error(msg)
3516                raise exception.VolumeBackendAPIException(data=msg)
3517            LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: '
3518                      'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
3519                      'state=%(state)s, primary=%(primary)s',
3520                      {'volume': volume,
3521                       'master_vol': rep_info['master_vdisk_name'],
3522                       'aux_vol': rep_info['aux_vdisk_name'],
3523                       'state': rep_info['state'],
3524                       'primary': rep_info['primary']})
3525            if (rep_info['state'] in
3526                    [storwize_const.REP_CONSIS_SYNC,
3527                     storwize_const.REP_CONSIS_COPYING]):
3528                return True
3529            elif rep_info['state'] == storwize_const.REP_IDL_DISC:
3530                msg = (_('Wait synchronize failed. volume: %(volume)s'),
3531                       {'volume': volume})
3532                LOG.error(msg)
3533                raise exception.VolumeBackendAPIException(data=msg)
3534            return False
3535
3536        self._helpers._wait_for_a_condition(
3537            _replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT,
3538            interval=storwize_const.DEFAULT_RC_INTERVAL,
3539            raise_exception=True)
3540        LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s',
3541                  {'volume': volume})
3542
3543    def _sync_replica_groups(self, ctxt, groups):
3544        for grp in groups:
3545            rccg_name = self._get_rccg_name(grp)
3546            self._sync_with_aux_grp(ctxt, rccg_name)
3547
3548    def _wait_replica_groups_ready(self, ctxt, groups):
3549        for grp in groups:
3550            rccg_name = self._get_rccg_name(grp)
3551            self._wait_replica_grp_ready(ctxt, rccg_name)
3552
3553    def _host_failover(self, ctxt, volumes, groups):
3554        volumes_update = []
3555        groups_update = []
3556        if self._active_backend_id:
3557            LOG.info("Host has been failed over to %s",
3558                     self._active_backend_id)
3559            return self._active_backend_id, volumes_update, groups_update
3560
3561        try:
3562            self._aux_backend_helpers.get_system_info()
3563        except Exception as ex:
3564            msg = (_("Unable to failover due to replication target is not "
3565                     "reachable. error=%(ex)s"), {'error': ex})
3566            LOG.error(msg)
3567            raise exception.UnableToFailOver(reason=msg)
3568
3569        bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
3570
3571        rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes)
3572        volumes_update.extend(rep_volumes_update)
3573
3574        rep_vols_in_grp_update, groups_update = self._failover_replica_groups(
3575            ctxt, groups)
3576        volumes_update.extend(rep_vols_in_grp_update)
3577
3578        bypass_volumes_update = self._bypass_volume_process(bypass_volumes)
3579        volumes_update.extend(bypass_volumes_update)
3580
3581        self._helpers = self._aux_backend_helpers
3582        self._active_backend_id = self._replica_target['backend_id']
3583        self._secondary_pools = [self._replica_target['pool_name']]
3584        self._state = self._aux_state
3585
3586        self._update_volume_stats()
3587        return self._active_backend_id, volumes_update, groups_update
3588
3589    def _failover_replica_volumes(self, ctxt, rep_volumes):
3590        LOG.debug('enter: _failover_replica_volumes')
3591        volumes_update = []
3592
3593        for volume in rep_volumes:
3594            rep_type = self._get_volume_replicated_type(ctxt, volume)
3595            replica_obj = self._get_replica_obj(rep_type)
3596            # Try do the fail-over.
3597            try:
3598                rep_info = self._aux_backend_helpers.get_relationship_info(
3599                    storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
3600                if not rep_info:
3601                    volumes_update.append(
3602                        {'volume_id': volume['id'],
3603                         'updates':
3604                             {'replication_status':
3605                              fields.ReplicationStatus.FAILOVER_ERROR,
3606                              'status': 'error'}})
3607                    LOG.error('_failover_replica_volumes: no rc-'
3608                              'releationship is established for volume:'
3609                              '%(volume)s. Please re-establish the rc-'
3610                              'relationship and synchronize the volumes on'
3611                              ' backend storage.',
3612                              {'volume': volume.name})
3613                    continue
3614                LOG.debug('_failover_replica_volumes: vol=%(vol)s, '
3615                          'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
3616                          'state=%(state)s, primary=%(primary)s',
3617                          {'vol': volume['name'],
3618                           'master_vol': rep_info['master_vdisk_name'],
3619                           'aux_vol': rep_info['aux_vdisk_name'],
3620                           'state': rep_info['state'],
3621                           'primary': rep_info['primary']})
3622                if volume.status == 'in-use':
3623                    LOG.warning('_failover_replica_volumes: failover in-use '
3624                                'volume: %(volume)s is not recommended.',
3625                                {'volume': volume.name})
3626                replica_obj.failover_volume_host(ctxt, volume)
3627                model_updates = {
3628                    'replication_status': fields.ReplicationStatus.FAILED_OVER}
3629                volumes_update.append(
3630                    {'volume_id': volume['id'],
3631                     'updates': model_updates})
3632            except exception.VolumeDriverException:
3633                LOG.error('Unable to failover to aux volume. Please make '
3634                          'sure that the aux volume is ready.')
3635                volumes_update.append(
3636                    {'volume_id': volume['id'],
3637                     'updates': {'status': 'error',
3638                                 'replication_status':
3639                                 fields.ReplicationStatus.FAILOVER_ERROR}})
3640        LOG.debug('leave: _failover_replica_volumes '
3641                  'volumes_update=%(volumes_update)s',
3642                  {'volumes_update': volumes_update})
3643        return volumes_update
3644
3645    def _failover_replica_groups(self, ctxt, groups):
3646        volumes_update = []
3647        groups_update = []
3648        for grp in groups:
3649            try:
3650                grp_rep_status = self._rep_grp_failover(
3651                    ctxt, grp)['replication_status']
3652            except Exception as ex:
3653                LOG.error('Fail to failover group %(grp)s during host '
3654                          'failover due to error: %(error)s',
3655                          {'grp': grp.id, 'error': ex})
3656                grp_rep_status = fields.ReplicationStatus.ERROR
3657
3658            # Update all the volumes' status in that group
3659            for vol in grp.volumes:
3660                vol_update = {'volume_id': vol.id,
3661                              'updates':
3662                                  {'replication_status': grp_rep_status,
3663                                   'status': (
3664                                       vol.status if grp_rep_status ==
3665                                       fields.ReplicationStatus.FAILED_OVER
3666                                       else 'error')}}
3667                volumes_update.append(vol_update)
3668            grp_status = (fields.GroupStatus.AVAILABLE
3669                          if grp_rep_status ==
3670                          fields.ReplicationStatus.FAILED_OVER
3671                          else fields.GroupStatus.ERROR)
3672            grp_update = {'group_id': grp.id,
3673                          'updates': {'replication_status': grp_rep_status,
3674                                      'status': grp_status}}
3675            groups_update.append(grp_update)
3676        return volumes_update, groups_update
3677
3678    def _classify_volume(self, ctxt, volumes):
3679        bypass_volumes = []
3680        replica_volumes = []
3681
3682        for v in volumes:
3683            volume_type = self._get_volume_replicated_type(ctxt, v)
3684            grp = v.group
3685            if grp and utils.is_group_a_type(
3686                    grp, "consistent_group_replication_enabled"):
3687                continue
3688            elif volume_type and v.status in ['available', 'in-use']:
3689                replica_volumes.append(v)
3690            else:
3691                bypass_volumes.append(v)
3692        return bypass_volumes, replica_volumes
3693
3694    def _get_replica_obj(self, rep_type):
3695        replica_manager = self.replica_manager[
3696            self._replica_target['backend_id']]
3697        return replica_manager.get_replica_obj(rep_type)
3698
3699    def _get_replica_mgr(self):
3700        replica_manager = self.replica_manager[
3701            self._replica_target['backend_id']]
3702        return replica_manager
3703
3704    def _get_target_vol(self, volume):
3705        tgt_vol = volume['name']
3706        if self._active_backend_id:
3707            ctxt = context.get_admin_context()
3708            rep_type = self._get_volume_replicated_type(ctxt, volume)
3709            if rep_type:
3710                tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
3711                           volume['name'])
3712        return tgt_vol
3713
3714    def _validate_replication_enabled(self):
3715        if not self._replica_enabled:
3716            msg = _("Replication is not properly configured on backend.")
3717            LOG.error(msg)
3718            raise exception.VolumeBackendAPIException(data=msg)
3719
3720    def _get_specs_replicated_type(self, volume_type):
3721        replication_type = None
3722        extra_specs = volume_type.get("extra_specs", {})
3723        rep_val = extra_specs.get('replication_enabled')
3724        if rep_val == "<is> True":
3725            replication_type = extra_specs.get('replication_type',
3726                                               storwize_const.GLOBAL)
3727            # The format for replication_type in extra spec is in
3728            # "<in> global". Otherwise, the code will
3729            # not reach here.
3730            if replication_type != storwize_const.GLOBAL:
3731                # Pick up the replication type specified in the
3732                # extra spec from the format like "<in> global".
3733                replication_type = replication_type.split()[1]
3734            if replication_type not in storwize_const.VALID_REP_TYPES:
3735                msg = (_("Invalid replication type %s.") % replication_type)
3736                LOG.error(msg)
3737                raise exception.InvalidInput(reason=msg)
3738        return replication_type
3739
3740    def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None):
3741        replication_type = None
3742        volume_type = None
3743        volume_type_id = volume.volume_type_id if volume else vol_type_id
3744        if volume_type_id:
3745            volume_type = objects.VolumeType.get_by_name_or_id(
3746                ctxt, volume_type_id)
3747        if volume_type:
3748            replication_type = self._get_specs_replicated_type(volume_type)
3749        return replication_type
3750
3751    def _get_storwize_config(self):
3752        # Update the storwize state
3753        try:
3754            self._update_storwize_state(self._master_state, self._helpers)
3755        except Exception as err:
3756            LOG.warning('Fail to get system %(san_ip)s info. error=%(error)s',
3757                        {'san_ip': self.active_ip, 'error': err})
3758            if not self._active_backend_id:
3759                with excutils.save_and_reraise_exception():
3760                    pass
3761        self._do_replication_setup()
3762
3763        if self._active_backend_id and self._replica_target:
3764            self._helpers = self._aux_backend_helpers
3765            self._state = self._aux_state
3766
3767        self._replica_enabled = (True if (self._helpers.replication_licensed()
3768                                          and self._replica_target) else False)
3769        if self._replica_enabled:
3770            self._supported_replica_types = storwize_const.VALID_REP_TYPES
3771
3772    def _do_replication_setup(self):
3773        rep_devs = self.configuration.safe_get('replication_device')
3774        if not rep_devs:
3775            return
3776
3777        if len(rep_devs) > 1:
3778            raise exception.InvalidInput(
3779                reason='Multiple replication devices are configured. '
3780                       'Now only one replication_device is supported.')
3781
3782        required_flags = ['san_ip', 'backend_id', 'san_login',
3783                          'san_password', 'pool_name']
3784        for flag in required_flags:
3785            if flag not in rep_devs[0]:
3786                raise exception.InvalidInput(
3787                    reason=_('%s is not set.') % flag)
3788
3789        rep_target = {}
3790        rep_target['san_ip'] = rep_devs[0].get('san_ip')
3791        rep_target['backend_id'] = rep_devs[0].get('backend_id')
3792        rep_target['san_login'] = rep_devs[0].get('san_login')
3793        rep_target['san_password'] = rep_devs[0].get('san_password')
3794        rep_target['pool_name'] = rep_devs[0].get('pool_name')
3795
3796        # Each replication target will have a corresponding replication.
3797        self._replication_initialize(rep_target)
3798
3799    def _replication_initialize(self, target):
3800        rep_manager = storwize_rep.StorwizeSVCReplicationManager(
3801            self, target, StorwizeHelpers)
3802
3803        if self._active_backend_id:
3804            if self._active_backend_id != target['backend_id']:
3805                msg = (_("Invalid secondary id %s.") % self._active_backend_id)
3806                LOG.error(msg)
3807                raise exception.InvalidInput(reason=msg)
3808        # Setup partnership only in non-failover state
3809        else:
3810            try:
3811                rep_manager.establish_target_partnership()
3812            except exception.VolumeDriverException:
3813                LOG.error('The replication src %(src)s has not '
3814                          'successfully established partnership with the '
3815                          'replica target %(tgt)s.',
3816                          {'src': self.configuration.san_ip,
3817                           'tgt': target['backend_id']})
3818
3819        self._aux_backend_helpers = rep_manager.get_target_helpers()
3820        self.replica_manager[target['backend_id']] = rep_manager
3821        self._replica_target = target
3822        self._update_storwize_state(self._aux_state, self._aux_backend_helpers)
3823
3824    # Replication Group (Tiramisu)
3825    @cinder_utils.trace
3826    def enable_replication(self, context, group, volumes):
3827        """Enables replication for a group and volumes in the group."""
3828        model_update = {'replication_status': fields.ReplicationStatus.ENABLED}
3829        volumes_update = []
3830        rccg_name = self._get_rccg_name(group)
3831        rccg = self._helpers.get_rccg(rccg_name)
3832        if rccg and rccg['relationship_count'] != '0':
3833            try:
3834                if rccg['primary'] == 'aux':
3835                    self._helpers.start_rccg(rccg_name, primary='aux')
3836                else:
3837                    self._helpers.start_rccg(rccg_name, primary='master')
3838            except exception.VolumeBackendAPIException as err:
3839                LOG.error("Failed to enable group replication on %(rccg)s. "
3840                          "Exception: %(exception)s.",
3841                          {'rccg': rccg_name, 'exception': err})
3842                model_update[
3843                    'replication_status'] = fields.ReplicationStatus.ERROR
3844        else:
3845            if rccg:
3846                LOG.error("Enable replication on empty group %(rccg)s is "
3847                          "forbidden.", {'rccg': rccg['name']})
3848            else:
3849                LOG.error("Failed to enable group replication: %(grp)s does "
3850                          "not exist in backend.", {'grp': group.id})
3851            model_update['replication_status'] = fields.ReplicationStatus.ERROR
3852
3853        for vol in volumes:
3854            volumes_update.append(
3855                {'id': vol.id,
3856                 'replication_status': model_update['replication_status']})
3857        return model_update, volumes_update
3858
3859    @cinder_utils.trace
3860    def disable_replication(self, context, group, volumes):
3861        """Disables replication for a group and volumes in the group."""
3862        model_update = {
3863            'replication_status': fields.ReplicationStatus.DISABLED}
3864        volumes_update = []
3865        rccg_name = self._get_rccg_name(group)
3866        rccg = self._helpers.get_rccg(rccg_name)
3867        if rccg and rccg['relationship_count'] != '0':
3868            try:
3869                self._helpers.stop_rccg(rccg_name)
3870            except exception.VolumeBackendAPIException as err:
3871                LOG.error("Failed to disable group replication on %(rccg)s. "
3872                          "Exception: %(exception)s.",
3873                          {'rccg': rccg_name, 'exception': err})
3874                model_update[
3875                    'replication_status'] = fields.ReplicationStatus.ERROR
3876        else:
3877            if rccg:
3878                LOG.error("Disable replication on empty group %(rccg)s is "
3879                          "forbidden.", {'rccg': rccg['name']})
3880            else:
3881                LOG.error("Failed to disable group replication: %(grp)s does "
3882                          "not exist in backend.", {'grp': group.id})
3883            model_update['replication_status'] = fields.ReplicationStatus.ERROR
3884
3885        for vol in volumes:
3886            volumes_update.append(
3887                {'id': vol.id,
3888                 'replication_status': model_update['replication_status']})
3889        return model_update, volumes_update
3890
3891    @cinder_utils.trace
3892    def failover_replication(self, context, group, volumes,
3893                             secondary_backend_id=None):
3894        """Fails over replication for a group and volumes in the group."""
3895        volumes_model_update = []
3896        model_update = {}
3897        if not self._replica_enabled:
3898            msg = _("Replication is not properly enabled on backend.")
3899            LOG.error(msg)
3900            raise exception.UnableToFailOver(reason=msg)
3901
3902        if storwize_const.FAILBACK_VALUE == secondary_backend_id:
3903            # In this case the administrator would like to group fail back.
3904            model_update = self._rep_grp_failback(context, group)
3905        elif (secondary_backend_id == self._replica_target['backend_id']
3906                or secondary_backend_id is None):
3907            # In this case the administrator would like to group fail over.
3908            model_update = self._rep_grp_failover(context, group)
3909        else:
3910            msg = (_("Invalid secondary id %s.") % secondary_backend_id)
3911            LOG.error(msg)
3912            raise exception.InvalidReplicationTarget(reason=msg)
3913
3914        for vol in volumes:
3915            volume_model_update = {'id': vol.id,
3916                                   'replication_status':
3917                                       model_update['replication_status']}
3918            volumes_model_update.append(volume_model_update)
3919        return model_update, volumes_model_update
3920
3921    def _rep_grp_failback(self, ctxt, group, sync_grp=True):
3922        """Fail back all the volume in the replication group."""
3923        model_update = {
3924            'replication_status': fields.ReplicationStatus.ENABLED}
3925        rccg_name = self._get_rccg_name(group)
3926
3927        try:
3928            self._master_backend_helpers.get_system_info()
3929        except Exception as ex:
3930            msg = (_("Unable to failback group %(rccg)s due to primary is not "
3931                     "reachable. error=%(error)s"),
3932                   {'rccg': rccg_name, 'error': ex})
3933            LOG.error(msg)
3934            raise exception.UnableToFailOver(reason=msg)
3935
3936        rccg = self._helpers.get_rccg(rccg_name)
3937        if not rccg:
3938            msg = (_("Unable to failback group %(rccg)s due to replication "
3939                     "group does not exist on backend."),
3940                   {'rccg': rccg_name})
3941            LOG.error(msg)
3942            raise exception.UnableToFailOver(reason=msg)
3943
3944        if rccg['relationship_count'] == '0':
3945            msg = (_("Unable to failback empty group %(rccg)s"),
3946                   {'rccg': rccg['name']})
3947            LOG.error(msg)
3948            raise exception.UnableToFailOver(reason=msg)
3949
3950        if rccg['primary'] == 'master':
3951            LOG.info("Do not need to fail back group %(rccg)s again due to "
3952                     "primary is already master.", {'rccg': rccg['name']})
3953            return model_update
3954
3955        if sync_grp:
3956            self._sync_with_aux_grp(ctxt, rccg['name'])
3957            self._wait_replica_grp_ready(ctxt, rccg['name'])
3958
3959        if rccg['cycling_mode'] == 'multi':
3960            # This is a gmcv replication group
3961            try:
3962                self._aux_backend_helpers.stop_rccg(rccg['name'], access=True)
3963                self._aux_backend_helpers.start_rccg(rccg['name'],
3964                                                     primary='master')
3965                return model_update
3966            except exception.VolumeBackendAPIException as e:
3967                msg = (_('Unable to fail over the group %(rccg)s to the aux '
3968                         'back-end, error: %(error)s') %
3969                       {"rccg": rccg['name'], "error": e})
3970                LOG.exception(msg)
3971                raise exception.UnableToFailOver(reason=msg)
3972        else:
3973            try:
3974                self._helpers.switch_rccg(rccg['name'], aux=False)
3975            except exception.VolumeBackendAPIException as e:
3976                msg = (_('Unable to fail back the group %(rccg)s, error: '
3977                         '%(error)s') % {"rccg": rccg['name'], "error": e})
3978                LOG.exception(msg)
3979                raise exception.UnableToFailOver(reason=msg)
3980        return model_update
3981
3982    def _rep_grp_failover(self, ctxt, group):
3983        """Fail over all the volume in the replication group."""
3984        model_update = {
3985            'replication_status': fields.ReplicationStatus.FAILED_OVER}
3986        rccg_name = self._get_rccg_name(group)
3987        try:
3988            self._aux_backend_helpers.get_system_info()
3989        except Exception as ex:
3990            msg = (_("Unable to failover group %(rccg)s due to replication "
3991                     "target is not reachable. error=%(error)s"),
3992                   {'rccg': rccg_name, 'error': ex})
3993            LOG.error(msg)
3994            raise exception.UnableToFailOver(reason=msg)
3995
3996        rccg = self._aux_backend_helpers.get_rccg(rccg_name)
3997        if not rccg:
3998            msg = (_("Unable to failover group %(rccg)s due to replication "
3999                     "group does not exist on backend."),
4000                   {'rccg': rccg_name})
4001            LOG.error(msg)
4002            raise exception.UnableToFailOver(reason=msg)
4003
4004        if rccg['relationship_count'] == '0':
4005            msg = (_("Unable to failover group %(rccg)s due to it is an "
4006                     "empty group."), {'rccg': rccg['name']})
4007            LOG.error(msg)
4008            raise exception.UnableToFailOver(reason=msg)
4009
4010        if rccg['primary'] == 'aux':
4011            LOG.info("Do not need to fail over group %(rccg)s again due to "
4012                     "primary is already aux.", {'rccg': rccg['name']})
4013            return model_update
4014
4015        if rccg['cycling_mode'] == 'multi':
4016            # This is a gmcv replication group
4017            try:
4018                self._aux_backend_helpers.stop_rccg(rccg['name'], access=True)
4019                self._sync_with_aux_grp(ctxt, rccg['name'])
4020                return model_update
4021            except exception.VolumeBackendAPIException as e:
4022                msg = (_('Unable to fail over the group %(rccg)s to the aux '
4023                         'back-end, error: %(error)s') %
4024                       {"rccg": rccg['name'], "error": e})
4025                LOG.exception(msg)
4026                raise exception.UnableToFailOver(reason=msg)
4027        else:
4028            try:
4029                # Reverse the role of the primary and secondary volumes
4030                self._helpers.switch_rccg(rccg['name'], aux=True)
4031                return model_update
4032            except exception.VolumeBackendAPIException as e:
4033                LOG.exception('Unable to fail over the group %(rccg)s to the '
4034                              'aux back-end by switchrcconsistgrp command, '
4035                              'error: %(error)s',
4036                              {"rccg": rccg['name'], "error": e})
4037                # If the switch command fail, try to make the aux group
4038                # writeable again.
4039                try:
4040                    self._aux_backend_helpers.stop_rccg(rccg['name'],
4041                                                        access=True)
4042                    self._sync_with_aux_grp(ctxt, rccg['name'])
4043                    return model_update
4044                except exception.VolumeBackendAPIException as e:
4045                    msg = (_('Unable to fail over the group %(rccg)s to the '
4046                             'aux back-end, error: %(error)s') %
4047                           {"rccg": rccg['name'], "error": e})
4048                    LOG.exception(msg)
4049                    raise exception.UnableToFailOver(reason=msg)
4050
4051    @cinder_utils.trace
4052    def _sync_with_aux_grp(self, ctxt, rccg_name):
4053        try:
4054            rccg = self._helpers.get_rccg(rccg_name)
4055            if rccg and rccg['relationship_count'] != '0':
4056                if (rccg['state'] not in
4057                        [storwize_const.REP_CONSIS_SYNC,
4058                         storwize_const.REP_CONSIS_COPYING]):
4059                    if rccg['primary'] == 'master':
4060                        self._helpers.start_rccg(rccg_name, primary='master')
4061                    else:
4062                        self._helpers.start_rccg(rccg_name, primary='aux')
4063            else:
4064                LOG.warning('group %(grp)s is not in sync.')
4065        except exception.VolumeBackendAPIException as ex:
4066            LOG.warning('Fail to copy data from aux group %(rccg)s to master '
4067                        'group. Please recheck the relationship and '
4068                        'synchronize the group on backend storage. error='
4069                        '%(error)s', {'rccg': rccg['name'], 'error': ex})
4070
4071    def _wait_replica_grp_ready(self, ctxt, rccg_name):
4072        LOG.debug('_wait_replica_grp_ready: group=%(rccg)s',
4073                  {'rccg': rccg_name})
4074
4075        def _replica_grp_ready():
4076            rccg = self._helpers.get_rccg(rccg_name)
4077            if not rccg:
4078                msg = (_('_replica_grp_ready: no group %(rccg)s exists on the '
4079                         'backend. Please re-create the rccg and synchronize'
4080                         'the volumes on backend storage.'),
4081                       {'rccg': rccg_name})
4082                LOG.error(msg)
4083                raise exception.VolumeBackendAPIException(data=msg)
4084            if rccg['relationship_count'] == '0':
4085                return True
4086            LOG.debug('_replica_grp_ready: group: %(rccg)s: state=%(state)s, '
4087                      'primary=%(primary)s',
4088                      {'rccg': rccg['name'], 'state': rccg['state'],
4089                       'primary': rccg['primary']})
4090            if rccg['state'] in [storwize_const.REP_CONSIS_SYNC,
4091                                 storwize_const.REP_CONSIS_COPYING]:
4092                return True
4093            if rccg['state'] == storwize_const.REP_IDL_DISC:
4094                msg = (_('Wait synchronize failed. group: %(rccg)s') %
4095                       {'rccg': rccg_name})
4096                LOG.error(msg)
4097                raise exception.VolumeBackendAPIException(data=msg)
4098            return False
4099        try:
4100            self._helpers._wait_for_a_condition(
4101                _replica_grp_ready,
4102                timeout=storwize_const.DEFAULT_RCCG_TIMEOUT,
4103                interval=storwize_const.DEFAULT_RCCG_INTERVAL,
4104                raise_exception=True)
4105        except Exception as ex:
4106            LOG.error('_wait_replica_grp_ready: wait for group %(rccg)s '
4107                      'synchronization failed due to '
4108                      'error: %(err)s.', {'rccg': rccg_name,
4109                                          'err': ex})
4110
4111    def get_replication_error_status(self, context, groups):
4112        """Returns error info for replicated groups and its volumes.
4113
4114        The failover/failback only happens manually, no need to update the
4115        status.
4116        """
4117        return [], []
4118
4119    def _get_vol_sys_info(self, volume):
4120        tgt_vol = volume.name
4121        backend_helper = self._helpers
4122        node_state = self._state
4123        grp = volume.group
4124        if grp and utils.is_group_a_type(
4125                grp, "consistent_group_replication_enabled"):
4126            if (grp.replication_status ==
4127                    fields.ReplicationStatus.FAILED_OVER):
4128                tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
4129                           volume.name)
4130                backend_helper = self._aux_backend_helpers
4131                node_state = self._aux_state
4132            else:
4133                backend_helper = self._master_backend_helpers
4134                node_state = self._master_state
4135        elif self._active_backend_id:
4136            ctxt = context.get_admin_context()
4137            rep_type = self._get_volume_replicated_type(ctxt, volume)
4138            if rep_type:
4139                tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
4140                           volume.name)
4141
4142        return tgt_vol, backend_helper, node_state
4143
4144    def _toggle_rep_vol_info(self, volume, helper):
4145        if helper == self._master_backend_helpers:
4146            vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name
4147            backend_helper = self._aux_backend_helpers
4148            node_state = self._aux_state
4149        else:
4150            vol_name = volume.name
4151            backend_helper = self._master_backend_helpers
4152            node_state = self._master_state
4153        return vol_name, backend_helper, node_state
4154
4155    def _get_map_info_from_connector(self, volume, connector, iscsi=False):
4156        if volume.display_name == 'backup-snapshot':
4157            LOG.debug('It is a virtual volume %(vol)s for detach snapshot.',
4158                      {'vol': volume.id})
4159            vol_name = volume.name
4160            backend_helper = self._helpers
4161            node_state = self._state
4162        else:
4163            vol_name, backend_helper, node_state = self._get_vol_sys_info(
4164                volume)
4165
4166        info = {}
4167        if 'host' in connector:
4168            # get host according to FC protocol
4169            connector = connector.copy()
4170            if not iscsi:
4171                connector.pop('initiator', None)
4172                info = {'driver_volume_type': 'fibre_channel',
4173                        'data': {}}
4174            else:
4175                info = {'driver_volume_type': 'iscsi',
4176                        'data': {}}
4177            host_name = backend_helper.get_host_from_connector(
4178                connector, volume_name=vol_name, iscsi=iscsi)
4179            vol_mapped = backend_helper.check_vol_mapped_to_host(vol_name,
4180                                                                 host_name)
4181            if host_name is None or not vol_mapped:
4182                ctxt = context.get_admin_context()
4183                rep_type = self._get_volume_replicated_type(ctxt, volume)
4184                if host_name is None and not rep_type:
4185                    msg = (_('_get_map_info_from_connector: Failed to get '
4186                             'host name from connector.'))
4187                    LOG.error(msg)
4188                    raise exception.VolumeDriverException(message=msg)
4189                if rep_type:
4190                    # Try to unmap the volume in the secondary side if it is a
4191                    # replication volume.
4192                    (vol_name, backend_helper,
4193                     node_state) = self._toggle_rep_vol_info(volume,
4194                                                             backend_helper)
4195                    try:
4196                        host_name = backend_helper.get_host_from_connector(
4197                            connector, volume_name=vol_name, iscsi=iscsi)
4198                    except Exception as ex:
4199                        LOG.warning('Failed to get host mapping for volume '
4200                                    '%(volume)s in the secondary side. '
4201                                    'Exception: %(err)s.',
4202                                    {'volume': vol_name, 'err': ex})
4203                        return info, None, None, None, None
4204                    if host_name is None:
4205                        msg = (_('_get_map_info_from_connector: Failed to get '
4206                                 'host name from connector.'))
4207                        LOG.error(msg)
4208                        raise exception.VolumeDriverException(message=msg)
4209        else:
4210            host_name = None
4211
4212        return info, host_name, vol_name, backend_helper, node_state
4213
4214    def _check_snapshot_replica_volume_status(self, snapshot):
4215        ctxt = context.get_admin_context()
4216        if self._get_volume_replicated_type(ctxt, None,
4217                                            snapshot.volume_type_id):
4218            LOG.debug('It is a replication volume snapshot for backup.')
4219            rep_volume = objects.Volume.get_by_id(ctxt, snapshot.volume_id)
4220            volume_name, backend_helper, node_state = self._get_vol_sys_info(
4221                rep_volume)
4222            if backend_helper != self._helpers or self._active_backend_id:
4223                msg = (_('The snapshot of the replication volume %s has '
4224                         'failed over to the aux backend. It can not attach'
4225                         ' to the aux backend.') % volume_name)
4226                LOG.error(msg)
4227                raise exception.VolumeDriverException(message=msg)
4228
4229    def migrate_volume(self, ctxt, volume, host):
4230        """Migrate directly if source and dest are managed by same storage.
4231
4232        We create a new vdisk copy in the desired pool, and add the original
4233        vdisk copy to the admin_metadata of the volume to be deleted. The
4234        deletion will occur using a periodic task once the new copy is synced.
4235
4236        :param ctxt: Context
4237        :param volume: A dictionary describing the volume to migrate
4238        :param host: A dictionary describing the host to migrate to, where
4239                     host['host'] is its name, and host['capabilities'] is a
4240                     dictionary of its reported capabilities.
4241        """
4242        LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
4243                  {'id': volume['id'], 'host': host['host']})
4244
4245        # hyperswap volume doesn't support migrate
4246        if self._helpers.is_volume_hyperswap(volume['name']):
4247            msg = _('migrate_volume: Migrating a hyperswap volume is '
4248                    'not supported.')
4249            LOG.error(msg)
4250            raise exception.InvalidInput(message=msg)
4251
4252        false_ret = (False, None)
4253        dest_pool = self._helpers.can_migrate_to_host(host, self._state)
4254        if dest_pool is None:
4255            return false_ret
4256
4257        ctxt = context.get_admin_context()
4258        volume_type_id = volume['volume_type_id']
4259        if volume_type_id is not None:
4260            vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
4261        else:
4262            vol_type = None
4263
4264        resp = self._helpers.lsvdiskcopy(volume.name)
4265        if len(resp) > 1:
4266            copies = self._helpers.get_vdisk_copies(volume.name)
4267            self._helpers.migratevdisk(volume.name, dest_pool,
4268                                       copies['primary']['copy_id'])
4269        else:
4270            self._check_volume_copy_ops()
4271            if self._state['code_level'] < (7, 6, 0, 0):
4272                new_op = self.add_vdisk_copy(volume.name, dest_pool,
4273                                             vol_type)
4274                self._add_vdisk_copy_op(ctxt, volume, new_op)
4275            else:
4276                self.add_vdisk_copy(volume.name, dest_pool, vol_type,
4277                                    auto_delete=True)
4278
4279        LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
4280                  {'id': volume.id, 'host': host['host']})
4281        return (True, None)
4282
4283    def _verify_retype_params(self, volume, new_opts, old_opts, need_copy,
4284                              change_mirror, new_rep_type, old_rep_type):
4285        # Some volume parameters can not be changed or changed at the same
4286        # time during volume retype operation. This function checks the
4287        # retype parameters.
4288        resp = self._helpers.lsvdiskcopy(volume.name)
4289        if old_opts['mirror_pool'] and len(resp) == 1:
4290            msg = (_('Unable to retype: volume %s is a mirrorred vol. But it '
4291                     'has only one copy in storage.') % volume.name)
4292            raise exception.VolumeDriverException(message=msg)
4293
4294        if need_copy:
4295            # mirror volume can not add volume-copy again.
4296            if len(resp) > 1:
4297                msg = (_('Unable to retype: current action needs volume-copy. '
4298                         'A copy of volume %s exists. Adding another copy '
4299                         'would exceed the limit of 2 copies.') % volume.name)
4300                raise exception.VolumeDriverException(message=msg)
4301            if old_opts['mirror_pool'] or new_opts['mirror_pool']:
4302                msg = (_('Unable to retype: current action needs volume-copy, '
4303                         'it is not allowed for mirror volume '
4304                         '%s.') % volume.name)
4305                raise exception.VolumeDriverException(message=msg)
4306
4307        if change_mirror:
4308            if (new_opts['mirror_pool'] and
4309                    not self._helpers.is_pool_defined(
4310                        new_opts['mirror_pool'])):
4311                msg = (_('Unable to retype: The pool %s in which mirror copy '
4312                         'is stored is not valid') % new_opts['mirror_pool'])
4313                raise exception.VolumeDriverException(message=msg)
4314
4315        # There are four options for rep_type: None, metro, global, gmcv
4316        if new_rep_type or old_rep_type:
4317            # If volume is replicated, can't copy
4318            if need_copy or new_opts['mirror_pool'] or old_opts['mirror_pool']:
4319                msg = (_('Unable to retype: current action needs volume-copy, '
4320                         'it is not allowed for replication type. '
4321                         'Volume = %s') % volume.id)
4322                raise exception.VolumeDriverException(message=msg)
4323
4324        if new_rep_type != old_rep_type:
4325            old_io_grp = self._helpers.get_volume_io_group(volume.name)
4326            if (old_io_grp not in
4327                    StorwizeHelpers._get_valid_requested_io_groups(
4328                        self._state, new_opts)):
4329                msg = (_('Unable to retype: it is not allowed to change '
4330                         'replication type and io group at the same time.'))
4331                LOG.error(msg)
4332                raise exception.VolumeDriverException(message=msg)
4333            if new_rep_type and old_rep_type:
4334                msg = (_('Unable to retype: it is not allowed to change '
4335                         '%(old_rep_type)s volume to %(new_rep_type)s '
4336                         'volume.') %
4337                       {'old_rep_type': old_rep_type,
4338                        'new_rep_type': new_rep_type})
4339                LOG.error(msg)
4340                raise exception.VolumeDriverException(message=msg)
4341        elif storwize_const.GMCV == new_rep_type:
4342            # To gmcv, we may change cycle_period_seconds if needed
4343            previous_cps = old_opts.get('cycle_period_seconds')
4344            new_cps = new_opts.get('cycle_period_seconds')
4345            if previous_cps != new_cps:
4346                self._helpers.change_relationship_cycleperiod(volume.name,
4347                                                              new_cps)
4348
4349    def _check_hyperswap_retype_params(self, volume, new_opts, old_opts,
4350                                       change_mirror, new_rep_type,
4351                                       old_rep_type, old_pool,
4352                                       new_pool, old_io_grp):
4353        if new_opts['mirror_pool'] or old_opts['mirror_pool']:
4354            msg = (_('Unable to retype volume %s: current action needs '
4355                     'volume-copy, it is not allowed for hyperswap '
4356                     'type.') % volume.name)
4357            LOG.error(msg)
4358            raise exception.InvalidInput(message=msg)
4359        if new_rep_type or old_rep_type:
4360            msg = _('Retype between replicated volume and hyperswap volume'
4361                    ' is not allowed.')
4362            LOG.error(msg)
4363            raise exception.InvalidInput(message=msg)
4364        if (old_io_grp not in
4365                StorwizeHelpers._get_valid_requested_io_groups(
4366                    self._state, new_opts)):
4367            msg = _('Unable to retype: it is not allowed to change '
4368                    'hyperswap type and IO group at the same time.')
4369            LOG.error(msg)
4370            raise exception.InvalidInput(message=msg)
4371        if new_opts['volume_topology'] == 'hyperswap':
4372            if old_pool != new_pool:
4373                msg = (_('Unable to retype volume %s: current action needs '
4374                         'volume pool change, hyperswap volume does not '
4375                         'support pool change.') % volume.name)
4376                LOG.error(msg)
4377                raise exception.InvalidInput(message=msg)
4378            if volume.previous_status == 'in-use':
4379                msg = _('Retype an in-use volume to a hyperswap '
4380                        'volume is not allowed.')
4381                LOG.error(msg)
4382                raise exception.InvalidInput(message=msg)
4383            if not new_opts['easytier']:
4384                raise exception.InvalidInput(
4385                    reason=_('The default easytier of hyperswap volume is '
4386                             'on, it does not support easytier off.'))
4387            if (old_opts['volume_topology'] != 'hyperswap' and
4388                    self._helpers._get_vdisk_fc_mappings(volume.name)):
4389                msg = _('Unable to retype: it is not allowed to change a '
4390                        'normal volume with snapshot to a hyperswap '
4391                        'volume.')
4392                LOG.error(msg)
4393                raise exception.InvalidInput(message=msg)
4394            if (old_opts['volume_topology'] == 'hyperswap' and
4395                    old_opts['peer_pool'] != new_opts['peer_pool']):
4396                msg = _('Unable to retype: it is not allowed to change a '
4397                        'hyperswap volume peer_pool.')
4398                LOG.error(msg)
4399                raise exception.InvalidInput(message=msg)
4400
4401    def _retype_hyperswap_volume(self, volume, host, old_opts, new_opts,
4402                                 old_pool, new_pool, vdisk_changes,
4403                                 need_copy, new_type):
4404        if (old_opts['volume_topology'] != 'hyperswap' and
4405                new_opts['volume_topology'] == 'hyperswap'):
4406            LOG.debug('retype: Convert a normal volume %s to hyperswap '
4407                      'volume.', volume.name)
4408            self._helpers.convert_volume_to_hyperswap(volume.name,
4409                                                      new_opts,
4410                                                      self._state)
4411        elif (old_opts['volume_topology'] == 'hyperswap' and
4412                new_opts['volume_topology'] != 'hyperswap'):
4413            LOG.debug('retype: Convert a hyperswap volume %s to normal '
4414                      'volume.', volume.name)
4415            if new_pool == old_pool:
4416                self._helpers.convert_hyperswap_volume_to_normal(
4417                    volume.name,
4418                    old_opts['peer_pool'])
4419            elif new_pool == old_opts['peer_pool']:
4420                self._helpers.convert_hyperswap_volume_to_normal(
4421                    volume.name,
4422                    old_pool)
4423        else:
4424            rel_info = self._helpers.get_relationship_info(volume.name)
4425            aux_vdisk = rel_info['aux_vdisk_name']
4426            if need_copy:
4427                self.add_vdisk_copy(aux_vdisk, old_opts['peer_pool'], new_type,
4428                                    auto_delete=True)
4429            elif vdisk_changes:
4430                self._helpers.change_vdisk_options(aux_vdisk,
4431                                                   vdisk_changes,
4432                                                   new_opts, self._state)
4433        if need_copy:
4434            self.add_vdisk_copy(volume.name, old_pool, new_type,
4435                                auto_delete=True)
4436        elif vdisk_changes:
4437            self._helpers.change_vdisk_options(volume.name,
4438                                               vdisk_changes,
4439                                               new_opts, self._state)
4440
4441    def retype(self, ctxt, volume, new_type, diff, host):
4442        """Convert the volume to be of the new type.
4443
4444        Returns a boolean indicating whether the retype occurred.
4445
4446        :param ctxt: Context
4447        :param volume: A dictionary describing the volume to migrate
4448        :param new_type: A dictionary describing the volume type to convert to
4449        :param diff: A dictionary with the difference between the two types
4450        :param host: A dictionary describing the host to migrate to, where
4451                     host['host'] is its name, and host['capabilities'] is a
4452                     dictionary of its reported capabilities.
4453        """
4454        def retype_iogrp_property(volume, new, old):
4455            if new != old:
4456                self._helpers.change_vdisk_iogrp(volume['name'],
4457                                                 self._state, (new, old))
4458
4459        LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
4460                  'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
4461                                                   'new_type': new_type,
4462                                                   'diff': diff,
4463                                                   'host': host})
4464
4465        no_copy_keys = ['warning', 'autoexpand', 'easytier']
4466        copy_keys = ['rsize', 'grainsize', 'compression']
4467        all_keys = no_copy_keys + copy_keys
4468        old_opts = self._get_vdisk_params(volume['volume_type_id'],
4469                                          volume_metadata=
4470                                          volume.get('volume_matadata'))
4471        new_opts = self._get_vdisk_params(new_type['id'],
4472                                          volume_type=new_type)
4473
4474        vdisk_changes = []
4475        need_copy = False
4476        change_mirror = False
4477
4478        for key in all_keys:
4479            if old_opts[key] != new_opts[key]:
4480                if key in copy_keys:
4481                    need_copy = True
4482                    break
4483                elif key in no_copy_keys:
4484                    vdisk_changes.append(key)
4485
4486        old_pool = utils.extract_host(volume['host'], 'pool')
4487        new_pool = utils.extract_host(host['host'], 'pool')
4488        if old_pool != new_pool:
4489            need_copy = True
4490
4491        if old_opts['mirror_pool'] != new_opts['mirror_pool']:
4492            change_mirror = True
4493
4494        # Check if retype affects volume replication
4495        model_update = None
4496        new_rep_type = self._get_specs_replicated_type(new_type)
4497        old_rep_type = self._get_volume_replicated_type(ctxt, volume)
4498        old_io_grp = self._helpers.get_volume_io_group(volume['name'])
4499        new_io_grp = self._helpers.select_io_group(self._state,
4500                                                   new_opts, new_pool)
4501
4502        self._verify_retype_params(volume, new_opts, old_opts, need_copy,
4503                                   change_mirror, new_rep_type, old_rep_type)
4504
4505        if old_opts['volume_topology'] or new_opts['volume_topology']:
4506            self._check_hyperswap_retype_params(volume, new_opts, old_opts,
4507                                                change_mirror, new_rep_type,
4508                                                old_rep_type, old_pool,
4509                                                new_pool, old_io_grp)
4510            self._retype_hyperswap_volume(volume, host, old_opts, new_opts,
4511                                          old_pool, new_pool, vdisk_changes,
4512                                          need_copy, new_type)
4513        else:
4514            if need_copy:
4515                self._check_volume_copy_ops()
4516                dest_pool = self._helpers.can_migrate_to_host(host,
4517                                                              self._state)
4518                if dest_pool is None:
4519                    return False
4520
4521                retype_iogrp_property(volume,
4522                                      new_io_grp, old_io_grp)
4523                try:
4524                    if self._state['code_level'] < (7, 6, 0, 0):
4525                        new_op = self.add_vdisk_copy(volume.name, dest_pool,
4526                                                     new_type)
4527                        self._add_vdisk_copy_op(ctxt, volume, new_op)
4528                    else:
4529                        self.add_vdisk_copy(volume.name, dest_pool, new_type,
4530                                            auto_delete=True)
4531                except exception.VolumeDriverException:
4532                    # roll back changing iogrp property
4533                    retype_iogrp_property(volume, old_io_grp, new_io_grp)
4534                    msg = (_('Unable to retype: A copy of volume %s exists. '
4535                             'Retyping would exceed the limit of 2 copies.'),
4536                           volume['id'])
4537                    raise exception.VolumeDriverException(message=msg)
4538            else:
4539                retype_iogrp_property(volume, new_io_grp, old_io_grp)
4540
4541                self._helpers.change_vdisk_options(volume['name'],
4542                                                   vdisk_changes,
4543                                                   new_opts, self._state)
4544                if change_mirror:
4545                    copies = self._helpers.get_vdisk_copies(volume.name)
4546                    if not old_opts['mirror_pool'] and new_opts['mirror_pool']:
4547                        # retype from non mirror vol to mirror vol
4548                        self.add_vdisk_copy(volume['name'],
4549                                            new_opts['mirror_pool'], new_type)
4550                    elif (old_opts['mirror_pool'] and
4551                            not new_opts['mirror_pool']):
4552                        # retype from mirror vol to non mirror vol
4553                        secondary = copies['secondary']
4554                        if secondary:
4555                            self._helpers.rm_vdisk_copy(
4556                                volume.name, secondary['copy_id'])
4557                    else:
4558                        # migrate the second copy to another pool.
4559                        self._helpers.migratevdisk(
4560                            volume.name, new_opts['mirror_pool'],
4561                            copies['secondary']['copy_id'])
4562        if new_opts['qos']:
4563            # Add the new QoS setting to the volume. If the volume has an
4564            # old QoS setting, it will be overwritten.
4565            self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
4566        elif old_opts['qos']:
4567            # If the old_opts contain QoS keys, disable them.
4568            self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
4569
4570        # Delete replica if needed
4571        if old_rep_type and not new_rep_type:
4572            self._aux_backend_helpers.delete_rc_volume(volume['name'],
4573                                                       target_vol=True)
4574            if storwize_const.GMCV == old_rep_type:
4575                self._helpers.delete_vdisk(
4576                    storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'],
4577                    False)
4578            model_update = {'replication_status':
4579                            fields.ReplicationStatus.DISABLED,
4580                            'replication_driver_data': None,
4581                            'replication_extended_status': None}
4582        # Add replica if needed
4583        if not old_rep_type and new_rep_type:
4584            replica_obj = self._get_replica_obj(new_rep_type)
4585            replica_obj.volume_replication_setup(ctxt, volume)
4586            if storwize_const.GMCV == new_rep_type:
4587                # Set cycle_period_seconds if needed
4588                self._helpers.change_relationship_cycleperiod(
4589                    volume['name'],
4590                    new_opts.get('cycle_period_seconds'))
4591            model_update = {'replication_status':
4592                            fields.ReplicationStatus.ENABLED}
4593
4594        LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
4595                  'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
4596                                                   'new_type': new_type,
4597                                                   'diff': diff,
4598                                                   'host': host['host']})
4599        return True, model_update
4600
4601    def update_migrated_volume(self, ctxt, volume, new_volume,
4602                               original_volume_status):
4603        """Return model update from Storwize for migrated volume.
4604
4605        This method should rename the back-end volume name(id) on the
4606        destination host back to its original name(id) on the source host.
4607
4608        :param ctxt: The context used to run the method update_migrated_volume
4609        :param volume: The original volume that was migrated to this backend
4610        :param new_volume: The migration volume object that was created on
4611                           this backend as part of the migration process
4612        :param original_volume_status: The status of the original volume
4613        :returns: model_update to update DB with any needed changes
4614        """
4615        current_name = new_volume.name
4616        original_volume_name = volume.name
4617        LOG.debug("Attempt rename of %(cur)s to original name %(orig)s",
4618                  dict(cur=current_name, orig=original_volume_name))
4619        try:
4620            self._helpers.rename_vdisk(current_name, original_volume_name)
4621            rep_type = self._get_volume_replicated_type(ctxt, new_volume)
4622            if rep_type:
4623                rel_info = self._helpers.get_relationship_info(current_name)
4624                aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
4625                           original_volume_name)
4626                self._aux_backend_helpers.rename_vdisk(
4627                    rel_info['aux_vdisk_name'], aux_vol)
4628        except exception.VolumeBackendAPIException:
4629            LOG.error('Unable to rename the logical volume '
4630                      'for volume: %s', volume['id'])
4631            return {'_name_id': new_volume['_name_id'] or new_volume['id']}
4632        # If the back-end name(id) for the volume has been renamed,
4633        # it is OK for the volume to keep the original name(id) and there is
4634        # no need to use the column "_name_id" to establish the mapping
4635        # relationship between the volume id and the back-end volume
4636        # name(id).
4637        # Set the key "_name_id" to None for a successful rename.
4638        model_update = {'_name_id': None}
4639        return model_update
4640
4641    def manage_existing(self, volume, ref):
4642        """Manages an existing vdisk.
4643
4644        Renames the vdisk to match the expected name for the volume.
4645        Error checking done by manage_existing_get_size is not repeated -
4646        if we got here then we have a vdisk that isn't in use (or we don't
4647        care if it is in use.
4648        """
4649        # Check that the reference is valid
4650        vdisk = self._manage_input_check(ref)
4651        vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name'])
4652        if vdisk_io_grp not in self._state['available_iogrps']:
4653            msg = (_("Failed to manage existing volume due to "
4654                     "the volume to be managed is not in a valid "
4655                     "I/O group."))
4656            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4657
4658        # Add replication check
4659        ctxt = context.get_admin_context()
4660        rep_type = self._get_volume_replicated_type(ctxt, volume)
4661        vol_rep_type = None
4662        rel_info = self._helpers.get_relationship_info(vdisk['name'])
4663        copies = self._helpers.get_vdisk_copies(vdisk['name'])
4664        if rel_info and rel_info['copy_type'] != 'activeactive':
4665            vol_rep_type = (
4666                storwize_const.GMCV if
4667                storwize_const.GMCV_MULTI == rel_info['cycling_mode']
4668                else rel_info['copy_type'])
4669
4670            aux_info = self._aux_backend_helpers.get_system_info()
4671            if rel_info['aux_cluster_id'] != aux_info['system_id']:
4672                msg = (_("Failed to manage existing volume due to the aux "
4673                         "cluster for volume %(volume)s is %(aux_id)s. The "
4674                         "configured cluster id is %(cfg_id)s") %
4675                       {'volume': vdisk['name'],
4676                        'aux_id': rel_info['aux_cluster_id'],
4677                        'cfg_id': aux_info['system_id']})
4678                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4679
4680        if vol_rep_type != rep_type:
4681            msg = (_("Failed to manage existing volume due to "
4682                     "the replication type of the volume to be managed is "
4683                     "mismatch with the provided replication type."))
4684            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4685        elif storwize_const.GMCV == rep_type:
4686            if volume['volume_type_id']:
4687                rep_opts = self._get_vdisk_params(
4688                    volume['volume_type_id'],
4689                    volume_metadata=volume.get('volume_metadata'))
4690                # Check cycle_period_seconds
4691                rep_cps = six.text_type(rep_opts.get('cycle_period_seconds'))
4692            if rel_info['cycle_period_seconds'] != rep_cps:
4693                msg = (_("Failed to manage existing volume due to "
4694                         "the cycle_period_seconds %(vol_cps)s of "
4695                         "the volume to be managed is mismatch with "
4696                         "cycle_period_seconds %(type_cps)s in "
4697                         "the provided gmcv replication type.") %
4698                       {'vol_cps': rel_info['cycle_period_seconds'],
4699                        'type_cps': rep_cps})
4700                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4701
4702        if volume['volume_type_id']:
4703            opts = self._get_vdisk_params(volume['volume_type_id'],
4704                                          volume_metadata=
4705                                          volume.get('volume_metadata'))
4706            # Manage hyperswap volume
4707            if rel_info and rel_info['copy_type'] == 'activeactive':
4708                if opts['volume_topology'] != 'hyperswap':
4709                    msg = _("Failed to manage existing volume due to "
4710                            "the hyperswap volume to be managed is "
4711                            "mismatched with the provided non-hyperswap type.")
4712                    raise exception.ManageExistingVolumeTypeMismatch(
4713                        reason=msg)
4714                aux_vdisk = rel_info['aux_vdisk_name']
4715                aux_vol_attr = self._helpers.get_vdisk_attributes(aux_vdisk)
4716                peer_pool = aux_vol_attr['mdisk_grp_name']
4717                if opts['peer_pool'] != peer_pool:
4718                    msg = (_("Failed to manage existing hyperswap volume due "
4719                             "to peer pool mismatch. The peer pool of the "
4720                             "volume to be managed is %(vol_pool)s, but the "
4721                             "peer_pool of the chosen type is %(peer_pool)s.")
4722                           % {'vol_pool': peer_pool,
4723                              'peer_pool': opts['peer_pool']})
4724                    raise exception.ManageExistingVolumeTypeMismatch(
4725                        reason=msg)
4726            else:
4727                if opts['volume_topology'] == 'hyperswap':
4728                    msg = _("Failed to manage existing volume, the volume to "
4729                            "be managed is not a hyperswap volume, "
4730                            "mismatch with the provided hyperswap type.")
4731                    raise exception.ManageExistingVolumeTypeMismatch(
4732                        reason=msg)
4733
4734            resp = self._helpers.lsvdiskcopy(vdisk['name'])
4735            expected_copy_num = 2 if opts['mirror_pool'] else 1
4736            if len(resp) != expected_copy_num:
4737                msg = (_("Failed to manage existing volume due to mirror type "
4738                         "mismatch. Volume to be managed has %(resp_len)s "
4739                         "copies. mirror_pool of the chosen type is "
4740                         "%(mirror_pool)s.") %
4741                       {'resp_len': len(resp),
4742                        'mirror_pool': opts['mirror_pool']})
4743                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4744            if (opts['mirror_pool']and opts['mirror_pool'] !=
4745                    copies['secondary']['mdisk_grp_name']):
4746                msg = (_("Failed to manage existing volume due to mirror pool "
4747                         "mismatch. The secondary pool of the volume to be "
4748                         "managed is %(sec_copy_pool)s. mirror_pool of the "
4749                         "chosen type is %(mirror_pool)s.") %
4750                       {'sec_copy_pool': copies['secondary']['mdisk_grp_name'],
4751                        'mirror_pool': opts['mirror_pool']})
4752                raise exception.ManageExistingVolumeTypeMismatch(
4753                    reason=msg)
4754
4755            vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0')
4756            if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1:
4757                msg = (_("Failed to manage existing volume due to "
4758                         "the volume to be managed is thin, but "
4759                         "the volume type chosen is thick."))
4760                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4761
4762            if not vdisk_copy['autoexpand'] and opts['rsize'] != -1:
4763                msg = (_("Failed to manage existing volume due to "
4764                         "the volume to be managed is thick, but "
4765                         "the volume type chosen is thin."))
4766                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4767
4768            if (vdisk_copy['compressed_copy'] == 'no' and
4769                    opts['compression']):
4770                msg = (_("Failed to manage existing volume due to the "
4771                         "volume to be managed is not compress, but "
4772                         "the volume type chosen is compress."))
4773                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4774
4775            if (vdisk_copy['compressed_copy'] == 'yes' and
4776                    not opts['compression']):
4777                msg = (_("Failed to manage existing volume due to the "
4778                         "volume to be managed is compress, but "
4779                         "the volume type chosen is not compress."))
4780                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4781
4782            if (vdisk_io_grp not in
4783                    StorwizeHelpers._get_valid_requested_io_groups(
4784                        self._state, opts)):
4785                msg = (_("Failed to manage existing volume due to "
4786                         "I/O group mismatch. The I/O group of the "
4787                         "volume to be managed is %(vdisk_iogrp)s. I/O group"
4788                         " of the chosen type is %(opt_iogrp)s.") %
4789                       {'vdisk_iogrp': vdisk['IO_group_name'],
4790                        'opt_iogrp': opts['iogrp']})
4791                raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4792        pool = utils.extract_host(volume['host'], 'pool')
4793        if copies['primary']['mdisk_grp_name'] != pool:
4794            msg = (_("Failed to manage existing volume due to the "
4795                     "pool of the volume to be managed does not "
4796                     "match the backend pool. Pool of the "
4797                     "volume to be managed is %(vdisk_pool)s. Pool "
4798                     "of the backend is %(backend_pool)s.") %
4799                   {'vdisk_pool': copies['primary']['mdisk_grp_name'],
4800                    'backend_pool': pool})
4801            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
4802
4803        model_update = {'replication_status':
4804                        fields.ReplicationStatus.NOT_CAPABLE}
4805        self._helpers.rename_vdisk(vdisk['name'], volume['name'])
4806        if vol_rep_type:
4807            aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
4808            self._aux_backend_helpers.rename_vdisk(rel_info['aux_vdisk_name'],
4809                                                   aux_vol)
4810            if storwize_const.GMCV == vol_rep_type:
4811                self._helpers.rename_vdisk(
4812                    rel_info['master_change_vdisk_name'],
4813                    storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'])
4814                self._aux_backend_helpers.rename_vdisk(
4815                    rel_info['aux_change_vdisk_name'],
4816                    storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vol)
4817            model_update = {'replication_status':
4818                            fields.ReplicationStatus.ENABLED}
4819        return model_update
4820
4821    def manage_existing_get_size(self, volume, ref):
4822        """Return size of an existing Vdisk for manage_existing.
4823
4824        existing_ref is a dictionary of the form:
4825        {'source-id': <uid of disk>} or
4826        {'source-name': <name of the disk>}
4827
4828        Optional elements are:
4829          'manage_if_in_use':  True/False (default is False)
4830            If set to True, a volume will be managed even if it is currently
4831            attached to a host system.
4832        """
4833
4834        # Check that the reference is valid
4835        vdisk = self._manage_input_check(ref)
4836
4837        # Check if the disk is in use, if we need to.
4838        manage_if_in_use = ref.get('manage_if_in_use', False)
4839        if (not manage_if_in_use and
4840                self._helpers.is_vdisk_in_use(vdisk['name'])):
4841            reason = _('The specified vdisk is mapped to a host.')
4842            raise exception.ManageExistingInvalidReference(existing_ref=ref,
4843                                                           reason=reason)
4844
4845        return int(math.ceil(float(vdisk['capacity']) / units.Gi))
4846
4847    def unmanage(self, volume):
4848        """Remove the specified volume from Cinder management."""
4849        pass
4850
4851    def get_volume_stats(self, refresh=False):
4852        """Get volume stats.
4853
4854        If we haven't gotten stats yet or 'refresh' is True,
4855        run update the stats first.
4856        """
4857        if not self._stats or refresh:
4858            self._update_volume_stats()
4859
4860        return self._stats
4861
4862    @staticmethod
4863    def _get_rccg_name(group, grp_id=None, hyper_grp=False):
4864        group_id = group.id if group else grp_id
4865        rccg = (storwize_const.HYPERCG_PREFIX
4866                if hyper_grp else storwize_const.RCCG_PREFIX)
4867        return rccg + group_id[0:4] + '-' + group_id[-5:]
4868
4869    # Add CG capability to generic volume groups
4870    def create_group(self, context, group):
4871        """Creates a group.
4872
4873        :param context: the context of the caller.
4874        :param group: the group object.
4875        :returns: model_update
4876        """
4877        LOG.debug("Creating group.")
4878
4879        model_update = {'status': fields.GroupStatus.AVAILABLE}
4880        group_type = objects.GroupType.get_by_id(context, group.group_type_id)
4881        if len(group_type.group_specs) > 1:
4882            LOG.error('Unable to create group: create group with mixed specs '
4883                      '%s is not supported.', group_type.group_specs)
4884            model_update = {'status': fields.GroupStatus.ERROR}
4885            return model_update
4886
4887        support_grps = ['group_snapshot_enabled',
4888                        'consistent_group_snapshot_enabled',
4889                        'consistent_group_replication_enabled',
4890                        'hyperswap_group_enabled']
4891        supported_grp = False
4892        for grp_spec in support_grps:
4893            if utils.is_group_a_type(group, grp_spec):
4894                supported_grp = True
4895                break
4896        if not supported_grp:
4897            LOG.error('Unable to create group: %s is not a supported group '
4898                      'type.', group.group_type_id)
4899            model_update = {'status': fields.GroupStatus.ERROR}
4900            return model_update
4901
4902        if (utils.is_group_a_cg_snapshot_type(group) or
4903                utils.is_group_a_type(group, "group_snapshot_enabled")):
4904            for vol_type_id in group.volume_type_ids:
4905                replication_type = self._get_volume_replicated_type(
4906                    context, None, vol_type_id)
4907                if replication_type:
4908                    # An unsupported configuration
4909                    LOG.error('Unable to create group: create consistent '
4910                              'snapshot group with replication volume type is '
4911                              'not supported.')
4912                    model_update = {'status': fields.GroupStatus.ERROR}
4913                    return model_update
4914                opts = self._get_vdisk_params(vol_type_id)
4915                if opts['volume_topology']:
4916                    # An unsupported configuration
4917                    LOG.error('Unable to create group: create consistent '
4918                              'snapshot group with a hyperswap volume type'
4919                              ' is not supported.')
4920                    model_update = {'status': fields.GroupStatus.ERROR}
4921                    return model_update
4922
4923        # We'll rely on the generic group implementation if it is
4924        # a non-consistent snapshot group.
4925        if utils.is_group_a_type(group, "group_snapshot_enabled"):
4926            raise NotImplementedError()
4927
4928        if utils.is_group_a_type(group,
4929                                 "consistent_group_replication_enabled"):
4930            rccg_type = None
4931            for vol_type_id in group.volume_type_ids:
4932                replication_type = self._get_volume_replicated_type(
4933                    context, None, vol_type_id)
4934                if not replication_type:
4935                    # An unsupported configuration
4936                    LOG.error('Unable to create group: create consistent '
4937                              'replication group with non-replication volume'
4938                              ' type is not supported.')
4939                    model_update = {'status': fields.GroupStatus.ERROR}
4940                    return model_update
4941                if not rccg_type:
4942                    rccg_type = replication_type
4943                elif rccg_type != replication_type:
4944                    # An unsupported configuration
4945                    LOG.error('Unable to create group: create consistent '
4946                              'replication group with different replication '
4947                              'types is not supported.')
4948                    model_update = {'status': fields.GroupStatus.ERROR}
4949                    return model_update
4950            if rccg_type == storwize_const.GMCV:
4951                LOG.error('Unable to create group: create consistent '
4952                          'replication group with GMCV replication '
4953                          'volume type is not supported.')
4954                model_update = {'status': fields.GroupStatus.ERROR}
4955                return model_update
4956
4957            rccg_name = self._get_rccg_name(group)
4958            try:
4959                tgt_sys = self._aux_backend_helpers.get_system_info()
4960                self._helpers.create_rccg(
4961                    rccg_name, tgt_sys.get('system_name'))
4962                model_update.update({'replication_status':
4963                                    fields.ReplicationStatus.ENABLED})
4964            except exception.VolumeBackendAPIException as err:
4965                LOG.error("Failed to create rccg  %(rccg)s. "
4966                          "Exception: %(exception)s.",
4967                          {'rccg': rccg_name, 'exception': err})
4968                model_update = {'status': fields.GroupStatus.ERROR}
4969            return model_update
4970
4971        if utils.is_group_a_type(group, "hyperswap_group_enabled"):
4972            if not self._helpers.is_system_topology_hyperswap(self._state):
4973                LOG.error('Unable to create group: create group on '
4974                          'a system that does not support hyperswap.')
4975                model_update = {'status': fields.GroupStatus.ERROR}
4976
4977            for vol_type_id in group.volume_type_ids:
4978                opts = self._get_vdisk_params(vol_type_id)
4979                if not opts['volume_topology']:
4980                    # An unsupported configuration
4981                    LOG.error('Unable to create group: create consistent '
4982                              'hyperswap group with non-hyperswap volume'
4983                              ' type is not supported.')
4984                    model_update = {'status': fields.GroupStatus.ERROR}
4985                    return model_update
4986
4987            rccg_name = self._get_rccg_name(group, hyper_grp=True)
4988            try:
4989                self._helpers.create_rccg(
4990                    rccg_name, self._state['system_name'])
4991            except exception.VolumeBackendAPIException as err:
4992                LOG.error("Failed to create rccg  %(rccg)s. "
4993                          "Exception: %(exception)s.",
4994                          {'rccg': group.name, 'exception': err})
4995                model_update = {'status': fields.GroupStatus.ERROR}
4996        return model_update
4997
4998    def delete_group(self, context, group, volumes):
4999        """Deletes a group.
5000
5001        :param context: the context of the caller.
5002        :param group: the group object.
5003        :param volumes: a list of volume objects in the group.
5004        :returns: model_update, volumes_model_update
5005        """
5006        LOG.debug("Deleting group.")
5007
5008        # we'll rely on the generic group implementation if it is
5009        # not a consistency group and not a consistency replication
5010        # request and not a hyperswap group request.
5011        if (not utils.is_group_a_cg_snapshot_type(group) and not
5012                utils.is_group_a_type(group,
5013                                      "consistent_group_replication_enabled")
5014                and not utils.is_group_a_type(group,
5015                                              "hyperswap_group_enabled")):
5016            raise NotImplementedError()
5017
5018        model_update = {'status': fields.GroupStatus.DELETED}
5019        volumes_model_update = []
5020        if utils.is_group_a_type(group,
5021                                 "consistent_group_replication_enabled"):
5022            model_update, volumes_model_update = self._delete_replication_grp(
5023                group, volumes)
5024
5025        if utils.is_group_a_type(group, "hyperswap_group_enabled"):
5026            model_update, volumes_model_update = self._delete_hyperswap_grp(
5027                group, volumes)
5028
5029        else:
5030            for volume in volumes:
5031                try:
5032                    self._helpers.delete_vdisk(volume.name, True)
5033                    volumes_model_update.append(
5034                        {'id': volume.id, 'status': 'deleted'})
5035                except exception.VolumeBackendAPIException as err:
5036                    model_update['status'] = (
5037                        fields.GroupStatus.ERROR_DELETING)
5038                    LOG.error("Failed to delete the volume %(vol)s of CG. "
5039                              "Exception: %(exception)s.",
5040                              {'vol': volume.name, 'exception': err})
5041                    volumes_model_update.append(
5042                        {'id': volume.id,
5043                         'status': fields.GroupStatus.ERROR_DELETING})
5044
5045        return model_update, volumes_model_update
5046
5047    def update_group(self, context, group, add_volumes=None,
5048                     remove_volumes=None):
5049        """Updates a group.
5050
5051        :param context: the context of the caller.
5052        :param group: the group object.
5053        :param add_volumes: a list of volume objects to be added.
5054        :param remove_volumes: a list of volume objects to be removed.
5055        :returns: model_update, add_volumes_update, remove_volumes_update
5056        """
5057
5058        LOG.debug("Updating group.")
5059
5060        # we'll rely on the generic group implementation if it is not a
5061        # consistency group request and not consistency replication request
5062        # and not a hyperswap group request.
5063        if (not utils.is_group_a_cg_snapshot_type(group) and not
5064                utils.is_group_a_type(group,
5065                                      "consistent_group_replication_enabled")
5066                and not utils.is_group_a_type(group,
5067                                              "hyperswap_group_enabled")):
5068            raise NotImplementedError()
5069
5070        if utils.is_group_a_type(group,
5071                                 "consistent_group_replication_enabled"):
5072            return self._update_replication_grp(context, group, add_volumes,
5073                                                remove_volumes)
5074
5075        if utils.is_group_a_type(group, "hyperswap_group_enabled"):
5076            return self._update_hyperswap_group(context, group,
5077                                                add_volumes, remove_volumes)
5078
5079        if utils.is_group_a_cg_snapshot_type(group):
5080            return None, None, None
5081
5082    def create_group_from_src(self, context, group, volumes,
5083                              group_snapshot=None, snapshots=None,
5084                              source_group=None, source_vols=None):
5085        """Creates a group from source.
5086
5087        :param context: the context of the caller.
5088        :param group: the Group object to be created.
5089        :param volumes: a list of Volume objects in the group.
5090        :param group_snapshot: the GroupSnapshot object as source.
5091        :param snapshots: a list of snapshot objects in group_snapshot.
5092        :param source_group: the Group object as source.
5093        :param source_vols: a list of volume objects in the source_group.
5094        :returns: model_update, volumes_model_update
5095        """
5096        LOG.debug('Enter: create_group_from_src.')
5097
5098        if utils.is_group_a_type(group,
5099                                 "consistent_group_replication_enabled"):
5100            # An unsupported configuration
5101            msg = _('Unable to create replication group: create replication '
5102                    'group from a replication group is not supported.')
5103            LOG.exception(msg)
5104            raise exception.VolumeBackendAPIException(data=msg)
5105
5106        if utils.is_group_a_type(group, "hyperswap_group_enabled"):
5107            # An unsupported configuration
5108            msg = _('Unable to create hyperswap group: create hyperswap '
5109                    'group from a hyperswap group is not supported.')
5110            LOG.exception(msg)
5111            raise exception.VolumeBackendAPIException(data=msg)
5112
5113        if not utils.is_group_a_cg_snapshot_type(group):
5114            # we'll rely on the generic volume groups implementation if it is
5115            # not a consistency group request.
5116            raise NotImplementedError()
5117
5118        if group_snapshot and snapshots:
5119            cg_name = 'cg-' + group_snapshot.id
5120            sources = snapshots
5121
5122        elif source_group and source_vols:
5123            cg_name = 'cg-' + source_group.id
5124            sources = source_vols
5125
5126        else:
5127            error_msg = _("create_group_from_src must be creating from a "
5128                          "group snapshot, or a source group.")
5129            raise exception.InvalidInput(reason=error_msg)
5130        LOG.debug('create_group_from_src: cg_name %(cg_name)s'
5131                  ' %(sources)s', {'cg_name': cg_name, 'sources': sources})
5132        self._helpers.create_fc_consistgrp(cg_name)
5133        timeout = self.configuration.storwize_svc_flashcopy_timeout
5134        model_update, snapshots_model = (
5135            self._helpers.create_cg_from_source(group,
5136                                                cg_name,
5137                                                sources,
5138                                                volumes,
5139                                                self._state,
5140                                                self.configuration,
5141                                                timeout))
5142        LOG.debug("Leave: create_group_from_src.")
5143        return model_update, snapshots_model
5144
5145    def create_group_snapshot(self, context, group_snapshot, snapshots):
5146        """Creates a group_snapshot.
5147
5148        :param context: the context of the caller.
5149        :param group_snapshot: the GroupSnapshot object to be created.
5150        :param snapshots: a list of Snapshot objects in the group_snapshot.
5151        :returns: model_update, snapshots_model_update
5152        """
5153        if not utils.is_group_a_cg_snapshot_type(group_snapshot):
5154            # we'll rely on the generic group implementation if it is not a
5155            # consistency group request.
5156            raise NotImplementedError()
5157
5158        # Use group_snapshot id as cg name
5159        cg_name = 'cg_snap-' + group_snapshot.id
5160        # Create new cg as cg_snapshot
5161        self._helpers.create_fc_consistgrp(cg_name)
5162
5163        timeout = self.configuration.storwize_svc_flashcopy_timeout
5164
5165        model_update, snapshots_model = (
5166            self._helpers.run_consistgrp_snapshots(cg_name,
5167                                                   snapshots,
5168                                                   self._state,
5169                                                   self.configuration,
5170                                                   timeout))
5171
5172        return model_update, snapshots_model
5173
5174    def delete_group_snapshot(self, context, group_snapshot, snapshots):
5175        """Deletes a group_snapshot.
5176
5177        :param context: the context of the caller.
5178        :param group_snapshot: the GroupSnapshot object to be deleted.
5179        :param snapshots: a list of snapshot objects in the group_snapshot.
5180        :returns: model_update, snapshots_model_update
5181        """
5182
5183        if not utils.is_group_a_cg_snapshot_type(group_snapshot):
5184            # we'll rely on the generic group implementation if it is not a
5185            # consistency group request.
5186            raise NotImplementedError()
5187
5188        cgsnapshot_id = group_snapshot.id
5189        cg_name = 'cg_snap-' + cgsnapshot_id
5190
5191        model_update, snapshots_model = (
5192            self._helpers.delete_consistgrp_snapshots(cg_name,
5193                                                      snapshots))
5194
5195        return model_update, snapshots_model
5196
5197    @cinder_utils.trace
5198    def revert_to_snapshot(self, context, volume, snapshot):
5199        """Revert volume to snapshot."""
5200        if snapshot.volume_size != volume.size:
5201            raise exception.InvalidInput(
5202                reason=_('Reverting volume is not supported if the volume '
5203                         'size is not equal to the snapshot size.'))
5204
5205        rep_type = self._get_volume_replicated_type(context, volume)
5206        if rep_type:
5207            raise exception.InvalidInput(
5208                reason=_('Reverting replication volume is not supported.'))
5209        try:
5210            self._helpers.run_flashcopy(
5211                snapshot.name, volume.name,
5212                self.configuration.storwize_svc_flashcopy_timeout,
5213                self.configuration.storwize_svc_flashcopy_rate, True, True)
5214        except Exception as err:
5215            msg = (_("Reverting volume %(vol)s to snapshot %(snap)s failed "
5216                     "due to: %(err)s.")
5217                   % {"vol": volume.name, "snap": snapshot.name, "err": err})
5218            LOG.error(msg)
5219            raise exception.VolumeBackendAPIException(data=msg)
5220
5221    def get_pool(self, volume):
5222        attr = self._helpers.get_vdisk_attributes(volume['name'])
5223
5224        if attr is None:
5225            msg = (_('get_pool: Failed to get attributes for volume '
5226                     '%s') % volume['name'])
5227            LOG.error(msg)
5228            raise exception.VolumeDriverException(message=msg)
5229
5230        return attr['mdisk_grp_name']
5231
5232    def _update_volume_stats(self):
5233        """Retrieve stats info from volume group."""
5234
5235        LOG.debug("Updating volume stats.")
5236        data = {}
5237
5238        data['vendor_name'] = 'IBM'
5239        data['driver_version'] = self.VERSION
5240        data['storage_protocol'] = self.protocol
5241        data['pools'] = []
5242
5243        backend_name = self.configuration.safe_get('volume_backend_name')
5244        data['volume_backend_name'] = (backend_name or
5245                                       self._state['system_name'])
5246
5247        data['pools'] = [self._build_pool_stats(pool)
5248                         for pool in
5249                         self._get_backend_pools()]
5250        if self._replica_enabled:
5251            data['replication'] = self._replica_enabled
5252            data['replication_enabled'] = self._replica_enabled
5253            data['replication_targets'] = self._get_replication_targets()
5254            data['consistent_group_replication_enabled'] = True
5255        self._stats = data
5256
5257    def _build_pool_stats(self, pool):
5258        """Build pool status"""
5259        QoS_support = True
5260        pool_stats = {}
5261        try:
5262            pool_data = self._helpers.get_pool_attrs(pool)
5263            if pool_data:
5264                easy_tier = pool_data['easy_tier'] in ['on', 'auto']
5265                total_capacity_gb = float(pool_data['capacity']) / units.Gi
5266                free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
5267                allocated_capacity_gb = (float(pool_data['used_capacity']) /
5268                                         units.Gi)
5269                provisioned_capacity_gb = float(
5270                    pool_data['virtual_capacity']) / units.Gi
5271
5272                rsize = self.configuration.safe_get(
5273                    'storwize_svc_vol_rsize')
5274                # rsize of -1 or 100 means fully allocate the mdisk
5275                use_thick_provisioning = rsize == -1 or rsize == 100
5276                over_sub_ratio = self.configuration.safe_get(
5277                    'max_over_subscription_ratio')
5278                location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
5279                                 {'sys_id': self._state['system_id'],
5280                                  'pool': pool_data['name']})
5281                multiattach = (self.configuration.
5282                               storwize_svc_multihostmap_enabled)
5283                pool_stats = {
5284                    'pool_name': pool_data['name'],
5285                    'total_capacity_gb': total_capacity_gb,
5286                    'free_capacity_gb': free_capacity_gb,
5287                    'allocated_capacity_gb': allocated_capacity_gb,
5288                    'provisioned_capacity_gb': provisioned_capacity_gb,
5289                    'compression_support': self._state['compression_enabled'],
5290                    'reserved_percentage':
5291                        self.configuration.reserved_percentage,
5292                    'QoS_support': QoS_support,
5293                    'consistencygroup_support': True,
5294                    'location_info': location_info,
5295                    'easytier_support': easy_tier,
5296                    'multiattach': multiattach,
5297                    'thin_provisioning_support': not use_thick_provisioning,
5298                    'thick_provisioning_support': use_thick_provisioning,
5299                    'max_over_subscription_ratio': over_sub_ratio,
5300                    'consistent_group_snapshot_enabled': True,
5301                }
5302            if self._replica_enabled:
5303                pool_stats.update({
5304                    'replication_enabled': self._replica_enabled,
5305                    'replication_type': self._supported_replica_types,
5306                    'replication_targets': self._get_replication_targets(),
5307                    'replication_count': len(self._get_replication_targets()),
5308                    'consistent_group_replication_enabled': True
5309                })
5310
5311        except exception.VolumeBackendAPIException:
5312            msg = _('Failed getting details for pool %s.') % pool
5313            raise exception.VolumeBackendAPIException(data=msg)
5314
5315        return pool_stats
5316
5317    def _get_replication_targets(self):
5318        return [self._replica_target['backend_id']]
5319
5320    def _manage_input_check(self, ref):
5321        """Verify the input of manage function."""
5322        # Check that the reference is valid
5323        if 'source-name' in ref:
5324            manage_source = ref['source-name']
5325            vdisk = self._helpers.get_vdisk_attributes(manage_source)
5326        elif 'source-id' in ref:
5327            manage_source = ref['source-id']
5328            vdisk = self._helpers.vdisk_by_uid(manage_source)
5329        else:
5330            reason = _('Reference must contain source-id or '
5331                       'source-name element.')
5332            raise exception.ManageExistingInvalidReference(existing_ref=ref,
5333                                                           reason=reason)
5334
5335        if vdisk is None:
5336            reason = (_('No vdisk with the UID specified by ref %s.')
5337                      % manage_source)
5338            raise exception.ManageExistingInvalidReference(existing_ref=ref,
5339                                                           reason=reason)
5340        return vdisk
5341
5342    def _delete_replication_grp(self, group, volumes):
5343        model_update = {'status': fields.GroupStatus.DELETED}
5344        volumes_model_update = []
5345        rccg_name = self._get_rccg_name(group)
5346        try:
5347            self._helpers.delete_rccg(rccg_name)
5348        except exception.VolumeBackendAPIException as err:
5349            LOG.error("Failed to delete rccg  %(rccg)s. "
5350                      "Exception: %(exception)s.",
5351                      {'rccg': rccg_name, 'exception': err})
5352            model_update = {'status': fields.GroupStatus.ERROR_DELETING}
5353
5354        for volume in volumes:
5355            try:
5356                self._master_backend_helpers.delete_rc_volume(volume.name)
5357                self._aux_backend_helpers.delete_rc_volume(volume.name,
5358                                                           target_vol=True)
5359                volumes_model_update.append(
5360                    {'id': volume.id, 'status': 'deleted'})
5361            except exception.VolumeDriverException as err:
5362                model_update['status'] = (
5363                    fields.GroupStatus.ERROR_DELETING)
5364                LOG.error("Failed to delete the volume %(vol)s of CG. "
5365                          "Exception: %(exception)s.",
5366                          {'vol': volume.name, 'exception': err})
5367                volumes_model_update.append(
5368                    {'id': volume.id,
5369                     'status': fields.GroupStatus.ERROR_DELETING})
5370        return model_update, volumes_model_update
5371
5372    def _update_replication_grp(self, context, group,
5373                                add_volumes, remove_volumes):
5374        model_update = {'status': fields.GroupStatus.AVAILABLE}
5375        LOG.info("Update replication group: %(group)s. ", {'group': group.id})
5376
5377        rccg_name = self._get_rccg_name(group)
5378        rccg = self._helpers.get_rccg(rccg_name)
5379        if not rccg:
5380            LOG.error("Failed to update group: %(grp)s does not exist in "
5381                      "backend.", {'grp': group.id})
5382            model_update['status'] = fields.GroupStatus.ERROR
5383            return model_update, None, None
5384
5385        # Add remote copy relationship to rccg
5386        added_vols = []
5387        for volume in add_volumes:
5388            try:
5389                vol_name = (volume.name if not self._active_backend_id else
5390                            storwize_const.REPLICA_AUX_VOL_PREFIX +
5391                            volume.name)
5392                rcrel = self._helpers.get_relationship_info(vol_name)
5393                if not rcrel:
5394                    LOG.error("Failed to update group: remote copy "
5395                              "relationship of %(vol)s does not exist in "
5396                              "backend.", {'vol': volume.id})
5397                    model_update['status'] = fields.GroupStatus.ERROR
5398                elif (rccg['copy_type'] != 'empty_group' and
5399                      (rccg['copy_type'] != rcrel['copy_type'] or
5400                      rccg['state'] != rcrel['state'] or
5401                      rccg['primary'] != rcrel['primary'] or
5402                      rccg['cycling_mode'] != rcrel['cycling_mode'] or
5403                      (rccg['cycle_period_seconds'] !=
5404                       rcrel['cycle_period_seconds']))):
5405                    LOG.error("Failed to update rccg %(rccg)s: remote copy "
5406                              "type of %(vol)s is %(vol_rc_type)s, the rccg "
5407                              "type is %(rccg_type)s. rcrel state is "
5408                              "%(rcrel_state)s, rccg state is %(rccg_state)s. "
5409                              "rcrel primary is %(rcrel_primary)s, rccg "
5410                              "primary is %(rccg_primary)s. "
5411                              "rcrel cycling mode is %(rcrel_cmode)s, rccg "
5412                              "cycling mode is %(rccg_cmode)s. rcrel cycling "
5413                              "period is %(rcrel_period)s, rccg cycling "
5414                              "period is %(rccg_period)s. ",
5415                              {'rccg': rccg_name,
5416                               'vol': volume.id,
5417                               'vol_rc_type': rcrel['copy_type'],
5418                               'rccg_type': rccg['copy_type'],
5419                               'rcrel_state': rcrel['state'],
5420                               'rccg_state': rccg['state'],
5421                               'rcrel_primary': rcrel['primary'],
5422                               'rccg_primary': rccg['primary'],
5423                               'rcrel_cmode': rcrel['cycling_mode'],
5424                               'rccg_cmode': rccg['cycling_mode'],
5425                               'rcrel_period': rcrel['cycle_period_seconds'],
5426                               'rccg_period': rccg['cycle_period_seconds']})
5427                    model_update['status'] = fields.GroupStatus.ERROR
5428                else:
5429                    self._helpers.chrcrelationship(rcrel['name'], rccg_name)
5430                    if rccg['copy_type'] == 'empty_group':
5431                        rccg = self._helpers.get_rccg(rccg_name)
5432                    added_vols.append({'id': volume.id,
5433                                       'group_id': group.id})
5434            except exception.VolumeBackendAPIException as err:
5435                model_update['status'] = fields.GroupStatus.ERROR
5436                LOG.error("Failed to add the remote copy of volume %(vol)s to "
5437                          "group. Exception: %(exception)s.",
5438                          {'vol': volume.name, 'exception': err})
5439
5440        # Remove remote copy relationship from rccg
5441        removed_vols = []
5442        for volume in remove_volumes:
5443            try:
5444                vol_name = (volume.name if not self._active_backend_id else
5445                            storwize_const.REPLICA_AUX_VOL_PREFIX +
5446                            volume.name)
5447                rcrel = self._helpers.get_relationship_info(vol_name)
5448                if not rcrel:
5449                    LOG.error("Failed to update group: remote copy "
5450                              "relationship of %(vol)s does not exist in "
5451                              "backend.", {'vol': volume.id})
5452                    model_update['status'] = fields.GroupStatus.ERROR
5453                else:
5454                    self._helpers.chrcrelationship(rcrel['name'])
5455                    removed_vols.append({'id': volume.id,
5456                                         'group_id': None})
5457            except exception.VolumeBackendAPIException as err:
5458                model_update['status'] = fields.GroupStatus.ERROR
5459                LOG.error("Failed to remove the remote copy of volume %(vol)s "
5460                          "from group. Exception: %(exception)s.",
5461                          {'vol': volume.name, 'exception': err})
5462        return model_update, added_vols, removed_vols
5463
5464    def _delete_hyperswap_grp(self, group, volumes):
5465        model_update = {'status': fields.GroupStatus.DELETED}
5466        volumes_model_update = []
5467        try:
5468            rccg_name = self._get_rccg_name(group, hyper_grp=True)
5469            self._helpers.delete_rccg(rccg_name)
5470        except exception.VolumeBackendAPIException as err:
5471            LOG.error("Failed to delete rccg  %(rccg)s. "
5472                      "Exception: %(exception)s.",
5473                      {'rccg': group.name, 'exception': err})
5474            model_update = {'status': fields.GroupStatus.ERROR_DELETING}
5475
5476        for volume in volumes:
5477            try:
5478                self._helpers.delete_hyperswap_volume(volume.name, True)
5479                volumes_model_update.append(
5480                    {'id': volume.id, 'status': 'deleted'})
5481            except exception.VolumeDriverException as err:
5482                LOG.error("Failed to delete the volume %(vol)s of CG. "
5483                          "Exception: %(exception)s.",
5484                          {'vol': volume.name, 'exception': err})
5485                volumes_model_update.append(
5486                    {'id': volume.id,
5487                     'status': 'error_deleting'})
5488        return model_update, volumes_model_update
5489
5490    def _update_hyperswap_group(self, context, group,
5491                                add_volumes=None, remove_volumes=None):
5492        LOG.info("Update hyperswap group: %(group)s. ", {'group': group.id})
5493        model_update = {'status': fields.GroupStatus.AVAILABLE}
5494        rccg_name = self._get_rccg_name(group, hyper_grp=True)
5495        if not self._helpers.get_rccg(rccg_name):
5496            LOG.error("Failed to update rccg: %(grp)s does not exist in "
5497                      "backend.", {'grp': group.id})
5498            model_update['status'] = fields.GroupStatus.ERROR
5499            return model_update, None, None
5500
5501        # Add remote copy relationship to rccg
5502        added_vols = []
5503        for volume in add_volumes:
5504            hyper_volume = self._helpers.is_volume_hyperswap(volume.name)
5505            if not hyper_volume:
5506                LOG.error("Failed to update rccg: the non hyperswap volume"
5507                          " of %(vol)s can't be added to hyperswap group.",
5508                          {'vol': volume.id})
5509                model_update['status'] = fields.GroupStatus.ERROR
5510                continue
5511            try:
5512                rcrel = self._helpers.get_relationship_info(volume.name)
5513                if not rcrel:
5514                    LOG.error("Failed to update rccg: remote copy relationship"
5515                              " of %(vol)s does not exist in backend.",
5516                              {'vol': volume.id})
5517                    model_update['status'] = fields.GroupStatus.ERROR
5518                else:
5519                    self._helpers.chrcrelationship(rcrel['name'], rccg_name)
5520                    added_vols.append({'id': volume.id,
5521                                       'group_id': group.id})
5522            except exception.VolumeBackendAPIException as err:
5523                model_update['status'] = fields.GroupStatus.ERROR
5524                LOG.error("Failed to add the remote copy of volume %(vol)s to "
5525                          "rccg. Exception: %(exception)s.",
5526                          {'vol': volume.name, 'exception': err})
5527
5528        # Remove remote copy relationship from rccg
5529        removed_vols = []
5530        for volume in remove_volumes:
5531            try:
5532                rcrel = self._helpers.get_relationship_info(volume.name)
5533                if not rcrel:
5534                    LOG.error("Failed to update rccg: remote copy relationship"
5535                              " of %(vol)s does not exit in backend.",
5536                              {'vol': volume.id})
5537                    model_update['status'] = fields.GroupStatus.ERROR
5538                else:
5539                    self._helpers.chrcrelationship(rcrel['name'])
5540                    removed_vols.append({'id': volume.id,
5541                                         'group_id': None})
5542            except exception.VolumeBackendAPIException as err:
5543                model_update['status'] = fields.GroupStatus.ERROR
5544                LOG.error("Failed to remove the remote copy of volume %(vol)s "
5545                          "from rccg. Exception: %(exception)s.",
5546                          {'vol': volume.name, 'exception': err})
5547        return model_update, added_vols, removed_vols
5548
5549    def _get_volume_host_site_from_conf(self, volume, connector, iscsi=False):
5550        host_site = self.configuration.safe_get('storwize_preferred_host_site')
5551        select_site = None
5552        if not host_site:
5553            LOG.debug('There is no host_site configured for volume %s.',
5554                      volume.name)
5555            return select_site
5556        if iscsi:
5557            for site, iqn in host_site.items():
5558                if connector['initiator'].lower() in iqn.lower():
5559                    if select_site is None:
5560                        select_site = site
5561                    elif select_site != site:
5562                        msg = _('Configured the host IQN in both sites.')
5563                        LOG.error(msg)
5564                        raise exception.InvalidConfigurationValue(message=msg)
5565        else:
5566            for wwpn in connector['wwpns']:
5567                for site, wwpn_list in host_site.items():
5568                    if wwpn.lower() in wwpn_list.lower():
5569                        if select_site is None:
5570                            select_site = site
5571                        elif select_site != site:
5572                            msg = _('Configured the host wwpns not in the'
5573                                    ' same site.')
5574                            LOG.error(msg)
5575                            raise exception.InvalidConfigurationValue(
5576                                message=msg)
5577        return select_site
5578
5579    def _update_host_site_for_hyperswap_volume(self, host_name, host_site):
5580        host_info = self._helpers.ssh.lshost(host=host_name)
5581        if not host_info[0]['site_name'] and host_site:
5582            self._helpers.update_host(host_name, host_site)
5583        elif host_info[0]['site_name']:
5584            ref_host_site = host_info[0]['site_name']
5585            if host_site and host_site != ref_host_site:
5586                msg = (_('The existing host site is %(ref_host_site)s,'
5587                         ' but the new host site is %(host_site)s.') %
5588                       {'ref_host_site': ref_host_site,
5589                        'host_site': host_site})
5590                LOG.error(msg)
5591                raise exception.InvalidConfigurationValue(message=msg)
5592