# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ast
from copy import deepcopy
import os.path
import random
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six

from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.vmax import masking
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import rest
from cinder.volume.drivers.dell_emc.vmax import utils
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_dell_emc_config.xml'
CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_dell_emc_config_'
CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
BACKENDNAME = 'volume_backend_name'
PREFIXBACKENDNAME = 'capabilities:volume_backend_name'

# Replication
REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED
REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED
REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER
FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR
REPLICATION_ERROR = fields.ReplicationStatus.ERROR


vmax_opts = [
    cfg.StrOpt('cinder_dell_emc_config_file',
               default=CINDER_EMC_CONFIG_FILE,
               deprecated_for_removal=True,
               help='Use this file for cinder emc plugin '
                    'config data.'),
    cfg.IntOpt('interval',
               default=3,
               help='Use this value to specify '
                    'length of the interval in seconds.'),
    cfg.IntOpt('retries',
               default=200,
               help='Use this value to specify '
                    'number of retries.'),
    cfg.BoolOpt('initiator_check',
                default=False,
                help='Use this value to enable '
                     'the initiator_check.'),
    cfg.PortOpt(utils.VMAX_SERVER_PORT,
                default=8443,
                help='REST server port number.'),
    cfg.StrOpt(utils.VMAX_ARRAY,
               help='Serial number of the array to connect to.'),
    cfg.StrOpt(utils.VMAX_SRP,
               help='Storage resource pool on array to use for '
                    'provisioning.'),
    cfg.StrOpt(utils.VMAX_SERVICE_LEVEL,
               help='Service level to use for provisioning storage.'),
    cfg.StrOpt(utils.VMAX_WORKLOAD,
               help='Workload'),
    cfg.ListOpt(utils.VMAX_PORT_GROUPS,
                bounds=True,
                help='List of port groups containing frontend ports '
                     'configured prior for server connection.')]

CONF.register_opts(vmax_opts, group=configuration.SHARED_CONF_GROUP)
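
# A minimal illustrative cinder.conf stanza for the options registered above
# (a sketch only: it assumes utils.VMAX_SERVER_PORT, utils.VMAX_ARRAY,
# utils.VMAX_SRP, utils.VMAX_SERVICE_LEVEL and utils.VMAX_PORT_GROUPS
# resolve to the option names shown; all values are placeholders):
#
#     [vmax_backend]
#     volume_backend_name = vmax_backend
#     san_rest_port = 8443
#     vmax_array = 000197800123
#     vmax_srp = SRP_1
#     vmax_service_level = Diamond
#     vmax_port_groups = [OS-PORTGROUP1-PG, OS-PORTGROUP2-PG]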


class VMAXCommon(object):
    """Common class for REST-based VMAX volume drivers.

    This common class is for Dell EMC VMAX volume drivers
    based on the Unisphere REST API.
    It supports VMAX 3 and VMAX All Flash arrays.

    """
    pool_info = {'backend_name': None,
                 'config_file': None,
                 'arrays_info': {},
                 'max_over_subscription_ratio': None,
                 'reserved_percentage': 0,
                 'replication_enabled': False}

    def __init__(self, prtcl, version, configuration=None,
                 active_backend_id=None):

        self.protocol = prtcl
        self.configuration = configuration
        self.configuration.append_config_values(vmax_opts)
        self.rest = rest.VMAXRest()
        self.utils = utils.VMAXUtils()
        self.masking = masking.VMAXMasking(prtcl, self.rest)
        self.provision = provision.VMAXProvision(self.rest)
        self.version = version
        # replication
        self.replication_enabled = False
        self.extend_replicated_vol = False
        self.rep_devices = None
        self.active_backend_id = active_backend_id
        self.failover = False
        self._get_replication_info()
        self._gather_info()
        self.nextGen = False

    def _gather_info(self):
        """Gather the relevant information for update_volume_stats."""
        self._get_attributes_from_config()
        array_info = self.get_attributes_from_cinder_config()
        if array_info is None:
            array_info = self.utils.parse_file_to_get_array_map(
                self.pool_info['config_file'])
        self.rest.set_rest_credentials(array_info)
        if array_info:
            self.nextGen = self.rest.is_next_gen_array(
                array_info['SerialNumber'])
        finalarrayinfolist = self._get_slo_workload_combinations(
            array_info)
        self.pool_info['arrays_info'] = finalarrayinfolist

    def _get_attributes_from_config(self):
        """Get relevant details from the configuration file."""
        if hasattr(self.configuration, 'cinder_dell_emc_config_file'):
            self.pool_info['config_file'] = (
                self.configuration.cinder_dell_emc_config_file)
        else:
            self.pool_info['config_file'] = (
                self.configuration.safe_get('cinder_dell_emc_config_file'))
        self.interval = self.configuration.safe_get('interval')
        self.retries = self.configuration.safe_get('retries')
        self.pool_info['backend_name'] = (
            self.configuration.safe_get('volume_backend_name'))
        mosr = volume_utils.get_max_over_subscription_ratio(
            self.configuration.safe_get('max_over_subscription_ratio'), True)
        self.pool_info['max_over_subscription_ratio'] = mosr
        self.pool_info['reserved_percentage'] = (
            self.configuration.safe_get('reserved_percentage'))
        LOG.debug(
            "Updating volume stats on file %(emcConfigFileName)s on "
            "backend %(backendName)s.",
            {'emcConfigFileName': self.pool_info['config_file'],
             'backendName': self.pool_info['backend_name']})

    def _get_initiator_check_flag(self):
        """Reads the configuration for the initiator_check flag.

        :returns: flag
        """
        return self.configuration.safe_get('initiator_check')

    def _get_replication_info(self):
        """Gather replication information, if provided."""
        self.rep_config = None
        self.replication_targets = None
        if hasattr(self.configuration, 'replication_device'):
            self.rep_devices = self.configuration.safe_get(
                'replication_device')
        if self.rep_devices and len(self.rep_devices) == 1:
            self.rep_config = self.utils.get_replication_config(
                self.rep_devices)
            if self.rep_config:
                self.replication_targets = [self.rep_config['array']]
                if self.active_backend_id == self.rep_config['array']:
                    self.failover = True
                self.extend_replicated_vol = self.rep_config['allow_extend']
                self.allow_delete_metro = (
                    self.rep_config['allow_delete_metro']
                    if self.rep_config.get('allow_delete_metro') else False)
                # use self.replication_enabled for update_volume_stats
                self.replication_enabled = True
                LOG.debug("The replication configuration is %(rep_config)s.",
                          {'rep_config': self.rep_config})
        elif self.rep_devices and len(self.rep_devices) > 1:
            LOG.error("More than one replication target is configured. "
                      "Dell EMC VMAX only supports a single replication "
                      "target. Replication will not be enabled.")
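
    # A single replication target is configured via one replication_device
    # entry in cinder.conf. An illustrative sketch (key names follow the
    # documented VMAX replication format; values are placeholders, and the
    # entry is shown wrapped here for readability):
    #
    #     replication_device = target_device_id:000197811111,
    #         remote_port_group:os-failover-pg, remote_pool:SRP_1,
    #         rdf_group_label:28_11_07, allow_extend:false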

    def _get_slo_workload_combinations(self, array_info):
        """Method to query the array for SLO and Workloads.

        Takes the arrayinfolist object and generates a set which has
        all available SLO & Workload combinations.
        :param array_info: the array information
        :returns: finalarrayinfolist
        :raises: VolumeBackendAPIException:
        """
        try:
            array = array_info['SerialNumber']
            if self.failover:
                array = self.active_backend_id

            slo_settings = self.rest.get_slo_list(array)
            slo_list = [x for x in slo_settings
                        if x.lower() not in ['none', 'optimized']]
            workload_settings = self.rest.get_workload_settings(array)
            workload_settings.append('None')
            slo_workload_set = set(
                ['%(slo)s:%(workload)s' % {'slo': slo,
                                           'workload': workload}
                 for slo in slo_list for workload in workload_settings])
            slo_workload_set.add('None:None')

            if self.nextGen:
                LOG.warning("Workloads have been deprecated for arrays "
                            "running PowerMax OS uCode level 5978 or higher. "
                            "Any supplied workloads will be treated as None "
                            "values. It is highly recommended to create a "
                            "new volume type without a workload specified.")
                for slo in slo_list:
                    slo_workload_set.add(slo)
                slo_workload_set.add('None')
                slo_workload_set.add('Optimized')
                slo_workload_set.add('Optimized:None')
                # If the array is 5978 or greater and a VMAX AFA, add the
                # legacy SLO/Workload combinations
                if any(self.rest.get_vmax_model(array) in x for x in
                       utils.VMAX_AFA_MODELS):
                    slo_workload_set.add('Diamond:OLTP')
                    slo_workload_set.add('Diamond:OLTP_REP')
                    slo_workload_set.add('Diamond:DSS')
                    slo_workload_set.add('Diamond:DSS_REP')
                    slo_workload_set.add('Diamond:None')

            if not any(self.rest.get_vmax_model(array) in x for x in
                       utils.VMAX_AFA_MODELS):
                slo_workload_set.add('Optimized:None')

            finalarrayinfolist = []
            for sloWorkload in slo_workload_set:
                temparray_info = array_info.copy()
                try:
                    slo, workload = sloWorkload.split(':')
                    temparray_info['SLO'] = slo
                    temparray_info['Workload'] = workload
                except ValueError:
                    temparray_info['SLO'] = sloWorkload
                finalarrayinfolist.append(temparray_info)
        except Exception as e:
            exception_message = (_(
                "Unable to get the SLO/Workload combinations from the array. "
                "Exception received was %(e)s") % {'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)
        return finalarrayinfolist

    def create_volume(self, volume):
        """Creates an EMC (VMAX) volume from a storage group.

        :param volume: volume object
        :returns: model_update - dict
        """
        model_update = {}
        rep_driver_data = {}
        volume_id = volume.id
        extra_specs = self._initial_setup(volume)
        if 'qos' in extra_specs:
            del extra_specs['qos']

        # Volume_name naming convention is 'OS-UUID'.
        volume_name = self.utils.get_volume_element_name(volume_id)
        volume_size = volume.size

        volume_dict = (self._create_volume(
            volume_name, volume_size, extra_specs))

        # Set up volume replication, if enabled
        if self.utils.is_replication_enabled(extra_specs):
            rep_update = self._replicate_volume(volume, volume_name,
                                                volume_dict, extra_specs)
            rep_driver_data = rep_update['replication_driver_data']
            model_update.update(rep_update)

        # Add volume to group, if required
        if volume.group_id is not None:
            if (volume_utils.is_group_a_cg_snapshot_type(volume.group)
                    or volume.group.is_replicated):
                LOG.debug("Adding volume %(vol_id)s to group %(grp_id)s",
                          {'vol_id': volume.id, 'grp_id': volume.group_id})
                self._add_new_volume_to_volume_group(
                    volume, volume_dict['device_id'], volume_name,
                    extra_specs, rep_driver_data)

        LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.",
                 {'name': volume_name, 'dict': volume_dict})
        model_update.update(
            {'provider_location': six.text_type(volume_dict)})
        return model_update
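
    # For reference, the provider_location stored by create_volume above is
    # the stringified volume_dict; an illustrative value (placeholders only):
    #     "{'array': '000197800123', 'device_id': '0001F'}"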

    def _add_new_volume_to_volume_group(self, volume, device_id, volume_name,
                                        extra_specs, rep_driver_data=None):
        """Add a new volume to a volume group.

        This may also be called after extending a replicated volume.
        :param volume: the volume object
        :param device_id: the device id
        :param volume_name: the volume name
        :param extra_specs: the extra specifications
        :param rep_driver_data: the replication driver data, optional
        """
        self.utils.check_replication_matched(volume, extra_specs)
        group_name = self.provision.get_or_create_volume_group(
            extra_specs[utils.ARRAY], volume.group, extra_specs)
        self.masking.add_volume_to_storage_group(
            extra_specs[utils.ARRAY], device_id,
            group_name, volume_name, extra_specs)
        # Add the remote volume to the remote group, if required
        if volume.group.is_replicated:
            self._add_remote_vols_to_volume_group(
                extra_specs[utils.ARRAY],
                [volume], volume.group, extra_specs, rep_driver_data)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        :param volume: volume object
        :param snapshot: snapshot object
        :returns: model_update
        :raises: VolumeBackendAPIException:
        """
        LOG.debug("Entering create_volume_from_snapshot.")
        model_update = {}
        extra_specs = self._initial_setup(volume)

        # Check if legacy snapshot
        sourcedevice_id = self._find_device_on_array(
            snapshot, extra_specs)
        from_snapvx = False if sourcedevice_id else True

        clone_dict = self._create_cloned_volume(
            volume, snapshot, extra_specs, is_snapshot=False,
            from_snapvx=from_snapvx)

        # Set up volume replication, if enabled
        if self.utils.is_replication_enabled(extra_specs):
            rep_update = self._replicate_volume(volume, snapshot['name'],
                                                clone_dict, extra_specs)
            model_update.update(rep_update)

        model_update.update(
            {'provider_location': six.text_type(clone_dict)})
        return model_update

    def create_cloned_volume(self, clone_volume, source_volume):
        """Creates a clone of the specified volume.

        :param clone_volume: clone volume Object
        :param source_volume: volume object
        :returns: model_update, dict
        """
        model_update = {}
        extra_specs = self._initial_setup(clone_volume)
        clone_dict = self._create_cloned_volume(clone_volume, source_volume,
                                                extra_specs)

        # Set up volume replication, if enabled
        if self.utils.is_replication_enabled(extra_specs):
            rep_update = self._replicate_volume(
                clone_volume, clone_volume.name, clone_dict, extra_specs)
            model_update.update(rep_update)

        model_update.update(
            {'provider_location': six.text_type(clone_dict)})
        return model_update

    def _replicate_volume(self, volume, volume_name, volume_dict, extra_specs,
                          delete_src=True):
        """Set up remote replication for a volume.

        :param volume: the volume object
        :param volume_name: the volume name
        :param volume_dict: the volume dict
        :param extra_specs: the extra specifications
        :param delete_src: flag to indicate if the source should be deleted
            if replication fails
        :returns: replication model_update
        """
        array = volume_dict['array']
        try:
            device_id = volume_dict['device_id']
            replication_status, replication_driver_data = (
                self.setup_volume_replication(
                    array, volume, device_id, extra_specs))
        except Exception:
            if delete_src:
                self._cleanup_replication_source(
                    array, volume, volume_name, volume_dict, extra_specs)
            raise
        return ({'replication_status': replication_status,
                 'replication_driver_data': six.text_type(
                     replication_driver_data)})

    def delete_volume(self, volume):
        """Deletes an EMC (VMAX) volume.

        :param volume: volume object
        """
        LOG.info("Deleting Volume: %(volume)s",
                 {'volume': volume.name})
        volume_name = self._delete_volume(volume)
        LOG.info("Leaving delete_volume: %(volume_name)s.",
                 {'volume_name': volume_name})

    def create_snapshot(self, snapshot, volume):
        """Creates a snapshot.

        :param snapshot: snapshot object
        :param volume: volume Object to create snapshot from
        :returns: dict -- the cloned volume dictionary
        """
        extra_specs = self._initial_setup(volume)
        snapshot_dict = self._create_cloned_volume(
            snapshot, volume, extra_specs, is_snapshot=True)
        model_update = {'provider_location': six.text_type(snapshot_dict)}
        return model_update

    def delete_snapshot(self, snapshot, volume):
        """Deletes a snapshot.

        :param snapshot: snapshot object
        :param volume: source volume
        """
        LOG.info("Delete Snapshot: %(snapshotName)s.",
                 {'snapshotName': snapshot.name})
        extra_specs = self._initial_setup(volume)
        sourcedevice_id, snap_name = self._parse_snap_info(
            extra_specs[utils.ARRAY], snapshot)
        if not sourcedevice_id and not snap_name:
            # Check if legacy snapshot
            sourcedevice_id = self._find_device_on_array(
                snapshot, extra_specs)
            if sourcedevice_id:
                self._delete_volume(snapshot)
            else:
                LOG.info("No snapshot found on the array")
        elif not sourcedevice_id or not snap_name:
            LOG.info("No snapshot found on the array")
        else:
            @coordination.synchronized("emc-source-{sourcedevice_id}")
            def do_delete_volume_snap_check_for_links(sourcedevice_id):
                # Ensure the snap has not been recently deleted
                self.provision.delete_volume_snap_check_for_links(
                    extra_specs[utils.ARRAY], snap_name,
                    sourcedevice_id, extra_specs)
            do_delete_volume_snap_check_for_links(sourcedevice_id)

        LOG.info("Leaving delete_snapshot: %(ssname)s.",
                 {'ssname': snap_name})

    def _remove_members(self, array, volume, device_id,
                        extra_specs, connector, async_grp=None):
        """This method unmaps a volume from a host.

        Removes volume from the storage group that belongs to a masking view.
        :param array: the array serial number
        :param volume: volume object
        :param device_id: the VMAX volume device id
        :param extra_specs: extra specifications
        :param connector: the connector object
        :param async_grp: the name of the async group, if applicable
        """
        volume_name = volume.name
        LOG.debug("Detaching volume %s.", volume_name)
        return self.masking.remove_and_reset_members(
            array, volume, device_id, volume_name,
            extra_specs, True, connector, async_grp=async_grp)

    def _unmap_lun(self, volume, connector):
        """Unmaps a volume from the host.

        :param volume: the volume Object
        :param connector: the connector Object
        """
        extra_specs = self._initial_setup(volume)
        if 'qos' in extra_specs:
            del extra_specs['qos']
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, self.rep_config)
        if self.utils.is_volume_failed_over(volume):
            extra_specs = rep_extra_specs
        volume_name = volume.name
        async_grp = None
        LOG.info("Unmap volume: %(volume)s.",
                 {'volume': volume_name})
        if connector is not None:
            host = self.utils.get_host_short_name(connector['host'])
        else:
            LOG.warning("Cannot get host name from connector object - "
                        "assuming force-detach.")
            host = None

        device_info, is_live_migration, source_storage_group_list = (
            self.find_host_lun_id(volume, host, extra_specs))
        if 'hostlunid' not in device_info:
            LOG.info("Volume %s is not mapped. No volume to unmap.",
                     volume_name)
            return
        if is_live_migration and len(source_storage_group_list) == 1:
            LOG.info("Volume %s is mapped. Failed live migration case",
                     volume_name)
            return
        source_nf_sg = None
        array = extra_specs[utils.ARRAY]
        if self.utils.does_vol_need_rdf_management_group(extra_specs):
            async_grp = self.utils.get_async_rdf_managed_grp_name(
                self.rep_config)
        if len(source_storage_group_list) > 1:
            for storage_group in source_storage_group_list:
                if 'NONFAST' in storage_group:
                    source_nf_sg = storage_group
                    break
        if source_nf_sg:
            # Remove the volume from the non-FAST storage group
            self.masking.remove_volume_from_sg(
                array, device_info['device_id'], volume_name, source_nf_sg,
                extra_specs)
        else:
            self._remove_members(array, volume, device_info['device_id'],
                                 extra_specs, connector, async_grp=async_grp)
        if self.utils.is_metro_device(self.rep_config, extra_specs):
            # Need to remove from the remote masking view
            device_info, __, __ = (self.find_host_lun_id(
                volume, host, extra_specs, rep_extra_specs))
            if 'hostlunid' in device_info:
                self._remove_members(
                    rep_extra_specs[utils.ARRAY], volume,
                    device_info['device_id'],
                    rep_extra_specs, connector, async_grp=async_grp)
            else:
                # Make an attempt to clean up the initiator group
                self.masking.attempt_ig_cleanup(
                    connector, self.protocol, rep_extra_specs[utils.ARRAY],
                    True)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns device and connection info.

        The volume may be already mapped; if so, the deviceInfo tuple is
        returned. If the volume is not already mapped then we need to
        gather information to either 1. Create a new masking view or 2. Add
        the volume to an existing storage group within an already existing
        maskingview.

        The naming convention is the following:

        .. code-block:: none

         initiator_group_name = OS-<shortHostName>-<shortProtocol>-IG
                              e.g OS-myShortHost-I-IG
         storage_group_name = OS-<shortHostName>-<srpName>-<shortProtocol>-SG
                            e.g OS-myShortHost-SRP_1-I-SG
         port_group_name = OS-<target>-PG  The port_group_name will come from
                         the EMC configuration xml file.
                         These are precreated. If the portGroup does not
                         exist then an error will be returned to the user
         maskingview_name = OS-<shortHostName>-<srpName>-<shortProtocol>-MV
                          e.g OS-myShortHost-SRP_1-I-MV

        :param volume: volume Object
        :param connector: the connector Object
        :returns: dict -- device_info_dict - device information dict
        """
        extra_specs = self._initial_setup(volume)
        is_multipath = connector.get('multipath', False)
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, self.rep_config)
        remote_port_group = None
        volume_name = volume.name
        LOG.info("Initialize connection: %(volume)s.",
                 {'volume': volume_name})
        if (self.utils.is_metro_device(self.rep_config, extra_specs)
                and not is_multipath and self.protocol.lower() == 'iscsi'):
            LOG.warning("Multipathing is not correctly enabled "
                        "on your system.")
            return

        if self.utils.is_volume_failed_over(volume):
            extra_specs = rep_extra_specs
        device_info_dict, is_live_migration, source_storage_group_list = (
            self.find_host_lun_id(volume, connector['host'], extra_specs))
        masking_view_dict = self._populate_masking_dict(
            volume, connector, extra_specs)

        if self.rest.is_next_gen_array(extra_specs['array']):
            masking_view_dict['workload'] = 'NONE'
            temp_pool = masking_view_dict['storagegroup_name']
            splitPool = temp_pool.split('+')
            if len(splitPool) == 4:
                splitPool[1] = 'NONE'
            masking_view_dict['storagegroup_name'] = '+'.join(splitPool)
" 621 "The hostlunid is %(hostlunid)s.", 622 {'volume': volume_name, 623 'hostlunid': hostlunid}) 624 port_group_name = ( 625 self.get_port_group_from_masking_view( 626 extra_specs[utils.ARRAY], 627 device_info_dict['maskingview'])) 628 if self.utils.is_metro_device(self.rep_config, extra_specs): 629 remote_info_dict, __, __ = ( 630 self.find_host_lun_id(volume, connector['host'], 631 extra_specs, rep_extra_specs)) 632 if remote_info_dict.get('hostlunid') is None: 633 # Need to attach on remote side 634 metro_host_lun, remote_port_group = ( 635 self._attach_metro_volume( 636 volume, connector, extra_specs, rep_extra_specs)) 637 else: 638 metro_host_lun = remote_info_dict['hostlunid'] 639 remote_port_group = self.get_port_group_from_masking_view( 640 rep_extra_specs[utils.ARRAY], 641 remote_info_dict['maskingview']) 642 device_info_dict['metro_hostlunid'] = metro_host_lun 643 644 else: 645 if is_live_migration: 646 source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg = ( 647 self._setup_for_live_migration( 648 device_info_dict, source_storage_group_list)) 649 masking_view_dict['source_nf_sg'] = source_nf_sg 650 masking_view_dict['source_sg'] = source_sg 651 masking_view_dict['source_parent_sg'] = source_parent_sg 652 try: 653 self.masking.pre_live_migration( 654 source_nf_sg, source_sg, source_parent_sg, 655 is_source_nf_sg, device_info_dict, extra_specs) 656 except Exception: 657 # Move it back to original storage group 658 source_storage_group_list = ( 659 self.rest.get_storage_groups_from_volume( 660 device_info_dict['array'], 661 device_info_dict['device_id'])) 662 self.masking.failed_live_migration( 663 masking_view_dict, source_storage_group_list, 664 extra_specs) 665 exception_message = (_( 666 "Unable to setup live migration because of the " 667 "following error: %(errorMessage)s.") 668 % {'errorMessage': sys.exc_info()[1]}) 669 raise exception.VolumeBackendAPIException( 670 data=exception_message) 671 device_info_dict, port_group_name = ( 672 self._attach_volume( 673 volume, connector, extra_specs, masking_view_dict, 674 is_live_migration)) 675 if self.utils.is_metro_device(self.rep_config, extra_specs): 676 # Need to attach on remote side 677 metro_host_lun, remote_port_group = self._attach_metro_volume( 678 volume, connector, extra_specs, rep_extra_specs) 679 device_info_dict['metro_hostlunid'] = metro_host_lun 680 if is_live_migration: 681 self.masking.post_live_migration( 682 masking_view_dict, extra_specs) 683 if self.protocol.lower() == 'iscsi': 684 device_info_dict['ip_and_iqn'] = ( 685 self._find_ip_and_iqns( 686 extra_specs[utils.ARRAY], port_group_name)) 687 if self.utils.is_metro_device(self.rep_config, extra_specs): 688 device_info_dict['metro_ip_and_iqn'] = ( 689 self._find_ip_and_iqns( 690 rep_extra_specs[utils.ARRAY], remote_port_group)) 691 device_info_dict['is_multipath'] = is_multipath 692 return device_info_dict 693 694 def _attach_metro_volume(self, volume, connector, 695 extra_specs, rep_extra_specs): 696 """Helper method to attach a metro volume. 697 698 Metro protected volumes point to two VMAX devices on different arrays, 699 which are presented as a single device to the host. This method 700 masks the remote device to the host. 

    def _attach_metro_volume(self, volume, connector,
                             extra_specs, rep_extra_specs):
        """Helper method to attach a metro volume.

        Metro protected volumes point to two VMAX devices on different arrays,
        which are presented as a single device to the host. This method
        masks the remote device to the host.
        :param volume: the volume object
        :param connector: the connector dict
        :param rep_extra_specs: replication extra specifications
        :return: hostlunid, remote_port_group
        """
        remote_mv_dict = self._populate_masking_dict(
            volume, connector, extra_specs, rep_extra_specs)
        remote_info_dict, remote_port_group = (
            self._attach_volume(
                volume, connector, extra_specs, remote_mv_dict,
                rep_extra_specs=rep_extra_specs))
        remote_port_group = self.get_port_group_from_masking_view(
            rep_extra_specs[utils.ARRAY], remote_info_dict['maskingview'])
        return remote_info_dict['hostlunid'], remote_port_group

    def _attach_volume(self, volume, connector, extra_specs,
                       masking_view_dict, is_live_migration=False,
                       rep_extra_specs=None):
        """Attach a volume to a host.

        :param volume: the volume object
        :param connector: the connector object
        :param extra_specs: extra specifications
        :param masking_view_dict: masking view information
        :param is_live_migration: flag to indicate live migration
        :param rep_extra_specs: rep extra specs are passed if metro device
        :returns: dict -- device_info_dict
                  String -- port group name
        :raises: VolumeBackendAPIException
        """
        volume_name = volume.name
        if is_live_migration:
            masking_view_dict['isLiveMigration'] = True
        else:
            masking_view_dict['isLiveMigration'] = False
        m_specs = extra_specs if rep_extra_specs is None else rep_extra_specs
        rollback_dict = self.masking.setup_masking_view(
            masking_view_dict[utils.ARRAY], volume,
            masking_view_dict, m_specs)

        # Find the host lun id again after the volume is exported to the
        # host.
        device_info_dict, __, __ = self.find_host_lun_id(
            volume, connector['host'], extra_specs, rep_extra_specs)
        if 'hostlunid' not in device_info_dict:
            # Did not successfully attach to host,
            # so a rollback for FAST is required.
            LOG.error("Error Attaching volume %(vol)s. "
                      "Cannot retrieve hostlunid. ",
                      {'vol': volume_name})
            self.masking.check_if_rollback_action_for_masking_required(
                masking_view_dict[utils.ARRAY], volume,
                masking_view_dict[utils.DEVICE_ID],
                rollback_dict)
            exception_message = (_("Error Attaching volume %(vol)s.")
                                 % {'vol': volume_name})
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        return device_info_dict, rollback_dict[utils.PORTGROUPNAME]

    def terminate_connection(self, volume, connector):
        """Disallow connection from connector.

        :param volume: the volume Object
        :param connector: the connector Object
        """
        volume_name = volume.name
        LOG.info("Terminate connection: %(volume)s.",
                 {'volume': volume_name})
        self._unmap_lun(volume, connector)
Exiting....") 791 % {'volume_name': volume_name}) 792 LOG.error(exception_message) 793 raise exception.VolumeBackendAPIException(data=exception_message) 794 __, snapvx_src, __ = self.rest.is_vol_in_rep_session(array, device_id) 795 if snapvx_src: 796 if not self.rest.is_next_gen_array(array): 797 exception_message = ( 798 _("The volume: %(volume)s is a snapshot source. " 799 "Extending a volume with snapVx snapshots is only " 800 "supported on VMAX from HyperMaxOS version 5978 " 801 "onwards. Exiting...") % {'volume': volume_name}) 802 LOG.error(exception_message) 803 raise exception.VolumeBackendAPIException( 804 data=exception_message) 805 806 if int(original_vol_size) > int(new_size): 807 exception_message = (_( 808 "Your original size: %(original_vol_size)s GB is greater " 809 "than: %(new_size)s GB. Only Extend is supported. Exiting...") 810 % {'original_vol_size': original_vol_size, 811 'new_size': new_size}) 812 LOG.error(exception_message) 813 raise exception.VolumeBackendAPIException(data=exception_message) 814 LOG.info("Extending volume %(volume)s to %(new_size)d GBs", 815 {'volume': volume_name, 816 'new_size': int(new_size)}) 817 if self.utils.is_replication_enabled(extra_specs): 818 # Extra logic required if volume is replicated 819 self.extend_volume_is_replicated( 820 array, volume, device_id, volume_name, new_size, extra_specs) 821 else: 822 self.provision.extend_volume( 823 array, device_id, new_size, extra_specs) 824 825 LOG.debug("Leaving extend_volume: %(volume_name)s. ", 826 {'volume_name': volume_name}) 827 828 def update_volume_stats(self): 829 """Retrieve stats info.""" 830 pools = [] 831 # Dictionary to hold the arrays for which the SRP details 832 # have already been queried. 833 arrays = {} 834 total_capacity_gb = 0 835 free_capacity_gb = 0 836 provisioned_capacity_gb = 0 837 location_info = None 838 backend_name = self.pool_info['backend_name'] 839 max_oversubscription_ratio = ( 840 self.pool_info['max_over_subscription_ratio']) 841 reserved_percentage = self.pool_info['reserved_percentage'] 842 array_reserve_percent = None 843 array_info_list = self.pool_info['arrays_info'] 844 already_queried = False 845 for array_info in array_info_list: 846 if self.failover: 847 array_info = self.get_secondary_stats_info( 848 self.rep_config, array_info) 849 # Add both SLO & Workload name in the pool name 850 # Only insert the array details in the dict once 851 self.rest.set_rest_credentials(array_info) 852 if array_info['SerialNumber'] not in arrays: 853 (location_info, total_capacity_gb, free_capacity_gb, 854 provisioned_capacity_gb, 855 array_reserve_percent) = self._update_srp_stats(array_info) 856 arrays[array_info['SerialNumber']] = ( 857 [total_capacity_gb, free_capacity_gb, 858 provisioned_capacity_gb, array_reserve_percent]) 859 else: 860 already_queried = True 861 try: 862 pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" 863 % {'slo': array_info['SLO'], 864 'workload': array_info['Workload'], 865 'srpName': array_info['srpName'], 866 'array': array_info['SerialNumber']}) 867 except KeyError: 868 pool_name = ("%(slo)s+%(srpName)s+%(array)s" 869 % {'slo': array_info['SLO'], 870 'srpName': array_info['srpName'], 871 'array': array_info['SerialNumber']}) 872 873 if already_queried: 874 # The dictionary will only have one key per VMAX 875 # Construct the location info 876 try: 877 temp_location_info = ( 878 ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" 879 % {'arrayName': array_info['SerialNumber'], 880 'srpName': array_info['srpName'], 881 'slo': 

    def update_volume_stats(self):
        """Retrieve stats info."""
        pools = []
        # Dictionary to hold the arrays for which the SRP details
        # have already been queried.
        arrays = {}
        total_capacity_gb = 0
        free_capacity_gb = 0
        provisioned_capacity_gb = 0
        location_info = None
        backend_name = self.pool_info['backend_name']
        max_oversubscription_ratio = (
            self.pool_info['max_over_subscription_ratio'])
        reserved_percentage = self.pool_info['reserved_percentage']
        array_reserve_percent = None
        array_info_list = self.pool_info['arrays_info']
        already_queried = False
        for array_info in array_info_list:
            if self.failover:
                array_info = self.get_secondary_stats_info(
                    self.rep_config, array_info)
            # Add both the SLO & Workload name in the pool name
            # Only insert the array details in the dict once
            self.rest.set_rest_credentials(array_info)
            if array_info['SerialNumber'] not in arrays:
                (location_info, total_capacity_gb, free_capacity_gb,
                 provisioned_capacity_gb,
                 array_reserve_percent) = self._update_srp_stats(array_info)
                arrays[array_info['SerialNumber']] = (
                    [total_capacity_gb, free_capacity_gb,
                     provisioned_capacity_gb, array_reserve_percent])
            else:
                already_queried = True
            try:
                pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s"
                             % {'slo': array_info['SLO'],
                                'workload': array_info['Workload'],
                                'srpName': array_info['srpName'],
                                'array': array_info['SerialNumber']})
            except KeyError:
                pool_name = ("%(slo)s+%(srpName)s+%(array)s"
                             % {'slo': array_info['SLO'],
                                'srpName': array_info['srpName'],
                                'array': array_info['SerialNumber']})

            if already_queried:
                # The dictionary will only have one key per VMAX
                # Construct the location info
                try:
                    temp_location_info = (
                        ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s"
                         % {'arrayName': array_info['SerialNumber'],
                            'srpName': array_info['srpName'],
                            'slo': array_info['SLO'],
                            'workload': array_info['Workload']}))
                except KeyError:
                    temp_location_info = (
                        ("%(arrayName)s#%(srpName)s#%(slo)s"
                         % {'arrayName': array_info['SerialNumber'],
                            'srpName': array_info['srpName'],
                            'slo': array_info['SLO']}))

                pool = {'pool_name': pool_name,
                        'total_capacity_gb':
                            arrays[array_info['SerialNumber']][0],
                        'free_capacity_gb':
                            arrays[array_info['SerialNumber']][1],
                        'provisioned_capacity_gb':
                            arrays[array_info['SerialNumber']][2],
                        'QoS_support': False,
                        'location_info': temp_location_info,
                        'thin_provisioning_support': True,
                        'thick_provisioning_support': False,
                        'consistent_group_snapshot_enabled': True,
                        'max_over_subscription_ratio':
                            max_oversubscription_ratio,
                        'reserved_percentage': reserved_percentage,
                        'replication_enabled': self.replication_enabled}
                if arrays[array_info['SerialNumber']][3]:
                    if reserved_percentage:
                        if (arrays[array_info['SerialNumber']][3] >
                                reserved_percentage):
                            pool['reserved_percentage'] = (
                                arrays[array_info['SerialNumber']][3])
                    else:
                        pool['reserved_percentage'] = (
                            arrays[array_info['SerialNumber']][3])
            else:
                pool = {'pool_name': pool_name,
                        'total_capacity_gb': total_capacity_gb,
                        'free_capacity_gb': free_capacity_gb,
                        'provisioned_capacity_gb': provisioned_capacity_gb,
                        'QoS_support': False,
                        'location_info': location_info,
                        'consistencygroup_support': False,
                        'thin_provisioning_support': True,
                        'thick_provisioning_support': False,
                        'consistent_group_snapshot_enabled': True,
                        'max_over_subscription_ratio':
                            max_oversubscription_ratio,
                        'reserved_percentage': reserved_percentage,
                        'replication_enabled': self.replication_enabled,
                        'group_replication_enabled': self.replication_enabled,
                        'consistent_group_replication_enabled':
                            self.replication_enabled
                        }
                if array_reserve_percent:
                    if isinstance(reserved_percentage, int):
                        if array_reserve_percent > reserved_percentage:
                            pool['reserved_percentage'] = (
                                array_reserve_percent)
                    else:
                        pool['reserved_percentage'] = array_reserve_percent

            pools.append(pool)
        pools = self.utils.add_legacy_pools(pools)
        data = {'vendor_name': "Dell EMC",
                'driver_version': self.version,
                'storage_protocol': 'unknown',
                'volume_backend_name': backend_name or
                self.__class__.__name__,
                # Use zero capacities here so we always use a pool.
                'total_capacity_gb': 0,
                'free_capacity_gb': 0,
                'provisioned_capacity_gb': 0,
                'reserved_percentage': 0,
                'replication_enabled': self.replication_enabled,
                'replication_targets': self.replication_targets,
                'pools': pools}

        return data
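
    # For reference, pool names reported above take the form
    # SLO+Workload+SRP+array (or SLO+SRP+array where no workload applies),
    # e.g. 'Diamond+OLTP+SRP_1+000197800123', and location_info takes the
    # form array#srp#slo#workload (values illustrative only).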

    def _update_srp_stats(self, array_info):
        """Update SRP stats.

        :param array_info: array information
        :returns: location_info
        :returns: totalManagedSpaceGbs
        :returns: remainingManagedSpaceGbs
        :returns: provisionedManagedSpaceGbs
        :returns: array_reserve_percent
        """
        (totalManagedSpaceGbs, remainingManagedSpaceGbs,
         provisionedManagedSpaceGbs, array_reserve_percent) = (
            self.provision.get_srp_pool_stats(
                array_info['SerialNumber'], array_info))

        LOG.info("Capacity stats for SRP pool %(srpName)s on array "
                 "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
                 "free_capacity_gb=%(free_capacity_gb)lu, "
                 "provisioned_capacity_gb=%(provisioned_capacity_gb)lu",
                 {'srpName': array_info['srpName'],
                  'arrayName': array_info['SerialNumber'],
                  'total_capacity_gb': totalManagedSpaceGbs,
                  'free_capacity_gb': remainingManagedSpaceGbs,
                  'provisioned_capacity_gb': provisionedManagedSpaceGbs})

        try:
            location_info = ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s"
                             % {'arrayName': array_info['SerialNumber'],
                                'srpName': array_info['srpName'],
                                'slo': array_info['SLO'],
                                'workload': array_info['Workload']})
        except KeyError:
            location_info = ("%(arrayName)s#%(srpName)s#%(slo)s"
                             % {'arrayName': array_info['SerialNumber'],
                                'srpName': array_info['srpName'],
                                'slo': array_info['SLO']})

        return (location_info, totalManagedSpaceGbs,
                remainingManagedSpaceGbs, provisionedManagedSpaceGbs,
                array_reserve_percent)

    def _set_config_file_and_get_extra_specs(self, volume,
                                             volume_type_id=None,
                                             register_config_file=True):
        """Given the volume object get the associated volumetype.

        Given the volume object get the associated volumetype and the
        extra specs associated with it.
        Based on the name of the config group, register the config file.

        :param volume: the volume object including the volume_type_id
        :param volume_type_id: Optional override of volume.volume_type_id
        :returns: dict -- the extra specs dict
        :returns: string -- configuration file
        """
        qos_specs = {}
        extra_specs = self.utils.get_volumetype_extra_specs(
            volume, volume_type_id)
        type_id = volume.volume_type_id
        if type_id:
            res = volume_types.get_volume_type_qos_specs(type_id)
            qos_specs = res['qos_specs']

        config_group = None
        config_file = None
        # If there are no extra specs then the default case is assumed.
        if extra_specs:
            config_group = self.configuration.config_group
            if extra_specs.get('replication_enabled') == '<is> True':
                extra_specs[utils.IS_RE] = True
                if self.rep_config and self.rep_config.get('mode'):
                    extra_specs[utils.REP_MODE] = self.rep_config['mode']
                if self.rep_config and self.rep_config.get(utils.METROBIAS):
                    extra_specs[utils.METROBIAS] = self.rep_config[
                        utils.METROBIAS]
        if register_config_file:
            config_file = self._register_config_file_from_config_group(
                config_group)
        return extra_specs, config_file, qos_specs

    def _find_device_on_array(self, volume, extra_specs):
        """Given the volume get the VMAX device Id.

        :param volume: volume object
        :param extra_specs: the extra Specs
        :returns: array, device_id
        """
        founddevice_id = None
        volume_name = volume.id
        try:
            name_id = volume._name_id
        except AttributeError:
            name_id = None
        loc = volume.provider_location

        if isinstance(loc, six.string_types):
            name = ast.literal_eval(loc)
            array = extra_specs[utils.ARRAY]
            if name.get('device_id'):
                device_id = name['device_id']
            elif name.get('keybindings'):
                device_id = name['keybindings']['DeviceID']
            else:
                device_id = None
            try:
                founddevice_id = self.rest.check_volume_device_id(
                    array, device_id, volume_name, name_id)
            except exception.VolumeBackendAPIException:
                pass

        if founddevice_id is None:
            LOG.debug("Volume %(volume_name)s not found on the array.",
                      {'volume_name': volume_name})
        else:
            LOG.debug("Volume name: %(volume_name)s Volume device id: "
                      "%(founddevice_id)s.",
                      {'volume_name': volume_name,
                       'founddevice_id': founddevice_id})

        return founddevice_id
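
    # For reference, the provider_location parsed above is a stringified
    # dict; current volumes carry a 'device_id' key while legacy volumes
    # carry a 'keybindings' dict with a 'DeviceID' entry, e.g. (placeholder
    # values):
    #     "{'array': '000197800123', 'device_id': '0001F'}"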

    def find_host_lun_id(self, volume, host, extra_specs,
                         rep_extra_specs=None):
        """Given the volume dict find the host lun id for a volume.

        :param volume: the volume dict
        :param host: host from connector (can be None on a force-detach)
        :param extra_specs: the extra specs
        :param rep_extra_specs: rep extra specs, passed in if metro device
        :returns: dict -- the data dict
        """
        maskedvols = {}
        is_live_migration = False
        volume_name = volume.name
        device_id = self._find_device_on_array(volume, extra_specs)
        if rep_extra_specs is not None:
            device_id = self.get_remote_target_device(
                extra_specs[utils.ARRAY], volume, device_id)[0]
            extra_specs = rep_extra_specs
        host_name = self.utils.get_host_short_name(host) if host else None
        if device_id:
            array = extra_specs[utils.ARRAY]
            source_storage_group_list = (
                self.rest.get_storage_groups_from_volume(array, device_id))
            # return only masking views for this host
            maskingviews = self._get_masking_views_from_volume(
                array, device_id, host_name, source_storage_group_list)

            for maskingview in maskingviews:
                host_lun_id = self.rest.find_mv_connections_for_vol(
                    array, maskingview, device_id)
                if host_lun_id is not None:
                    devicedict = {'hostlunid': host_lun_id,
                                  'maskingview': maskingview,
                                  'array': array,
                                  'device_id': device_id}
                    maskedvols = devicedict
            if not maskedvols:
                LOG.debug(
                    "Host lun id not found for volume: %(volume_name)s "
                    "with the device id: %(device_id)s.",
                    {'volume_name': volume_name,
                     'device_id': device_id})
            else:
                LOG.debug("Device info: %(maskedvols)s.",
                          {'maskedvols': maskedvols})
                if host:
                    hoststr = ("-%(host)s-" % {'host': host_name})

                    if (hoststr.lower()
                            not in maskedvols['maskingview'].lower()):
                        LOG.debug("Volume is masked but not to host %(host)s "
                                  "as is expected. Assuming live migration.",
                                  {'host': host})
                        is_live_migration = True
                    else:
                        for storage_group in source_storage_group_list:
                            if 'NONFAST' in storage_group:
                                is_live_migration = True
                                break
        else:
            exception_message = (_("Cannot retrieve volume %(vol)s "
                                   "from the array.") % {'vol': volume_name})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(exception_message)

        return maskedvols, is_live_migration, source_storage_group_list

    def get_masking_views_from_volume(self, array, volume, device_id, host):
        """Get all masking views from a volume.

        :param array: array serial number
        :param volume: the volume object
        :param device_id: the volume device id
        :param host: the host
        :return: masking view list, is metro
        """
        is_metro = False
        extra_specs = self._initial_setup(volume)
        mv_list = self._get_masking_views_from_volume(array, device_id, host)
        if self.utils.is_metro_device(self.rep_config, extra_specs):
            is_metro = True
        return mv_list, is_metro

    def _get_masking_views_from_volume(self, array, device_id, host,
                                       storage_group_list=None):
        """Helper function to retrieve masking view list for a volume.

        :param array: array serial number
        :param device_id: the volume device id
        :param host: the host
        :param storage_group_list: the storage group list to use
        :returns: masking view list
        """
        LOG.debug("Getting masking views from volume")
        maskingview_list = []
        host_compare = False
        if not storage_group_list:
            storage_group_list = self.rest.get_storage_groups_from_volume(
                array, device_id)
            host_compare = True if host else False
        for sg in storage_group_list:
            mvs = self.rest.get_masking_views_from_storage_group(
                array, sg)
            for mv in mvs:
                if host_compare:
                    if host.lower() in mv.lower():
                        maskingview_list.append(mv)
                else:
                    maskingview_list.append(mv)
        return maskingview_list
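
    # For reference, host matching above is a case-insensitive substring
    # check, so a masking view such as 'OS-myShortHost-I-os-pg1-pg-MV'
    # matches host 'myShortHost' (names illustrative only).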

    def _register_config_file_from_config_group(self, config_group_name):
        """Given the config group name register the file.

        :param config_group_name: the config group name
        :returns: string -- configurationFile - name of the configuration file
        :raises: VolumeBackendAPIException:
        """
        if config_group_name is None:
            return CINDER_EMC_CONFIG_FILE
        if hasattr(self.configuration, 'cinder_dell_emc_config_file'):
            config_file = self.configuration.cinder_dell_emc_config_file
        else:
            config_file = (
                ("%(prefix)s%(configGroupName)s%(postfix)s"
                 % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
                    'configGroupName': config_group_name,
                    'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))

        # The file saved in self.configuration may not be the correct one,
        # double check.
        if config_group_name not in config_file:
            config_file = (
                ("%(prefix)s%(configGroupName)s%(postfix)s"
                 % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
                    'configGroupName': config_group_name,
                    'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))

        if os.path.isfile(config_file):
            LOG.debug("Configuration file : %(configurationFile)s exists.",
                      {'configurationFile': config_file})
        else:
            exception_message = (_(
                "Configuration file %(configurationFile)s does not exist.")
                % {'configurationFile': config_file})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        return config_file

    def _initial_setup(self, volume, volume_type_id=None):
        """Necessary setup to accumulate the relevant information.

        The volume object has a host in which we can parse the
        config group name. The config group name is the key to our EMC
        configuration file. The emc configuration file contains srp name
        and array name which are mandatory fields.
        :param volume: the volume object
        :param volume_type_id: optional override of volume.volume_type_id
        :returns: dict -- extra spec dict
        :raises: VolumeBackendAPIException:
        """
        try:
            array_info = self.get_attributes_from_cinder_config()
            if array_info:
                extra_specs, config_file, qos_specs = (
                    self._set_config_file_and_get_extra_specs(
                        volume, volume_type_id, register_config_file=False))
            else:
                extra_specs, config_file, qos_specs = (
                    self._set_config_file_and_get_extra_specs(
                        volume, volume_type_id))
                array_info = self.utils.parse_file_to_get_array_map(
                    self.pool_info['config_file'])
            if not array_info:
                exception_message = (_(
                    "Unable to get corresponding record for srp."))
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

            self.rest.set_rest_credentials(array_info)

            extra_specs = self._set_vmax_extra_specs(extra_specs, array_info)
            if qos_specs and qos_specs.get('consumer') != "front-end":
                extra_specs['qos'] = qos_specs.get('specs')
        except Exception:
            exception_message = (_(
                "Unable to get configuration information necessary to "
                "create a volume: %(errorMessage)s.")
                % {'errorMessage': sys.exc_info()[1]})
            raise exception.VolumeBackendAPIException(data=exception_message)
        return extra_specs
") % {'vol': volume_name}) 1294 LOG.exception(exception_message) 1295 raise exception.VolumeBackendAPIException(exception_message) 1296 1297 host_name = connector['host'] 1298 unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12) 1299 protocol = self.utils.get_short_protocol_type(self.protocol) 1300 short_host_name = self.utils.get_host_short_name(host_name) 1301 masking_view_dict[utils.DISABLECOMPRESSION] = False 1302 masking_view_dict['replication_enabled'] = False 1303 slo = extra_specs[utils.SLO] 1304 workload = extra_specs[utils.WORKLOAD] 1305 rep_enabled = self.utils.is_replication_enabled(extra_specs) 1306 short_pg_name = self.utils.get_pg_short_name( 1307 extra_specs[utils.PORTGROUPNAME]) 1308 masking_view_dict[utils.SLO] = slo 1309 masking_view_dict[utils.WORKLOAD] = 'NONE' if self.nextGen else ( 1310 extra_specs[utils.WORKLOAD]) 1311 masking_view_dict[utils.SRP] = unique_name 1312 masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY] 1313 masking_view_dict[utils.PORTGROUPNAME] = ( 1314 extra_specs[utils.PORTGROUPNAME]) 1315 if self._get_initiator_check_flag(): 1316 masking_view_dict[utils.INITIATOR_CHECK] = True 1317 else: 1318 masking_view_dict[utils.INITIATOR_CHECK] = False 1319 1320 if slo: 1321 slo_wl_combo = self.utils.truncate_string(slo + workload, 10) 1322 child_sg_name = ( 1323 "OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s" 1324 % {'shortHostName': short_host_name, 1325 'srpName': unique_name, 1326 'combo': slo_wl_combo, 1327 'pg': short_pg_name}) 1328 do_disable_compression = self.utils.is_compression_disabled( 1329 extra_specs) 1330 if do_disable_compression: 1331 child_sg_name = ("%(child_sg_name)s-CD" 1332 % {'child_sg_name': child_sg_name}) 1333 masking_view_dict[utils.DISABLECOMPRESSION] = True 1334 else: 1335 child_sg_name = ( 1336 "OS-%(shortHostName)s-No_SLO-%(pg)s" 1337 % {'shortHostName': short_host_name, 1338 'pg': short_pg_name}) 1339 if rep_enabled: 1340 rep_mode = extra_specs.get(utils.REP_MODE, None) 1341 child_sg_name += self.utils.get_replication_prefix(rep_mode) 1342 masking_view_dict['replication_enabled'] = True 1343 mv_prefix = ( 1344 "OS-%(shortHostName)s-%(protocol)s-%(pg)s" 1345 % {'shortHostName': short_host_name, 1346 'protocol': protocol, 'pg': short_pg_name}) 1347 1348 masking_view_dict[utils.SG_NAME] = child_sg_name 1349 1350 masking_view_dict[utils.MV_NAME] = ("%(prefix)s-MV" 1351 % {'prefix': mv_prefix}) 1352 1353 masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG" 1354 % {'prefix': mv_prefix}) 1355 1356 masking_view_dict[utils.IG_NAME] = ( 1357 ("OS-%(shortHostName)s-%(protocol)s-IG" 1358 % {'shortHostName': short_host_name, 1359 'protocol': protocol})) 1360 masking_view_dict[utils.CONNECTOR] = connector 1361 masking_view_dict[utils.DEVICE_ID] = device_id 1362 masking_view_dict[utils.VOL_NAME] = volume_name 1363 1364 return masking_view_dict 1365 1366 def _create_cloned_volume( 1367 self, volume, source_volume, extra_specs, is_snapshot=False, 1368 from_snapvx=False): 1369 """Create a clone volume from the source volume. 

    def _create_cloned_volume(
            self, volume, source_volume, extra_specs, is_snapshot=False,
            from_snapvx=False):
        """Create a clone volume from the source volume.

        :param volume: clone volume
        :param source_volume: source of the clone volume
        :param extra_specs: extra specs
        :param is_snapshot: boolean -- Defaults to False
        :param from_snapvx: bool -- Defaults to False
        :returns: dict -- cloneDict the cloned volume dictionary
        :raises: VolumeBackendAPIException:
        """
        clone_name = volume.name
        snap_name = None
        LOG.info("Create a replica from Volume: Clone Volume: %(clone_name)s "
                 "from Source Volume: %(source_name)s.",
                 {'clone_name': clone_name,
                  'source_name': source_volume.name})

        array = extra_specs[utils.ARRAY]
        is_clone_license = self.rest.is_snapvx_licensed(array)
        if from_snapvx:
            source_device_id, snap_name = self._parse_snap_info(
                array, source_volume)
        else:
            source_device_id = self._find_device_on_array(
                source_volume, extra_specs)

        if not is_clone_license:
            exception_message = (_(
                "SnapVx feature is not licensed on %(array)s.")
                % {'array': array})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        # Check if source is currently a snap target. Wait for sync if true.
        self._sync_check(array, source_device_id, source_volume.name,
                         extra_specs, tgt_only=True)

        if not is_snapshot:
            clone_dict = self._create_replica(
                array, volume, source_device_id, extra_specs,
                snap_name=snap_name)
        else:
            clone_dict = self._create_snapshot(
                array, volume, source_device_id, extra_specs)

        LOG.debug("Leaving _create_cloned_volume: Volume: "
                  "%(clone_name)s Source Device Id: %(source_name)s ",
                  {'clone_name': clone_name,
                   'source_name': source_device_id})

        return clone_dict

    def _parse_snap_info(self, array, snapshot):
        """Given a snapshot object, parse the provider_location.

        :param array: the array serial number
        :param snapshot: the snapshot object
        :returns: sourcedevice_id, foundsnap_name
        """
        foundsnap_name = None
        sourcedevice_id = None
        volume_name = snapshot.id

        loc = snapshot.provider_location

        if isinstance(loc, six.string_types):
            name = ast.literal_eval(loc)
            try:
                sourcedevice_id = name['source_id']
                snap_name = name['snap_name']
            except KeyError:
                LOG.info("Error retrieving snapshot details. Assuming "
                         "legacy structure of snapshot...")
                return None, None
            # Ensure the snapvx is on the array.
            try:
                snap_details = self.rest.get_volume_snap(
                    array, sourcedevice_id, snap_name)
                if snap_details:
                    foundsnap_name = snap_name
            except Exception as e:
                LOG.info("Exception in retrieving snapshot: %(e)s.",
                         {'e': e})
                foundsnap_name = None

        if foundsnap_name is None or sourcedevice_id is None:
            exception_message = (_("Error retrieving snapshot details. "
                                   "Snapshot name: %(snap)s") %
                                 {'snap': volume_name})
            LOG.error(exception_message)

        else:
            LOG.debug("Source volume: %(volume_name)s Snap name: "
                      "%(foundsnap_name)s.",
                      {'volume_name': sourcedevice_id,
                       'foundsnap_name': foundsnap_name})

        return sourcedevice_id, foundsnap_name
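
    # For reference, a snapshot's provider_location is the stringified
    # snap_dict built by _create_snapshot below, e.g. (placeholder values):
    #     "{'snap_name': 'OS-mySnap', 'source_id': '0001F'}"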

    def _create_snapshot(self, array, snapshot,
                         source_device_id, extra_specs):
        """Create a snapVx of a volume.

        :param array: the array serial number
        :param snapshot: the snapshot object
        :param source_device_id: the source device id
        :param extra_specs: the extra specifications
        :returns: snap_dict
        """
        clone_name = self.utils.get_volume_element_name(snapshot.id)
        snap_name = self.utils.truncate_string(clone_name, 19)
        try:
            self.provision.create_volume_snapvx(array, source_device_id,
                                                snap_name, extra_specs)
        except Exception as e:
            exception_message = (_("Error creating snapVx of %(vol)s. "
                                   "Exception received: %(e)s.")
                                 % {'vol': source_device_id,
                                    'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)
        snap_dict = {'snap_name': snap_name, 'source_id': source_device_id}
        return snap_dict

    def _delete_volume(self, volume):
        """Helper function to delete the specified volume.

        Pass in the host if it is a snapshot.
        :param volume: volume object to be deleted
        :returns: volume_name (string vol name)
        """
        volume_name = volume.name
        extra_specs = self._initial_setup(volume)

        device_id = self._find_device_on_array(volume, extra_specs)
        if device_id is None:
            LOG.error("Volume %(name)s not found on the array. "
                      "No volume to delete.",
                      {'name': volume_name})
            return volume_name

        array = extra_specs[utils.ARRAY]
        # Check if the volume is a snap source
        self._sync_check(array, device_id, volume_name, extra_specs)
        # Remove from any storage groups and clean up replication
        self._remove_vol_and_cleanup_replication(
            array, device_id, volume_name, extra_specs, volume)
        self._delete_from_srp(
            array, device_id, volume_name, extra_specs)
        return volume_name
" 1541 "Examine previous error statement for valid values.") 1542 % {'slo': extra_specs[utils.SLO], 1543 'workload': extra_specs[utils.WORKLOAD]}) 1544 LOG.error(exception_message) 1545 raise exception.VolumeBackendAPIException(data=exception_message) 1546 1547 LOG.debug("Create Volume: %(volume)s Srp: %(srp)s " 1548 "Array: %(array)s " 1549 "Size: %(size)lu.", 1550 {'volume': volume_name, 1551 'srp': extra_specs[utils.SRP], 1552 'array': array, 1553 'size': volume_size}) 1554 1555 do_disable_compression = self.utils.is_compression_disabled( 1556 extra_specs) 1557 1558 storagegroup_name = self.masking.get_or_create_default_storage_group( 1559 array, extra_specs[utils.SRP], extra_specs[utils.SLO], 1560 extra_specs[utils.WORKLOAD], extra_specs, 1561 do_disable_compression) 1562 try: 1563 volume_dict = self.provision.create_volume_from_sg( 1564 array, volume_name, storagegroup_name, 1565 volume_size, extra_specs) 1566 except Exception: 1567 # if the volume create fails, check if the 1568 # storage group needs to be cleaned up 1569 LOG.error("Create volume failed. Checking if " 1570 "storage group cleanup necessary...") 1571 num_vol_in_sg = self.rest.get_num_vols_in_sg( 1572 array, storagegroup_name) 1573 1574 if num_vol_in_sg == 0: 1575 LOG.debug("There are no volumes in the storage group " 1576 "%(sg_id)s. Deleting storage group.", 1577 {'sg_id': storagegroup_name}) 1578 self.rest.delete_storage_group( 1579 array, storagegroup_name) 1580 raise 1581 1582 return volume_dict 1583 1584 def _set_vmax_extra_specs(self, extra_specs, pool_record): 1585 """Set the VMAX extra specs. 1586 1587 The pool_name extra spec must be set, otherwise a default slo/workload 1588 will be chosen. The portgroup can either be passed as an extra spec 1589 on the volume type (e.g. 'storagetype:portgroupname = os-pg1-pg'), or 1590 can be chosen from a list provided in the xml file, e.g.: 1591 <PortGroups> 1592 <PortGroup>OS-PORTGROUP1-PG</PortGroup> 1593 <PortGroup>OS-PORTGROUP2-PG</PortGroup> 1594 </PortGroups>. 1595 1596 :param extra_specs: extra specifications 1597 :param pool_record: pool record 1598 :returns: dict -- the extra specifications dictionary 1599 """ 1600 # set extra_specs from pool_record 1601 extra_specs[utils.SRP] = pool_record['srpName'] 1602 extra_specs[utils.ARRAY] = pool_record['SerialNumber'] 1603 if not extra_specs.get(utils.PORTGROUPNAME): 1604 extra_specs[utils.PORTGROUPNAME] = pool_record['PortGroup'] 1605 if not extra_specs[utils.PORTGROUPNAME]: 1606 error_message = (_("Port group name has not been provided - " 1607 "please configure the " 1608 "'storagetype:portgroupname' extra spec on " 1609 "the volume type, or enter a list of " 1610 "portgroups to the xml file associated with " 1611 "this backend e.g." 
1612 "<PortGroups>" 1613 " <PortGroup>OS-PORTGROUP1-PG</PortGroup>" 1614 " <PortGroup>OS-PORTGROUP2-PG</PortGroup>" 1615 "</PortGroups>.")) 1616 LOG.exception(error_message) 1617 raise exception.VolumeBackendAPIException(data=error_message) 1618 1619 extra_specs[utils.INTERVAL] = self.interval 1620 LOG.debug("The interval is set at: %(intervalInSecs)s.", 1621 {'intervalInSecs': self.interval}) 1622 extra_specs[utils.RETRIES] = self.retries 1623 LOG.debug("Retries are set at: %(retries)s.", 1624 {'retries': self.retries}) 1625 1626 # Set pool_name slo and workload 1627 if 'pool_name' in extra_specs: 1628 pool_name = extra_specs['pool_name'] 1629 pool_details = pool_name.split('+') 1630 slo_from_extra_spec = pool_details[0] 1631 workload_from_extra_spec = pool_details[1] 1632 # Check if legacy pool chosen 1633 if (workload_from_extra_spec == pool_record['srpName'] or 1634 self.nextGen): 1635 workload_from_extra_spec = 'NONE' 1636 1637 elif pool_record.get('ServiceLevel'): 1638 slo_from_extra_spec = pool_record['ServiceLevel'] 1639 workload_from_extra_spec = pool_record.get('Workload', 'None') 1640 # If workload is None in cinder.conf, convert to string 1641 if not workload_from_extra_spec or self.nextGen: 1642 workload_from_extra_spec = 'NONE' 1643 LOG.info("Pool_name is not present in the extra_specs " 1644 "- using slo/ workload from xml file: %(slo)s/%(wl)s.", 1645 {'slo': slo_from_extra_spec, 1646 'wl': workload_from_extra_spec}) 1647 1648 else: 1649 slo_list = self.rest.get_slo_list(pool_record['SerialNumber']) 1650 if 'Optimized' in slo_list: 1651 slo_from_extra_spec = 'Optimized' 1652 elif 'Diamond' in slo_list: 1653 slo_from_extra_spec = 'Diamond' 1654 else: 1655 slo_from_extra_spec = 'None' 1656 workload_from_extra_spec = 'NONE' 1657 LOG.warning("Pool_name is not present in the extra_specs" 1658 "and no slo/ workload information is present " 1659 "in the xml file - using default slo/ workload " 1660 "combination: %(slo)s/%(wl)s.", 1661 {'slo': slo_from_extra_spec, 1662 'wl': workload_from_extra_spec}) 1663 # Standardize slo and workload 'NONE' naming conventions 1664 if workload_from_extra_spec.lower() == 'none': 1665 workload_from_extra_spec = 'NONE' 1666 if slo_from_extra_spec.lower() == 'none': 1667 slo_from_extra_spec = None 1668 extra_specs[utils.SLO] = slo_from_extra_spec 1669 extra_specs[utils.WORKLOAD] = workload_from_extra_spec 1670 if self.rest.is_compression_capable(extra_specs[utils.ARRAY]): 1671 if extra_specs.get(utils.DISABLECOMPRESSION): 1672 # If not True remove it. 1673 if not strutils.bool_from_string( 1674 extra_specs[utils.DISABLECOMPRESSION]): 1675 extra_specs.pop(utils.DISABLECOMPRESSION, None) 1676 else: 1677 extra_specs.pop(utils.DISABLECOMPRESSION, None) 1678 1679 LOG.debug("SRP is: %(srp)s, Array is: %(array)s " 1680 "SLO is: %(slo)s, Workload is: %(workload)s.", 1681 {'srp': extra_specs[utils.SRP], 1682 'array': extra_specs[utils.ARRAY], 1683 'slo': extra_specs[utils.SLO], 1684 'workload': extra_specs[utils.WORKLOAD]}) 1685 return extra_specs 1686 1687 def _delete_from_srp(self, array, device_id, volume_name, 1688 extra_specs): 1689 """Delete from srp. 1690 1691 :param array: the array serial number 1692 :param device_id: the device id 1693 :param volume_name: the volume name 1694 :param extra_specs: the extra specifications 1695 :raises: VolumeBackendAPIException: 1696 """ 1697 try: 1698 LOG.debug("Delete Volume: %(name)s. 

    def _delete_from_srp(self, array, device_id, volume_name,
                         extra_specs):
        """Delete from srp.

        :param array: the array serial number
        :param device_id: the device id
        :param volume_name: the volume name
        :param extra_specs: the extra specifications
        :raises: VolumeBackendAPIException:
        """
        try:
            LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.",
                      {'name': volume_name, 'device_id': device_id})
            self.provision.delete_volume_from_srp(
                array, device_id, volume_name)
        except Exception as e:
            # If we cannot successfully delete the volume, then we want to
            # return the volume to the default storage group,
            # which should be the SG it previously belonged to.
            self.masking.add_volume_to_default_storage_group(
                array, device_id, volume_name, extra_specs)

            error_message = (_("Failed to delete volume %(volume_name)s. "
                               "Exception received: %(e)s") %
                             {'volume_name': volume_name,
                              'e': six.text_type(e)})
            LOG.exception(error_message)
            raise exception.VolumeBackendAPIException(data=error_message)

    def _remove_vol_and_cleanup_replication(
            self, array, device_id, volume_name, extra_specs, volume):
        """Remove a volume from its storage groups and cleanup replication.

        :param array: the array serial number
        :param device_id: the device id
        :param volume_name: the volume name
        :param extra_specs: the extra specifications
        :param volume: the volume object
        """
        # Cleanup remote replication
        if self.utils.is_replication_enabled(extra_specs):
            self.cleanup_lun_replication(volume, volume_name,
                                         device_id, extra_specs)
        # Remove from any storage groups
        self.masking.remove_and_reset_members(
            array, volume, device_id, volume_name, extra_specs, False)

    def get_target_wwns_from_masking_view(
            self, volume, connector):
        """Find target WWNs via the masking view.

        :param volume: volume to be attached
        :param connector: the connector dict
        :returns: list -- the target WWN list
        """
        metro_wwns = []
        host = connector['host']
        short_host_name = self.utils.get_host_short_name(host)
        extra_specs = self._initial_setup(volume)
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, self.rep_config)
        if self.utils.is_volume_failed_over(volume):
            extra_specs = rep_extra_specs
        device_id = self._find_device_on_array(volume, extra_specs)
        target_wwns = self._get_target_wwns_from_masking_view(
            device_id, short_host_name, extra_specs)
        if self.utils.is_metro_device(self.rep_config, extra_specs):
            remote_device_id = self.get_remote_target_device(
                extra_specs[utils.ARRAY], volume, device_id)[0]
            metro_wwns = self._get_target_wwns_from_masking_view(
                remote_device_id, short_host_name, rep_extra_specs)
        return target_wwns, metro_wwns
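
    # The helper below collects WWNs for a single masking view; for
    # SRDF/Metro devices the caller above gathers them from both the local
    # and remote arrays so the connector can build paths to each.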

    def _get_target_wwns_from_masking_view(
            self, device_id, short_host_name, extra_specs):
        """Helper function to get wwns from a masking view.

        :param device_id: the device id
        :param short_host_name: the short host name
        :param extra_specs: the extra specs
        :return: target wwns -- list
        """
        target_wwns = []
        array = extra_specs[utils.ARRAY]
        masking_view_list = self._get_masking_views_from_volume(
            array, device_id, short_host_name)
        if masking_view_list:
            portgroup = self.get_port_group_from_masking_view(
                array, masking_view_list[0])
            target_wwns = self.rest.get_target_wwns(array, portgroup)
            LOG.info("Target wwns in masking view %(maskingView)s: "
                     "%(targetWwns)s.",
                     {'maskingView': masking_view_list[0],
                      'targetWwns': target_wwns})
        return target_wwns

    def get_port_group_from_masking_view(self, array, maskingview_name):
        """Get the port groups in a masking view.

        :param array: the array serial number
        :param maskingview_name: masking view name
        :returns: port group name
        """
        return self.rest.get_element_from_masking_view(
            array, maskingview_name, portgroup=True)

    def get_initiator_group_from_masking_view(self, array, maskingview_name):
        """Get the initiator group in a masking view.

        :param array: the array serial number
        :param maskingview_name: masking view name
        :returns: initiator group name
        """
        return self.rest.get_element_from_masking_view(
            array, maskingview_name, host=True)

    def get_common_masking_views(self, array, portgroup_name,
                                 initiator_group_name):
        """Get common masking views, if any.

        :param array: the array serial number
        :param portgroup_name: port group name
        :param initiator_group_name: ig name
        :returns: list of masking views
        """
        LOG.debug("Finding Masking Views for port group %(pg)s and %(ig)s.",
                  {'pg': portgroup_name, 'ig': initiator_group_name})
        masking_view_list = self.rest.get_common_masking_views(
            array, portgroup_name, initiator_group_name)
        return masking_view_list

    def _get_ip_and_iqn(self, array, port):
        """Get ip and iqn from the director port.

        :param array: the array serial number
        :param port: the director port on the array
        :returns: ip_and_iqn - dict
        """
        ip_iqn_list = []
        ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn(
            array, port)
        for ip in ip_addresses:
            ip_iqn_list.append({'iqn': iqn, 'ip': ip})
        return ip_iqn_list

    def _find_ip_and_iqns(self, array, port_group_name):
        """Find the list of ips and iqns for the ports in a portgroup.

        :param array: the array serial number
        :param port_group_name: the portgroup name
        :returns: ip_and_iqn - list of dicts
        """
        ips_and_iqns = []
        LOG.debug("The portgroup name for iscsiadm is %(pg)s",
                  {'pg': port_group_name})
        ports = self.rest.get_port_ids(array, port_group_name)
        for port in ports:
            ip_and_iqn = self._get_ip_and_iqn(array, port)
            ips_and_iqns.extend(ip_and_iqn)
        return ips_and_iqns
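
    # Each port in the portgroup contributes one dict per IP address to the
    # list built above, shaped like (illustrative values only):
    #     {'iqn': 'iqn.1992-04.com.emc:...', 'ip': '10.0.0.3'}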

    def _create_replica(
            self, array, clone_volume, source_device_id,
            extra_specs, snap_name=None):
        """Create a replica.

        Create replica for source volume, source can be volume or snapshot.

        :param array: the array serial number
        :param clone_volume: the clone volume object
        :param source_device_id: the device ID of the volume
        :param extra_specs: extra specifications
        :param snap_name: the snapshot name - optional
        :returns: int -- return code
        :returns: dict -- cloneDict
        """
        target_device_id = None
        clone_id = clone_volume.id
        clone_name = self.utils.get_volume_element_name(clone_id)
        create_snap = False
        # VMAX supports using a target volume that is bigger than
        # the source volume, so we create the target volume the desired
        # size at this point to avoid having to extend later
        try:
            clone_dict = self._create_volume(
                clone_name, clone_volume.size, extra_specs)
            target_device_id = clone_dict['device_id']
            LOG.info("The target device id is: %(device_id)s.",
                     {'device_id': target_device_id})
            if not snap_name:
                snap_name = self.utils.get_temp_snap_name(
                    clone_name, source_device_id)
                create_snap = True
            self.provision.create_volume_replica(
                array, source_device_id, target_device_id,
                snap_name, extra_specs, create_snap)
        except Exception as e:
            if target_device_id:
                LOG.warning("Create replica failed. Cleaning up the target "
                            "volume. Clone name: %(cloneName)s, Error "
                            "received is %(e)s.",
                            {'cloneName': clone_name, 'e': e})
                self._cleanup_target(
                    array, target_device_id, source_device_id,
                    clone_name, snap_name, extra_specs)
                # Re-throw the exception.
            raise
        return clone_dict

    def _cleanup_target(
            self, array, target_device_id, source_device_id,
            clone_name, snap_name, extra_specs):
        """Cleanup target volume on failed clone/ snapshot creation.

        :param array: the array serial number
        :param target_device_id: the target device ID
        :param source_device_id: the source device ID
        :param clone_name: the name of the clone volume
        :param snap_name: the snapshot name
        :param extra_specs: the extra specifications
        """
        snap_session = self.rest.get_sync_session(
            array, source_device_id, snap_name, target_device_id)
        if snap_session:
            self.provision.break_replication_relationship(
                array, target_device_id, source_device_id,
                snap_name, extra_specs)
        self._delete_from_srp(
            array, target_device_id, clone_name, extra_specs)
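
    # A device cannot be deleted or re-linked while it is still part of a
    # SnapVX session; _sync_check below unlinks (and, for temporary
    # snapshots, deletes) any such sessions first.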
Source: " 1943 "%(volume)s, Target: %(target)s.", 1944 {'volume': volume_name, 'target': target}) 1945 self.provision.break_replication_relationship( 1946 array, target, source, snap_name, extra_specs) 1947 # The snapshot name will only have 'temp' (or EMC_SMI for 1948 # legacy volumes) if it is a temporary volume. 1949 # Only then is it a candidate for deletion. 1950 if 'temp' in snap_name or 'EMC_SMI' in snap_name: 1951 @coordination.synchronized("emc-source-{source}") 1952 def do_delete_temp_volume_snap(source): 1953 self.provision.delete_temp_volume_snap( 1954 array, snap_name, source) 1955 do_delete_temp_volume_snap(source) 1956 1957 def manage_existing(self, volume, external_ref): 1958 """Manages an existing VMAX Volume (import to Cinder). 1959 1960 Renames the existing volume to match the expected name for the volume. 1961 Also need to consider things like QoS, Emulation, account/tenant. 1962 :param volume: the volume object including the volume_type_id 1963 :param external_ref: reference to the existing volume 1964 :returns: dict -- model_update 1965 """ 1966 LOG.info("Beginning manage existing volume process") 1967 array, device_id = self.utils.get_array_and_device_id( 1968 volume, external_ref) 1969 volume_id = volume.id 1970 # Check if the existing volume is valid for cinder management 1971 self._check_lun_valid_for_cinder_management( 1972 array, device_id, volume_id, external_ref) 1973 extra_specs = self._initial_setup(volume) 1974 1975 volume_name = self.utils.get_volume_element_name(volume_id) 1976 # Rename the volume 1977 LOG.debug("Rename volume %(vol)s to %(element_name)s.", 1978 {'vol': volume_id, 1979 'element_name': volume_name}) 1980 self.rest.rename_volume(array, device_id, volume_name) 1981 provider_location = {'device_id': device_id, 'array': array} 1982 model_update = {'provider_location': six.text_type(provider_location)} 1983 1984 # Set-up volume replication, if enabled 1985 if self.utils.is_replication_enabled(extra_specs): 1986 rep_update = self._replicate_volume(volume, volume_name, 1987 provider_location, 1988 extra_specs, delete_src=False) 1989 model_update.update(rep_update) 1990 1991 else: 1992 # Add volume to default storage group 1993 self.masking.add_volume_to_default_storage_group( 1994 array, device_id, volume_name, extra_specs) 1995 1996 return model_update 1997 1998 def _check_lun_valid_for_cinder_management( 1999 self, array, device_id, volume_id, external_ref): 2000 """Check if a volume is valid for cinder management. 2001 2002 :param array: the array serial number 2003 :param device_id: the device id 2004 :param volume_id: the cinder volume id 2005 :param external_ref: the external reference 2006 :raises: ManageExistingInvalidReference, ManageExistingAlreadyManaged: 2007 """ 2008 # Ensure the volume exists on the array 2009 volume_details = self.rest.get_volume(array, device_id) 2010 if not volume_details: 2011 msg = (_('Unable to retrieve volume details from array for ' 2012 'device %(device_id)s') % {'device_id': device_id}) 2013 raise exception.ManageExistingInvalidReference( 2014 existing_ref=external_ref, reason=msg) 2015 2016 # Check if volume is already cinder managed 2017 if volume_details.get('volume_identifier'): 2018 volume_identifier = volume_details['volume_identifier'] 2019 if volume_identifier.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX): 2020 raise exception.ManageExistingAlreadyManaged( 2021 volume_ref=volume_id) 2022 2023 # Check if the volume is attached by checking if in any masking view. 

    def _check_lun_valid_for_cinder_management(
            self, array, device_id, volume_id, external_ref):
        """Check if a volume is valid for cinder management.

        :param array: the array serial number
        :param device_id: the device id
        :param volume_id: the cinder volume id
        :param external_ref: the external reference
        :raises: ManageExistingInvalidReference, ManageExistingAlreadyManaged:
        """
        # Ensure the volume exists on the array
        volume_details = self.rest.get_volume(array, device_id)
        if not volume_details:
            msg = (_('Unable to retrieve volume details from array for '
                     'device %(device_id)s') % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        # Check if volume is already cinder managed
        if volume_details.get('volume_identifier'):
            volume_identifier = volume_details['volume_identifier']
            if volume_identifier.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX):
                raise exception.ManageExistingAlreadyManaged(
                    volume_ref=volume_id)

        # Check if the volume is attached by checking if in any masking view.
        storagegrouplist = self.rest.get_storage_groups_from_volume(
            array, device_id)
        for sg in storagegrouplist:
            mvs = self.rest.get_masking_views_from_storage_group(
                array, sg)
            if mvs:
                msg = (_("Unable to import volume %(device_id)s to cinder. "
                         "Volume is in masking view(s): %(mv)s.")
                       % {'device_id': device_id, 'mv': mvs})
                raise exception.ManageExistingInvalidReference(
                    existing_ref=external_ref, reason=msg)

        # Check if there are any replication sessions associated
        # with the volume.
        snapvx_tgt, __, rdf = self.rest.is_vol_in_rep_session(
            array, device_id)
        if snapvx_tgt or rdf:
            msg = (_("Unable to import volume %(device_id)s to cinder. "
                     "It is part of a replication session.")
                   % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

    def manage_existing_get_size(self, volume, external_ref):
        """Return size of an existing VMAX volume to manage_existing.

        :param volume: the volume object including the volume_type_id
        :param external_ref: reference to the existing volume
        :returns: size of the volume in GB
        """
        LOG.debug("Volume in manage_existing_get_size: %(volume)s.",
                  {'volume': volume})
        array, device_id = self.utils.get_array_and_device_id(
            volume, external_ref)
        # Ensure the volume exists on the array
        volume_details = self.rest.get_volume(array, device_id)
        if not volume_details:
            msg = (_('Unable to retrieve volume details from array for '
                     'device %(device_id)s') % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        size = float(self.rest.get_size_of_device_on_array(array, device_id))
        if not size.is_integer():
            exception_message = (
                _("Cannot manage existing VMAX volume %(device_id)s "
                  "- it has a size of %(vol_size)s but only whole GB "
                  "sizes are supported. Please extend the "
                  "volume to the nearest GB value before importing.")
                % {'device_id': device_id, 'vol_size': size, })
            LOG.exception(exception_message)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=exception_message)

        LOG.debug("Size of volume %(device_id)s is %(vol_size)s GB.",
                  {'device_id': device_id, 'vol_size': int(size)})
        return int(size)
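
    # Unmanaging (below) is deliberately non-destructive: the device is only
    # renamed (dropping the 'OS-' prefix) and removed from its openstack
    # storage groups; no data is removed from the array.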

    def unmanage(self, volume):
        """Export VMAX volume from Cinder.

        Leave the volume intact on the backend array.

        :param volume: the volume object
        """
        volume_name = volume.name
        volume_id = volume.id
        LOG.info("Unmanage volume %(name)s, id=%(id)s",
                 {'name': volume_name, 'id': volume_id})
        extra_specs = self._initial_setup(volume)
        device_id = self._find_device_on_array(volume, extra_specs)
        if device_id is None:
            LOG.error("Cannot find Volume: %(id)s for "
                      "unmanage operation. Exiting...",
                      {'id': volume_id})
        else:
            # Check if volume is snap source
            self._sync_check(extra_specs['array'], device_id,
                             volume_name, extra_specs)
            # Remove volume from any openstack storage groups
            # and remove any replication
            self._remove_vol_and_cleanup_replication(
                extra_specs['array'], device_id,
                volume_name, extra_specs, volume)
            # Rename the volume to volumeId, thus remove the 'OS-' prefix.
            self.rest.rename_volume(
                extra_specs[utils.ARRAY], device_id, volume_id)

    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Manage an existing VMAX Snapshot (import to Cinder).

        Renames the Snapshot to prefix it with OS- to indicate
        it is managed by Cinder.

        :param snapshot: the snapshot object
        :param existing_ref: the snapshot name on the backend VMAX
        :raises: VolumeBackendAPIException
        :returns: model update
        """
        volume = snapshot.volume
        extra_specs = self._initial_setup(volume)
        array = extra_specs[utils.ARRAY]
        device_id = self._find_device_on_array(volume, extra_specs)

        try:
            snap_name = existing_ref['source-name']
        except KeyError:
            snap_name = existing_ref['source-id']

        if snapshot.display_name:
            snap_display_name = snapshot.display_name
        else:
            snap_display_name = snapshot.id

        if snap_name.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX):
            exception_message = (
                _("Unable to manage existing Snapshot. Snapshot "
                  "%(snapshot)s is already managed by Cinder.") %
                {'snapshot': snap_name})
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        if self.utils.is_volume_failed_over(volume):
            exception_message = (
                (_("Volume %(name)s is failed over from the source volume, "
                   "it is not possible to manage a snapshot of a failed over "
                   "volume.") % {'name': volume.id}))
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        if not self.rest.get_volume_snap(array, device_id, snap_name):
            exception_message = (
                _("Snapshot %(snap_name)s is not associated with specified "
                  "volume %(device_id)s, it is not possible to manage a "
                  "snapshot that is not associated with the specified "
                  "volume.")
                % {'device_id': device_id, 'snap_name': snap_name})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        snap_backend_name = self.utils.modify_snapshot_prefix(
            snap_name, manage=True)

        try:
            self.rest.modify_volume_snap(
                array, device_id, device_id, snap_name,
                extra_specs, rename=True, new_snap_name=snap_backend_name)
        except Exception as e:
            exception_message = (
                _("There was an issue managing %(snap_name)s, it was not "
                  "possible to add the OS- prefix. Error Message: %(e)s.")
                % {'snap_name': snap_name, 'e': six.text_type(e)})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        prov_loc = {'source_id': device_id, 'snap_name': snap_backend_name}

        updates = {'display_name': snap_display_name,
                   'provider_location': six.text_type(prov_loc)}

        LOG.info("Managing SnapVX Snapshot %(snap_name)s of source "
                 "volume %(device_id)s, OpenStack Snapshot display name: "
                 "%(snap_display_name)s", {
                     'snap_name': snap_name, 'device_id': device_id,
                     'snap_display_name': snap_display_name})

        return updates
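
    # Snapshot manage/unmanage is symmetric: both are implemented as a
    # SnapVX rename via modify_volume_snap, toggling the 'OS-' prefix with
    # utils.modify_snapshot_prefix.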

    def manage_existing_snapshot_get_size(self, snapshot):
        """Return the size of the source volume for manage-existing-snapshot.

        :param snapshot: the snapshot object
        :returns: size of the source volume in GB
        """
        volume = snapshot.volume
        extra_specs = self._initial_setup(volume)
        device_id = self._find_device_on_array(volume, extra_specs)
        return self.rest.get_size_of_device_on_array(
            extra_specs[utils.ARRAY], device_id)

    def unmanage_snapshot(self, snapshot):
        """Export VMAX Snapshot from Cinder.

        Leaves the snapshot intact on the backend VMAX.

        :param snapshot: the snapshot object
        :raises: VolumeBackendAPIException
        """
        volume = snapshot.volume
        extra_specs = self._initial_setup(volume)
        array = extra_specs[utils.ARRAY]
        device_id, snap_name = self._parse_snap_info(array, snapshot)

        if self.utils.is_volume_failed_over(volume):
            exception_message = (
                _("It is not possible to unmanage a snapshot where the "
                  "source volume is failed-over, revert back to source "
                  "VMAX to unmanage snapshot %(snap_name)s")
                % {'snap_name': snap_name})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        new_snap_backend_name = self.utils.modify_snapshot_prefix(
            snap_name, unmanage=True)

        try:
            self.rest.modify_volume_snap(
                array, device_id, device_id, snap_name, extra_specs,
                rename=True, new_snap_name=new_snap_backend_name)
        except Exception as e:
            exception_message = (
                _("There was an issue unmanaging Snapshot %(snap_name)s, it "
                  "was not possible to remove the OS- prefix. Error "
                  "message is: %(e)s.")
                % {'snap_name': snap_name, 'e': six.text_type(e)})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        self._sync_check(array, device_id, volume.name, extra_specs)

        LOG.info("Snapshot %(snap_name)s is no longer managed in "
                 "OpenStack but still remains on VMAX source "
                 "%(array_id)s", {'snap_name': snap_name, 'array_id': array})
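
    # Storage-assisted retype below is only attempted for detached,
    # non-replicated volumes; anything else is left to host-assisted
    # migration by returning False.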
Host assisted " 2292 "retype is supported.", 2293 {'name': volume_name}) 2294 return False 2295 2296 return self._slo_workload_migration(device_id, volume, host, 2297 volume_name, new_type, extra_specs) 2298 2299 def _slo_workload_migration(self, device_id, volume, host, 2300 volume_name, new_type, extra_specs): 2301 """Migrate from SLO/Workload combination to another. 2302 2303 :param device_id: the volume device id 2304 :param volume: the volume object 2305 :param host: the host dict 2306 :param volume_name: the name of the volume 2307 :param new_type: the type to migrate to 2308 :param extra_specs: extra specifications 2309 :returns: boolean -- True if migration succeeded, False if error. 2310 """ 2311 is_compression_disabled = self.utils.is_compression_disabled( 2312 extra_specs) 2313 # Check if old type and new type have different compression types 2314 do_change_compression = (self.utils.change_compression_type( 2315 is_compression_disabled, new_type)) 2316 is_valid, target_slo, target_workload = ( 2317 self._is_valid_for_storage_assisted_migration( 2318 device_id, host, extra_specs[utils.ARRAY], 2319 extra_specs[utils.SRP], volume_name, 2320 do_change_compression)) 2321 2322 if not is_valid: 2323 LOG.error( 2324 "Volume %(name)s is not suitable for storage " 2325 "assisted migration using retype.", 2326 {'name': volume_name}) 2327 return False 2328 if volume.host != host['host'] or do_change_compression: 2329 LOG.debug( 2330 "Retype Volume %(name)s from source host %(sourceHost)s " 2331 "to target host %(targetHost)s. Compression change is %(cc)r.", 2332 {'name': volume_name, 2333 'sourceHost': volume.host, 2334 'targetHost': host['host'], 2335 'cc': do_change_compression}) 2336 return self._migrate_volume( 2337 extra_specs[utils.ARRAY], volume, device_id, 2338 extra_specs[utils.SRP], target_slo, 2339 target_workload, volume_name, new_type, extra_specs) 2340 2341 return False 2342 2343 def _migrate_volume( 2344 self, array, volume, device_id, srp, target_slo, 2345 target_workload, volume_name, new_type, extra_specs): 2346 """Migrate from one slo/workload combination to another. 2347 2348 This requires moving the volume from its current SG to a 2349 new or existing SG that has the target attributes. 2350 :param array: the array serial number 2351 :param volume: the volume object 2352 :param device_id: the device number 2353 :param srp: the storage resource pool 2354 :param target_slo: the target service level 2355 :param target_workload: the target workload 2356 :param volume_name: the volume name 2357 :param new_type: the volume type to migrate to 2358 :param extra_specs: the extra specifications 2359 :returns: bool 2360 """ 2361 target_extra_specs = new_type['extra_specs'] 2362 target_extra_specs[utils.SRP] = srp 2363 target_extra_specs[utils.ARRAY] = array 2364 target_extra_specs[utils.SLO] = target_slo 2365 target_extra_specs[utils.WORKLOAD] = target_workload 2366 target_extra_specs[utils.INTERVAL] = extra_specs[utils.INTERVAL] 2367 target_extra_specs[utils.RETRIES] = extra_specs[utils.RETRIES] 2368 is_compression_disabled = self.utils.is_compression_disabled( 2369 target_extra_specs) 2370 2371 try: 2372 target_sg_name = self.masking.get_or_create_default_storage_group( 2373 array, srp, target_slo, target_workload, extra_specs, 2374 is_compression_disabled) 2375 except Exception as e: 2376 LOG.error("Failed to get or create storage group. 
" 2377 "Exception received was %(e)s.", {'e': e}) 2378 return False 2379 2380 storagegroups = self.rest.get_storage_groups_from_volume( 2381 array, device_id) 2382 if not storagegroups: 2383 LOG.warning("Volume : %(volume_name)s does not currently " 2384 "belong to any storage groups.", 2385 {'volume_name': volume_name}) 2386 self.masking.add_volume_to_storage_group( 2387 array, device_id, target_sg_name, volume_name, extra_specs) 2388 else: 2389 self.masking.remove_and_reset_members( 2390 array, volume, device_id, volume_name, target_extra_specs, 2391 reset=True) 2392 2393 # Check that it has been added. 2394 vol_check = self.rest.is_volume_in_storagegroup( 2395 array, device_id, target_sg_name) 2396 if not vol_check: 2397 LOG.error( 2398 "Volume: %(volume_name)s has not been " 2399 "added to target storage group %(storageGroup)s.", 2400 {'volume_name': volume_name, 2401 'storageGroup': target_sg_name}) 2402 return False 2403 2404 return True 2405 2406 def _is_valid_for_storage_assisted_migration( 2407 self, device_id, host, source_array, 2408 source_srp, volume_name, do_change_compression): 2409 """Check if volume is suitable for storage assisted (pool) migration. 2410 2411 :param device_id: the volume device id 2412 :param host: the host dict 2413 :param source_array: the volume's current array serial number 2414 :param source_srp: the volume's current pool name 2415 :param volume_name: the name of the volume to be migrated 2416 :param do_change_compression: do change compression 2417 :returns: boolean -- True/False 2418 :returns: string -- targetSlo 2419 :returns: string -- targetWorkload 2420 """ 2421 false_ret = (False, None, None) 2422 host_info = host['host'] 2423 2424 LOG.debug("Target host is : %(info)s.", {'info': host_info}) 2425 try: 2426 info_detail = host_info.split('#') 2427 pool_details = info_detail[1].split('+') 2428 if len(pool_details) == 4: 2429 target_slo = pool_details[0] 2430 if pool_details[1].lower() == 'none': 2431 target_workload = 'NONE' 2432 else: 2433 target_workload = pool_details[1] 2434 target_srp = pool_details[2] 2435 target_array_serial = pool_details[3] 2436 elif len(pool_details) == 3: 2437 target_slo = pool_details[0] 2438 target_srp = pool_details[1] 2439 target_array_serial = pool_details[2] 2440 target_workload = 'NONE' 2441 else: 2442 raise IndexError 2443 if target_slo.lower() == 'none': 2444 target_slo = None 2445 if self.rest.is_next_gen_array(target_array_serial): 2446 target_workload = 'NONE' 2447 except IndexError: 2448 LOG.error("Error parsing array, pool, SLO and workload.") 2449 return false_ret 2450 2451 if target_array_serial not in source_array: 2452 LOG.error( 2453 "The source array: %(source_array)s does not " 2454 "match the target array: %(target_array)s - " 2455 "skipping storage-assisted migration.", 2456 {'source_array': source_array, 2457 'target_array': target_array_serial}) 2458 return false_ret 2459 2460 if target_srp not in source_srp: 2461 LOG.error( 2462 "Only SLO/workload migration within the same SRP Pool is " 2463 "supported in this version. The source pool: " 2464 "%(source_pool_name)s does not match the target array: " 2465 "%(target_pool)s. 

    def _is_valid_for_storage_assisted_migration(
            self, device_id, host, source_array,
            source_srp, volume_name, do_change_compression):
        """Check if volume is suitable for storage assisted (pool) migration.

        :param device_id: the volume device id
        :param host: the host dict
        :param source_array: the volume's current array serial number
        :param source_srp: the volume's current pool name
        :param volume_name: the name of the volume to be migrated
        :param do_change_compression: do change compression
        :returns: boolean -- True/False
        :returns: string -- targetSlo
        :returns: string -- targetWorkload
        """
        false_ret = (False, None, None)
        host_info = host['host']

        LOG.debug("Target host is : %(info)s.", {'info': host_info})
        try:
            info_detail = host_info.split('#')
            pool_details = info_detail[1].split('+')
            if len(pool_details) == 4:
                target_slo = pool_details[0]
                if pool_details[1].lower() == 'none':
                    target_workload = 'NONE'
                else:
                    target_workload = pool_details[1]
                target_srp = pool_details[2]
                target_array_serial = pool_details[3]
            elif len(pool_details) == 3:
                target_slo = pool_details[0]
                target_srp = pool_details[1]
                target_array_serial = pool_details[2]
                target_workload = 'NONE'
            else:
                raise IndexError
            if target_slo.lower() == 'none':
                target_slo = None
            if self.rest.is_next_gen_array(target_array_serial):
                target_workload = 'NONE'
        except IndexError:
            LOG.error("Error parsing array, pool, SLO and workload.")
            return false_ret

        if target_array_serial not in source_array:
            LOG.error(
                "The source array: %(source_array)s does not "
                "match the target array: %(target_array)s - "
                "skipping storage-assisted migration.",
                {'source_array': source_array,
                 'target_array': target_array_serial})
            return false_ret

        if target_srp not in source_srp:
            LOG.error(
                "Only SLO/workload migration within the same SRP Pool is "
                "supported in this version. The source pool: "
                "%(source_pool_name)s does not match the target pool: "
                "%(target_pool)s. Skipping storage-assisted migration.",
                {'source_pool_name': source_srp,
                 'target_pool': target_srp})
            return false_ret

        found_storage_group_list = self.rest.get_storage_groups_from_volume(
            source_array, device_id)
        if not found_storage_group_list:
            LOG.warning("Volume: %(volume_name)s does not currently "
                        "belong to any storage groups.",
                        {'volume_name': volume_name})
        else:
            for found_storage_group_name in found_storage_group_list:
                emc_fast_setting = (
                    self.provision.
                    get_slo_workload_settings_from_storage_group(
                        source_array, found_storage_group_name))
                target_combination = ("%(targetSlo)s+%(targetWorkload)s"
                                      % {'targetSlo': target_slo,
                                         'targetWorkload': target_workload})
                if target_combination == emc_fast_setting:
                    # Check if migration is from compression to non
                    # compression or vice versa.
                    if not do_change_compression:
                        LOG.warning(
                            "No action required. Volume: %(volume_name)s is "
                            "already part of slo/workload combination: "
                            "%(targetCombination)s.",
                            {'volume_name': volume_name,
                             'targetCombination': target_combination})
                        return false_ret

        return True, target_slo, target_workload
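
    # Replication setup below proceeds in three steps: create (or adopt) a
    # target device, create and establish the RDF pair, then add the pair to
    # an RDF management group where the rep_mode (async/metro) requires one.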

    def setup_volume_replication(self, array, volume, device_id,
                                 extra_specs, target_device_id=None):
        """Setup replication for volume, if enabled.

        Called on create volume, create cloned volume, create volume from
        snapshot, manage_existing, and re-establishing a replication
        relationship after extending.

        :param array: the array serial number
        :param volume: the volume object
        :param device_id: the device id
        :param extra_specs: the extra specifications
        :param target_device_id: the target device id
        :returns: replication_status -- str, replication_driver_data -- dict
        """
        source_name = volume.name
        LOG.debug('Starting replication setup '
                  'for volume: %s.', source_name)
        # Get rdf details
        rdf_group_no, remote_array = self.get_rdf_details(array)
        rdf_vol_size = volume.size
        if rdf_vol_size == 0:
            rdf_vol_size = self.rest.get_size_of_device_on_array(
                array, device_id)

        # Give the target volume the same Volume Element Name as the
        # source volume
        target_name = self.utils.get_volume_element_name(volume.id)

        if not target_device_id:
            # Create a target volume on the target array
            rep_extra_specs = self._get_replication_extra_specs(
                extra_specs, self.rep_config)
            volume_dict = self._create_volume(
                target_name, rdf_vol_size, rep_extra_specs)
            target_device_id = volume_dict['device_id']

        LOG.debug("Create volume replica: Target device: %(target)s "
                  "Source Device: %(source)s "
                  "Volume identifier: %(name)s.",
                  {'target': target_device_id,
                   'source': device_id,
                   'name': target_name})

        # Enable rdf replication and establish the link
        rdf_dict = self.enable_rdf(
            array, volume, device_id, rdf_group_no, self.rep_config,
            target_name, remote_array, target_device_id, extra_specs)

        if self.utils.does_vol_need_rdf_management_group(extra_specs):
            self._add_volume_to_async_rdf_managed_grp(
                array, device_id, source_name, remote_array,
                target_device_id, extra_specs)

        LOG.info('Successfully setup replication for %s.',
                 target_name)
        replication_status = REPLICATION_ENABLED
        replication_driver_data = rdf_dict

        return replication_status, replication_driver_data

    def _add_volume_to_async_rdf_managed_grp(
            self, array, device_id, volume_name, remote_array,
            target_device_id, extra_specs):
        """Add an async volume to its rdf management group.

        :param array: the array serial number
        :param device_id: the device id
        :param volume_name: the volume name
        :param remote_array: the remote array
        :param target_device_id: the target device id
        :param extra_specs: the extra specifications
        :raises: VolumeBackendAPIException
        """
        group_name = self.utils.get_async_rdf_managed_grp_name(
            self.rep_config)
        try:
            self.provision.get_or_create_group(array, group_name, extra_specs)
            self.masking.add_volume_to_storage_group(
                array, device_id, group_name, volume_name, extra_specs)
            # Add remote volume
            self.provision.get_or_create_group(
                remote_array, group_name, extra_specs)
            self.masking.add_volume_to_storage_group(
                remote_array, target_device_id,
                group_name, volume_name, extra_specs)
        except Exception as e:
            exception_message = (
                _('Exception occurred adding volume %(vol)s to its async '
                  'rdf management group - the exception received was: %(e)s')
                % {'vol': volume_name, 'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def cleanup_lun_replication(self, volume, volume_name,
                                device_id, extra_specs):
        """Cleanup target volume on delete.

        Extra logic if target is last in group, or is a metro volume.

        :param volume: the volume object
        :param volume_name: the volume name
        :param device_id: the device id
        :param extra_specs: extra specifications
        :raises: VolumeBackendAPIException
        """
        LOG.debug('Starting cleanup replication from volume: '
                  '%s.', volume_name)
        try:
            loc = volume.provider_location
            rep_data = volume.replication_driver_data

            if (isinstance(loc, six.string_types)
                    and isinstance(rep_data, six.string_types)):
                name = ast.literal_eval(loc)
                try:
                    array = name['array']
                except KeyError:
                    array = (name['keybindings']
                             ['SystemName'].split('+')[1].strip('-'))
                rep_extra_specs = self._get_replication_extra_specs(
                    extra_specs, self.rep_config)
                (target_device, remote_array, rdf_group_no,
                 local_vol_state, pair_state) = (
                    self.get_remote_target_device(array, volume, device_id))

                if target_device is not None:
                    # Clean-up target
                    self._cleanup_remote_target(
                        array, volume, remote_array, device_id, target_device,
                        rdf_group_no, volume_name, rep_extra_specs)
                    LOG.info('Successfully destroyed replication for '
                             'volume: %(volume)s',
                             {'volume': volume_name})
                else:
                    LOG.warning('Replication target not found for '
                                'replication-enabled volume: %(volume)s',
                                {'volume': volume_name})
        except Exception as e:
            if extra_specs.get(utils.REP_MODE, None) in [
                    utils.REP_ASYNC, utils.REP_METRO]:
                (target_device, remote_array, rdf_group_no,
                 local_vol_state, pair_state) = (
                    self.get_remote_target_device(
                        extra_specs[utils.ARRAY], volume, device_id))
                if target_device is not None:
                    # Return devices to their async rdf management groups
                    self._add_volume_to_async_rdf_managed_grp(
                        extra_specs[utils.ARRAY], device_id, volume_name,
                        remote_array, target_device, extra_specs)
            exception_message = (
                _('Cannot get necessary information to cleanup '
                  'replication target for volume: %(volume)s. '
                  'The exception received was: %(e)s. Manual '
                  'clean-up may be required. Please contact '
                  'your administrator.')
                % {'volume': volume_name, 'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _cleanup_remote_target(
            self, array, volume, remote_array, device_id, target_device,
            rdf_group, volume_name, rep_extra_specs):
        """Clean-up remote replication target after exception or on deletion.

        :param array: the array serial number
        :param volume: the volume object
        :param remote_array: the remote array serial number
        :param device_id: the source device id
        :param target_device: the target device id
        :param rdf_group: the RDF group
        :param volume_name: the volume name
        :param rep_extra_specs: replication extra specifications
        """
        self.masking.remove_and_reset_members(
            remote_array, volume, target_device, volume_name,
            rep_extra_specs, False)
        are_vols_paired, local_vol_state, pair_state = (
            self.rest.are_vols_rdf_paired(
                array, remote_array, device_id, target_device))
        if are_vols_paired:
            is_metro = self.utils.is_metro_device(
                self.rep_config, rep_extra_specs)
            if is_metro:
                rep_extra_specs['allow_del_metro'] = self.allow_delete_metro
                self._cleanup_metro_target(
                    array, device_id, target_device,
                    rdf_group, rep_extra_specs)
            else:
                # Break the sync relationship.
                self.provision.break_rdf_relationship(
                    array, device_id, target_device, rdf_group,
                    rep_extra_specs, pair_state)
        self._delete_from_srp(
            remote_array, target_device, volume_name, rep_extra_specs)
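
    # The lock below serialises metro clean-up per RDF group: the
    # coordination lock name is built from the 'rdf_group' argument, so
    # concurrent deletes in the same group cannot race the suspend/resume
    # of the RDF links.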
" 2725 "Please contact your administrator.") 2726 % {'backend': self.configuration.safe_get( 2727 'volume_backend_name')}) 2728 LOG.error(exception_message) 2729 raise exception.VolumeBackendAPIException( 2730 data=exception_message) 2731 2732 def _cleanup_replication_source( 2733 self, array, volume, volume_name, volume_dict, extra_specs): 2734 """Cleanup a remote replication source volume on failure. 2735 2736 If replication setup fails at any stage on a new volume create, 2737 we must clean-up the source instance as the cinder database won't 2738 be updated with the provider_location. This means the volume cannot 2739 be properly deleted from the array by cinder. 2740 :param array: the array serial number 2741 :param volume: the volume object 2742 :param volume_name: the name of the volume 2743 :param volume_dict: the source volume dictionary 2744 :param extra_specs: the extra specifications 2745 """ 2746 LOG.warning( 2747 "Replication failed. Cleaning up the source volume. " 2748 "Volume name: %(sourceName)s ", 2749 {'sourceName': volume_name}) 2750 device_id = volume_dict['device_id'] 2751 # Check if volume is snap target (e.g. if clone volume) 2752 self._sync_check(array, device_id, volume_name, extra_specs) 2753 # Remove from any storage groups and cleanup replication 2754 self._remove_vol_and_cleanup_replication( 2755 array, device_id, volume_name, extra_specs, volume) 2756 self._delete_from_srp( 2757 array, device_id, volume_name, extra_specs) 2758 2759 def get_rdf_details(self, array): 2760 """Retrieves an SRDF group instance. 2761 2762 :param array: the array serial number 2763 :returns: rdf_group_no, remote_array 2764 """ 2765 if not self.rep_config: 2766 exception_message = (_("Replication is not configured on " 2767 "backend: %(backend)s.") % 2768 {'backend': self.configuration.safe_get( 2769 'volume_backend_name')}) 2770 LOG.exception(exception_message) 2771 raise exception.VolumeBackendAPIException(data=exception_message) 2772 2773 remote_array = self.rep_config['array'] 2774 rdf_group_label = self.rep_config['rdf_group_label'] 2775 LOG.info("Replication group: %(RDFGroup)s.", 2776 {'RDFGroup': rdf_group_label}) 2777 rdf_group_no = self.rest.get_rdf_group_number(array, rdf_group_label) 2778 if rdf_group_no is None: 2779 exception_message = (_("Cannot find replication group: " 2780 "%(RDFGroup)s. Please check the name " 2781 "and the array") % 2782 {'RDFGroup': rdf_group_label}) 2783 LOG.exception(exception_message) 2784 raise exception.VolumeBackendAPIException( 2785 data=exception_message) 2786 2787 LOG.info("Found RDF group number: %(RDFGroup)s.", 2788 {'RDFGroup': rdf_group_no}) 2789 2790 return rdf_group_no, remote_array 2791 2792 def failover_host(self, volumes, secondary_id=None, groups=None): 2793 """Fails over the volumes on a host back and forth. 2794 2795 Driver needs to update following info for failed-over volume: 2796 1. provider_location: update array details 2797 2. 

    def failover_host(self, volumes, secondary_id=None, groups=None):
        """Fails over the volumes on a host back and forth.

        Driver needs to update the following info for a failed-over volume:
        1. provider_location: update array details
        2. replication_status: new status for replication-enabled volume

        :param volumes: the list of volumes to be failed over
        :param secondary_id: the target backend
        :param groups: replication groups
        :returns: secondary_id, volume_update_list, group_update_list
        :raises: VolumeBackendAPIException
        """
        volume_update_list = []
        group_update_list = []
        group_fo = None
        if secondary_id != 'default':
            if not self.failover:
                self.failover = True
                if self.rep_config:
                    secondary_id = self.rep_config['array']
            else:
                exception_message = (_(
                    "Backend %(backend)s is already failed over. "
                    "If you wish to failback, please append "
                    "'--backend_id default' to your command.")
                    % {'backend': self.configuration.safe_get(
                        'volume_backend_name')})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
        else:
            if self.failover:
                self.failover = False
                secondary_id = None
                group_fo = 'default'
            else:
                exception_message = (_(
                    "Cannot failback backend %(backend)s - backend not "
                    "in failed over state. If you meant to failover, please "
                    "omit the '--backend_id default' from the command.")
                    % {'backend': self.configuration.safe_get(
                        'volume_backend_name')})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        if groups:
            for group in groups:
                vol_list = []
                # Iterate over a copy, as volumes belonging to the group
                # are removed from the original list.
                for vol in list(volumes):
                    if vol.group_id == group.id:
                        vol_list.append(vol)
                        volumes.remove(vol)
                grp_update, vol_updates = (
                    self.failover_replication(
                        None, group, vol_list, group_fo, host=True))

                group_update_list.append({'group_id': group.id,
                                          'updates': grp_update})
                volume_update_list += vol_updates

        rep_mode = self.rep_config['mode']
        if rep_mode == utils.REP_ASYNC:
            vol_grp_name = self.utils.get_async_rdf_managed_grp_name(
                self.rep_config)
            __, volume_update_list = (
                self._failover_replication(
                    volumes, None, vol_grp_name,
                    secondary_backend_id=group_fo, host=True))

        for volume in volumes:
            extra_specs = self._initial_setup(volume)
            if self.utils.is_replication_enabled(extra_specs):
                if rep_mode == utils.REP_SYNC:
                    model_update = self._failover_volume(
                        volume, self.failover, extra_specs)
                    volume_update_list.append(model_update)
            else:
                if self.failover:
                    # Since the array has been failed-over,
                    # volumes without replication should be in error.
                    volume_update_list.append({
                        'volume_id': volume.id,
                        'updates': {'status': 'error'}})
                else:
                    # This is a failback, so we will attempt
                    # to recover non-failed over volumes
                    recovery = self.recover_volumes_on_failback(
                        volume, extra_specs)
                    volume_update_list.append(recovery)

        LOG.info("Failover host complete.")
        return secondary_id, volume_update_list, group_update_list

    def _failover_volume(self, vol, failover, extra_specs):
        """Failover a volume.

        :param vol: the volume object
        :param failover: flag to indicate failover or failback -- bool
        :param extra_specs: the extra specifications
        :returns: model_update -- dict
        """
        loc = vol.provider_location
        rep_data = vol.replication_driver_data
        try:
            name = ast.literal_eval(loc)
            replication_keybindings = ast.literal_eval(rep_data)
            try:
                array = name['array']
            except KeyError:
                array = (name['keybindings']
                         ['SystemName'].split('+')[1].strip('-'))
            device_id = self._find_device_on_array(vol, {utils.ARRAY: array})

            (target_device, remote_array, rdf_group,
             local_vol_state, pair_state) = (
                self.get_remote_target_device(array, vol, device_id))

            self._sync_check(array, device_id, vol.name, extra_specs)
            self.provision.failover_volume(
                array, device_id, rdf_group, extra_specs,
                local_vol_state, failover)

            if failover:
                new_status = REPLICATION_FAILOVER
            else:
                new_status = REPLICATION_ENABLED

            # Transfer ownership to secondary_backend_id and
            # update provider_location field
            loc = six.text_type(replication_keybindings)
            rep_data = six.text_type(name)

        except Exception as ex:
            msg = ('Failed to failover volume %(volume_id)s. '
                   'Error: %(error)s.')
            LOG.error(msg, {'volume_id': vol.id,
                            'error': ex}, )
            new_status = FAILOVER_ERROR

        model_update = {'volume_id': vol.id,
                        'updates':
                            {'replication_status': new_status,
                             'replication_driver_data': rep_data,
                             'provider_location': loc}}
        return model_update

    def recover_volumes_on_failback(self, volume, extra_specs):
        """Recover volumes on failback.

        On failback, attempt to recover non RE (replication enabled)
        volumes from the primary array.

        :param volume: the volume object
        :param extra_specs: the extra specifications
        :returns: volume_update
        """
        # Check if volume still exists on the primary
        volume_update = {'volume_id': volume.id}
        device_id = self._find_device_on_array(volume, extra_specs)
        if not device_id:
            volume_update['updates'] = {'status': 'error'}
        else:
            try:
                maskingview = self._get_masking_views_from_volume(
                    extra_specs[utils.ARRAY], device_id, None)
            except Exception:
                maskingview = None
                LOG.debug("Unable to determine if volume is in masking view.")
            if not maskingview:
                volume_update['updates'] = {'status': 'available'}
            else:
                volume_update['updates'] = {'status': 'in-use'}
        return volume_update
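
    # replication_driver_data mirrors provider_location for the remote copy;
    # an illustrative (made-up) value of the string parsed below:
    #     "{'array': '000197800124', 'device_id': '00456'}"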

    def get_remote_target_device(self, array, volume, device_id):
        """Get the remote target for a given volume.

        :param array: the array serial number
        :param volume: the volume object
        :param device_id: the device id
        :returns: target_device, target_array, rdf_group, state
        """
        target_device, local_vol_state, pair_state = None, '', ''
        rdf_group, remote_array = self.get_rdf_details(array)
        try:
            rep_target_data = volume.replication_driver_data
            replication_keybindings = ast.literal_eval(rep_target_data)
            remote_array = replication_keybindings['array']
            remote_device = replication_keybindings['device_id']
            target_device_info = self.rest.get_volume(
                remote_array, remote_device)
            if target_device_info is not None:
                target_device = remote_device
                are_vols_paired, local_vol_state, pair_state = (
                    self.rest.are_vols_rdf_paired(
                        array, remote_array, device_id, target_device))
                if not are_vols_paired:
                    target_device = None
        except (KeyError, ValueError):
            target_device = None
        return (target_device, remote_array, rdf_group,
                local_vol_state, pair_state)
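
    # Online device expansion of an RDF pair is only possible when both
    # arrays are next gen (HyperMax versions >= 5978); otherwise the pair
    # must be torn down, both sides extended separately, and the
    # relationship re-created, as implemented below.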
                r2_size = self.rest.get_size_of_device_on_array(
                    remote_array, target_device)
                if int(r2_size) < int(new_size):
                    self.provision.extend_volume(
                        remote_array, target_device, new_size,
                        rep_extra_specs, lock_rdf_group)

                # Extend the source volume
                LOG.info("Extending source volume...")
                self.provision.extend_volume(
                    array, device_id, new_size, extra_specs, lock_rdf_group)

                if not ode_replication:
                    # Re-create replication relationship
                    LOG.info("Recreating replication relationship...")
                    self.setup_volume_replication(
                        array, volume, device_id, extra_specs, target_device)

                # Check if volume needs to be returned to volume group
                if volume.group_id:
                    self._add_new_volume_to_volume_group(
                        volume, device_id, volume_name, extra_specs)

            except Exception as e:
                exception_message = (_("Error extending volume. "
                                       "Error received was %(e)s") %
                                     {'e': e})
                LOG.exception(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        else:
            exception_message = (_(
                "Extending a replicated volume is not permitted on this "
                "backend. Please contact your administrator. Note that "
                "you cannot extend SRDF/Metro protected volumes."))
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def enable_rdf(self, array, volume, device_id, rdf_group_no, rep_config,
                   target_name, remote_array, target_device, extra_specs):
        """Create a replication relationship with a target volume.

        :param array: the array serial number
        :param volume: the volume object
        :param device_id: the device id
        :param rdf_group_no: the rdf group number
        :param rep_config: the replication config
        :param target_name: the target volume name
        :param remote_array: the remote array serial number
        :param target_device: the target device id
        :param extra_specs: the extra specifications
        :returns: rdf_dict
        """
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, rep_config)
        try:
            # Remove source and target instances from their
            # default storage groups
            self.masking.remove_and_reset_members(
                array, volume, device_id, target_name, extra_specs, False)

            self.masking.remove_and_reset_members(
                remote_array, volume, target_device, target_name,
                rep_extra_specs, False)

            # Check if volume is a copy session target
            self._sync_check(array, device_id, target_name,
                             extra_specs, tgt_only=True)
            # Establish replication relationship
            rdf_dict = self.rest.create_rdf_device_pair(
                array, device_id, rdf_group_no, target_device, remote_array,
                extra_specs)

            # Add source and target instances to their replication groups
            LOG.debug("Adding source device to default replication group.")
            self.add_volume_to_replication_group(
                array, device_id, target_name, extra_specs)
            LOG.debug("Adding target device to default replication group.")
            self.add_volume_to_replication_group(
                remote_array, target_device, target_name, rep_extra_specs)

        except Exception as e:
            LOG.warning(
                ("Remote replication failed. Cleaning up the target "
                 "volume and returning source volume to default storage "
                 "group. Volume name: %(name)s"),
                {'name': target_name})
            self._cleanup_remote_target(
                array, volume, remote_array, device_id, target_device,
                rdf_group_no, target_name, rep_extra_specs)
            # Re-throw the exception.
            exception_message = (_("Remote replication failed with exception:"
                                   " %(e)s")
                                 % {'e': six.text_type(e)})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        return rdf_dict

    def add_volume_to_replication_group(
            self, array, device_id, volume_name, extra_specs):
        """Add a volume to the default replication group.

        Replication groups are VMAX storage groups that contain only
        RDF-paired volumes. We can use our normal storage group operations.
        :param array: the array serial number
        :param device_id: the device id
        :param volume_name: the volume name
        :param extra_specs: the extra specifications
        :returns: storagegroup_name
        """
        do_disable_compression = self.utils.is_compression_disabled(
            extra_specs)
        rep_mode = extra_specs.get(utils.REP_MODE, None)
        try:
            storagegroup_name = (
                self.masking.get_or_create_default_storage_group(
                    array, extra_specs[utils.SRP], extra_specs[utils.SLO],
                    extra_specs[utils.WORKLOAD], extra_specs,
                    do_disable_compression, is_re=True, rep_mode=rep_mode))
        except Exception as e:
            exception_message = (_("Failed to get or create replication "
                                   "group. Exception received: %(e)s")
                                 % {'e': six.text_type(e)})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        self.masking.add_volume_to_storage_group(
            array, device_id, storagegroup_name, volume_name, extra_specs)

        return storagegroup_name

    def _get_replication_extra_specs(self, extra_specs, rep_config):
        """Get replication extra specifications.

        Called when target array operations are necessary -
        on create, extend, etc. and when a volume is failed over.
        :param extra_specs: the extra specifications
        :param rep_config: the replication configuration
        :returns: rep_extra_specs -- dict
        """
        if not self.utils.is_replication_enabled(extra_specs):
            # Skip this if the volume is not replicated
            return
        rep_extra_specs = deepcopy(extra_specs)
        rep_extra_specs[utils.ARRAY] = rep_config['array']
        rep_extra_specs[utils.SRP] = rep_config['srp']
        rep_extra_specs[utils.PORTGROUPNAME] = rep_config['portgroup']

        # If disable compression is set, check if target array is all flash
        do_disable_compression = self.utils.is_compression_disabled(
            extra_specs)
        if do_disable_compression:
            if not self.rest.is_compression_capable(
                    rep_extra_specs[utils.ARRAY]):
                rep_extra_specs.pop(utils.DISABLECOMPRESSION, None)

        # Check to see if SLO and Workload are configured on the target array.
        if extra_specs[utils.SLO]:
            is_valid_slo, is_valid_workload = (
                self.provision.verify_slo_workload(
                    rep_extra_specs[utils.ARRAY],
                    extra_specs[utils.SLO],
                    rep_extra_specs[utils.WORKLOAD],
                    rep_extra_specs[utils.SRP]))
            if not is_valid_slo or not is_valid_workload:
                LOG.warning("The target array does not support the storage "
                            "pool setting for SLO %(slo)s or workload "
                            "%(workload)s. Not assigning any SLO or "
                            "workload.",
                            {'slo': extra_specs[utils.SLO],
                             'workload': extra_specs[utils.WORKLOAD]})
                rep_extra_specs[utils.SLO] = None
                if extra_specs[utils.WORKLOAD]:
                    rep_extra_specs[utils.WORKLOAD] = None

        return rep_extra_specs

    @staticmethod
    def get_secondary_stats_info(rep_config, array_info):
        """On failover, report on secondary array statistics.

        :param rep_config: the replication configuration
        :param array_info: the array info
        :returns: secondary_info -- dict
        """
        secondary_info = array_info.copy()
        secondary_info['SerialNumber'] = six.text_type(rep_config['array'])
        secondary_info['srpName'] = rep_config['srp']
        return secondary_info

    def _setup_for_live_migration(self, device_info_dict,
                                  source_storage_group_list):
        """Set attributes for live migration.

        :param device_info_dict: the data dict
        :param source_storage_group_list: the storage groups the source
            device belongs to
        :returns: source_nf_sg: the non fast storage group
        :returns: source_sg: the source storage group
        :returns: source_parent_sg: the parent storage group
        :returns: is_source_nf_sg: if the non fast storage group already
            exists
        """
        array = device_info_dict['array']
        source_sg = None
        is_source_nf_sg = False
        # Get parent storage group
        source_parent_sg = self.rest.get_element_from_masking_view(
            array, device_info_dict['maskingview'], storagegroup=True)
        source_nf_sg = source_parent_sg[:-2] + 'NONFAST'
        for sg in source_storage_group_list:
            is_descendant = self.rest.is_child_sg_in_parent_sg(
                array, sg, source_parent_sg)
            if is_descendant:
                source_sg = sg
        is_descendant = self.rest.is_child_sg_in_parent_sg(
            array, source_nf_sg, source_parent_sg)
        if is_descendant:
            is_source_nf_sg = True
        return source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg

    def create_group(self, context, group):
        """Creates a generic volume group.
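
        For a replicated group the same storage group is also created on
        the remote array. On success the returned model update takes the
        form (illustrative): {'status': 'available'}, with
        'replication_status': 'enabled' added for replicated groups.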

        :param context: the context
        :param group: the group object to be created
        :returns: dict -- modelUpdate
        :raises: VolumeBackendAPIException, NotImplementedError, InvalidInput
        """
        if (not volume_utils.is_group_a_cg_snapshot_type(group)
                and not group.is_replicated):
            raise NotImplementedError()
        if group.is_replicated:
            if (self.rep_config and self.rep_config.get('mode')
                    and self.rep_config['mode']
                    in [utils.REP_ASYNC, utils.REP_METRO]):
                msg = _('Replication groups are not supported '
                        'for use with Asynchronous replication or Metro.')
                raise exception.InvalidInput(reason=msg)

        model_update = {'status': fields.GroupStatus.AVAILABLE}

        LOG.info("Create generic volume group: %(group)s.",
                 {'group': group.id})

        vol_grp_name = self.utils.update_volume_group_name(group)

        try:
            array, interval_retries_dict = self.utils.get_volume_group_utils(
                group, self.interval, self.retries)
            self.provision.create_volume_group(
                array, vol_grp_name, interval_retries_dict)
            if group.is_replicated:
                LOG.debug("Group: %(group)s is a replication group.",
                          {'group': group.id})
                # Create remote group
                __, remote_array = self.get_rdf_details(array)
                self.provision.create_volume_group(
                    remote_array, vol_grp_name, interval_retries_dict)
                model_update.update({
                    'replication_status': fields.ReplicationStatus.ENABLED})
        except Exception:
            exception_message = (_("Failed to create generic volume group:"
                                   " %(volGrpName)s.")
                                 % {'volGrpName': vol_grp_name})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        return model_update

    def delete_group(self, context, group, volumes):
        """Deletes a generic volume group.

        :param context: the context
        :param group: the group object to be deleted
        :param volumes: the list of volumes in the generic group to be deleted
        :returns: dict -- modelUpdate
        :returns: list -- list of volume model updates
        :raises: NotImplementedError
        """
        LOG.info("Delete generic volume group: %(group)s.",
                 {'group': group.id})
        if (not volume_utils.is_group_a_cg_snapshot_type(group)
                and not group.is_replicated):
            raise NotImplementedError()
        model_update, volumes_model_update = self._delete_group(
            group, volumes)
        return model_update, volumes_model_update

    def _delete_group(self, group, volumes):
        """Helper function to delete a volume group.
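
        Removes the member volumes from the storage group, deletes them
        from the SRP, then deletes the storage group itself (and its
        remote counterpart for replicated groups). Each member volume
        contributes an entry to volumes_model_update of the form
        (illustrative): {'id': <volume id>, 'status': 'deleted'}.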

        :param group: the group object
        :param volumes: the member volume objects
        :returns: model_update, volumes_model_update
        """
        volumes_model_update = []
        array, interval_retries_dict = self.utils.get_volume_group_utils(
            group, self.interval, self.retries)
        vol_grp_name = None

        volume_group = self._find_volume_group(
            array, group)

        if volume_group is None:
            LOG.error("Cannot find generic volume group %(volGrpName)s.",
                      {'volGrpName': group.id})
            model_update = {'status': fields.GroupStatus.DELETED}

            volumes_model_update = self.utils.update_volume_model_updates(
                volumes_model_update, volumes, group.id, status='deleted')
            return model_update, volumes_model_update

        if 'name' in volume_group:
            vol_grp_name = volume_group['name']
        volume_device_ids = self._get_members_of_volume_group(
            array, vol_grp_name)
        deleted_volume_device_ids = []

        # Remove replication for group, if applicable
        if group.is_replicated:
            self._cleanup_group_replication(
                array, vol_grp_name, volume_device_ids,
                interval_retries_dict)
        try:
            if volume_device_ids:
                # First remove all the volumes from the SG
                self.masking.remove_volumes_from_storage_group(
                    array, volume_device_ids, vol_grp_name,
                    interval_retries_dict)
                for vol in volumes:
                    extra_specs = self._initial_setup(vol)
                    device_id = self._find_device_on_array(
                        vol, extra_specs)
                    if device_id in volume_device_ids:
                        self.masking.remove_and_reset_members(
                            array, vol, device_id, vol.name,
                            extra_specs, False)
                        self._delete_from_srp(
                            array, device_id, "group vol", extra_specs)
                    else:
                        LOG.debug("Volume not found on the array.")
                    # Add the device id to the deleted list
                    deleted_volume_device_ids.append(device_id)
            # Once all volumes are deleted then delete the SG
            self.rest.delete_storage_group(array, vol_grp_name)
            model_update = {'status': fields.GroupStatus.DELETED}
            volumes_model_update = self.utils.update_volume_model_updates(
                volumes_model_update, volumes, group.id, status='deleted')
        except Exception as e:
            LOG.error("Error deleting volume group. "
                      "Error received: %(e)s", {'e': e})
            model_update = {'status': fields.GroupStatus.ERROR_DELETING}
            # Update the volumes_model_update
            if deleted_volume_device_ids:
                LOG.debug("Device ids: %(dev)s are deleted.",
                          {'dev': deleted_volume_device_ids})
            volumes_not_deleted = []
            for vol in volume_device_ids:
                if vol not in deleted_volume_device_ids:
                    volumes_not_deleted.append(vol)
            if deleted_volume_device_ids:
                volumes_model_update = self.utils.update_volume_model_updates(
                    volumes_model_update, deleted_volume_device_ids,
                    group.id, status='deleted')
            if volumes_not_deleted:
                volumes_model_update = self.utils.update_volume_model_updates(
                    volumes_model_update, volumes_not_deleted,
                    group.id, status='error_deleting')
            # As a best effort try to add back the undeleted volumes to sg
            # Don't throw any exception in case of failure
            try:
                if volumes_not_deleted:
                    self.masking.add_volumes_to_storage_group(
                        array, volumes_not_deleted,
                        vol_grp_name, interval_retries_dict)
            except Exception as ex:
                LOG.error("Error in rollback - %(ex)s. "
                          "Failed to add back volumes to sg %(sg_name)s",
                          {'ex': ex, 'sg_name': vol_grp_name})

        return model_update, volumes_model_update

    def _cleanup_group_replication(
            self, array, vol_grp_name, volume_device_ids, extra_specs):
        """Cleanup remote replication.

        Break and delete the rdf replication relationship and
        delete the remote storage group and member devices.
        :param array: the array serial number
        :param vol_grp_name: the volume group name
        :param volume_device_ids: the device ids of the local volumes
        :param extra_specs: the extra specifications
        """
        rdf_group_no, remote_array = self.get_rdf_details(array)
        # Delete replication for group, if applicable
        if volume_device_ids:
            self.provision.delete_group_replication(
                array, vol_grp_name, rdf_group_no, extra_specs)
        remote_device_ids = self._get_members_of_volume_group(
            remote_array, vol_grp_name)
        # Remove volumes from remote replication group
        if remote_device_ids:
            self.masking.remove_volumes_from_storage_group(
                remote_array, remote_device_ids, vol_grp_name, extra_specs)
            for device_id in remote_device_ids:
                # Make sure they are not members of any other storage groups
                self.masking.remove_and_reset_members(
                    remote_array, None, device_id, 'target_vol',
                    extra_specs, False)
                self._delete_from_srp(
                    remote_array, device_id, "group vol", extra_specs)
        # Once all volumes are deleted then delete the SG
        self.rest.delete_storage_group(remote_array, vol_grp_name)

    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a generic volume group snapshot.

        :param context: the context
        :param group_snapshot: the group snapshot to be created
        :param snapshots: the list of snapshot objects
        :returns: dict -- modelUpdate
        :returns: list -- list of snapshots
        :raises: VolumeBackendAPIException, NotImplementedError
        """
        grp_id = group_snapshot.group_id
        source_group = group_snapshot.get('group')
        if not volume_utils.is_group_a_cg_snapshot_type(source_group):
            raise NotImplementedError()
        snapshots_model_update = []
        LOG.info(
            "Create snapshot for group %(grpId)s, "
            "group snapshot ID: %(group_snapshot)s.",
            {'group_snapshot': group_snapshot.id,
             'grpId': grp_id})

        try:
            snap_name = self.utils.truncate_string(group_snapshot.id, 19)
            self._create_group_replica(source_group, snap_name)

        except Exception as e:
            exception_message = (_("Failed to create snapshot for group: "
                                   "%(volGrpName)s. Exception received: "
                                   "%(e)s")
                                 % {'volGrpName': grp_id,
                                    'e': six.text_type(e)})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        for snapshot in snapshots:
            src_dev_id = self._get_src_device_id_for_group_snap(snapshot)
            snapshots_model_update.append(
                {'id': snapshot.id,
                 'provider_location': six.text_type(
                     {'source_id': src_dev_id, 'snap_name': snap_name}),
                 'status': fields.SnapshotStatus.AVAILABLE})
        model_update = {'status': fields.GroupStatus.AVAILABLE}

        return model_update, snapshots_model_update

    def _get_src_device_id_for_group_snap(self, snapshot):
        """Get the source device id for the provider_location.

        :param snapshot: the snapshot object
        :returns: src_device_id
        """
        volume = snapshot.volume
        extra_specs = self._initial_setup(volume)
        return self._find_device_on_array(volume, extra_specs)

    def _create_group_replica(
            self, source_group, snap_name):
        """Create a group replica.

        This can be a group snapshot or a cloned volume group.
        :param source_group: the group object
        :param snap_name: the name of the snapshot
        """
        array, interval_retries_dict = self.utils.get_volume_group_utils(
            source_group, self.interval, self.retries)
        vol_grp_name = None
        volume_group = (
            self._find_volume_group(array, source_group))
        if volume_group:
            if 'name' in volume_group:
                vol_grp_name = volume_group['name']
        if vol_grp_name is None:
            exception_message = (
                _("Cannot find generic volume group %(group_id)s.") %
                {'group_id': source_group.id})
            raise exception.VolumeBackendAPIException(
                data=exception_message)
        self.provision.create_group_replica(
            array, vol_grp_name,
            snap_name, interval_retries_dict)

    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Delete a volume group snapshot.

        :param context: the context
        :param group_snapshot: the volume group snapshot to be deleted
        :param snapshots: the snapshot objects
        :returns: model_update, snapshots_model_update
        """
        model_update, snapshots_model_update = self._delete_group_snapshot(
            group_snapshot, snapshots)
        return model_update, snapshots_model_update

    def _delete_group_snapshot(self, group_snapshot, snapshots):
        """Helper function to delete a group snapshot.

        :param group_snapshot: the group snapshot object
        :param snapshots: the snapshot objects
        :returns: model_update, snapshots_model_update
        :raises: VolumeBackendAPIException, NotImplementedError
        """
        snapshots_model_update = []
        source_group = group_snapshot.get('group')
        grp_id = group_snapshot.group_id
        if not volume_utils.is_group_a_cg_snapshot_type(source_group):
            raise NotImplementedError()

        LOG.info("Delete snapshot grpSnapshotId: %(grpSnapshotId)s"
                 " for source group %(grpId)s",
                 {'grpSnapshotId': group_snapshot.id,
                  'grpId': grp_id})

        snap_name = self.utils.truncate_string(group_snapshot.id, 19)
        vol_grp_name = None
        try:
            # Get the array serial
            array, extra_specs = self.utils.get_volume_group_utils(
                source_group, self.interval, self.retries)
            # Get the volume group dict for getting the group name
            volume_group = (self._find_volume_group(array, source_group))
            if volume_group and volume_group.get('name'):
                vol_grp_name = volume_group['name']
            if vol_grp_name is None:
                exception_message = (
                    _("Cannot find generic volume group %(grp_id)s.") %
                    {'grp_id': source_group.id})
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
            # Check if the snapshot exists
            if 'snapVXSnapshots' in volume_group:
                if snap_name in volume_group['snapVXSnapshots']:
                    src_devs = self._get_snap_src_dev_list(array, snapshots)
                    self.provision.delete_group_replica(
                        array, snap_name, vol_grp_name, src_devs, extra_specs)
            else:
                # Snapshot has already been deleted, return successfully
                LOG.error("Cannot find group snapshot %(snapId)s.",
                          {'snapId': group_snapshot.id})
            model_update = {'status': fields.GroupSnapshotStatus.DELETED}
            for snapshot in snapshots:
                snapshots_model_update.append(
                    {'id': snapshot.id,
                     'status': fields.SnapshotStatus.DELETED})
        except Exception as e:
            LOG.error("Error deleting volume group snapshot. "
                      "Error received: %(e)s", {'e': e})
            model_update = {
                'status': fields.GroupSnapshotStatus.ERROR_DELETING}

        return model_update, snapshots_model_update

    def _get_snap_src_dev_list(self, array, snapshots):
        """Get the list of source devices for a list of snapshots.

        :param array: the array serial number
        :param snapshots: the list of snapshot objects
        :returns: src_dev_ids
        """
        src_dev_ids = []
        for snap in snapshots:
            src_dev_id, snap_name = self._parse_snap_info(array, snap)
            if snap_name:
                src_dev_ids.append(src_dev_id)
        return src_dev_ids

    def _find_volume_group(self, array, group):
        """Finds a volume group given the group.

        :param array: the array serial number
        :param group: the group object
        :returns: volume group dictionary
        """
        group_name = self.utils.update_volume_group_name(group)
        volume_group = self.rest.get_storage_group_rep(array, group_name)
        if not volume_group:
            LOG.warning("Volume group %(group_id)s cannot be found",
                        {'group_id': group_name})
            return None
        return volume_group

    def _get_members_of_volume_group(self, array, group_name):
        """Get the members of a volume group.

        :param array: the array serial number
        :param group_name: the storage group name
        :returns: list -- member_device_ids
        """
        member_device_ids = self.rest.get_volumes_in_storage_group(
            array, group_name)
        if not member_device_ids:
            LOG.info("No member volumes found in %(group_id)s",
                     {'group_id': group_name})
        return member_device_ids

    def update_group(self, group, add_volumes, remove_volumes):
        """Updates LUNs in a generic volume group.

        :param group: the group object
        :param add_volumes: the uuids of the volumes to be added
            to the group
        :param remove_volumes: the uuids of the volumes to be removed
            from the group
        :returns: model_update, None, None
        :raises: VolumeBackendAPIException, NotImplementedError
        """
        LOG.info("Update generic volume group: %(group)s. "
                 "This adds and/or removes volumes from "
                 "a generic volume group.",
                 {'group': group.id})
        if (not volume_utils.is_group_a_cg_snapshot_type(group)
                and not group.is_replicated):
            raise NotImplementedError()

        array, interval_retries_dict = self.utils.get_volume_group_utils(
            group, self.interval, self.retries)
        model_update = {'status': fields.GroupStatus.AVAILABLE}
        add_vols = [vol for vol in add_volumes] if add_volumes else []
        add_device_ids = self._get_volume_device_ids(add_vols, array)
        remove_vols = [vol for vol in remove_volumes] if remove_volumes else []
        remove_device_ids = self._get_volume_device_ids(remove_vols, array)
        vol_grp_name = None
        try:
            volume_group = self._find_volume_group(array, group)
            if volume_group:
                if 'name' in volume_group:
                    vol_grp_name = volume_group['name']
            if vol_grp_name is None:
                raise exception.GroupNotFound(group_id=group.id)
            # Add volume(s) to the group
            if add_device_ids:
                self.utils.check_rep_status_enabled(group)
                for vol in add_vols:
                    extra_specs = self._initial_setup(vol)
                    self.utils.check_replication_matched(vol, extra_specs)
                self.masking.add_volumes_to_storage_group(
                    array, add_device_ids, vol_grp_name,
                    interval_retries_dict)
                if group.is_replicated:
                    # Add remote volumes to remote storage group
                    self._add_remote_vols_to_volume_group(
                        array, add_vols, group, interval_retries_dict)
            # Remove volume(s) from the group
            if remove_device_ids:
                self.masking.remove_volumes_from_storage_group(
                    array, remove_device_ids,
                    vol_grp_name, interval_retries_dict)
                if group.is_replicated:
                    # Remove remote volumes from the remote storage group
                    self._remove_remote_vols_from_volume_group(
                        array, remove_vols, group, interval_retries_dict)
        except exception.GroupNotFound:
            raise
        except Exception as ex:
            exception_message = (_("Failed to update volume group:"
                                   " %(volGrpName)s. Exception: %(ex)s.")
                                 % {'volGrpName': group.id,
                                    'ex': ex})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        return model_update, None, None

    def _add_remote_vols_to_volume_group(
            self, array, volumes, group,
            extra_specs, rep_driver_data=None):
        """Add the remote volumes to their volume group.

        :param array: the array serial number
        :param volumes: list of volumes
        :param group: the group object
        :param extra_specs: the extra specifications
        :param rep_driver_data: replication driver data, optional
        """
        remote_device_list = []
        __, remote_array = self.get_rdf_details(array)
        for vol in volumes:
            try:
                remote_loc = ast.literal_eval(vol.replication_driver_data)
            except (ValueError, KeyError):
                remote_loc = ast.literal_eval(rep_driver_data)
            found_device_id = self.rest.check_volume_device_id(
                remote_array, remote_loc['device_id'], vol.id)
            if found_device_id is not None:
                remote_device_list.append(found_device_id)
        group_name = self.provision.get_or_create_volume_group(
            remote_array, group, extra_specs)
        self.masking.add_volumes_to_storage_group(
            remote_array, remote_device_list, group_name, extra_specs)
        LOG.info("Added volumes to remote volume group.")

    def _remove_remote_vols_from_volume_group(
            self, array, volumes, group, extra_specs):
        """Remove the remote volumes from their volume group.

        :param array: the array serial number
        :param volumes: list of volumes
        :param group: the group object
        :param extra_specs: the extra specifications
        """
        remote_device_list = []
        __, remote_array = self.get_rdf_details(array)
        for vol in volumes:
            remote_loc = ast.literal_eval(vol.replication_driver_data)
            found_device_id = self.rest.check_volume_device_id(
                remote_array, remote_loc['device_id'], vol.id)
            if found_device_id is not None:
                remote_device_list.append(found_device_id)
        group_name = self.provision.get_or_create_volume_group(
            array, group, extra_specs)
        self.masking.remove_volumes_from_storage_group(
            remote_array, remote_device_list, group_name, extra_specs)
        LOG.info("Removed volumes from remote volume group.")

    def _get_volume_device_ids(self, volumes, array):
        """Get volume device ids from volume.

        :param volumes: volume objects
        :param array: the array serial number
        :returns: device_ids
        """
        device_ids = []
        for volume in volumes:
            specs = {utils.ARRAY: array}
            device_id = self._find_device_on_array(volume, specs)
            if device_id is None:
                LOG.error("Volume %(name)s not found on the array.",
                          {'name': volume['name']})
            else:
                device_ids.append(device_id)
        return device_ids

    def create_group_from_src(self, context, group, volumes,
                              group_snapshot, snapshots, source_group,
                              source_vols):
        """Creates the volume group from source.
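
        Rollback state is accumulated as the clone proceeds, in a dict
        keyed by 'source_group_name', 'target_group_name', 'volumes',
        'device_ids', 'list_volume_pairs' and, when a temporary group
        snapshot is taken, 'snap_name', so that a failure at any step
        can be unwound by _rollback_create_group_from_src.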

        :param context: the context
        :param group: the volume group object to be created
        :param volumes: volumes in the consistency group
        :param group_snapshot: the source volume group snapshot
        :param snapshots: snapshots of the source volumes
        :param source_group: the source volume group
        :param source_vols: the source vols
        :returns: model_update, volumes_model_update
            model_update is a dictionary of cg status
            volumes_model_update is a list of dictionaries of volume
            update
        :raises: VolumeBackendAPIException, NotImplementedError
        """
        if not volume_utils.is_group_a_cg_snapshot_type(group):
            raise NotImplementedError()
        create_snapshot = False
        volumes_model_update = []
        if group_snapshot:
            source_id = group_snapshot.id
            actual_source_grp = group_snapshot.get('group')
        elif source_group:
            source_id = source_group.id
            actual_source_grp = source_group
            create_snapshot = True
        else:
            exception_message = (_("Must supply either group snapshot or "
                                   "a source group."))
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        tgt_name = self.utils.update_volume_group_name(group)
        rollback_dict = {}
        array, interval_retries_dict = self.utils.get_volume_group_utils(
            group, self.interval, self.retries)
        source_sg = self._find_volume_group(array, actual_source_grp)
        if source_sg is not None:
            src_grp_name = (source_sg['name']
                            if 'name' in source_sg else None)
            rollback_dict['source_group_name'] = src_grp_name
        else:
            error_msg = (_("Cannot retrieve source volume group %(grp_id)s "
                           "from the array.")
                         % {'grp_id': actual_source_grp.id})
            LOG.error(error_msg)
            raise exception.VolumeBackendAPIException(data=error_msg)

        LOG.debug("Enter VMAX create_group_from_src. Group to be "
                  "created: %(grpId)s, Source: %(SourceGrpId)s.",
                  {'grpId': group.id, 'SourceGrpId': source_id})

        try:
            self.provision.create_volume_group(
                array, tgt_name, interval_retries_dict)
            rollback_dict.update({
                'target_group_name': tgt_name, 'volumes': [],
                'device_ids': [], 'list_volume_pairs': [],
                'interval_retries_dict': interval_retries_dict})
            model_update = {'status': fields.GroupStatus.AVAILABLE}
            # Create the target devices
            list_volume_pairs = []
            for volume in volumes:
                src_dev_id, extra_specs, vol_size, tgt_vol_name = (
                    self._get_clone_vol_info(
                        volume, source_vols, snapshots))
                volume_dict = self._create_volume(
                    tgt_vol_name, vol_size, extra_specs)
                device_id = volume_dict['device_id']
                # Add the volume to the volume group SG
                self.masking.add_volume_to_storage_group(
                    extra_specs[utils.ARRAY], device_id, tgt_name,
                    tgt_vol_name, extra_specs)
                # Record relevant information
                list_volume_pairs.append((src_dev_id, device_id))
                # Add details to rollback dict
                rollback_dict['device_ids'].append(device_id)
                rollback_dict['list_volume_pairs'].append(
                    (src_dev_id, device_id))
                rollback_dict['volumes'].append(
                    (device_id, extra_specs, volume))
                volumes_model_update.append(
                    self.utils.get_grp_volume_model_update(
                        volume, volume_dict, group.id))

            if create_snapshot is True:
                # We have to create a snapshot of the source group
                snap_name = self.utils.truncate_string(group.id, 19)
                self._create_group_replica(actual_source_grp, snap_name)
                rollback_dict['snap_name'] = snap_name
            else:
                # We need to check if the snapshot exists
                snap_name = self.utils.truncate_string(source_id, 19)
                if ('snapVXSnapshots' in source_sg and
                        snap_name in source_sg['snapVXSnapshots']):
                    LOG.info("Snapshot is present on the array.")
                else:
                    error_msg = (
                        _("Cannot retrieve source snapshot %(snap_id)s "
                          "from the array.") % {'snap_id': source_id})
                    LOG.error(error_msg)
                    raise exception.VolumeBackendAPIException(data=error_msg)
            # Link the snapshot to the target group, then break the link
            self.provision.link_and_break_replica(
                array, src_grp_name, tgt_name, snap_name,
                interval_retries_dict, list_volume_pairs,
                delete_snapshot=create_snapshot)
            # Update the replication status
            if group.is_replicated:
                volumes_model_update = self._replicate_group(
                    array, volumes_model_update,
                    tgt_name, interval_retries_dict)
                model_update.update({
                    'replication_status': fields.ReplicationStatus.ENABLED})
        except Exception:
            exception_message = (_("Failed to create vol grp %(volGrpName)s"
                                   " from source %(grpSnapshot)s.")
                                 % {'volGrpName': group.id,
                                    'grpSnapshot': source_id})
            LOG.error(exception_message)
            if array is not None:
                LOG.info("Attempting rollback for the create group from src.")
                self._rollback_create_group_from_src(array, rollback_dict)
            raise exception.VolumeBackendAPIException(data=exception_message)

        return model_update, volumes_model_update

    def _get_clone_vol_info(self, volume, source_vols, snapshots):
        """Get the clone volume info.
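
        When cloning from a group snapshot, the source device and size are
        taken from the matching snapshot; when cloning from a source
        group, they are taken from the matching source volume.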

        :param volume: the new volume object
        :param source_vols: the source volume list
        :param snapshots: the source snapshot list
        :returns: src_dev_id, extra_specs, vol_size, tgt_vol_name
        """
        src_dev_id, vol_size = None, None
        extra_specs = self._initial_setup(volume)
        if not source_vols:
            for snap in snapshots:
                if snap.id == volume.snapshot_id:
                    src_dev_id, __ = self._parse_snap_info(
                        extra_specs[utils.ARRAY], snap)
                    vol_size = snap.volume_size
        else:
            for src_vol in source_vols:
                if src_vol.id == volume.source_volid:
                    src_extra_specs = self._initial_setup(src_vol)
                    src_dev_id = self._find_device_on_array(
                        src_vol, src_extra_specs)
                    vol_size = src_vol.size
        tgt_vol_name = self.utils.get_volume_element_name(volume.id)
        return src_dev_id, extra_specs, vol_size, tgt_vol_name

    def _rollback_create_group_from_src(self, array, rollback_dict):
        """Performs rollback for create group from src in case of failure.

        :param array: the array serial number
        :param rollback_dict: dict containing rollback details
        """
        try:
            # Delete the snapshot if required
            if rollback_dict.get("snap_name"):
                try:
                    src_dev_ids = [
                        a for a, b in rollback_dict['list_volume_pairs']]
                    self.provision.delete_group_replica(
                        array, rollback_dict["snap_name"],
                        rollback_dict["source_group_name"],
                        src_dev_ids, rollback_dict['interval_retries_dict'])
                except Exception as e:
                    LOG.debug("Failed to delete group snapshot. Attempting "
                              "further rollback. Exception received: %(e)s.",
                              {'e': e})
            if rollback_dict.get('volumes'):
                # Remove any devices which were added to the target SG
                if rollback_dict['device_ids']:
                    self.masking.remove_volumes_from_storage_group(
                        array, rollback_dict['device_ids'],
                        rollback_dict['target_group_name'],
                        rollback_dict['interval_retries_dict'])
                # Delete all the volumes
                for dev_id, extra_specs, volume in rollback_dict['volumes']:
                    self._remove_vol_and_cleanup_replication(
                        array, dev_id, "group vol", extra_specs, volume)
                    self._delete_from_srp(
                        array, dev_id, "group vol", extra_specs)
            # Delete the target SG
            if rollback_dict.get("target_group_name"):
                self.rest.delete_storage_group(
                    array, rollback_dict['target_group_name'])
            LOG.info("Rollback completed for create group from src.")
        except Exception as e:
            LOG.error("Rollback failed for the create group from src. "
                      "Exception received: %(e)s.", {'e': e})

    def _replicate_group(self, array, volumes_model_update,
                         group_name, extra_specs):
        """Replicate a cloned volume group.
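
        After the group is replicated, each volume model update gains
        replication driver data pointing at its new remote device, e.g.
        (illustrative): {'device_id': '0012B', 'array': '000197800124'},
        and a replication_status of 'enabled'.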

        :param array: the array serial number
        :param volumes_model_update: the volumes model updates
        :param group_name: the group name
        :param extra_specs: the extra specs
        :returns: volumes_model_update
        """
        rdf_group_no, remote_array = self.get_rdf_details(array)
        self.rest.replicate_group(
            array, group_name, rdf_group_no, remote_array, extra_specs)
        # Need to set SRP to None for remote generic volume group - Not set
        # automatically, and a volume can only be in one storage group
        # managed by FAST
        self.rest.set_storagegroup_srp(
            remote_array, group_name, "None", extra_specs)
        for volume_model_update in volumes_model_update:
            vol_id = volume_model_update['id']
            loc = ast.literal_eval(volume_model_update['provider_location'])
            src_device_id = loc['device_id']
            rdf_vol_details = self.rest.get_rdf_group_volume(
                array, src_device_id)
            tgt_device_id = rdf_vol_details['remoteDeviceID']
            element_name = self.utils.get_volume_element_name(vol_id)
            self.rest.rename_volume(remote_array, tgt_device_id, element_name)
            rep_update = {'device_id': tgt_device_id, 'array': remote_array}
            volume_model_update.update(
                {'replication_driver_data': six.text_type(rep_update),
                 'replication_status': fields.ReplicationStatus.ENABLED})
        return volumes_model_update

    def enable_replication(self, context, group, volumes):
        """Enable replication for a group.

        Replication is enabled on replication-enabled groups by default.
        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :returns: model_update, None
        """
        if not group.is_replicated:
            raise NotImplementedError()

        model_update = {}
        if not volumes:
            # Return if empty group
            return model_update, None

        try:
            vol_grp_name = None
            extra_specs = self._initial_setup(volumes[0])
            array = extra_specs[utils.ARRAY]
            volume_group = self._find_volume_group(array, group)
            if volume_group:
                if 'name' in volume_group:
                    vol_grp_name = volume_group['name']
            if vol_grp_name is None:
                raise exception.GroupNotFound(group_id=group.id)

            rdf_group_no, __ = self.get_rdf_details(array)
            self.provision.enable_group_replication(
                array, vol_grp_name, rdf_group_no, extra_specs)
            model_update.update({
                'replication_status': fields.ReplicationStatus.ENABLED})
        except Exception as e:
            model_update.update({
                'replication_status': fields.ReplicationStatus.ERROR})
            LOG.error("Error enabling replication on group %(group)s. "
                      "Exception received: %(e)s.",
                      {'group': group.id, 'e': e})

        return model_update, None

    def disable_replication(self, context, group, volumes):
        """Disable replication for a group.
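
        On success the returned model update carries a replication_status
        of 'disabled'; on failure it is set to 'error' and the exception
        is logged rather than raised.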

        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :returns: model_update, None
        """
        if not group.is_replicated:
            raise NotImplementedError()

        model_update = {}
        if not volumes:
            # Return if empty group
            return model_update, None

        try:
            vol_grp_name = None
            extra_specs = self._initial_setup(volumes[0])
            array = extra_specs[utils.ARRAY]
            volume_group = self._find_volume_group(array, group)
            if volume_group:
                if 'name' in volume_group:
                    vol_grp_name = volume_group['name']
            if vol_grp_name is None:
                raise exception.GroupNotFound(group_id=group.id)

            rdf_group_no, __ = self.get_rdf_details(array)
            self.provision.disable_group_replication(
                array, vol_grp_name, rdf_group_no, extra_specs)
            model_update.update({
                'replication_status': fields.ReplicationStatus.DISABLED})
        except Exception as e:
            model_update.update({
                'replication_status': fields.ReplicationStatus.ERROR})
            LOG.error("Error disabling replication on group %(group)s. "
                      "Exception received: %(e)s.",
                      {'group': group.id, 'e': e})

        return model_update, None

    def failover_replication(self, context, group, volumes,
                             secondary_backend_id=None, host=False):
        """Failover replication for a group.

        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :param secondary_backend_id: the secondary backend id - default None
        :param host: flag to indicate if whole host is being failed over
        :returns: model_update, vol_model_updates
        """
        return self._failover_replication(
            volumes, group, None,
            secondary_backend_id=secondary_backend_id, host=host)

    def _failover_replication(
            self, volumes, group, vol_grp_name,
            secondary_backend_id=None, host=False):
        """Failover replication for a group.
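
        Failback is requested by passing secondary_backend_id='default';
        any other value is treated as a failover. For each volume the
        provider_location and replication_driver_data fields are swapped
        (unless the operation errored), so a per-volume update takes the
        form (illustrative): {'id': <volume id>, 'replication_status':
        'failed-over', 'provider_location': ...,
        'replication_driver_data': ...}.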

        :param volumes: the list of volumes
        :param group: the group object
        :param vol_grp_name: the group name
        :param secondary_backend_id: the secondary backend id - default None
        :param host: flag to indicate if whole host is being failed over
        :returns: model_update, vol_model_updates
        """
        model_update = {}
        vol_model_updates = []
        if not volumes:
            # Return if empty group
            return model_update, vol_model_updates

        try:
            extra_specs = self._initial_setup(volumes[0])
            array = extra_specs[utils.ARRAY]
            if group:
                volume_group = self._find_volume_group(array, group)
                if volume_group:
                    if 'name' in volume_group:
                        vol_grp_name = volume_group['name']
                if vol_grp_name is None:
                    raise exception.GroupNotFound(group_id=group.id)

            rdf_group_no, __ = self.get_rdf_details(array)
            # As we only support a single replication target, ignore
            # any secondary_backend_id which is not 'default'
            failover = secondary_backend_id != 'default'
            self.provision.failover_group(
                array, vol_grp_name, rdf_group_no, extra_specs, failover)
            if failover:
                model_update.update({
                    'replication_status':
                        fields.ReplicationStatus.FAILED_OVER})
                vol_rep_status = fields.ReplicationStatus.FAILED_OVER
            else:
                model_update.update({
                    'replication_status': fields.ReplicationStatus.ENABLED})
                vol_rep_status = fields.ReplicationStatus.ENABLED

        except Exception as e:
            model_update.update({
                'replication_status': fields.ReplicationStatus.ERROR})
            vol_rep_status = fields.ReplicationStatus.ERROR
            LOG.error("Error failing over replication on group %(group)s. "
                      "Exception received: %(e)s.",
                      {'group': vol_grp_name, 'e': e})

        for vol in volumes:
            loc = vol.provider_location
            rep_data = vol.replication_driver_data
            if vol_rep_status != fields.ReplicationStatus.ERROR:
                loc = vol.replication_driver_data
                rep_data = vol.provider_location
            update = {'id': vol.id,
                      'replication_status': vol_rep_status,
                      'provider_location': loc,
                      'replication_driver_data': rep_data}
            if host:
                update = {'volume_id': vol.id, 'updates': update}
            vol_model_updates.append(update)

        LOG.debug("Volume model updates: %s", vol_model_updates)
        return model_update, vol_model_updates

    def get_attributes_from_cinder_config(self):
        """Get all attributes from the configuration file.

        :returns: kwargs
        """
        kwargs = None
        username = self.configuration.safe_get(utils.VMAX_USER_NAME)
        password = self.configuration.safe_get(utils.VMAX_PASSWORD)
        if username and password:
            serial_number = self.configuration.safe_get(utils.VMAX_ARRAY)
            if serial_number is None:
                LOG.error("Array Serial Number must be set in cinder.conf")
            srp_name = self.configuration.safe_get(utils.VMAX_SRP)
            if srp_name is None:
                LOG.error("SRP Name must be set in cinder.conf")
            slo = self.configuration.safe_get(utils.VMAX_SERVICE_LEVEL)
            workload = self.configuration.safe_get(utils.VMAX_WORKLOAD)
            port_groups = self.configuration.safe_get(utils.VMAX_PORT_GROUPS)
            random_portgroup = None
            if port_groups:
                random_portgroup = random.choice(port_groups)
            kwargs = (
                {'RestServerIp': self.configuration.safe_get(
                    utils.VMAX_SERVER_IP),
                 'RestServerPort': self.configuration.safe_get(
                     utils.VMAX_SERVER_PORT),
                 'RestUserName': username,
                 'RestPassword': password,
                 'SerialNumber': serial_number,
                 'srpName': srp_name,
                 'PortGroup': random_portgroup})
            if self.configuration.safe_get('driver_ssl_cert_verify'):
                if self.configuration.safe_get('driver_ssl_cert_path'):
                    kwargs.update({'SSLVerify': self.configuration.safe_get(
                        'driver_ssl_cert_path')})
                else:
                    kwargs.update({'SSLVerify': True})
            else:
                kwargs.update({'SSLVerify': False})

            if slo:
                kwargs.update({'ServiceLevel': slo, 'Workload': workload})

        return kwargs

    def revert_to_snapshot(self, volume, snapshot):
        """Revert volume to snapshot.

        :param volume: the volume object
        :param snapshot: the snapshot object
        """
        extra_specs = self._initial_setup(volume)
        if self.utils.is_replication_enabled(extra_specs):
            exception_message = (_(
                "Volume is replicated - revert to snapshot feature is not "
                "supported for replicated volumes."))
            LOG.error(exception_message)
            raise exception.VolumeDriverException(message=exception_message)
        array = extra_specs[utils.ARRAY]
        sourcedevice_id, snap_name = self._parse_snap_info(
            array, snapshot)
        if not sourcedevice_id or not snap_name:
            LOG.error("No snapshot found on the array.")
            exception_message = (_(
                "Failed to revert the volume to the snapshot."))
            raise exception.VolumeDriverException(message=exception_message)
        self._sync_check(array, sourcedevice_id, volume.name, extra_specs)
        try:
            LOG.info("Reverting device: %(deviceid)s "
                     "to snapshot: %(snapname)s.",
                     {'deviceid': sourcedevice_id, 'snapname': snap_name})
            self.provision.revert_volume_snapshot(
                array, sourcedevice_id, snap_name, extra_specs)
            # Once the restore is done, we need to check if it is complete
            restore_complete = self.provision.is_restore_complete(
                array, sourcedevice_id, snap_name, extra_specs)
            if not restore_complete:
                LOG.debug("Restore couldn't complete in the specified "
                          "time interval. The terminate restore may fail.")
            LOG.debug("Terminating restore session")
            # This may throw an exception if restore_complete is False
            self.provision.delete_volume_snap(
                array, snap_name, sourcedevice_id, restored=True)
            # Revert volume to snapshot is successful if termination was
            # successful - possible even if restore_complete was False
            # when we checked last.
            LOG.debug("Restored session was terminated.")
            LOG.info("Reverted the volume to snapshot successfully.")
        except Exception as e:
            exception_message = (_(
                "Failed to revert the volume to the snapshot. "
                "Exception received was %(e)s") % {'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)