1# Copyright 2010 United States Government as represented by the
2# Administrator of the National Aeronautics and Space Administration.
3# All Rights Reserved.
4#
5#    Licensed under the Apache License, Version 2.0 (the "License"); you may
6#    not use this file except in compliance with the License. You may obtain
7#    a copy of the License at
8#
9#         http://www.apache.org/licenses/LICENSE-2.0
10#
11#    Unless required by applicable law or agreed to in writing, software
12#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14#    License for the specific language governing permissions and limitations
15#    under the License.
16"""Drivers for volumes."""
17
18import abc
19import time
20
21from os_brick import exception as brick_exception
22from oslo_concurrency import processutils
23from oslo_config import cfg
24from oslo_config import types
25from oslo_log import log as logging
26from oslo_utils import excutils
27import six
28
29from cinder import exception
30from cinder.i18n import _
31from cinder.image import image_utils
32from cinder import objects
33from cinder.objects import fields
34from cinder import utils
35from cinder.volume import configuration
36from cinder.volume import driver_utils
37from cinder.volume import rpcapi as volume_rpcapi
38from cinder.volume import throttling
39
40LOG = logging.getLogger(__name__)
41
42
# Options common to all volume drivers.  They are registered both in the
# shared backend-defaults group and in [DEFAULT] (see registration below).
volume_opts = [
    cfg.IntOpt('num_shell_tries',
               default=3,
               help='Number of times to attempt to run flakey shell commands'),
    cfg.IntOpt('reserved_percentage',
               default=0,
               min=0, max=100,
               help='The percentage of backend capacity is reserved'),
    cfg.StrOpt('target_prefix',
               deprecated_name='iscsi_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSCSI volumes'),
    cfg.StrOpt('target_ip_address',
               deprecated_name='iscsi_ip_address',
               default='$my_ip',
               help='The IP address that the iSCSI daemon is listening on'),
    cfg.ListOpt('iscsi_secondary_ip_addresses',
                default=[],
                help='The list of secondary IP addresses of the iSCSI daemon'),
    cfg.PortOpt('target_port',
                deprecated_name='iscsi_port',
                default=3260,
                help='The port that the iSCSI daemon is listening on'),
    cfg.IntOpt('num_volume_device_scan_tries',
               default=3,
               help='The maximum number of times to rescan targets'
                    ' to find volume'),
    cfg.StrOpt('volume_backend_name',
               help='The backend name for a given driver implementation'),
    cfg.BoolOpt('use_multipath_for_image_xfer',
                default=False,
                help='Do we attach/detach volumes in cinder using multipath '
                     'for volume to image and image to volume transfers?'),
    cfg.BoolOpt('enforce_multipath_for_image_xfer',
                default=False,
                help='If this is set to True, attachment of volumes for '
                     'image transfer will be aborted when multipathd is not '
                     'running. Otherwise, it will fallback to single path.'),
    cfg.StrOpt('volume_clear',
               default='zero',
               choices=['none', 'zero'],
               help='Method used to wipe old volumes'),
    cfg.IntOpt('volume_clear_size',
               default=0,
               max=1024,
               # NOTE: space added before 'at max' so the concatenated help
               # text doesn't read "1024 MiBat max".
               help='Size in MiB to wipe at start of old volumes. 1024 MiB '
                    'at max. 0 => all'),
    cfg.StrOpt('volume_clear_ionice',
               help='The flag to pass to ionice to alter the i/o priority '
                    'of the process used to zero a volume after deletion, '
                    'for example "-c3" for idle only priority.'),
    cfg.StrOpt('target_helper',
               deprecated_name='iscsi_helper',
               default='tgtadm',
               choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl',
                        'ietadm', 'fake'],
               help='iSCSI target user-land tool to use. tgtadm is default, '
                    'use lioadm for LIO iSCSI support, scstadmin for SCST '
                    'target support, ietadm for iSCSI Enterprise Target, '
                    'iscsictl for Chelsio iSCSI '
                    'Target or fake for testing.'),
    cfg.StrOpt('volumes_dir',
               default='$state_path/volumes',
               help='Volume configuration file storage '
               'directory'),
    cfg.StrOpt('iet_conf',
               default='/etc/iet/ietd.conf',
               help='IET configuration file'),
    cfg.StrOpt('chiscsi_conf',
               default='/etc/chelsio-iscsi/chiscsi.conf',
               help='Chiscsi (CXT) global defaults configuration file'),
    cfg.StrOpt('iscsi_iotype',
               default='fileio',
               choices=['blockio', 'fileio', 'auto'],
               help=('Sets the behavior of the iSCSI target '
                     'to either perform blockio or fileio '
                     'optionally, auto can be set and Cinder '
                     'will autodetect type of backing device')),
    cfg.StrOpt('volume_dd_blocksize',
               default='1M',
               help='The default block size used when copying/clearing '
                    'volumes'),
    cfg.StrOpt('volume_copy_blkio_cgroup_name',
               default='cinder-volume-copy',
               help='The blkio cgroup name to be used to limit bandwidth '
                    'of volume copy'),
    cfg.IntOpt('volume_copy_bps_limit',
               default=0,
               help='The upper limit of bandwidth of volume copy. '
                    '0 => unlimited'),
    cfg.StrOpt('iscsi_write_cache',
               default='on',
               choices=['on', 'off'],
               help='Sets the behavior of the iSCSI target to either '
                    'perform write-back(on) or write-through(off). '
                    'This parameter is valid if target_helper is set '
                    'to tgtadm.'),
    cfg.StrOpt('iscsi_target_flags',
               default='',
               help='Sets the target-specific flags for the iSCSI target. '
                    'Only used for tgtadm to specify backing device flags '
                    'using bsoflags option. The specified string is passed '
                    'as is to the underlying tool.'),
    cfg.StrOpt('target_protocol',
               deprecated_name='iscsi_protocol',
               default='iscsi',
               choices=['iscsi', 'iser'],
               help='Determines the iSCSI protocol for new iSCSI volumes, '
                    'created with tgtadm or lioadm target helpers. In '
                    'order to enable RDMA, this parameter should be set '
                    'with the value "iser". The supported iSCSI protocol '
                    'values are "iscsi" and "iser".'),
    cfg.StrOpt('driver_client_cert_key',
               help='The path to the client certificate key for verification, '
                    'if the driver supports it.'),
    cfg.StrOpt('driver_client_cert',
               help='The path to the client certificate for verification, '
                    'if the driver supports it.'),
    cfg.BoolOpt('driver_use_ssl',
                default=False,
                help='Tell driver to use SSL for connection to backend '
                     'storage if the driver supports it.'),
    cfg.StrOpt('max_over_subscription_ratio',
               default='20.0',
               # NOTE: raw string required -- '\d' is an invalid escape
               # sequence in a normal string literal (DeprecationWarning
               # since Python 3.6).
               regex=r'^(auto|\d*\.\d+|\d+)$',
               help='Representation of the over subscription ratio '
                    'when thin provisioning is enabled. Default ratio is '
                    '20.0, meaning provisioned capacity can be 20 times of '
                    'the total physical capacity. If the ratio is 10.5, it '
                    'means provisioned capacity can be 10.5 times of the '
                    'total physical capacity. A ratio of 1.0 means '
                    'provisioned capacity cannot exceed the total physical '
                    'capacity. If ratio is \'auto\', Cinder will '
                    'automatically calculate the ratio based on the '
                    'provisioned capacity and the used space. If not set to '
                    'auto, the ratio has to be a minimum of 1.0.'),
    cfg.StrOpt('scst_target_iqn_name',
               help='Certain ISCSI targets have predefined target names, '
                    'SCST target driver uses this name.'),
    cfg.StrOpt('scst_target_driver',
               default='iscsi',
               help='SCST target implementation can choose from multiple '
                    'SCST target drivers.'),
    cfg.BoolOpt('use_chap_auth',
                default=False,
                help='Option to enable/disable CHAP authentication for '
                     'targets.'),
    cfg.StrOpt('chap_username',
               default='',
               help='CHAP user name.'),
    cfg.StrOpt('chap_password',
               default='',
               help='Password for specified CHAP account name.',
               secret=True),
    cfg.StrOpt('driver_data_namespace',
               help='Namespace for driver private data values to be '
                    'saved in.'),
    cfg.StrOpt('filter_function',
               help='String representation for an equation that will be '
                    'used to filter hosts. Only used when the driver '
                    'filter is set to be used by the Cinder scheduler.'),
    cfg.StrOpt('goodness_function',
               help='String representation for an equation that will be '
                    'used to determine the goodness of a host. Only used '
                    'when using the goodness weigher is set to be used by '
                    'the Cinder scheduler.'),
    cfg.BoolOpt('driver_ssl_cert_verify',
                default=False,
                help='If set to True the http client will validate the SSL '
                     'certificate of the backend endpoint.'),
    cfg.StrOpt('driver_ssl_cert_path',
               help='Can be used to specify a non default path to a '
               'CA_BUNDLE file or directory with certificates of '
               'trusted CAs, which will be used to validate the backend'),
    cfg.ListOpt('trace_flags',
                help='List of options that control which trace info '
                     'is written to the DEBUG log level to assist '
                     'developers. Valid values are method and api.'),
    cfg.MultiOpt('replication_device',
                 item_type=types.Dict(),
                 secret=True,
                 help="Multi opt of dictionaries to represent a replication "
                      "target device.  This option may be specified multiple "
                      "times in a single config section to specify multiple "
                      "replication target devices.  Each entry takes the "
                      "standard dict config form: replication_device = "
                      "target_device_id:<required>,"
                      "key1:value1,key2:value2..."),
    cfg.BoolOpt('image_upload_use_cinder_backend',
                default=False,
                help='If set to True, upload-to-image in raw format will '
                     'create a cloned volume and register its location to '
                     'the image service, instead of uploading the volume '
                     'content. The cinder backend and locations support '
                     'must be enabled in the image service.'),
    cfg.BoolOpt('image_upload_use_internal_tenant',
                default=False,
                help='If set to True, the image volume created by '
                     'upload-to-image will be placed in the internal tenant. '
                     'Otherwise, the image volume is created in the current '
                     'context\'s tenant.'),
    cfg.BoolOpt('image_volume_cache_enabled',
                default=False,
                help='Enable the image volume cache for this backend.'),
    cfg.IntOpt('image_volume_cache_max_size_gb',
               default=0,
               help='Max size of the image volume cache for this backend in '
                    'GB. 0 => unlimited.'),
    cfg.IntOpt('image_volume_cache_max_count',
               default=0,
               help='Max number of entries allowed in the image volume cache. '
                    '0 => unlimited.'),
    cfg.BoolOpt('report_discard_supported',
                default=False,
                help='Report to clients of Cinder that the backend supports '
                     'discard (aka. trim/unmap). This will not actually '
                     'change the behavior of the backend or the client '
                     'directly, it will only notify that it can be used.'),
    cfg.StrOpt('storage_protocol',
               ignore_case=True,
               default='iscsi',
               choices=['iscsi', 'fc'],
               help='Protocol for transferring data between host and '
                    'storage back-end.'),
    cfg.BoolOpt('backup_use_temp_snapshot',
                default=False,
                help='If this is set to True, a temporary snapshot will '
                     'be created for performing non-disruptive backups. '
                     'Otherwise a temporary volume will be cloned '
                     'in order to perform a backup.'),
    cfg.BoolOpt('enable_unsupported_driver',
                default=False,
                help="Set this to True when you want to allow an unsupported "
                     "driver to start.  Drivers that haven't maintained a "
                     "working CI system and testing are marked as unsupported "
                     "until CI is working again.  This also marks a driver as "
                     "deprecated and may be removed in the next release."),
    cfg.StrOpt('backend_availability_zone',
               default=None,
               help='Availability zone for this volume backend. If not set, '
                    'the storage_availability_zone option value is used as '
                    'the default for all backends.'),
]
286
# for backward compatibility
iser_opts = [
    cfg.IntOpt('num_iser_scan_tries',
               default=3,
               # NOTE: leading space on the continuation so the help text
               # doesn't read "iSER targetto find volume".
               help='The maximum number of times to rescan iSER target'
                    ' to find volume'),
    cfg.StrOpt('iser_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSER volumes'),
    cfg.StrOpt('iser_ip_address',
               default='$my_ip',
               help='The IP address that the iSER daemon is listening on'),
    cfg.PortOpt('iser_port',
                default=3260,
                help='The port that the iSER daemon is listening on'),
    cfg.StrOpt('iser_helper',
               default='tgtadm',
               help='The name of the iSER target user-land tool to use'),
]
306
307
CONF = cfg.CONF
# Register the driver options both in the shared per-backend defaults group
# and in [DEFAULT] (the latter for backward compatibility).
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(volume_opts)
CONF.register_opts(iser_opts)
# Make backup_use_same_host (defined in cinder.backup.api) available on CONF.
CONF.import_opt('backup_use_same_host', 'cinder.backup.api')
314
315
316@six.add_metaclass(abc.ABCMeta)
317class BaseVD(object):
318    """Executes commands relating to Volumes.
319
320       Base Driver for Cinder Volume Control Path,
321       This includes supported/required implementation
322       for API calls.  Also provides *generic* implementation
323       of core features like cloning, copy_image_to_volume etc,
324       this way drivers that inherit from this base class and
325       don't offer their own impl can fall back on a general
326       solution here.
327
328       Key thing to keep in mind with this driver is that it's
329       intended that these drivers ONLY implement Control Path
330       details (create, delete, extend...), while transport or
331       data path related implementation should be a *member object*
332       that we call a connector.  The point here is that for example
333       don't allow the LVM driver to implement iSCSI methods, instead
334       call whatever connector it has configured via conf file
335       (iSCSI{LIO, TGT, IET}, FC, etc).
336
337       In the base class and for example the LVM driver we do this via a has-a
338       relationship and just provide an interface to the specific connector
339       methods.  How you do this in your own driver is of course up to you.
340    """
    # Driver version string, returned by get_version().  Subclasses are
    # expected to override this with their own version.
    VERSION = "N/A"

    # NOTE(geguileo): By default we assume drivers don't support Active-Active
    # configurations.  If driver supports it then they can set this class
    # attribute on the driver, and if support depends on configuration options
    # then they can set it at the instance level on the driver's __init__
    # method since the manager will do the check after that.
    SUPPORTS_ACTIVE_ACTIVE = False

    # If a driver hasn't maintained their CI system, this will get
    # set to False, which prevents the driver from starting.
    # Add enable_unsupported_driver = True in cinder.conf to get
    # the unsupported driver started.
    SUPPORTED = True

    # Methods checked to detect a driver implements a replication feature.
    # Maps replication flavor -> method name that must be overridden.
    REPLICATION_FEATURE_CHECKERS = {'v2.1': 'failover_host',
                                    'a/a': 'failover_completed'}
359
    def __init__(self, execute=utils.execute, *args, **kwargs):
        """Initialize the base driver.

        :param execute: callable used to run shell commands.
        :param kwargs: the manager supplies ``db``, ``host``,
                       ``cluster_name`` and ``configuration`` here.
        :raises exception.Invalid: when replication devices are configured
            on a clustered service but the driver does not report the
            'a/a' replication feature.
        """
        # NOTE(vish): db is set by Manager
        self.db = kwargs.get('db')
        self.host = kwargs.get('host')
        self.cluster_name = kwargs.get('cluster_name')
        self.configuration = kwargs.get('configuration', None)

        if self.configuration:
            self.configuration.append_config_values(volume_opts)
            self.configuration.append_config_values(iser_opts)
            utils.setup_tracing(self.configuration.safe_get('trace_flags'))

            # NOTE(geguileo): Don't allow to start if we are enabling
            # replication on a cluster service with a backend that doesn't
            # support the required mechanism for Active-Active.
            replication_devices = self.configuration.safe_get(
                'replication_device')
            if (self.cluster_name and replication_devices and
                    not self.supports_replication_feature('a/a')):
                raise exception.Invalid(_("Driver doesn't support clustered "
                                          "replication."))

        # Utility object bound to this driver's data namespace and the DB.
        self.driver_utils = driver_utils.VolumeDriverUtils(
            self._driver_data_namespace(), self.db)

        self._execute = execute
        self._stats = {}
        self._throttle = None

        self.pools = []
        self.capabilities = {}

        # We set these mappings up in the base driver so they
        # can be used by children
        # (intended for LVM and BlockDevice, but others could use as well)
        self.target_mapping = {
            'fake': 'cinder.volume.targets.fake.FakeTarget',
            'ietadm': 'cinder.volume.targets.iet.IetAdm',
            'lioadm': 'cinder.volume.targets.lio.LioAdm',
            'tgtadm': 'cinder.volume.targets.tgt.TgtAdm',
            'scstadmin': 'cinder.volume.targets.scst.SCSTAdm',
            'iscsictl': 'cinder.volume.targets.cxt.CxtAdm'}

        # set True by manager after successful check_for_setup
        self._initialized = False
405
406    def _driver_data_namespace(self):
407        namespace = self.__class__.__name__
408        if self.configuration:
409            namespace = self.configuration.safe_get('driver_data_namespace')
410            if not namespace:
411                namespace = self.configuration.safe_get('volume_backend_name')
412        return namespace
413
414    def _is_non_recoverable(self, err, non_recoverable_list):
415        for item in non_recoverable_list:
416            if item in err:
417                return True
418
419        return False
420
421    def _try_execute(self, *command, **kwargs):
422        # NOTE(vish): Volume commands can partially fail due to timing, but
423        #             running them a second time on failure will usually
424        #             recover nicely.
425
426        non_recoverable = kwargs.pop('no_retry_list', [])
427
428        tries = 0
429        while True:
430            try:
431                self._execute(*command, **kwargs)
432                return True
433            except processutils.ProcessExecutionError as ex:
434                tries = tries + 1
435
436                if tries >= self.configuration.num_shell_tries or\
437                        self._is_non_recoverable(ex.stderr, non_recoverable):
438                    raise
439
440                LOG.exception("Recovering from a failed execute. "
441                              "Try number %s", tries)
442                time.sleep(tries ** 2)
443
    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False, ignore_errors=False):
        """Disconnect the volume from the host.

        With the force parameter we can indicate if we give more importance to
        cleaning up as much as possible or if data integrity has higher
        priority.  This requires the latest OS-Brick code that adds this
        feature.

        We can also force errors to be ignored using ignore_errors.

        :param context: security context.
        :param attach_info: connection info from the attach step; may be
            falsy, in which case the disconnect step is skipped.
        :param volume: the volume being detached.
        :param properties: connector properties for the host.
        :param force: prioritize cleanup over data integrity.
        :param remote: terminate the connection through the volume RPC API
            instead of the local driver.
        :param ignore_errors: suppress re-raising the accumulated errors.
        :raises exception.VolumeBackendAPIException: if the local driver
            fails to terminate the connection.
        :raises exception.RemoveExportException: if removing the export
            fails.
        """
        # Use Brick's code to do attach/detach
        exc = brick_exception.ExceptionChainer()
        if attach_info:
            connector = attach_info['connector']
            with exc.context(force, 'Disconnect failed'):
                connector.disconnect_volume(attach_info['conn']['data'],
                                            attach_info['device'], force=force,
                                            ignore_errors=ignore_errors)

        if remote:
            # Call remote manager's terminate_connection which includes
            # driver's terminate_connection and remove export
            rpcapi = volume_rpcapi.VolumeAPI()
            with exc.context(force, 'Remote terminate connection failed'):
                rpcapi.terminate_connection(context, volume, properties,
                                            force=force)
        else:
            # Call local driver's terminate_connection and remove export.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            with exc.context(force,
                             _('Unable to terminate volume connection')):
                try:
                    self.terminate_connection(volume, properties, force=force)
                except Exception as err:
                    err_msg = (
                        _('Unable to terminate volume connection: %(err)s')
                        % {'err': err})
                    LOG.error(err_msg)
                    raise exception.VolumeBackendAPIException(data=err_msg)

            with exc.context(force, _('Unable to remove export')):
                try:
                    LOG.debug("volume %s: removing export", volume['id'])
                    self.remove_export(context, volume)
                except Exception as ex:
                    LOG.exception("Error detaching volume %(volume)s, "
                                  "due to remove export failure.",
                                  {"volume": volume['id']})
                    raise exception.RemoveExportException(volume=volume['id'],
                                                          reason=ex)
        # The chainer evaluates truthy when any context captured an error;
        # re-raise the accumulated chain unless errors are being ignored.
        if exc and not ignore_errors:
            raise exc
498
    def set_initialized(self):
        """Mark the driver as initialized.

        Called by the manager after a successful setup check (see the
        ``_initialized`` attribute set in ``__init__``).
        """
        self._initialized = True
501
    @property
    def initialized(self):
        """Whether the driver has completed initialization."""
        return self._initialized
505
    @property
    def supported(self):
        """Whether this driver is supported (has a maintained CI system)."""
        return self.SUPPORTED
509
510    def set_throttle(self):
511        bps_limit = ((self.configuration and
512                      self.configuration.safe_get('volume_copy_bps_limit')) or
513                     CONF.volume_copy_bps_limit)
514        cgroup_name = ((self.configuration and
515                        self.configuration.safe_get(
516                            'volume_copy_blkio_cgroup_name')) or
517                       CONF.volume_copy_blkio_cgroup_name)
518        self._throttle = None
519        if bps_limit:
520            try:
521                self._throttle = throttling.BlkioCgroup(int(bps_limit),
522                                                        cgroup_name)
523            except processutils.ProcessExecutionError as err:
524                LOG.warning('Failed to activate volume copy throttling: '
525                            '%(err)s', {'err': err})
526        throttling.Throttle.set_default(self._throttle)
527
    def get_version(self):
        """Get the current version of this driver.

        :returns: the class-level ``VERSION`` string.
        """
        return self.VERSION
531
    @abc.abstractmethod
    def check_for_setup_error(self):
        """Check the backend for setup errors.

        Abstract hook; subclasses must implement it and raise on fatal
        misconfiguration.
        """
        return
535
    @abc.abstractmethod
    def create_volume(self, volume):
        """Creates a volume.

        Can optionally return a Dictionary of changes to the volume object to
        be persisted.

        If volume_type extra specs includes
        'capabilities:replication <is> True' the driver
        needs to create a volume replica (secondary), and setup replication
        between the newly created volume and the secondary volume.
        Returned dictionary should include:

        .. code-block:: python

            volume['replication_status'] = 'copying'
            volume['replication_extended_status'] = <driver specific value>
            volume['driver_data'] = <driver specific value>

        :param volume: the volume to create.
        :returns: optionally, a dict of volume model updates.
        """
        return
557
    @abc.abstractmethod
    def delete_volume(self, volume):
        """Deletes a volume.

        If volume_type extra specs includes 'replication: <is> True'
        then the driver needs to delete the volume replica too.

        :param volume: the volume to delete.
        """
        return
566
    def secure_file_operations_enabled(self):
        """Determine if driver is running in Secure File Operations mode.

        The Cinder Volume driver needs to query if this driver is running
        in a secure file operations mode. By default, it is False: any driver
        that does support secure file operations should override this method.

        :returns: False unless overridden by a subclass.
        """
        return False
575
    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        If 'refresh' is True, run the update first.

        For replication the following state should be reported:
        replication = True (None or false disables replication)

        :param refresh: when True, refresh the stats before returning.
        :returns: None here; subclasses are expected to return a stats dict
            (see ``init_capabilities``, which copies the result).
        """
        return
585
586    def get_prefixed_property(self, property):
587        """Return prefixed property name
588
589        :returns: a prefixed property name string or None
590        """
591
592        if property and self.capabilities.get('vendor_prefix'):
593            return self.capabilities.get('vendor_prefix') + ':' + property
594
595    def _set_property(self, properties, entry, title, description,
596                      type, **kwargs):
597        prop = dict(title=title, description=description, type=type)
598        allowed_keys = ('enum', 'default', 'minimum', 'maximum')
599        for key in kwargs:
600            if key in allowed_keys:
601                prop[key] = kwargs[key]
602        properties[entry] = prop
603
604    def _init_standard_capabilities(self):
605        """Create a dictionary of Cinder standard capabilities.
606
607        This method creates a dictionary of Cinder standard capabilities
608        and returns the created dictionary.
609        The keys of this dictionary don't contain prefix and separator(:).
610        """
611
612        properties = {}
613        self._set_property(
614            properties,
615            "thin_provisioning",
616            "Thin Provisioning",
617            _("Sets thin provisioning."),
618            "boolean")
619
620        self._set_property(
621            properties,
622            "compression",
623            "Compression",
624            _("Enables compression."),
625            "boolean")
626
627        self._set_property(
628            properties,
629            "qos",
630            "QoS",
631            _("Enables QoS."),
632            "boolean")
633
634        self._set_property(
635            properties,
636            "replication_enabled",
637            "Replication",
638            _("Enables replication."),
639            "boolean")
640
641        return properties
642
    def _init_vendor_properties(self):
        """Create a dictionary of vendor unique properties.

        This method creates a dictionary of vendor unique properties
        and returns both created dictionary and vendor name.
        Returned vendor name is used to check for name of vendor
        unique properties.

        - Vendor name shouldn't include colon(:) because of the separator
          and it is automatically replaced by underscore(_).
          ex. abc:d -> abc_d
        - Vendor prefix is equal to vendor name.
          ex. abcd
        - Vendor unique properties must start with vendor prefix + ':'.
          ex. abcd:maxIOPS

        Each backend driver needs to override this method to expose
        its own properties using _set_property() like this:

        self._set_property(
            properties,
            "vendorPrefix:specific_property",
            "Title of property",
            _("Description of property"),
            "type")

        :returns: dictionary of vendor unique properties
        :returns: vendor name

        Example of implementation::

        properties = {}
        self._set_property(
            properties,
            "abcd:compression_type",
            "Compression type",
            _("Specifies compression type."),
            "string",
            enum=["lossy", "lossless", "special"])

        self._set_property(
            properties,
            "abcd:minIOPS",
            "Minimum IOPS QoS",
            _("Sets minimum IOPS if QoS is enabled."),
            "integer",
            minimum=10,
            default=100)

        return properties, 'abcd'
        """
        # Base implementation: no vendor-specific properties.
        return {}, None
696
697    def init_capabilities(self):
698        """Obtain backend volume stats and capabilities list.
699
700        This stores a dictionary which is consisted of two parts.
701        First part includes static backend capabilities which are
702        obtained by get_volume_stats(). Second part is properties,
703        which includes parameters correspond to extra specs.
704        This properties part is consisted of cinder standard
705        capabilities and vendor unique properties.
706
707        Using this capabilities list, operator can manage/configure
708        backend using key/value from capabilities without specific
709        knowledge of backend.
710        """
711
712        # Set static backend capabilities from get_volume_stats()
713        stats = self.get_volume_stats(True)
714        if stats:
715            self.capabilities = stats.copy()
716
717        # Set cinder standard capabilities
718        self.capabilities['properties'] = self._init_standard_capabilities()
719
720        # Set Vendor unique properties
721        vendor_prop, vendor_name = self._init_vendor_properties()
722        if vendor_name and vendor_prop:
723            updated_vendor_prop = {}
724            old_name = None
725            # Replace colon in vendor name to underscore.
726            if ':' in vendor_name:
727                old_name = vendor_name
728                vendor_name = vendor_name.replace(':', '_')
729                LOG.warning('The colon in vendor name was replaced '
730                            'by underscore. Updated vendor name is '
731                            '%(name)s".', {'name': vendor_name})
732
733            for key in vendor_prop:
734                # If key has colon in vendor name field, we replace it to
735                # underscore.
736                # ex. abc:d:storagetype:provisioning
737                #     -> abc_d:storagetype:provisioning
738                if old_name and key.startswith(old_name + ':'):
739                    new_key = key.replace(old_name, vendor_name, 1)
740                    updated_vendor_prop[new_key] = vendor_prop[key]
741                    continue
742                if not key.startswith(vendor_name + ':'):
743                    LOG.warning('Vendor unique property "%(property)s" '
744                                'must start with vendor prefix with colon '
745                                '"%(prefix)s". The property was '
746                                'not registered on capabilities list.',
747                                {'prefix': vendor_name + ':',
748                                 'property': key})
749                    continue
750                updated_vendor_prop[key] = vendor_prop[key]
751
752            # Update vendor unique properties to the dictionary
753            self.capabilities['vendor_prefix'] = vendor_name
754            self.capabilities['properties'].update(updated_vendor_prop)
755
756        LOG.debug("Initialized capabilities list: %s.", self.capabilities)
757
758    def _update_pools_and_stats(self, data):
759        """Updates data for pools and volume stats based on provided data."""
760        # provisioned_capacity_gb is set to None by default below, but
761        # None won't be used in calculation. It will be overridden by
762        # driver's provisioned_capacity_gb if reported, otherwise it
763        # defaults to allocated_capacity_gb in host_manager.py.
764        if self.pools:
765            for pool in self.pools:
766                new_pool = {}
767                new_pool.update(dict(
768                    pool_name=pool,
769                    total_capacity_gb=0,
770                    free_capacity_gb=0,
771                    provisioned_capacity_gb=None,
772                    reserved_percentage=100,
773                    QoS_support=False,
774                    filter_function=self.get_filter_function(),
775                    goodness_function=self.get_goodness_function()
776                ))
777                data["pools"].append(new_pool)
778        else:
779            # No pool configured, the whole backend will be treated as a pool
780            single_pool = {}
781            single_pool.update(dict(
782                pool_name=data["volume_backend_name"],
783                total_capacity_gb=0,
784                free_capacity_gb=0,
785                provisioned_capacity_gb=None,
786                reserved_percentage=100,
787                QoS_support=False,
788                filter_function=self.get_filter_function(),
789                goodness_function=self.get_goodness_function()
790            ))
791            data["pools"].append(single_pool)
792        self._stats = data
793
794    def copy_image_to_volume(self, context, volume, image_service, image_id):
795        """Fetch image from image_service and write to unencrypted volume.
796
797        This does not attach an encryptor layer when connecting to the volume.
798        """
799        self._copy_image_data_to_volume(
800            context, volume, image_service, image_id, encrypted=False)
801
802    def copy_image_to_encrypted_volume(
803            self, context, volume, image_service, image_id):
804        """Fetch image from image_service and write to encrypted volume.
805
806        This attaches the encryptor layer when connecting to the volume.
807        """
808        self._copy_image_data_to_volume(
809            context, volume, image_service, image_id, encrypted=True)
810
    def _copy_image_data_to_volume(self, context, volume, image_service,
                                   image_id, encrypted=False):
        """Fetch the image from image_service and write it to the volume.

        :param context: security context
        :param volume: volume that receives the image data
        :param image_service: image service used to fetch the image
        :param image_id: identifier of the image to copy
        :param encrypted: when True, attach an encryptor layer on top of
                          the attached device before writing
        """
        LOG.debug('copy_image_to_volume %s.', volume['name'])

        use_multipath = self.configuration.use_multipath_for_image_xfer
        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
        properties = utils.brick_get_connector_properties(use_multipath,
                                                          enforce_multipath)
        attach_info, volume = self._attach_volume(context, volume, properties)
        try:
            if encrypted:
                encryption = self.db.volume_encryption_metadata_get(context,
                                                                    volume.id)
                utils.brick_attach_volume_encryptor(context,
                                                    attach_info,
                                                    encryption)
            try:
                image_utils.fetch_to_raw(
                    context,
                    image_service,
                    image_id,
                    attach_info['device']['path'],
                    self.configuration.volume_dd_blocksize,
                    size=volume['size'])
            except exception.ImageTooBig:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Copying image %(image_id)s "
                                  "to volume failed due to "
                                  "insufficient available space.",
                                  {'image_id': image_id})

            finally:
                # Detach the encryptor before the raw device is detached,
                # whether or not the copy succeeded.
                if encrypted:
                    utils.brick_detach_volume_encryptor(attach_info,
                                                        encryption)
        finally:
            # Always detach; force because the device may be left in an
            # inconsistent state after a failed copy.
            self._detach_volume(context, attach_info, volume, properties,
                                force=True)
850
851    def copy_volume_to_image(self, context, volume, image_service, image_meta):
852        """Copy the volume to the specified image."""
853        LOG.debug('copy_volume_to_image %s.', volume['name'])
854
855        use_multipath = self.configuration.use_multipath_for_image_xfer
856        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
857        properties = utils.brick_get_connector_properties(use_multipath,
858                                                          enforce_multipath)
859        attach_info, volume = self._attach_volume(context, volume, properties)
860
861        try:
862            image_utils.upload_volume(context,
863                                      image_service,
864                                      image_meta,
865                                      attach_info['device']['path'])
866        finally:
867            # Since attached volume was not used for writing we can force
868            # detach it
869            self._detach_volume(context, attach_info, volume, properties,
870                                force=True, ignore_errors=True)
871
872    def before_volume_copy(self, context, src_vol, dest_vol, remote=None):
873        """Driver-specific actions before copyvolume data.
874
875        This method will be called before _copy_volume_data during volume
876        migration
877        """
878        pass
879
880    def after_volume_copy(self, context, src_vol, dest_vol, remote=None):
881        """Driver-specific actions after copyvolume data.
882
883        This method will be called after _copy_volume_data during volume
884        migration
885        """
886        pass
887
888    def get_filter_function(self):
889        """Get filter_function string.
890
891        Returns either the string from the driver instance or global section
892        in cinder.conf. If nothing is specified in cinder.conf, then try to
893        find the default filter_function. When None is returned the scheduler
894        will always pass the driver instance.
895
896        :returns: a filter_function string or None
897        """
898        ret_function = self.configuration.filter_function
899        if not ret_function:
900            ret_function = CONF.filter_function
901        if not ret_function:
902            ret_function = self.get_default_filter_function()
903        return ret_function
904
905    def get_goodness_function(self):
906        """Get good_function string.
907
908        Returns either the string from the driver instance or global section
909        in cinder.conf. If nothing is specified in cinder.conf, then try to
910        find the default goodness_function. When None is returned the scheduler
911        will give the lowest score to the driver instance.
912
913        :returns: a goodness_function string or None
914        """
915        ret_function = self.configuration.goodness_function
916        if not ret_function:
917            ret_function = CONF.goodness_function
918        if not ret_function:
919            ret_function = self.get_default_goodness_function()
920        return ret_function
921
922    def get_default_filter_function(self):
923        """Get the default filter_function string.
924
925        Each driver could overwrite the method to return a well-known
926        default string if it is available.
927
928        :returns: None
929        """
930        return None
931
932    def get_default_goodness_function(self):
933        """Get the default goodness_function string.
934
935        Each driver could overwrite the method to return a well-known
936        default string if it is available.
937
938        :returns: None
939        """
940        return None
941
942    def _attach_volume(self, context, volume, properties, remote=False):
943        """Attach the volume."""
944        if remote:
945            # Call remote manager's initialize_connection which includes
946            # driver's create_export and initialize_connection
947            rpcapi = volume_rpcapi.VolumeAPI()
948            try:
949                conn = rpcapi.initialize_connection(context, volume,
950                                                    properties)
951            except Exception:
952                with excutils.save_and_reraise_exception():
953                    # It is possible that initialize_connection fails due to
954                    # timeout. In fact, the volume is already attached after
955                    # the timeout error is raised, so the connection worths
956                    # a try of terminating.
957                    try:
958                        rpcapi.terminate_connection(context, volume,
959                                                    properties, force=True)
960                    except Exception:
961                        LOG.warning("Failed terminating the connection "
962                                    "of volume %(volume_id)s, but it is "
963                                    "acceptable.",
964                                    {'volume_id': volume['id']})
965        else:
966            # Call local driver's create_export and initialize_connection.
967            # NOTE(avishay) This is copied from the manager's code - need to
968            # clean this up in the future.
969            model_update = None
970            try:
971                LOG.debug("Volume %s: creating export", volume['id'])
972                model_update = self.create_export(context, volume, properties)
973                if model_update:
974                    volume.update(model_update)
975                    volume.save()
976            except exception.CinderException as ex:
977                if model_update:
978                    LOG.exception("Failed updating model of volume "
979                                  "%(volume_id)s with driver provided "
980                                  "model %(model)s",
981                                  {'volume_id': volume['id'],
982                                   'model': model_update})
983                    raise exception.ExportFailure(reason=ex)
984
985            try:
986                conn = self.initialize_connection(volume, properties)
987            except Exception as err:
988                try:
989                    err_msg = (_('Unable to fetch connection information from '
990                                 'backend: %(err)s') %
991                               {'err': six.text_type(err)})
992                    LOG.error(err_msg)
993                    LOG.debug("Cleaning up failed connect initialization.")
994                    self.remove_export(context, volume)
995                except Exception as ex:
996                    ex_msg = (_('Error encountered during cleanup '
997                                'of a failed attach: %(ex)s') %
998                              {'ex': six.text_type(ex)})
999                    LOG.error(err_msg)
1000                    raise exception.VolumeBackendAPIException(data=ex_msg)
1001                raise exception.VolumeBackendAPIException(data=err_msg)
1002
1003            # Add encrypted flag to connection_info if not set in the driver.
1004            if conn['data'].get('encrypted') is None:
1005                encrypted = bool(volume.encryption_key_id)
1006                conn['data']['encrypted'] = encrypted
1007
1008        try:
1009            attach_info = self._connect_device(conn)
1010        except Exception as exc:
1011            # We may have reached a point where we have attached the volume,
1012            # so we have to detach it (do the cleanup).
1013            attach_info = getattr(exc, 'kwargs', {}).get('attach_info', None)
1014
1015            try:
1016                LOG.debug('Device for volume %s is unavailable but did '
1017                          'attach, detaching it.', volume['id'])
1018                self._detach_volume(context, attach_info, volume,
1019                                    properties, force=True,
1020                                    remote=remote)
1021            except Exception:
1022                LOG.exception('Error detaching volume %s',
1023                              volume['id'])
1024            raise
1025
1026        return (attach_info, volume)
1027
1028    def _attach_snapshot(self, ctxt, snapshot, properties):
1029        """Attach the snapshot."""
1030        model_update = None
1031        try:
1032            LOG.debug("Snapshot %s: creating export.", snapshot.id)
1033            model_update = self.create_export_snapshot(ctxt, snapshot,
1034                                                       properties)
1035            if model_update:
1036                snapshot.provider_location = model_update.get(
1037                    'provider_location', None)
1038                snapshot.provider_auth = model_update.get(
1039                    'provider_auth', None)
1040                snapshot.save()
1041        except exception.CinderException as ex:
1042            if model_update:
1043                LOG.exception("Failed updating model of snapshot "
1044                              "%(snapshot_id)s with driver provided "
1045                              "model %(model)s.",
1046                              {'snapshot_id': snapshot.id,
1047                               'model': model_update})
1048                raise exception.ExportFailure(reason=ex)
1049
1050        try:
1051            conn = self.initialize_connection_snapshot(
1052                snapshot, properties)
1053        except Exception as err:
1054            try:
1055                err_msg = (_('Unable to fetch connection information from '
1056                             'backend: %(err)s') %
1057                           {'err': six.text_type(err)})
1058                LOG.error(err_msg)
1059                LOG.debug("Cleaning up failed connect initialization.")
1060                self.remove_export_snapshot(ctxt, snapshot)
1061            except Exception as ex:
1062                ex_msg = (_('Error encountered during cleanup '
1063                            'of a failed attach: %(ex)s') %
1064                          {'ex': six.text_type(ex)})
1065                LOG.error(err_msg)
1066                raise exception.VolumeBackendAPIException(data=ex_msg)
1067            raise exception.VolumeBackendAPIException(data=err_msg)
1068        return conn
1069
1070    def _connect_device(self, conn):
1071        # Use Brick's code to do attach/detach
1072        use_multipath = self.configuration.use_multipath_for_image_xfer
1073        device_scan_attempts = self.configuration.num_volume_device_scan_tries
1074        protocol = conn['driver_volume_type']
1075        connector = utils.brick_get_connector(
1076            protocol,
1077            use_multipath=use_multipath,
1078            device_scan_attempts=device_scan_attempts,
1079            conn=conn)
1080        device = connector.connect_volume(conn['data'])
1081        host_device = device['path']
1082
1083        attach_info = {'conn': conn, 'device': device, 'connector': connector}
1084
1085        unavailable = True
1086        try:
1087            # Secure network file systems will NOT run as root.
1088            root_access = not self.secure_file_operations_enabled()
1089            unavailable = not connector.check_valid_device(host_device,
1090                                                           root_access)
1091        except Exception:
1092            LOG.exception('Could not validate device %s', host_device)
1093
1094        if unavailable:
1095            raise exception.DeviceUnavailable(path=host_device,
1096                                              attach_info=attach_info,
1097                                              reason=(_("Unable to access "
1098                                                        "the backend storage "
1099                                                        "via the path "
1100                                                        "%(path)s.") %
1101                                                      {'path': host_device}))
1102        return attach_info
1103
1104    def clone_image(self, context, volume,
1105                    image_location, image_meta,
1106                    image_service):
1107        return None, False
1108
1109    def backup_use_temp_snapshot(self):
1110        """Get the configured setting for backup from snapshot.
1111
1112        If an inheriting driver does not support this operation,
1113        the driver should override this method to return false
1114        and log a warning letting the administrator know they
1115        have configured something that cannot be done.
1116        """
1117        return self.configuration.safe_get("backup_use_temp_snapshot")
1118
1119    def snapshot_revert_use_temp_snapshot(self):
1120        # Specify whether a temporary backup snapshot should be used when
1121        # reverting a snapshot. For some backends, this operation is not
1122        # needed or not supported, in which case the driver should override
1123        # this method.
1124        return True
1125
1126    def snapshot_remote_attachable(self):
1127        # TODO(lixiaoy1): the method will be deleted later when remote
1128        # attach snapshot is implemented.
1129        return False
1130
1131    def get_backup_device(self, context, backup):
1132        """Get a backup device from an existing volume.
1133
1134        The function returns a volume or snapshot to backup service,
1135        and then backup service attaches the device and does backup.
1136        """
1137        backup_device = None
1138        is_snapshot = False
1139        if self.backup_use_temp_snapshot():
1140            (backup_device, is_snapshot) = (
1141                self._get_backup_volume_temp_snapshot(context, backup))
1142        else:
1143            backup_device = self._get_backup_volume_temp_volume(
1144                context, backup)
1145            is_snapshot = False
1146        return (backup_device, is_snapshot)
1147
1148    def _get_backup_volume_temp_volume(self, context, backup):
1149        """Return a volume to do backup.
1150
1151        To backup a snapshot, create a temp volume from the snapshot and
1152        back it up.
1153
1154        Otherwise to backup an in-use volume, create a temp volume and
1155        back it up.
1156        """
1157        volume = objects.Volume.get_by_id(context, backup.volume_id)
1158        snapshot = None
1159        if backup.snapshot_id:
1160            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
1161
1162        LOG.debug('Creating a new backup for volume %s.', volume['name'])
1163
1164        temp_vol_ref = None
1165        device_to_backup = volume
1166
1167        # NOTE(xyang): If it is to backup from snapshot, create a temp
1168        # volume from the source snapshot, backup the temp volume, and
1169        # then clean up the temp volume.
1170        if snapshot:
1171            temp_vol_ref = self._create_temp_volume_from_snapshot(
1172                context, volume, snapshot)
1173            backup.temp_volume_id = temp_vol_ref.id
1174            backup.save()
1175            device_to_backup = temp_vol_ref
1176
1177        else:
1178            # NOTE(xyang): Check volume status if it is not to backup from
1179            # snapshot; if 'in-use', create a temp volume from the source
1180            # volume, backup the temp volume, and then clean up the temp
1181            # volume; if 'available', just backup the volume.
1182            previous_status = volume.get('previous_status')
1183            if previous_status == "in-use":
1184                temp_vol_ref = self._create_temp_cloned_volume(
1185                    context, volume)
1186                backup.temp_volume_id = temp_vol_ref.id
1187                backup.save()
1188                device_to_backup = temp_vol_ref
1189
1190        return device_to_backup
1191
1192    def _get_backup_volume_temp_snapshot(self, context, backup):
1193        """Return a device to backup.
1194
1195        If it is to backup from snapshot, back it up directly.
1196
1197        Otherwise for in-use volume, create a temp snapshot and back it up.
1198        """
1199        volume = objects.Volume.get_by_id(context, backup.volume_id)
1200        snapshot = None
1201        if backup.snapshot_id:
1202            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
1203
1204        LOG.debug('Creating a new backup for volume %s.', volume['name'])
1205
1206        device_to_backup = volume
1207        is_snapshot = False
1208        temp_snapshot = None
1209
1210        # NOTE(xyang): If it is to backup from snapshot, back it up
1211        # directly. No need to clean it up.
1212        if snapshot:
1213            device_to_backup = snapshot
1214            is_snapshot = True
1215
1216        else:
1217            # NOTE(xyang): If it is not to backup from snapshot, check volume
1218            # status. If the volume status is 'in-use', create a temp snapshot
1219            # from the source volume, backup the temp snapshot, and then clean
1220            # up the temp snapshot; if the volume status is 'available', just
1221            # backup the volume.
1222            previous_status = volume.get('previous_status')
1223            if previous_status == "in-use":
1224                temp_snapshot = self._create_temp_snapshot(context, volume)
1225                backup.temp_snapshot_id = temp_snapshot.id
1226                backup.save()
1227                device_to_backup = temp_snapshot
1228                is_snapshot = True
1229
1230        return (device_to_backup, is_snapshot)
1231
    def _create_temp_snapshot(self, context, volume):
        """Create and return a temporary snapshot of *volume*.

        The Snapshot object is created in CREATING status, handed to the
        driver's create_snapshot(), then marked AVAILABLE.  If the driver
        fails, the snapshot record and its glance metadata are removed
        before the original exception is re-raised.
        """
        kwargs = {
            'volume_id': volume['id'],
            'cgsnapshot_id': None,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': fields.SnapshotStatus.CREATING,
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': 'backup-snap-%s' % volume['id'],
            'display_description': None,
            'volume_type_id': volume['volume_type_id'],
            'encryption_key_id': volume['encryption_key_id'],
            'metadata': {},
        }
        temp_snap_ref = objects.Snapshot(context=context, **kwargs)
        temp_snap_ref.create()
        try:
            model_update = self.create_snapshot(temp_snap_ref)
            if model_update:
                temp_snap_ref.update(model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove the half-created snapshot record (as admin) and
                # its glance metadata before the error propagates.
                with temp_snap_ref.obj_as_admin():
                    self.db.volume_glance_metadata_delete_by_snapshot(
                        context, temp_snap_ref.id)
                    temp_snap_ref.destroy()

        temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE
        temp_snap_ref.save()
        return temp_snap_ref
1263
1264    def _create_temp_volume(self, context, volume, volume_options=None):
1265        kwargs = {
1266            'size': volume.size,
1267            'display_name': 'backup-vol-%s' % volume.id,
1268            'host': volume.host,
1269            'cluster_name': volume.cluster_name,
1270            'user_id': context.user_id,
1271            'project_id': context.project_id,
1272            'status': 'creating',
1273            'attach_status': fields.VolumeAttachStatus.DETACHED,
1274            'availability_zone': volume.availability_zone,
1275            'volume_type_id': volume.volume_type_id,
1276            'admin_metadata': {'temporary': 'True'},
1277        }
1278        kwargs.update(volume_options or {})
1279        temp_vol_ref = objects.Volume(context=context.elevated(), **kwargs)
1280        temp_vol_ref.create()
1281        return temp_vol_ref
1282
1283    def _create_temp_cloned_volume(self, context, volume):
1284        temp_vol_ref = self._create_temp_volume(context, volume)
1285        try:
1286            model_update = self.create_cloned_volume(temp_vol_ref, volume)
1287            if model_update:
1288                temp_vol_ref.update(model_update)
1289        except Exception:
1290            with excutils.save_and_reraise_exception():
1291                temp_vol_ref.destroy()
1292
1293        temp_vol_ref.status = 'available'
1294        temp_vol_ref.save()
1295        return temp_vol_ref
1296
1297    def _create_temp_volume_from_snapshot(self, context, volume, snapshot,
1298                                          volume_options=None):
1299        temp_vol_ref = self._create_temp_volume(context, volume,
1300                                                volume_options=volume_options)
1301        try:
1302            model_update = self.create_volume_from_snapshot(temp_vol_ref,
1303                                                            snapshot)
1304            if model_update:
1305                temp_vol_ref.update(model_update)
1306        except Exception:
1307            with excutils.save_and_reraise_exception():
1308                temp_vol_ref.destroy()
1309
1310        temp_vol_ref.status = 'available'
1311        temp_vol_ref.save()
1312        return temp_vol_ref
1313
1314    def clear_download(self, context, volume):
1315        """Clean up after an interrupted image copy."""
1316        pass
1317
1318    def attach_volume(self, context, volume, instance_uuid, host_name,
1319                      mountpoint):
1320        """Callback for volume attached to instance or host."""
1321        pass
1322
1323    def detach_volume(self, context, volume, attachment=None):
1324        """Callback for volume detached."""
1325        pass
1326
1327    def do_setup(self, context):
1328        """Any initialization the volume driver does while starting."""
1329        pass
1330
1331    def validate_connector(self, connector):
1332        """Fail if connector doesn't contain all the data needed by driver."""
1333        pass
1334
1335    def update_migrated_volume(self, ctxt, volume, new_volume,
1336                               original_volume_status):
1337        """Return model update for migrated volume.
1338
1339        Each driver implementing this method needs to be responsible for the
1340        values of _name_id and provider_location. If None is returned or either
1341        key is not set, it means the volume table does not need to change the
1342        value(s) for the key(s).
1343        The return format is {"_name_id": value, "provider_location": value}.
1344
1345        :param volume: The original volume that was migrated to this backend
1346        :param new_volume: The migration volume object that was created on
1347                           this backend as part of the migration process
1348        :param original_volume_status: The status of the original volume
1349        :returns: model_update to update DB with any needed changes
1350        """
1351        msg = _("The method update_migrated_volume is not implemented.")
1352        raise NotImplementedError(msg)
1353
1354    @staticmethod
1355    def validate_connector_has_setting(connector, setting):
1356        pass
1357
1358    def retype(self, context, volume, new_type, diff, host):
1359        return False, None
1360
1361    def create_cloned_volume(self, volume, src_vref):
1362        """Creates a clone of the specified volume.
1363
1364        If volume_type extra specs includes 'replication: <is> True' the
1365        driver needs to create a volume replica (secondary)
1366        and setup replication between the newly created volume
1367        and the secondary volume.
1368        """
1369        raise NotImplementedError()
1370
1371    # #######  Interface methods for DataPath (Connector) ########
1372    @abc.abstractmethod
1373    def ensure_export(self, context, volume):
1374        """Synchronously recreates an export for a volume."""
1375        return
1376
1377    @abc.abstractmethod
1378    def create_export(self, context, volume, connector):
1379        """Exports the volume.
1380
1381        Can optionally return a Dictionary of changes
1382        to the volume object to be persisted.
1383        """
1384        return
1385
1386    def create_export_snapshot(self, context, snapshot, connector):
1387        """Exports the snapshot.
1388
1389        Can optionally return a Dictionary of changes
1390        to the snapshot object to be persisted.
1391        """
1392        return
1393
1394    @abc.abstractmethod
1395    def remove_export(self, context, volume):
1396        """Removes an export for a volume."""
1397        return
1398
1399    def remove_export_snapshot(self, context, snapshot):
1400        """Removes an export for a snapshot."""
1401        return
1402
1403    @abc.abstractmethod
1404    def initialize_connection(self, volume, connector):
1405        """Allow connection to connector and return connection info.
1406
1407        :param volume: The volume to be attached
1408        :param connector: Dictionary containing information about what is being
1409                          connected to.
1410        :returns conn_info: A dictionary of connection information.
1411        """
1412        return
1413
1414    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
1415        """Allow connection to connector and return connection info.
1416
1417        :param snapshot: The snapshot to be attached
1418        :param connector: Dictionary containing information about what
1419                          is being connected to.
1420        :returns conn_info: A dictionary of connection information. This
1421                            can optionally include a "initiator_updates"
1422                            field.
1423
1424        The "initiator_updates" field must be a dictionary containing a
1425        "set_values" and/or "remove_values" field. The "set_values" field must
1426        be a dictionary of key-value pairs to be set/updated in the db. The
1427        "remove_values" field must be a list of keys, previously set with
1428        "set_values", that will be deleted from the db.
1429        """
1430        return
1431
1432    @abc.abstractmethod
1433    def terminate_connection(self, volume, connector, **kwargs):
1434        """Disallow connection from connector.
1435
1436        :param volume: The volume to be disconnected.
1437        :param connector: A dictionary describing the connection with details
1438                          about the initiator. Can be None.
1439        """
1440        return
1441
1442    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
1443        """Disallow connection from connector."""
1444        return
1445
1446    def get_pool(self, volume):
1447        """Return pool name where volume reside on.
1448
1449        :param volume: The volume hosted by the driver.
1450        :returns: name of the pool where given volume is in.
1451        """
1452        return None
1453
1454    def update_provider_info(self, volumes, snapshots):
1455        """Get provider info updates from driver.
1456
1457        :param volumes: List of Cinder volumes to check for updates
1458        :param snapshots: List of Cinder snapshots to check for updates
1459        :returns: tuple (volume_updates, snapshot_updates)
1460
1461        where volume updates {'id': uuid, provider_id: <provider-id>}
1462        and snapshot updates {'id': uuid, provider_id: <provider-id>}
1463        """
1464        return None, None
1465
1466    def migrate_volume(self, context, volume, host):
1467        """Migrate volume stub.
1468
1469        This is for drivers that don't implement an enhanced version
1470        of this operation.
1471        """
1472        return (False, None)
1473
1474    def manage_existing(self, volume, existing_ref):
1475        """Manage exiting stub.
1476
1477        This is for drivers that don't implement manage_existing().
1478        """
1479        msg = _("Manage existing volume not implemented.")
1480        raise NotImplementedError(msg)
1481
1482    def unmanage(self, volume):
1483        """Unmanage stub.
1484
1485        This is for drivers that don't implement unmanage().
1486        """
1487        msg = _("Unmanage volume not implemented.")
1488        raise NotImplementedError(msg)
1489
1490    def freeze_backend(self, context):
1491        """Notify the backend that it's frozen.
1492
1493        We use set to prohibit the creation of any new resources
1494        on the backend, or any modifications to existing items on
1495        a backend.  We set/enforce this by not allowing scheduling
1496        of new volumes to the specified backend, and checking at the
1497        api for modifications to resources and failing.
1498
1499        In most cases the driver may not need to do anything, but
1500        this provides a handle if they need it.
1501
1502        :param context: security context
1503        :response: True|False
1504        """
1505        return True
1506
1507    def thaw_backend(self, context):
1508        """Notify the backend that it's unfrozen/thawed.
1509
1510        Returns the backend to a normal state after a freeze
1511        operation.
1512
1513        In most cases the driver may not need to do anything, but
1514        this provides a handle if they need it.
1515
1516        :param context: security context
1517        :response: True|False
1518        """
1519        return True
1520
1521    def failover_host(self, context, volumes, secondary_id=None, groups=None):
1522        """Failover a backend to a secondary replication target.
1523
1524        Instructs a replication capable/configured backend to failover
1525        to one of it's secondary replication targets. host=None is
1526        an acceptable input, and leaves it to the driver to failover
1527        to the only configured target, or to choose a target on it's
1528        own. All of the hosts volumes will be passed on to the driver
1529        in order for it to determine the replicated volumes on the host,
1530        if needed.
1531
1532        Response is a tuple, including the new target backend_id
1533        AND a lit of dictionaries with volume_id and updates.
1534        Key things to consider (attaching failed-over volumes):
1535        - provider_location
1536        - provider_auth
1537        - provider_id
1538        - replication_status
1539
1540        :param context: security context
1541        :param volumes: list of volume objects, in case the driver needs
1542                        to take action on them in some way
1543        :param secondary_id: Specifies rep target backend to fail over to
1544        :param groups: replication groups
1545        :returns: ID of the backend that was failed-over to,
1546                  model update for volumes, and model update for groups
1547        """
1548
1549        # Example volume_updates data structure:
1550        # [{'volume_id': <cinder-uuid>,
1551        #   'updates': {'provider_id': 8,
1552        #               'replication_status': 'failed-over',
1553        #               'replication_extended_status': 'whatever',...}},]
1554        # Example group_updates data structure:
1555        # [{'group_id': <cinder-uuid>,
1556        #   'updates': {'replication_status': 'failed-over',...}},]
1557        raise NotImplementedError()
1558
1559    def failover(self, context, volumes, secondary_id=None, groups=None):
1560        """Like failover but for a host that is clustered.
1561
1562        Most of the time this will be the exact same behavior as failover_host,
1563        so if it's not overwritten, it is assumed to be the case.
1564        """
1565        return self.failover_host(context, volumes, secondary_id, groups)
1566
1567    def failover_completed(self, context, active_backend_id=None):
1568        """This method is called after failover for clustered backends."""
1569        raise NotImplementedError()
1570
1571    @classmethod
1572    def _is_base_method(cls, method_name):
1573        method = getattr(cls, method_name)
1574        return method.__module__ == getattr(BaseVD, method_name).__module__
1575
1576    # Replication Group (Tiramisu)
1577    def enable_replication(self, context, group, volumes):
1578        """Enables replication for a group and volumes in the group.
1579
1580        :param group: group object
1581        :param volumes: list of volume objects in the group
1582        :returns: model_update - dict of group updates
1583        :returns: volume_model_updates - list of dicts of volume updates
1584        """
1585        raise NotImplementedError()
1586
1587    # Replication Group (Tiramisu)
1588    def disable_replication(self, context, group, volumes):
1589        """Disables replication for a group and volumes in the group.
1590
1591        :param group: group object
1592        :param volumes: list of volume objects in the group
1593        :returns: model_update - dict of group updates
1594        :returns: volume_model_updates - list of dicts of volume updates
1595        """
1596        raise NotImplementedError()
1597
1598    # Replication Group (Tiramisu)
1599    def failover_replication(self, context, group, volumes,
1600                             secondary_backend_id=None):
1601        """Fails over replication for a group and volumes in the group.
1602
1603        :param group: group object
1604        :param volumes: list of volume objects in the group
1605        :param secondary_backend_id: backend_id of the secondary site
1606        :returns: model_update - dict of group updates
1607        :returns: volume_model_updates - list of dicts of volume updates
1608        """
1609        raise NotImplementedError()
1610
1611    def get_replication_error_status(self, context, groups):
1612        """Returns error info for replicated groups and its volumes.
1613
1614        :returns: group_model_updates - list of dicts of group updates
1615
1616        if error happens. For example, a dict of a group can be as follows:
1617
1618        .. code:: python
1619
1620          {'group_id': xxxx,
1621           'replication_status': fields.ReplicationStatus.ERROR}
1622
1623        :returns: volume_model_updates - list of dicts of volume updates
1624
1625        if error happens. For example, a dict of a volume can be as follows:
1626
1627        .. code:: python
1628
1629          {'volume_id': xxxx,
1630           'replication_status': fields.ReplicationStatus.ERROR}
1631
1632        """
1633        return [], []
1634
1635    @classmethod
1636    def supports_replication_feature(cls, feature):
1637        """Check if driver class supports replication features.
1638
1639        Feature is a string that must be one of:
1640            - v2.1
1641            - a/a
1642        """
1643        if feature not in cls.REPLICATION_FEATURE_CHECKERS:
1644            return False
1645
1646        # Check if method is being implemented/overwritten by the driver
1647        method_name = cls.REPLICATION_FEATURE_CHECKERS[feature]
1648        return not cls._is_base_method(method_name)
1649
1650    def get_replication_updates(self, context):
1651        """Old replication update method, deprecate."""
1652        raise NotImplementedError()
1653
1654    def create_group(self, context, group):
1655        """Creates a group.
1656
1657        :param context: the context of the caller.
1658        :param group: the Group object of the group to be created.
1659        :returns: model_update
1660
1661        model_update will be in this format: {'status': xxx, ......}.
1662
1663        If the status in model_update is 'error', the manager will throw
1664        an exception and it will be caught in the try-except block in the
1665        manager. If the driver throws an exception, the manager will also
1666        catch it in the try-except block. The group status in the db will
1667        be changed to 'error'.
1668
1669        For a successful operation, the driver can either build the
1670        model_update and return it or return None. The group status will
1671        be set to 'available'.
1672        """
1673        raise NotImplementedError()
1674
1675    def delete_group(self, context, group, volumes):
1676        """Deletes a group.
1677
1678        :param context: the context of the caller.
1679        :param group: the Group object of the group to be deleted.
1680        :param volumes: a list of Volume objects in the group.
1681        :returns: model_update, volumes_model_update
1682
1683        param volumes is a list of objects retrieved from the db. It cannot
1684        be assigned to volumes_model_update. volumes_model_update is a list
1685        of dictionaries. It has to be built by the driver. An entry will be
1686        in this format: {'id': xxx, 'status': xxx, ......}. model_update
1687        will be in this format: {'status': xxx, ......}.
1688
1689        The driver should populate volumes_model_update and model_update
1690        and return them.
1691
1692        The manager will check volumes_model_update and update db accordingly
1693        for each volume. If the driver successfully deleted some volumes
1694        but failed to delete others, it should set statuses of the volumes
1695        accordingly so that the manager can update db correctly.
1696
1697        If the status in any entry of volumes_model_update is 'error_deleting'
1698        or 'error', the status in model_update will be set to the same if it
1699        is not already 'error_deleting' or 'error'.
1700
1701        If the status in model_update is 'error_deleting' or 'error', the
1702        manager will raise an exception and the status of the group will be
1703        set to 'error' in the db. If volumes_model_update is not returned by
1704        the driver, the manager will set the status of every volume in the
1705        group to 'error' in the except block.
1706
1707        If the driver raises an exception during the operation, it will be
1708        caught by the try-except block in the manager. The statuses of the
1709        group and all volumes in it will be set to 'error'.
1710
1711        For a successful operation, the driver can either build the
1712        model_update and volumes_model_update and return them or
1713        return None, None. The statuses of the group and all volumes
1714        will be set to 'deleted' after the manager deletes them from db.
1715        """
1716        raise NotImplementedError()
1717
1718    def update_group(self, context, group,
1719                     add_volumes=None, remove_volumes=None):
1720        """Updates a group.
1721
1722        :param context: the context of the caller.
1723        :param group: the Group object of the group to be updated.
1724        :param add_volumes: a list of Volume objects to be added.
1725        :param remove_volumes: a list of Volume objects to be removed.
1726        :returns: model_update, add_volumes_update, remove_volumes_update
1727
1728        model_update is a dictionary that the driver wants the manager
1729        to update upon a successful return. If None is returned, the manager
1730        will set the status to 'available'.
1731
1732        add_volumes_update and remove_volumes_update are lists of dictionaries
1733        that the driver wants the manager to update upon a successful return.
1734        Note that each entry requires a {'id': xxx} so that the correct
1735        volume entry can be updated. If None is returned, the volume will
1736        remain its original status. Also note that you cannot directly
1737        assign add_volumes to add_volumes_update as add_volumes is a list of
1738        volume objects and cannot be used for db update directly. Same with
1739        remove_volumes.
1740
1741        If the driver throws an exception, the status of the group as well as
1742        those of the volumes to be added/removed will be set to 'error'.
1743        """
1744        raise NotImplementedError()
1745
1746    def create_group_from_src(self, context, group, volumes,
1747                              group_snapshot=None, snapshots=None,
1748                              source_group=None, source_vols=None):
1749        """Creates a group from source.
1750
1751        :param context: the context of the caller.
1752        :param group: the Group object to be created.
1753        :param volumes: a list of Volume objects in the group.
1754        :param group_snapshot: the GroupSnapshot object as source.
1755        :param snapshots: a list of Snapshot objects in group_snapshot.
1756        :param source_group: the Group object as source.
1757        :param source_vols: a list of Volume objects in the source_group.
1758        :returns: model_update, volumes_model_update
1759
1760        The source can be group_snapshot or a source_group.
1761
1762        param volumes is a list of objects retrieved from the db. It cannot
1763        be assigned to volumes_model_update. volumes_model_update is a list
1764        of dictionaries. It has to be built by the driver. An entry will be
1765        in this format: {'id': xxx, 'status': xxx, ......}. model_update
1766        will be in this format: {'status': xxx, ......}.
1767
1768        To be consistent with other volume operations, the manager will
1769        assume the operation is successful if no exception is thrown by
1770        the driver. For a successful operation, the driver can either build
1771        the model_update and volumes_model_update and return them or
1772        return None, None.
1773        """
1774        raise NotImplementedError()
1775
1776    def create_group_snapshot(self, context, group_snapshot, snapshots):
1777        """Creates a group_snapshot.
1778
1779        :param context: the context of the caller.
1780        :param group_snapshot: the GroupSnapshot object to be created.
1781        :param snapshots: a list of Snapshot objects in the group_snapshot.
1782        :returns: model_update, snapshots_model_update
1783
1784        param snapshots is a list of Snapshot objects. It cannot be assigned
1785        to snapshots_model_update. snapshots_model_update is a list of
1786        dictionaries. It has to be built by the driver. An entry will be
1787        in this format: {'id': xxx, 'status': xxx, ......}. model_update
1788        will be in this format: {'status': xxx, ......}.
1789
1790        The driver should populate snapshots_model_update and model_update
1791        and return them.
1792
1793        The manager will check snapshots_model_update and update db accordingly
1794        for each snapshot. If the driver successfully deleted some snapshots
1795        but failed to delete others, it should set statuses of the snapshots
1796        accordingly so that the manager can update db correctly.
1797
1798        If the status in any entry of snapshots_model_update is 'error', the
1799        status in model_update will be set to the same if it is not already
1800        'error'.
1801
1802        If the status in model_update is 'error', the manager will raise an
1803        exception and the status of group_snapshot will be set to 'error' in
1804        the db. If snapshots_model_update is not returned by the driver, the
1805        manager will set the status of every snapshot to 'error' in the except
1806        block.
1807
1808        If the driver raises an exception during the operation, it will be
1809        caught by the try-except block in the manager and the statuses of
1810        group_snapshot and all snapshots will be set to 'error'.
1811
1812        For a successful operation, the driver can either build the
1813        model_update and snapshots_model_update and return them or
1814        return None, None. The statuses of group_snapshot and all snapshots
1815        will be set to 'available' at the end of the manager function.
1816        """
1817        raise NotImplementedError()
1818
1819    def delete_group_snapshot(self, context, group_snapshot, snapshots):
1820        """Deletes a group_snapshot.
1821
1822        :param context: the context of the caller.
1823        :param group_snapshot: the GroupSnapshot object to be deleted.
1824        :param snapshots: a list of Snapshot objects in the group_snapshot.
1825        :returns: model_update, snapshots_model_update
1826
1827        param snapshots is a list of objects. It cannot be assigned to
1828        snapshots_model_update. snapshots_model_update is a list of of
1829        dictionaries. It has to be built by the driver. An entry will be
1830        in this format: {'id': xxx, 'status': xxx, ......}. model_update
1831        will be in this format: {'status': xxx, ......}.
1832
1833        The driver should populate snapshots_model_update and model_update
1834        and return them.
1835
1836        The manager will check snapshots_model_update and update db accordingly
1837        for each snapshot. If the driver successfully deleted some snapshots
1838        but failed to delete others, it should set statuses of the snapshots
1839        accordingly so that the manager can update db correctly.
1840
1841        If the status in any entry of snapshots_model_update is
1842        'error_deleting' or 'error', the status in model_update will be set to
1843        the same if it is not already 'error_deleting' or 'error'.
1844
1845        If the status in model_update is 'error_deleting' or 'error', the
1846        manager will raise an exception and the status of group_snapshot will
1847        be set to 'error' in the db. If snapshots_model_update is not returned
1848        by the driver, the manager will set the status of every snapshot to
1849        'error' in the except block.
1850
1851        If the driver raises an exception during the operation, it will be
1852        caught by the try-except block in the manager and the statuses of
1853        group_snapshot and all snapshots will be set to 'error'.
1854
1855        For a successful operation, the driver can either build the
1856        model_update and snapshots_model_update and return them or
1857        return None, None. The statuses of group_snapshot and all snapshots
1858        will be set to 'deleted' after the manager deletes them from db.
1859        """
1860        raise NotImplementedError()
1861
1862    def extend_volume(self, volume, new_size):
1863        msg = _("Extend volume not implemented")
1864        raise NotImplementedError(msg)
1865
1866    def accept_transfer(self, context, volume, new_user, new_project):
1867        pass
1868
1869    def create_volume_from_backup(self, volume, backup):
1870        """Creates a volume from a backup.
1871
1872        Can optionally return a Dictionary of changes to the volume object to
1873        be persisted.
1874
1875        :param volume: the volume object to be created.
1876        :param backup: the backup object as source.
1877        :returns: volume_model_update
1878        """
1879
1880        raise NotImplementedError()
1881
1882
@six.add_metaclass(abc.ABCMeta)
class CloneableImageVD(object):
    """Interface for drivers that can clone a volume directly from an image."""

    @abc.abstractmethod
    def clone_image(self, volume, image_location,
                    image_id, image_meta, image_service):
        """Create a volume efficiently from an existing image.

        :param volume: the volume to populate.
        :param image_location: a string whose format depends on the image
            service backend in use; the driver should use it to determine
            whether cloning is possible.
        :param image_id: a string representing the id of the image; the
            driver can use it to introspect internal stores or a registry
            to do an efficient image clone.
        :param image_meta: a dictionary that includes 'disk_format'
            (e.g. raw, qcow2) and other image attributes that allow the
            driver to decide whether it can clone the image without first
            requiring conversion.
        :param image_service: the reference of the image_service to use;
            it is passed here for drivers that want to fetch images from
            the image service directly.
        :returns: a dict of volume properties (e.g. provider_location) and
            a boolean indicating whether cloning occurred.
        """
        return None, False
1911
1912
@six.add_metaclass(abc.ABCMeta)
class MigrateVD(object):
    """Interface for drivers implementing driver-assisted volume migration."""

    @abc.abstractmethod
    def migrate_volume(self, context, volume, host):
        """Migrate the volume to the specified host.

        :param context: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        :returns: a boolean indicating whether the migration occurred, and
                  model_update; the base stub reports (False, None).
        """
        return False, None
1929
1930
@six.add_metaclass(abc.ABCMeta)
class ManageableVD(object):
    """Interface for drivers that can manage/unmanage existing volumes."""

    @abc.abstractmethod
    def manage_existing(self, volume, existing_ref):
        """Bring an existing backend storage object under Cinder management.

        existing_ref is passed straight through from the API request's
        manage_existing_ref value, and its interpretation is up to the
        driver.  It should be sufficient to identify a storage object that
        the driver should somehow associate with the newly-created cinder
        volume structure.

        There are two ways to do this:

        1. Rename the backend storage object so that it matches
           volume['name'], which is how drivers traditionally map between
           a cinder volume and the associated backend storage object.

        2. Place some metadata on the volume, or somewhere in the backend,
           that allows other driver requests (e.g. delete, clone, attach,
           detach...) to locate the backend storage object when required.

        If existing_ref doesn't make sense, or doesn't refer to an
        existing backend storage object, raise a
        ManageExistingInvalidReference exception.

        The volume may have a volume_type; the driver can inspect it and
        compare it against the properties of the referenced backend
        storage object.  If they are incompatible, raise a
        ManageExistingVolumeTypeMismatch, specifying a reason for the
        failure.

        :param volume:       Cinder volume to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume
        """
        return None

    @abc.abstractmethod
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size of the volume to be managed by manage_existing.

        When calculating the size, round up to the next GB.

        :param volume:       Cinder volume to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume
        :returns size:       Volume size in GiB (integer)
        """
        return None

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """List volumes on the backend available for management by Cinder.

        Returns a list of dictionaries, each specifying a volume in the
        host, with the following keys:

        - reference (dictionary): The reference for a volume, which can be
          passed to "manage_existing".
        - size (int): The size of the volume according to the storage
          backend, rounded up to the nearest GB.
        - safe_to_manage (boolean): Whether or not this volume is safe to
          manage according to the storage backend. For example, is the
          volume in use or invalid for any reason.
        - reason_not_safe (string): If safe_to_manage is False, the reason
          why.
        - cinder_id (string): If already managed, provide the Cinder ID.
        - extra_info (string): Any extra information to return to the user.

        :param cinder_volumes: A list of volumes in this host that Cinder
                               currently manages, used to determine if a
                               volume is manageable or not.
        :param marker:    The last item of the previous page; we return the
                          next results after this value (after sorting)
        :param limit:     Maximum number of items to return
        :param offset:    Number of items to skip after marker
        :param sort_keys: List of keys to sort results by (valid keys are
                          'identifier' and 'size')
        :param sort_dirs: List of directions to sort by, corresponding to
                          sort_keys (valid directions are 'asc' and 'desc')
        """
        return []

    @abc.abstractmethod
    def unmanage(self, volume):
        """Remove the specified volume from Cinder management.

        Does not delete the underlying backend storage object.

        For most drivers, this will not need to do anything.  However,
        some drivers might use this call as an opportunity to clean up any
        Cinder-specific configuration that they have associated with the
        backend storage object.

        :param volume: Cinder volume to unmanage
        """
        return None
2026
2027
2028@six.add_metaclass(abc.ABCMeta)
2029class ManageableSnapshotsVD(object):
2030    # NOTE: Can't use abstractmethod before all drivers implement it
2031    def manage_existing_snapshot(self, snapshot, existing_ref):
2032        """Brings an existing backend storage object under Cinder management.
2033
2034        existing_ref is passed straight through from the API request's
2035        manage_existing_ref value, and it is up to the driver how this should
2036        be interpreted.  It should be sufficient to identify a storage object
2037        that the driver should somehow associate with the newly-created cinder
2038        snapshot structure.
2039
2040        There are two ways to do this:
2041
2042        1. Rename the backend storage object so that it matches the
2043           snapshot['name'] which is how drivers traditionally map between a
2044           cinder snapshot and the associated backend storage object.
2045
2046        2. Place some metadata on the snapshot, or somewhere in the backend,
2047           that allows other driver requests (e.g. delete) to locate the
2048           backend storage object when required.
2049
2050        If the existing_ref doesn't make sense, or doesn't refer to an existing
2051        backend storage object, raise a ManageExistingInvalidReference
2052        exception.
2053
2054        :param snapshot:     Cinder volume snapshot to manage
2055        :param existing_ref: Driver-specific information used to identify a
2056                             volume snapshot
2057        """
2058        return
2059
2060    # NOTE: Can't use abstractmethod before all drivers implement it
2061    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
2062        """Return size of snapshot to be managed by manage_existing.
2063
2064        When calculating the size, round up to the next GB.
2065
2066        :param snapshot:     Cinder volume snapshot to manage
2067        :param existing_ref: Driver-specific information used to identify a
2068                             volume snapshot
2069        :returns size:       Volume snapshot size in GiB (integer)
2070        """
2071        return
2072
2073    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
2074                                 sort_keys, sort_dirs):
2075        """List snapshots on the backend available for management by Cinder.
2076
2077        Returns a list of dictionaries, each specifying a snapshot in the host,
2078        with the following keys:
2079        - reference (dictionary): The reference for a snapshot, which can be
2080        passed to "manage_existing_snapshot".
2081        - size (int): The size of the snapshot according to the storage
2082        backend, rounded up to the nearest GB.
2083        - safe_to_manage (boolean): Whether or not this snapshot is safe to
2084        manage according to the storage backend. For example, is the snapshot
2085        in use or invalid for any reason.
2086        - reason_not_safe (string): If safe_to_manage is False, the reason why.
2087        - cinder_id (string): If already managed, provide the Cinder ID.
2088        - extra_info (string): Any extra information to return to the user
2089        - source_reference (string): Similar to "reference", but for the
2090        snapshot's source volume.
2091
2092        :param cinder_snapshots: A list of snapshots in this host that Cinder
2093                                 currently manages, used to determine if
2094                                 a snapshot is manageable or not.
2095        :param marker:    The last item of the previous page; we return the
2096                          next results after this value (after sorting)
2097        :param limit:     Maximum number of items to return
2098        :param offset:    Number of items to skip after marker
2099        :param sort_keys: List of keys to sort results by (valid keys are
2100                          'identifier' and 'size')
2101        :param sort_dirs: List of directions to sort by, corresponding to
2102                          sort_keys (valid directions are 'asc' and 'desc')
2103
2104        """
2105        return []
2106
2107    # NOTE: Can't use abstractmethod before all drivers implement it
2108    def unmanage_snapshot(self, snapshot):
2109        """Removes the specified snapshot from Cinder management.
2110
2111        Does not delete the underlying backend storage object.
2112
2113        For most drivers, this will not need to do anything. However, some
2114        drivers might use this call as an opportunity to clean up any
2115        Cinder-specific configuration that they have associated with the
2116        backend storage object.
2117
2118        :param snapshot: Cinder volume snapshot to unmanage
2119        """
2120        pass
2121
2122
class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD,
                   MigrateVD, BaseVD):
    """Aggregate driver interface combining all optional capabilities.

    Concrete drivers inherit from this class and override the operations
    their backend supports.  Unsupported operations either raise
    NotImplementedError or return a harmless default (a no-op, an empty
    list, or a "not handled" tuple) so the caller can fall back.
    """

    def check_for_setup_error(self):
        """Verify backend setup/configuration; must be overridden."""
        raise NotImplementedError()

    def create_volume(self, volume):
        """Create a new volume on the backend; must be overridden."""
        raise NotImplementedError()

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        If volume_type extra specs includes 'replication: <is> True'
        the driver needs to create a volume replica (secondary),
        and setup replication between the newly created volume and
        the secondary volume.
        """

        raise NotImplementedError()

    def delete_volume(self, volume):
        """Delete a volume from the backend; must be overridden."""
        raise NotImplementedError()

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        raise NotImplementedError()

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        raise NotImplementedError()

    def local_path(self, volume):
        """Return the local filesystem path of a volume, if applicable."""
        raise NotImplementedError()

    def clear_download(self, context, volume):
        """Clean up after an interrupted image download (no-op here)."""
        pass

    def extend_volume(self, volume, new_size):
        """Grow a volume to new_size (GiB); must be overridden."""
        msg = _("Extend volume not implemented")
        raise NotImplementedError(msg)

    def manage_existing(self, volume, existing_ref):
        """Bring an existing backend storage object under management."""
        msg = _("Manage existing volume not implemented.")
        raise NotImplementedError(msg)

    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert volume to snapshot.

        Note: the revert process should not change the volume's
        current size, that means if the driver shrank
        the volume during the process, it should extend the
        volume internally.
        """
        msg = _("Revert volume to snapshot not implemented.")
        raise NotImplementedError(msg)

    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size (GiB) of a volume to be managed."""
        msg = _("Manage existing volume not implemented.")
        raise NotImplementedError(msg)

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """List backend volumes available for management by Cinder."""
        msg = _("Get manageable volumes not implemented.")
        raise NotImplementedError(msg)

    def unmanage(self, volume):
        """Drop a volume from Cinder management without deleting it."""
        pass

    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Bring an existing backend snapshot under Cinder management."""
        msg = _("Manage existing snapshot not implemented.")
        raise NotImplementedError(msg)

    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        """Return the size (GiB) of a snapshot to be managed."""
        msg = _("Manage existing snapshot not implemented.")
        raise NotImplementedError(msg)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
                                 sort_keys, sort_dirs):
        """List backend snapshots available for management by Cinder."""
        msg = _("Get manageable snapshots not implemented.")
        raise NotImplementedError(msg)

    def unmanage_snapshot(self, snapshot):
        """Unmanage the specified snapshot from Cinder management."""

    def retype(self, context, volume, new_type, diff, host):
        """Default: signal that the driver did not handle the retype."""
        return False, None

    # #######  Interface methods for DataPath (Connector) ########
    def ensure_export(self, context, volume):
        """Ensure a volume's export exists; must be overridden."""
        raise NotImplementedError()

    def create_export(self, context, volume, connector):
        """Export the volume for attachment; must be overridden."""
        raise NotImplementedError()

    def create_export_snapshot(self, context, snapshot, connector):
        """Export a snapshot for attachment; must be overridden."""
        raise NotImplementedError()

    def remove_export(self, context, volume):
        """Remove a volume's export; must be overridden."""
        raise NotImplementedError()

    def remove_export_snapshot(self, context, snapshot):
        """Remove a snapshot's export; must be overridden."""
        raise NotImplementedError()

    def initialize_connection(self, volume, connector, **kwargs):
        """Allow connection from connector; return connection info."""
        raise NotImplementedError()

    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Allow connection from connector for a snapshot."""

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector

        :param volume: The volume to be disconnected.
        :param connector: A dictionary describing the connection with details
                          about the initiator. Can be None.
        """

    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Disallow connection from connector for a snapshot."""

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be created.
        :returns: model_update

        model_update will be in this format: {'status': xxx, ......}.

        If the status in model_update is 'error', the manager will throw
        an exception and it will be caught in the try-except block in the
        manager. If the driver throws an exception, the manager will also
        catch it in the try-except block. The group status in the db will
        be changed to 'error'.

        For a successful operation, the driver can either build the
        model_update and return it or return None. The group status will
        be set to 'available'.
        """
        raise NotImplementedError()

    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Creates a consistencygroup from source.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be created.
        :param volumes: a list of volume dictionaries in the group.
        :param cgsnapshot: the dictionary of the cgsnapshot as source.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :param source_cg: the dictionary of a consistency group as source.
        :param source_vols: a list of volume dictionaries in the source_cg.
        :returns: model_update, volumes_model_update

        The source can be cgsnapshot or a source cg.

        param volumes is retrieved directly from the db. It is a list of
        cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
        assigned to volumes_model_update. volumes_model_update is a list of
        dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        To be consistent with other volume operations, the manager will
        assume the operation is successful if no exception is thrown by
        the driver. For a successful operation, the driver can either build
        the model_update and volumes_model_update and return them or
        return None, None.
        """
        raise NotImplementedError()

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be deleted.
        :param volumes: a list of volume dictionaries in the group.
        :returns: model_update, volumes_model_update

        param volumes is retrieved directly from the db. It is a list of
        cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
        assigned to volumes_model_update. volumes_model_update is a list of
        dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        The driver should populate volumes_model_update and model_update
        and return them.

        The manager will check volumes_model_update and update db accordingly
        for each volume. If the driver successfully deleted some volumes
        but failed to delete others, it should set statuses of the volumes
        accordingly so that the manager can update db correctly.

        If the status in any entry of volumes_model_update is 'error_deleting'
        or 'error', the status in model_update will be set to the same if it
        is not already 'error_deleting' or 'error'.

        If the status in model_update is 'error_deleting' or 'error', the
        manager will raise an exception and the status of the group will be
        set to 'error' in the db. If volumes_model_update is not returned by
        the driver, the manager will set the status of every volume in the
        group to 'error' in the except block.

        If the driver raises an exception during the operation, it will be
        caught by the try-except block in the manager. The statuses of the
        group and all volumes in it will be set to 'error'.

        For a successful operation, the driver can either build the
        model_update and volumes_model_update and return them or
        return None, None. The statuses of the group and all volumes
        will be set to 'deleted' after the manager deletes them from db.
        """
        raise NotImplementedError()

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        """Updates a consistency group.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be updated.
        :param add_volumes: a list of volume dictionaries to be added.
        :param remove_volumes: a list of volume dictionaries to be removed.
        :returns: model_update, add_volumes_update, remove_volumes_update

        model_update is a dictionary that the driver wants the manager
        to update upon a successful return. If None is returned, the manager
        will set the status to 'available'.

        add_volumes_update and remove_volumes_update are lists of dictionaries
        that the driver wants the manager to update upon a successful return.
        Note that each entry requires a {'id': xxx} so that the correct
        volume entry can be updated. If None is returned, the volume will
        remain its original status. Also note that you cannot directly
        assign add_volumes to add_volumes_update as add_volumes is a list of
        cinder.db.sqlalchemy.models.Volume objects and cannot be used for
        db update directly. Same with remove_volumes.

        If the driver throws an exception, the status of the group as well as
        those of the volumes to be added/removed will be set to 'error'.
        """
        raise NotImplementedError()

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot.

        :param context: the context of the caller.
        :param cgsnapshot: the dictionary of the cgsnapshot to be created.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :returns: model_update, snapshots_model_update

        param snapshots is retrieved directly from the db. It is a list of
        cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be
        assigned to snapshots_model_update. snapshots_model_update is a list
        of dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        The driver should populate snapshots_model_update and model_update
        and return them.

        The manager will check snapshots_model_update and update db accordingly
        for each snapshot. If the driver successfully deleted some snapshots
        but failed to delete others, it should set statuses of the snapshots
        accordingly so that the manager can update db correctly.

        If the status in any entry of snapshots_model_update is 'error', the
        status in model_update will be set to the same if it is not already
        'error'.

        If the status in model_update is 'error', the manager will raise an
        exception and the status of cgsnapshot will be set to 'error' in the
        db. If snapshots_model_update is not returned by the driver, the
        manager will set the status of every snapshot to 'error' in the except
        block.

        If the driver raises an exception during the operation, it will be
        caught by the try-except block in the manager and the statuses of
        cgsnapshot and all snapshots will be set to 'error'.

        For a successful operation, the driver can either build the
        model_update and snapshots_model_update and return them or
        return None, None. The statuses of cgsnapshot and all snapshots
        will be set to 'available' at the end of the manager function.
        """
        raise NotImplementedError()

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot.

        :param context: the context of the caller.
        :param cgsnapshot: the dictionary of the cgsnapshot to be deleted.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :returns: model_update, snapshots_model_update

        param snapshots is retrieved directly from the db. It is a list of
        cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be
        assigned to snapshots_model_update. snapshots_model_update is a list
        of dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        The driver should populate snapshots_model_update and model_update
        and return them.

        The manager will check snapshots_model_update and update db accordingly
        for each snapshot. If the driver successfully deleted some snapshots
        but failed to delete others, it should set statuses of the snapshots
        accordingly so that the manager can update db correctly.

        If the status in any entry of snapshots_model_update is
        'error_deleting' or 'error', the status in model_update will be set to
        the same if it is not already 'error_deleting' or 'error'.

        If the status in model_update is 'error_deleting' or 'error', the
        manager will raise an exception and the status of cgsnapshot will be
        set to 'error' in the db. If snapshots_model_update is not returned by
        the driver, the manager will set the status of every snapshot to
        'error' in the except block.

        If the driver raises an exception during the operation, it will be
        caught by the try-except block in the manager and the statuses of
        cgsnapshot and all snapshots will be set to 'error'.

        For a successful operation, the driver can either build the
        model_update and snapshots_model_update and return them or
        return None, None. The statuses of cgsnapshot and all snapshots
        will be set to 'deleted' after the manager deletes them from db.
        """
        raise NotImplementedError()

    def clone_image(self, volume, image_location, image_id, image_meta,
                    image_service):
        """Default: (None, False) — the driver did not clone the image."""
        return None, False

    def get_pool(self, volume):
        """Return pool name where volume reside on.

        :param volume: The volume hosted by the driver.
        :returns: name of the pool where given volume is in.
        """
        return None

    def migrate_volume(self, context, volume, host):
        """Default: (False, None) — migration not handled by the driver."""
        return (False, None)

    def accept_transfer(self, context, volume, new_user, new_project):
        """Hook for volume ownership transfer; no driver action here."""
        pass
2471
2472
class ProxyVD(object):
    """Marker base class for proxy volume drivers.

    Drivers that delegate through a proxy object (e.g. via __setattr__
    and __getattr__) instead of inheriting directly from the base volume
    driver can subclass this so they are recognizable as proxies and so
    the actually-used driver object can be retrieved.
    """
    def _get_driver(self):
        """Return the wrapped driver object.

        Proxies may override this to locate their driver differently.
        """
        try:
            return self.driver
        except AttributeError:
            return None
2486
2487
class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSCSI target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """

    def __init__(self, *args, **kwargs):
        # No iSCSI-specific initialization; defer to VolumeDriver.
        super(ISCSIDriver, self).__init__(*args, **kwargs)

    def _do_iscsi_discovery(self, volume):
        """Discover the iSCSI target for the volume via sendtargets.

        Fallback used only when provider_location was not stored on the
        volume.  Returns the matching discovery output line, or None when
        discovery fails or no line contains both the configured target IP
        and the volume name.
        """
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warning("ISCSI provider_location not stored, using discovery")

        volume_name = volume['name']

        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid

            (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error("ISCSI discovery attempt failed for:%s",
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
            return None

        # Select the discovery record that mentions both our configured
        # target IP and this volume's name.
        for target in out.splitlines():
            if (self.configuration.target_ip_address in target
                    and volume_name in target):
                return target
        return None

    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the id of the volume (currently used by xen)

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :discard:    boolean indicating if discard is supported

        In some of drivers that support multiple connections (for multipath
        and for single path with failover on connection failure), it returns
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.

        Note that some of drivers don't return :target_portals even if they
        support multipath. Then the connector should use sendtargets discovery
        to find the other portals if it supports multipath.
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                        (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        # location format: '<portal(s)>,<tpgt> <IQN> [<lun>]'.  Multiple
        # portals, when present, are ';'-separated in the first field.
        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)

        # The LUN field may be absent or non-numeric; fall back to a
        # helper-specific default.
        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # The thin LVM driver with tgtadm exposes volumes at LUN 1;
            # everything else defaults to LUN 0.
            if (self.configuration.volume_driver ==
                    'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and
                    self.configuration.target_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0

        # For multi-portal targets also expose the plural keys; the first
        # portal is still reported in the singular keys for backward
        # compatibility (see the docstring above).
        if nr_portals > 1:
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        # provider_auth format: '<auth method> <auth username> <password>'
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        # provider_geometry format: '<physical_block_size> <logical_block_size>'
        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        # Report encryption status based on the presence of a key id.
        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties

    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run iscsiadm in node mode against the target in iscsi_properties.

        :param iscsi_command: additional iscsiadm arguments (sequence).
        :returns: (stdout, stderr) of the command.
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)

    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        """Run iscsiadm with only the given arguments (no node/target args).

        :returns: (stdout, stderr) of the command.
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)

    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        """Update one node DB property via 'iscsiadm --op update'."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                    'discard': False,
                }
            }

        If the backend driver supports multiple connections for multipath and
        for single path with failover, "target_portals", "target_iqns",
        "target_luns" are also populated::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume1',
                    'target_iqns': ['iqn.2010-10.org.openstack:volume1',
                                    'iqn.2010-10.org.openstack:volume1-2'],
                    'target_portal': '10.0.0.1:3260',
                    'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'],
                    'target_lun': 1,
                    'target_luns': [1, 1],
                    'volume_id': 1,
                    'discard': False,
                }
            }
        """
        # NOTE(jdg): Yes, this is duplicated in the volume/target
        # drivers, for now leaving it as there are 3'rd party
        # drivers that don't use target drivers, but inherit from
        # this base class and use this init data
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type':
                self.configuration.safe_get('target_protocol'),
            'data': iscsi_properties
        }

    def validate_connector(self, connector):
        """Require the initiator IQN in the connector dict."""
        # iSCSI drivers require the initiator information
        required = 'initiator'
        if required not in connector:
            LOG.error('The volume driver requires %(data)s '
                      'in the connector.', {'data': required})
            raise exception.InvalidConnectorException(missing=required)

    def terminate_connection(self, volume, connector, **kwargs):
        """No driver-side teardown is performed here by default."""
        pass

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()

        return self._stats

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        # Generic defaults reported when the backend does not override
        # these stats.
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'iSCSI'
        data["pools"] = []
        data["replication_enabled"] = False

        self._update_pools_and_stats(data)
2743
2744
class ISERDriver(ISCSIDriver):
    """Executes commands relating to ISER volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSER target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """
    def __init__(self, *args, **kwargs):
        super(ISERDriver, self).__init__(*args, **kwargs)
        # For backward compatibility: mirror the iSER-specific config
        # options onto the generic target_* names that the base
        # ISCSIDriver code reads.
        self.configuration.num_volume_device_scan_tries = \
            self.configuration.num_iser_scan_tries
        self.configuration.target_prefix = \
            self.configuration.iser_target_prefix
        self.configuration.target_ip_address = \
            self.configuration.iser_ip_address
        self.configuration.target_port = self.configuration.iser_port

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iser driver returns a driver_volume_type of 'iser'.
        The format of the driver data is defined in
        _get_iscsi_properties.
        Example return value:

        .. code-block:: default

            {
                'driver_volume_type': 'iser',
                'data': {
                    'target_discovered': True,
                    'target_iqn':
                    'iqn.2010-10.org.iser.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        """
        iser_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type': 'iser',
            'data': iser_properties
        }

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_iSER'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'iSER'
        data["pools"] = []

        self._update_pools_and_stats(data)
2811
2812
2813class FibreChannelDriver(VolumeDriver):
2814    """Executes commands relating to Fibre Channel volumes."""
2815    def __init__(self, *args, **kwargs):
2816        super(FibreChannelDriver, self).__init__(*args, **kwargs)
2817
2818    def initialize_connection(self, volume, connector):
2819        """Initializes the connection and returns connection info.
2820
2821        The  driver returns a driver_volume_type of 'fibre_channel'.
2822        The target_wwn can be a single entry or a list of wwns that
2823        correspond to the list of remote wwn(s) that will export the volume.
2824        Example return values:
2825
2826        .. code-block:: default
2827
2828            {
2829                'driver_volume_type': 'fibre_channel',
2830                'data': {
2831                    'target_discovered': True,
2832                    'target_lun': 1,
2833                    'target_wwn': '1234567890123',
2834                    'discard': False,
2835                }
2836            }
2837
2838        or
2839
2840        .. code-block:: default
2841
2842             {
2843                'driver_volume_type': 'fibre_channel',
2844                'data': {
2845                    'target_discovered': True,
2846                    'target_lun': 1,
2847                    'target_wwn': ['1234567890123', '0987654321321'],
2848                    'discard': False,
2849                }
2850            }
2851
2852        """
2853        msg = _("Driver must implement initialize_connection")
2854        raise NotImplementedError(msg)
2855
2856    def validate_connector(self, connector):
2857        """Fail if connector doesn't contain all the data needed by driver.
2858
2859        Do a check on the connector and ensure that it has wwnns, wwpns.
2860        """
2861        self.validate_connector_has_setting(connector, 'wwpns')
2862        self.validate_connector_has_setting(connector, 'wwnns')
2863
2864    @staticmethod
2865    def validate_connector_has_setting(connector, setting):
2866        """Test for non-empty setting in connector."""
2867        if setting not in connector or not connector[setting]:
2868            LOG.error(
2869                "FibreChannelDriver validate_connector failed. "
2870                "No '%(setting)s'. Make sure HBA state is Online.",
2871                {'setting': setting})
2872            raise exception.InvalidConnectorException(missing=setting)
2873
2874    def get_volume_stats(self, refresh=False):
2875        """Get volume stats.
2876
2877        If 'refresh' is True, run update the stats first.
2878        """
2879        if refresh:
2880            self._update_volume_stats()
2881
2882        return self._stats
2883
2884    def _update_volume_stats(self):
2885        """Retrieve stats info from volume group."""
2886
2887        LOG.debug("Updating volume stats...")
2888        data = {}
2889        backend_name = self.configuration.safe_get('volume_backend_name')
2890        data["volume_backend_name"] = backend_name or 'Generic_FC'
2891        data["vendor_name"] = 'Open Source'
2892        data["driver_version"] = '1.0'
2893        data["storage_protocol"] = 'FC'
2894        data["pools"] = []
2895
2896        self._update_pools_and_stats(data)
2897