1# Copyright 2017 Datera
2# All Rights Reserved.
3#
4#    Licensed under the Apache License, Version 2.0 (the "License"); you may
5#    not use this file except in compliance with the License. You may obtain
6#    a copy of the License at
7#
8#         http://www.apache.org/licenses/LICENSE-2.0
9#
10#    Unless required by applicable law or agreed to in writing, software
11#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13#    License for the specific language governing permissions and limitations
14#    under the License.
15
16import time
17import uuid
18
19from eventlet.green import threading
20from oslo_config import cfg
21from oslo_log import log as logging
22import six
23
24from cinder import exception
25from cinder.i18n import _
26from cinder import utils
27from cinder.volume import configuration
28from cinder.volume.drivers.san import san
29
30import cinder.volume.drivers.datera.datera_api2 as api2
31import cinder.volume.drivers.datera.datera_api21 as api21
32import cinder.volume.drivers.datera.datera_common as datc
33
34
35LOG = logging.getLogger(__name__)
36
37d_opts = [
38    cfg.StrOpt('datera_api_port',
39               default='7717',
40               help='Datera API port.'),
41    cfg.StrOpt('datera_api_version',
42               default='2',
43               deprecated_for_removal=True,
44               help='Datera API version.'),
45    cfg.IntOpt('datera_503_timeout',
46               default='120',
47               help='Timeout for HTTP 503 retry messages'),
48    cfg.IntOpt('datera_503_interval',
49               default='5',
50               help='Interval between 503 retries'),
51    cfg.BoolOpt('datera_debug',
52                default=False,
53                help="True to set function arg and return logging"),
54    cfg.BoolOpt('datera_debug_replica_count_override',
55                default=False,
56                help="ONLY FOR DEBUG/TESTING PURPOSES\n"
57                     "True to set replica_count to 1"),
58    cfg.StrOpt('datera_tenant_id',
59               default=None,
60               help="If set to 'Map' --> OpenStack project ID will be mapped "
61                    "implicitly to Datera tenant ID\n"
62                    "If set to 'None' --> Datera tenant ID will not be used "
63                    "during volume provisioning\n"
64                    "If set to anything else --> Datera tenant ID will be the "
65                    "provided value"),
66    cfg.BoolOpt('datera_disable_profiler',
67                default=False,
68                help="Set to True to disable profiling in the Datera driver"),
69]
70
71
72CONF = cfg.CONF
73CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
74CONF.register_opts(d_opts, group=configuration.SHARED_CONF_GROUP)
75
76
77@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
78class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi):
79
80    """The OpenStack Datera Driver
81
82    Version history:
83        * 1.0 - Initial driver
84        * 1.1 - Look for lun-0 instead of lun-1.
85        * 2.0 - Update For Datera API v2
86        * 2.1 - Multipath, ACL and reorg
87        * 2.2 - Capabilites List, Extended Volume-Type Support
88                Naming convention change,
89                Volume Manage/Unmanage support
90        * 2.3 - Templates, Tenants, Snapshot Polling,
91                2.1 Api Version Support, Restructure
92        * 2.3.1 - Scalability bugfixes
93        * 2.3.2 - Volume Placement, ACL multi-attach bugfix
94        * 2.4.0 - Fast Retype Support
95    """
96    VERSION = '2.4.0'
97
98    CI_WIKI_NAME = "datera-ci"
99
100    HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)}
101
102    def __init__(self, *args, **kwargs):
103        super(DateraDriver, self).__init__(*args, **kwargs)
104        self.configuration.append_config_values(d_opts)
105        self.username = self.configuration.san_login
106        self.password = self.configuration.san_password
107        self.cluster_stats = {}
108        self.datera_api_token = None
109        self.interval = self.configuration.datera_503_interval
110        self.retry_attempts = (self.configuration.datera_503_timeout /
111                               self.interval)
112        self.driver_prefix = str(uuid.uuid4())[:4]
113        self.datera_debug = self.configuration.datera_debug
114        self.datera_api_versions = []
115
116        if self.datera_debug:
117            utils.setup_tracing(['method'])
118        self.tenant_id = self.configuration.datera_tenant_id
119        if self.tenant_id and self.tenant_id.lower() == 'none':
120            self.tenant_id = None
121        self.api_check = time.time()
122        self.api_cache = []
123        self.api_timeout = 0
124        self.do_profile = not self.configuration.datera_disable_profiler
125        self.thread_local = threading.local()
126
127        backend_name = self.configuration.safe_get(
128            'volume_backend_name')
129        self.backend_name = backend_name or 'Datera'
130
131        datc.register_driver(self)
132
133    def do_setup(self, context):
134        # If we can't authenticate through the old and new method, just fail
135        # now.
136        if not all([self.username, self.password]):
137            msg = _("san_login and/or san_password is not set for Datera "
138                    "driver in the cinder.conf. Set this information and "
139                    "start the cinder-volume service again.")
140            LOG.error(msg)
141            raise exception.InvalidInput(msg)
142
143        self.login()
144        self._create_tenant()
145
146    # =================
147
148    # =================
149    # = Create Volume =
150    # =================
151
152    @datc._api_lookup
153    def create_volume(self, volume):
154        """Create a logical volume."""
155        pass
156
157    # =================
158    # = Extend Volume =
159    # =================
160
161    @datc._api_lookup
162    def extend_volume(self, volume, new_size):
163        pass
164
165    # =================
166
167    # =================
168    # = Cloned Volume =
169    # =================
170
171    @datc._api_lookup
172    def create_cloned_volume(self, volume, src_vref):
173        pass
174
175    # =================
176    # = Delete Volume =
177    # =================
178
179    @datc._api_lookup
180    def delete_volume(self, volume):
181        pass
182
183    # =================
184    # = Ensure Export =
185    # =================
186
187    @datc._api_lookup
188    def ensure_export(self, context, volume, connector=None):
189        """Gets the associated account, retrieves CHAP info and updates."""
190
191    # =========================
192    # = Initialize Connection =
193    # =========================
194
195    @datc._api_lookup
196    def initialize_connection(self, volume, connector):
197        pass
198
199    # =================
200    # = Create Export =
201    # =================
202
203    @datc._api_lookup
204    def create_export(self, context, volume, connector):
205        pass
206
207    # =================
208    # = Detach Volume =
209    # =================
210
211    @datc._api_lookup
212    def detach_volume(self, context, volume, attachment=None):
213        pass
214
215    # ===================
216    # = Create Snapshot =
217    # ===================
218
219    @datc._api_lookup
220    def create_snapshot(self, snapshot):
221        pass
222
223    # ===================
224    # = Delete Snapshot =
225    # ===================
226
227    @datc._api_lookup
228    def delete_snapshot(self, snapshot):
229        pass
230
231    # ========================
232    # = Volume From Snapshot =
233    # ========================
234
235    @datc._api_lookup
236    def create_volume_from_snapshot(self, volume, snapshot):
237        pass
238
239    # ==========
240    # = Retype =
241    # ==========
242
243    @datc._api_lookup
244    def retype(self, ctxt, volume, new_type, diff, host):
245        """Convert the volume to be of the new type.
246
247        Returns a boolean indicating whether the retype occurred.
248
249        :param ctxt: Context
250        :param volume: A dictionary describing the volume to migrate
251        :param new_type: A dictionary describing the volume type to convert to
252        :param diff: A dictionary with the difference between the two types
253        :param host: A dictionary describing the host to migrate to, where
254                     host['host'] is its name, and host['capabilities'] is a
255                     dictionary of its reported capabilities (Not Used).
256        """
257        pass
258
259    # ==========
260    # = Manage =
261    # ==========
262
263    @datc._api_lookup
264    def manage_existing(self, volume, existing_ref):
265        """Manage an existing volume on the Datera backend
266
267        The existing_ref must be either the current name or Datera UUID of
268        an app_instance on the Datera backend in a colon separated list with
269        the storage instance name and volume name.  This means only
270        single storage instances and single volumes are supported for
271        managing by cinder.
272
273        Eg.
274
275        (existing_ref['source-name'] ==
276             tenant:app_inst_name:storage_inst_name:vol_name)
277
278        if using Datera 2.1 API
279
280        or
281
282        (existing_ref['source-name'] ==
283             app_inst_name:storage_inst_name:vol_name)
284
285        if using 2.0 API
286
287        :param volume:       Cinder volume to manage
288        :param existing_ref: Driver-specific information used to identify a
289                             volume
290        """
291        pass
292
293    # ===================
294    # = Manage Get Size =
295    # ===================
296
297    @datc._api_lookup
298    def manage_existing_get_size(self, volume, existing_ref):
299        """Get the size of an unmanaged volume on the Datera backend
300
301        The existing_ref must be either the current name or Datera UUID of
302        an app_instance on the Datera backend in a colon separated list with
303        the storage instance name and volume name.  This means only
304        single storage instances and single volumes are supported for
305        managing by cinder.
306
307        Eg.
308
309        existing_ref == app_inst_name:storage_inst_name:vol_name
310
311        :param volume:       Cinder volume to manage
312        :param existing_ref: Driver-specific information used to identify a
313                             volume on the Datera backend
314        """
315        pass
316
317    # =========================
318    # = Get Manageable Volume =
319    # =========================
320
321    @datc._api_lookup
322    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
323                               sort_keys, sort_dirs):
324        """List volumes on the backend available for management by Cinder.
325
326        Returns a list of dictionaries, each specifying a volume in the host,
327        with the following keys:
328
329        - reference (dictionary): The reference for a volume, which can be
330          passed to 'manage_existing'.
331        - size (int): The size of the volume according to the storage
332          backend, rounded up to the nearest GB.
333        - safe_to_manage (boolean): Whether or not this volume is safe to
334          manage according to the storage backend. For example, is the volume
335          in use or invalid for any reason.
336        - reason_not_safe (string): If safe_to_manage is False, the reason why.
337        - cinder_id (string): If already managed, provide the Cinder ID.
338        - extra_info (string): Any extra information to return to the user
339
340        :param cinder_volumes: A list of volumes in this host that Cinder
341                               currently manages, used to determine if
342                               a volume is manageable or not.
343        :param marker:    The last item of the previous page; we return the
344                          next results after this value (after sorting)
345        :param limit:     Maximum number of items to return
346        :param offset:    Number of items to skip after marker
347        :param sort_keys: List of keys to sort results by (valid keys are
348                          'identifier' and 'size')
349        :param sort_dirs: List of directions to sort by, corresponding to
350                          sort_keys (valid directions are 'asc' and 'desc')
351        """
352        pass
353
354    # ============
355    # = Unmanage =
356    # ============
357
358    @datc._api_lookup
359    def unmanage(self, volume):
360        """Unmanage a currently managed volume in Cinder
361
362        :param volume:       Cinder volume to unmanage
363        """
364        pass
365
366    # ================
367    # = Volume Stats =
368    # ================
369
370    @datc._api_lookup
371    def get_volume_stats(self, refresh=False):
372        """Get volume stats.
373
374        If 'refresh' is True, run update first.
375        The name is a bit misleading as
376        the majority of the data here is cluster
377        data.
378        """
379        pass
380
381    # =========
382    # = Login =
383    # =========
384
385    @datc._api_lookup
386    def login(self):
387        pass
388
389    # =======
390    # = QoS =
391    # =======
392
393    def _update_qos(self, resource, policies):
394        url = datc.URL_TEMPLATES['vol_inst'](
395            policies['default_storage_name'],
396            policies['default_volume_name']) + '/performance_policy'
397        url = url.format(datc._get_name(resource['id']))
398        type_id = resource.get('volume_type_id', None)
399        if type_id is not None:
400            # Filter for just QOS policies in result. All of their keys
401            # should end with "max"
402            fpolicies = {k: int(v) for k, v in
403                         policies.items() if k.endswith("max")}
404            # Filter all 0 values from being passed
405            fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
406            if fpolicies:
407                self._issue_api_request(url, 'post', body=fpolicies,
408                                        api_version='2')
409
410    def _get_lunid(self):
411        return 0
412
413    # ============================
414    # = Volume-Types/Extra-Specs =
415    # ============================
416
417    def _init_vendor_properties(self):
418        """Create a dictionary of vendor unique properties.
419
420        This method creates a dictionary of vendor unique properties
421        and returns both created dictionary and vendor name.
422        Returned vendor name is used to check for name of vendor
423        unique properties.
424
425        - Vendor name shouldn't include colon(:) because of the separator
426          and it is automatically replaced by underscore(_).
427          ex. abc:d -> abc_d
428        - Vendor prefix is equal to vendor name.
429          ex. abcd
430        - Vendor unique properties must start with vendor prefix + ':'.
431          ex. abcd:maxIOPS
432
433        Each backend driver needs to override this method to expose
434        its own properties using _set_property() like this:
435
436        self._set_property(
437            properties,
438            "vendorPrefix:specific_property",
439            "Title of property",
440            _("Description of property"),
441            "type")
442
443        : return dictionary of vendor unique properties
444        : return vendor name
445
446        prefix: DF --> Datera Fabric
447        """
448
449        properties = {}
450
451        self._set_property(
452            properties,
453            "DF:placement_mode",
454            "Datera Volume Placement",
455            _("'single_flash' for single-flash-replica placement, "
456              "'all_flash' for all-flash-replica placement, "
457              "'hybrid' for hybrid placement"),
458            "string",
459            default="hybrid")
460
461        self._set_property(
462            properties,
463            "DF:round_robin",
464            "Datera Round Robin Portals",
465            _("True to round robin the provided portals for a target"),
466            "boolean",
467            default=False)
468
469        if self.configuration.get('datera_debug_replica_count_override'):
470            replica_count = 1
471        else:
472            replica_count = 3
473        self._set_property(
474            properties,
475            "DF:replica_count",
476            "Datera Volume Replica Count",
477            _("Specifies number of replicas for each volume. Can only be "
478              "increased once volume is created"),
479            "integer",
480            minimum=1,
481            default=replica_count)
482
483        self._set_property(
484            properties,
485            "DF:acl_allow_all",
486            "Datera ACL Allow All",
487            _("True to set acl 'allow_all' on volumes created.  Cannot be "
488              "changed on volume once set"),
489            "boolean",
490            default=False)
491
492        self._set_property(
493            properties,
494            "DF:ip_pool",
495            "Datera IP Pool",
496            _("Specifies IP pool to use for volume"),
497            "string",
498            default="default")
499
500        self._set_property(
501            properties,
502            "DF:template",
503            "Datera Template",
504            _("Specifies Template to use for volume provisioning"),
505            "string",
506            default="")
507
508        # ###### QoS Settings ###### #
509        self._set_property(
510            properties,
511            "DF:read_bandwidth_max",
512            "Datera QoS Max Bandwidth Read",
513            _("Max read bandwidth setting for volume qos, "
514              "use 0 for unlimited"),
515            "integer",
516            minimum=0,
517            default=0)
518
519        self._set_property(
520            properties,
521            "DF:default_storage_name",
522            "Datera Default Storage Instance Name",
523            _("The name to use for storage instances created"),
524            "string",
525            default="storage-1")
526
527        self._set_property(
528            properties,
529            "DF:default_volume_name",
530            "Datera Default Volume Name",
531            _("The name to use for volumes created"),
532            "string",
533            default="volume-1")
534
535        self._set_property(
536            properties,
537            "DF:write_bandwidth_max",
538            "Datera QoS Max Bandwidth Write",
539            _("Max write bandwidth setting for volume qos, "
540              "use 0 for unlimited"),
541            "integer",
542            minimum=0,
543            default=0)
544
545        self._set_property(
546            properties,
547            "DF:total_bandwidth_max",
548            "Datera QoS Max Bandwidth Total",
549            _("Max total bandwidth setting for volume qos, "
550              "use 0 for unlimited"),
551            "integer",
552            minimum=0,
553            default=0)
554
555        self._set_property(
556            properties,
557            "DF:read_iops_max",
558            "Datera QoS Max iops Read",
559            _("Max read iops setting for volume qos, "
560              "use 0 for unlimited"),
561            "integer",
562            minimum=0,
563            default=0)
564
565        self._set_property(
566            properties,
567            "DF:write_iops_max",
568            "Datera QoS Max IOPS Write",
569            _("Max write iops setting for volume qos, "
570              "use 0 for unlimited"),
571            "integer",
572            minimum=0,
573            default=0)
574
575        self._set_property(
576            properties,
577            "DF:total_iops_max",
578            "Datera QoS Max IOPS Total",
579            _("Max total iops setting for volume qos, "
580              "use 0 for unlimited"),
581            "integer",
582            minimum=0,
583            default=0)
584        # ###### End QoS Settings ###### #
585
586        return properties, 'DF'
587