# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Utilities and helper functions."""


import abc
import contextlib
import datetime
import functools
import inspect
import logging as py_logging
import math
import os
import pyclbr
import random
import re
import shutil
import socket
import stat
import sys
import tempfile
import time
import types

from castellan import key_manager
from os_brick import encryptors
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
import six
import webob.exc

from cinder import exception
from cinder.i18n import _


CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
VALID_TRACE_FLAGS = {'method', 'api'}
TRACE_METHOD = False
TRACE_API = False
INITIAL_AUTO_MOSR = 20
INFINITE_UNKNOWN_VALUES = ('infinite', 'unknown')


synchronized = lockutils.synchronized_with_prefix('cinder-')


def as_int(obj, quiet=True):
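    """Best-effort conversion of an object to an int.

    Illustrative examples (hypothetical values)::

        as_int("2")    # -> 2
        as_int("2.5")  # -> 2
        as_int("abc")  # -> "abc" (returned unchanged because quiet=True)
    """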
    # Try "2" -> 2
    try:
        return int(obj)
    except (ValueError, TypeError):
        pass
    # Try "2.5" -> 2
    try:
        return int(float(obj))
    except (ValueError, TypeError):
        pass
    # Eck, not sure what this is then.
    if not quiet:
        raise TypeError(_("Can not translate %s to integer.") % (obj))
    return obj


def check_exclusive_options(**kwargs):
    """Checks that at most one of the provided options is not None.

    Iterates over all the kwargs passed in; if more than one of them is
    not None, an InvalidInput exception is raised listing the option names.
    """
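    # Example (illustrative): check_exclusive_options(snapshot_id=None,
    # source_volid='vol-1') passes, while passing two non-None values raises
    # InvalidInput listing the option names.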

    if not kwargs:
        return

    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = {}
    for (k, v) in kwargs.items():
        if v is not None:
            exclusive_options[k] = True

    if len(exclusive_options) > 1:
        # Change the format of the names from pythonic to
        # something that is more readable.
        #
        # Ex: 'the_key' -> 'the key'
        if pretty_keys:
            names = [k.replace('_', ' ') for k in kwargs.keys()]
        else:
            names = kwargs.keys()
        names = ", ".join(sorted(names))
        msg = (_("May specify only one of %s") % (names))
        raise exception.InvalidInput(reason=msg)


def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
        kwargs['root_helper'] = get_root_helper()
    return processutils.execute(*cmd, **kwargs)


def check_ssh_injection(cmd_list):
    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                             '<']

    # Check whether injection attacks exist
    for arg in cmd_list:
        arg = arg.strip()

        # Check for matching quotes on the ends
        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
        if is_quoted:
            # Check for unescaped quotes within the quoted argument
            quoted = is_quoted.group('quoted')
            if quoted:
                if (re.match('[\'"]', quoted) or
                        re.search('[^\\\\][\'"]', quoted)):
                    raise exception.SSHInjectionThreat(command=cmd_list)
        else:
            # We only allow spaces within quoted arguments; an unquoted
            # argument that contains whitespace is rejected.
            if len(arg.split()) > 1:
                raise exception.SSHInjectionThreat(command=cmd_list)

        # Second, check for dangerous characters in the argument: a shell
        # special character that starts the argument, or that is not
        # escaped with a backslash, is treated as an injection attempt.
        for c in ssh_injection_pattern:
            if c not in arg:
                continue

            result = arg.find(c)
            if result != -1:
                if result == 0 or arg[result - 1] != '\\':
                    raise exception.SSHInjectionThreat(command=cmd_list)


def check_metadata_properties(metadata=None):
    """Checks that the volume metadata properties are valid."""

    if not metadata:
        metadata = {}
    if not isinstance(metadata, dict):
        msg = _("Metadata should be a dict.")
        raise exception.InvalidInput(msg)

    for k, v in metadata.items():
        try:
            check_string_length(k, "Metadata key: %s" % k, min_length=1)
            check_string_length(v, "Value for metadata key: %s" % k)
        except exception.InvalidInput as exc:
            raise exception.InvalidVolumeMetadata(reason=exc)
        # for backward compatibility
        if len(k) > 255:
            msg = _("Metadata property key %s greater than 255 "
                    "characters.") % k
            raise exception.InvalidVolumeMetadataSize(reason=msg)
        if len(v) > 255:
            msg = _("Metadata property key %s value greater than "
                    "255 characters.") % k
            raise exception.InvalidVolumeMetadataSize(reason=msg)


def last_completed_audit_period(unit=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            unit: string, one of 'hour', 'day', 'month', 'year'
                    Periods normally begin at the beginning (UTC) of the
                    period unit (so a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, a 'year' on Jan 1).
                    The unit string may be appended with an optional offset
                    like so:  'day@18'  This will begin the period at 18:00
                    UTC.  'month@15' starts a monthly period on the 15th,
                    and year@3 begins a yearly one on March 1st.


    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
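    # Illustrative example: with unit='day@18' and the current time at
    # 2017-12-24 09:00 UTC, this returns
    # (datetime(2017, 12, 22, 18, 0), datetime(2017, 12, 23, 18, 0)).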
    if not unit:
        unit = CONF.volume_usage_audit_period

    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)


def time_format(at=None):
    """Format a datetime object as an ISO 8601 string.

    :param at: datetime.datetime to format (example
        'datetime.datetime(2017, 12, 24, 22, 11, 32, 6086)')
    :returns: Formatted date string (example '2017-12-24T22:11:32Z').
    """
    if not at:
        at = timeutils.utcnow()
    date_string = at.strftime("%Y-%m-%dT%H:%M:%S")
    tz = at.tzname(None) if at.tzinfo else 'UTC'
    # Need to handle either iso8601 or python UTC format
    date_string += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz)
    return date_string


def is_none_string(val):
    """Check if a string represents a None value."""
    if not isinstance(val, six.string_types):
        return False

    return val.lower() == 'none'


def monkey_patch():
    """Patches a decorator onto all functions in the specified modules.

    If CONF.monkey_patch is set to True, this function applies a
    decorator to all functions in the specified modules.

    You can set the decorator for each module using
    CONF.monkey_patch_modules. The format is
    "Module path:Decorator function". Example:
    'cinder.api.ec2.cloud:'
    'cinder.openstack.common.notifier.api.notify_decorator'

    Parameters of the decorator are as follows.
    (See cinder.openstack.common.notifier.api.notify_decorator)

    :param name: name of the function
    :param function: object of the function
    """
    # If CONF.monkey_patch is not True, this function does nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                # On Python 3, unbound methods are regular functions
                predicate = inspect.isfunction if six.PY3 else inspect.ismethod
                for method, func in inspect.getmembers(clz, predicate):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            elif isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))


def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    path = os.path.join(base, dev)
    if partition:
        path += str(partition)
    return path


def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
    if six.PY3:
        hostname = hostname.encode('latin-1', 'ignore')
        hostname = hostname.decode('latin-1')
    else:
        if isinstance(hostname, six.text_type):
            hostname = hostname.encode('latin-1', 'ignore')

    hostname = re.sub(r'[ _]', '-', hostname)
    hostname = re.sub(r'[^\w.-]+', '', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip('.-')

    return hostname


def read_file_as_root(file_path):
    """Secure helper to read file as root."""
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except processutils.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)


def robust_file_write(directory, filename, data):
    """Robust file write.

    Use "write to temp file and rename" model for writing the
    persistence file.

    :param directory: Target directory to create a file.
    :param filename: File name to store specified data.
    :param data: String data.
    """
    tempname = None
    dirfd = None
    try:
        dirfd = os.open(directory, os.O_DIRECTORY)

        # write data to temporary file
        with tempfile.NamedTemporaryFile(prefix=filename,
                                         dir=directory,
                                         delete=False) as tf:
            tempname = tf.name
            tf.write(data.encode('utf-8'))
            tf.flush()
            os.fdatasync(tf.fileno())
            tf.close()

            # Fsync the directory to ensure the fact of the existence of
            # the temp file hits the disk.
            os.fsync(dirfd)
            # If destination file exists, it will be replaced silently.
            os.rename(tempname, os.path.join(directory, filename))
            # Fsync the directory to ensure the rename hits the disk.
            os.fsync(dirfd)
    except OSError:
        with excutils.save_and_reraise_exception():
            LOG.error("Failed to write persistence file: %(path)s.",
                      {'path': os.path.join(directory, filename)})
            if os.path.isfile(tempname):
                os.unlink(tempname)
    finally:
        if dirfd:
            os.close(dirfd)


@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :param owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    orig_uid = os.stat(path).st_uid

    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)


@contextlib.contextmanager
def tempdir(**kwargs):
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            LOG.debug('Could not remove tmpdir: %s',
                      six.text_type(e))


def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first."""
    if not encountered:
        encountered = []
    for subclass in clazz.__subclasses__():
        if subclass not in encountered:
            encountered.append(subclass)
            # drill down to leaves first
            for subsubclass in walk_class_hierarchy(subclass, encountered):
                yield subsubclass
            yield subclass


def get_root_helper():
    return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config


def brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Wrapper to automatically set root_helper in brick calls.

    :param multipath: A boolean indicating whether the connector can
                      support multipath.
    :param enforce_multipath: If True, it raises exception when multipath=True
                              is specified but multipathd is not running.
                              If False, it falls back to multipath=False
                              when multipathd is not running.
    """

    root_helper = get_root_helper()
    return connector.get_connector_properties(root_helper,
                                              CONF.my_ip,
                                              multipath,
                                              enforce_multipath)


def brick_get_connector(protocol, driver=None,
                        use_multipath=False,
                        device_scan_attempts=3,
                        *args, **kwargs):
    """Wrapper to get a brick connector object.

    This automatically populates the required protocol as well
    as the root_helper needed to execute commands.
    """
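    # Illustrative usage (values hypothetical):
    #     conn = brick_get_connector('iscsi', use_multipath=True)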

    root_helper = get_root_helper()
    return connector.InitiatorConnector.factory(
        protocol, root_helper,
        driver=driver,
        use_multipath=use_multipath,
        device_scan_attempts=device_scan_attempts,
        *args, **kwargs)


def brick_get_encryptor(connection_info, *args, **kwargs):
    """Wrapper to get a brick encryptor object."""

    root_helper = get_root_helper()
    km = key_manager.API(CONF)
    return encryptors.get_volume_encryptor(root_helper=root_helper,
                                           connection_info=connection_info,
                                           keymgr=km,
                                           *args, **kwargs)


def brick_attach_volume_encryptor(context, attach_info, encryption):
    """Attach encryption layer."""
    connection_info = attach_info['conn']
    connection_info['data']['device_path'] = attach_info['device']['path']
    encryptor = brick_get_encryptor(connection_info,
                                    **encryption)
    encryptor.attach_volume(context, **encryption)


def brick_detach_volume_encryptor(attach_info, encryption):
    """Detach encryption layer."""
    connection_info = attach_info['conn']
    connection_info['data']['device_path'] = attach_info['device']['path']

    encryptor = brick_get_encryptor(connection_info,
                                    **encryption)
    encryptor.detach_volume(**encryption)


def require_driver_initialized(driver):
    """Verifies if `driver` is initialized.

    If the driver is not initialized, an exception will be raised.

    :param driver: The driver instance.
    :raises: `exception.DriverNotInitialized`
    """
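    # Typical usage (illustrative): call this at the start of an operation
    # that needs the backend, e.g. require_driver_initialized(self.driver).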
    # we can't do anything if the driver didn't init
    if not driver.initialized:
        driver_name = driver.__class__.__name__
        LOG.error("Volume driver %s not initialized", driver_name)
        raise exception.DriverNotInitialized()
    else:
        log_unsupported_driver_warning(driver)


def log_unsupported_driver_warning(driver):
    """Annoy the log about unsupported drivers."""
    if not driver.supported:
        # Check to see if the driver is flagged as supported.
        LOG.warning("Volume driver (%(driver_name)s %(version)s) is "
                    "currently unsupported and may be removed in the "
                    "next release of OpenStack.  Use at your own risk.",
                    {'driver_name': driver.__class__.__name__,
                     'version': driver.get_version()},
                    resource={'type': 'driver',
                              'id': driver.__class__.__name__})


def get_file_mode(path):
    """This primarily exists to make unit testing easier."""
    return stat.S_IMODE(os.stat(path).st_mode)


def get_file_gid(path):
    """This primarily exists to make unit testing easier."""
    return os.stat(path).st_gid


def get_file_size(path):
    """Returns the file size."""
    return os.stat(path).st_size


def _get_disk_of_partition(devpath, st=None):
    """Gets a disk device path and status from partition path.

    Returns a disk device path from a partition device path, and stat for
    the device. If devpath is not a partition, devpath is returned as it is.
    For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1'
    is returned for '/dev/disk1p1' ('p' is prepended to the partition number
    if the disk name ends with numbers).
    """
    diskpath = re.sub(r'(?:(?<=\d)p)?\d+$', '', devpath)
    if diskpath != devpath:
        try:
            st_disk = os.stat(diskpath)
            if stat.S_ISBLK(st_disk.st_mode):
                return (diskpath, st_disk)
        except OSError:
            pass
    # devpath is not a partition
    if st is None:
        st = os.stat(devpath)
    return (devpath, st)


def get_bool_param(param_string, params, default=False):
    param = params.get(param_string, default)
    if not strutils.is_valid_boolstr(param):
        msg = _("Value '%(param)s' for '%(param_string)s' is not "
                "a boolean.") % {'param': param, 'param_string': param_string}
        raise exception.InvalidParameterValue(err=msg)

    return strutils.bool_from_string(param, strict=True)


def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get the 'major:minor' number of a block device.

    Get the 'major:minor' number of the block device backing the specified
    path, so that an I/O ratelimit can be applied to it.
    If lookup_for_file is True and the path is a regular file, look up the
    disk device on which the file lies and return the result for that
    device.
    """
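    # For example (illustrative), get_blkdev_major_minor('/dev/sda1') might
    # return '8:1', while a file on a network file system returns None.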
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute('df', path)
        devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file '%s'") % path
        raise exception.Error(msg)


def check_string_length(value, name, min_length=0, max_length=None,
                        allow_all_spaces=True):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :param allow_all_spaces: whether a value of only spaces is allowed
    """
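    # Example (illustrative): check_string_length('volume1', 'name',
    # min_length=1, max_length=255) passes, while an empty value for the
    # same call raises InvalidInput.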
    try:
        strutils.check_string_length(value, name=name,
                                     min_length=min_length,
                                     max_length=max_length)
    except (ValueError, TypeError) as exc:
        raise exception.InvalidInput(reason=exc)

    if not allow_all_spaces and value.isspace():
        msg = _('%(name)s cannot be all spaces.') % {'name': name}
        raise exception.InvalidInput(reason=msg)


_visible_admin_metadata_keys = ['readonly', 'attached_mode']


def add_visible_admin_metadata(volume):
    """Add user-visible admin metadata to regular metadata.

    Extracts the admin metadata keys that are to be made visible to
    non-administrators, and adds them to the regular metadata structure for
    the passed-in volume.
    """
    visible_admin_meta = {}

    if volume.get('volume_admin_metadata'):
        if isinstance(volume['volume_admin_metadata'], dict):
            volume_admin_metadata = volume['volume_admin_metadata']
            for key in volume_admin_metadata:
                if key in _visible_admin_metadata_keys:
                    visible_admin_meta[key] = volume_admin_metadata[key]
        else:
            for item in volume['volume_admin_metadata']:
                if item['key'] in _visible_admin_metadata_keys:
                    visible_admin_meta[item['key']] = item['value']
    # avoid circular ref when volume is a Volume instance
    elif (volume.get('admin_metadata') and
            isinstance(volume.get('admin_metadata'), dict)):
        for key in _visible_admin_metadata_keys:
            if key in volume['admin_metadata'].keys():
                visible_admin_meta[key] = volume['admin_metadata'][key]

    if not visible_admin_meta:
        return

    # NOTE(zhiyan): update visible administration metadata to
    # volume metadata; administration metadata will overwrite existing keys.
    if volume.get('volume_metadata'):
        orig_meta = list(volume.get('volume_metadata'))
        for item in orig_meta:
            if item['key'] in visible_admin_meta.keys():
                item['value'] = visible_admin_meta.pop(item['key'])
        for key, value in visible_admin_meta.items():
            orig_meta.append({'key': key, 'value': value})
        volume['volume_metadata'] = orig_meta
    # avoid circular ref when vol is a Volume instance
    elif (volume.get('metadata') and
            isinstance(volume.get('metadata'), dict)):
        volume['metadata'].update(visible_admin_meta)
    else:
        volume['metadata'] = visible_admin_meta


def remove_invalid_filter_options(context, filters,
                                  allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""

    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown_options = [opt for opt in filters
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    LOG.debug("Removing options '%s' from query.", bad_options)
    for opt in unknown_options:
        del filters[opt]


def is_blk_device(dev):
    try:
        if stat.S_ISBLK(os.stat(dev).st_mode):
            return True
        return False
    except Exception:
        LOG.debug('Path %s not found in is_blk_device check', dev)
        return False


class ComparableMixin(object):
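    """Mixin providing rich comparisons derived from a single _cmpkey().

    Illustrative usage (hypothetical subclass)::

        class Version(ComparableMixin):
            def __init__(self, value):
                self.value = value

            def _cmpkey(self):
                return self.value
    """
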
    def _compare(self, other, method):
        try:
            return method(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            # _cmpkey is not implemented, or returns a different type,
            # so we cannot compare with "other".
            return NotImplemented

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

    def __le__(self, other):
        return self._compare(other, lambda s, o: s <= o)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

    def __ge__(self, other):
        return self._compare(other, lambda s, o: s >= o)

    def __gt__(self, other):
        return self._compare(other, lambda s, o: s > o)

    def __ne__(self, other):
        return self._compare(other, lambda s, o: s != o)


def retry(exceptions, interval=1, retries=3, backoff_rate=2,
          wait_random=False):
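    """Decorator that retries the decorated function on given exceptions.

    A thin wrapper around the ``retrying`` library that sleeps with
    exponential backoff (``interval * backoff_rate ** attempt``) between
    attempts. Illustrative usage (the exception type is just an example)::

        @retry(processutils.ProcessExecutionError, interval=2, retries=5)
        def _run_command():
            ...
    """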

    def _retry_on_exception(e):
        return isinstance(e, exceptions)

    def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
        exp = backoff_rate ** previous_attempt_number
        wait_for = interval * exp

        if wait_random:
            random.seed()
            wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
        else:
            wait_val = wait_for * 1000.0

        LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0))

        return wait_val

    def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
        delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
        LOG.debug("Failed attempt %s", previous_attempt_number)
        LOG.debug("Have been at this for %s seconds",
                  delay_since_first_attempt)
        return previous_attempt_number == retries

    if retries < 1:
        raise ValueError('Retries must be greater than or '
                         'equal to 1 (received: %s). ' % retries)

    def _decorator(f):

        @six.wraps(f)
        def _wrapper(*args, **kwargs):
            r = retrying.Retrying(retry_on_exception=_retry_on_exception,
                                  wait_func=_backoff_sleep,
                                  stop_func=_print_stop)
            return r.call(f, *args, **kwargs)

        return _wrapper

    return _decorator


def convert_str(text):
    """Convert to native string.

    Convert bytes and Unicode strings to native strings:

    * convert to bytes on Python 2:
      encode Unicode using encodeutils.safe_encode()
    * convert to Unicode on Python 3: decode bytes from UTF-8
    """
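    # e.g. convert_str(b'volume') returns 'volume' on Python 3 and the
    # utf-8 encoded bytes on Python 2 (illustrative).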
    if six.PY2:
        return encodeutils.to_utf8(text)
    else:
        if isinstance(text, bytes):
            return text.decode('utf-8')
        else:
            return text


def trace_method(f):
    """Decorates a function if TRACE_METHOD is true."""
    @functools.wraps(f)
    def trace_method_logging_wrapper(*args, **kwargs):
        if TRACE_METHOD:
            return trace(f)(*args, **kwargs)
        return f(*args, **kwargs)
    return trace_method_logging_wrapper


def trace_api(*dec_args, **dec_kwargs):
    """Decorates a function if TRACE_API is true."""

    def _decorator(f):
        @functools.wraps(f)
        def trace_api_logging_wrapper(*args, **kwargs):
            if TRACE_API:
                return trace(f, *dec_args, **dec_kwargs)(*args, **kwargs)
            return f(*args, **kwargs)
        return trace_api_logging_wrapper

    if len(dec_args) == 0:
        # filter_function is passed and args does not contain f
        return _decorator
    else:
        # filter_function is not passed
        return _decorator(dec_args[0])


def trace(*dec_args, **dec_kwargs):
    """Trace calls to the decorated function.

    This decorator should always be the outermost decorator (applied last)
    so it does not interfere with other decorators.

    Using this decorator on a function will cause its execution to be
    logged at `DEBUG` level with arguments, return values, and exceptions.

    :returns: a function decorator
    """
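    # Illustrative usage:
    #
    #     @utils.trace
    #     def create_volume(self, context, volume):
    #         ...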

    def _decorator(f):

        func_name = f.__name__

        @functools.wraps(f)
        def trace_logging_wrapper(*args, **kwargs):
            filter_function = dec_kwargs.get('filter_function')

            if len(args) > 0:
                maybe_self = args[0]
            else:
                maybe_self = kwargs.get('self', None)

            if maybe_self and hasattr(maybe_self, '__module__'):
                logger = logging.getLogger(maybe_self.__module__)
            else:
                logger = LOG

            # NOTE(ameade): Don't bother going any further if DEBUG log level
            # is not enabled for the logger.
            if not logger.isEnabledFor(py_logging.DEBUG):
                return f(*args, **kwargs)

            all_args = inspect.getcallargs(f, *args, **kwargs)

            pass_filter = filter_function is None or filter_function(all_args)

            if pass_filter:
                logger.debug('==> %(func)s: call %(all_args)r',
                             {'func': func_name, 'all_args': all_args})

            start_time = time.time() * 1000
            try:
                result = f(*args, **kwargs)
            except Exception as exc:
                total_time = int(round(time.time() * 1000)) - start_time
                logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
                             {'func': func_name,
                              'time': total_time,
                              'exc': exc})
                raise
            total_time = int(round(time.time() * 1000)) - start_time

            if isinstance(result, dict):
                mask_result = strutils.mask_dict_password(result)
            elif isinstance(result, six.string_types):
                mask_result = strutils.mask_password(result)
            else:
                mask_result = result

            if pass_filter:
                logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
                             {'func': func_name,
                              'time': total_time,
                              'result': mask_result})
            return result
        return trace_logging_wrapper

    if len(dec_args) == 0:
        # filter_function is passed and args does not contain f
        return _decorator
    else:
        # filter_function is not passed
        return _decorator(dec_args[0])


class TraceWrapperMetaclass(type):
    """Metaclass that wraps all methods of a class with trace_method.

    This metaclass will cause every function inside of the class to be
    decorated with the trace_method decorator.

    To use the metaclass you define a class like so::

        @six.add_metaclass(utils.TraceWrapperMetaclass)
        class MyClass(object):
            ...
    """
    def __new__(meta, classname, bases, classDict):
        newClassDict = {}
        for attributeName, attribute in classDict.items():
            if isinstance(attribute, types.FunctionType):
                # replace it with a wrapped version
                attribute = functools.update_wrapper(trace_method(attribute),
                                                     attribute)
            newClassDict[attributeName] = attribute

        return type.__new__(meta, classname, bases, newClassDict)


class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
    """Metaclass that wraps all methods of a class with trace."""
    pass


def setup_tracing(trace_flags):
    """Set global variables for each trace flag.

    Sets variables TRACE_METHOD and TRACE_API, which represent
    whether to log methods or api traces.

    :param trace_flags: a list of strings
    """
    global TRACE_METHOD
    global TRACE_API
    try:
        trace_flags = [flag.strip() for flag in trace_flags]
    except TypeError:  # Handle when trace_flags is None or a test mock
        trace_flags = []
    for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
        LOG.warning('Invalid trace flag: %s', invalid_flag)
    TRACE_METHOD = 'method' in trace_flags
    TRACE_API = 'api' in trace_flags


def resolve_hostname(hostname):
    """Resolves host name to IP address.

    Resolves a host name (my.data.point.com) to an IP address (10.12.143.11).
    This routine also works if the value passed in hostname is already an IP
    address, in which case the same IP address is returned.

    :param hostname:  Host name to resolve.
    :returns:         IP Address for Host name.
    """
    result = socket.getaddrinfo(hostname, None)[0]
    (family, socktype, proto, canonname, sockaddr) = result
    LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
              {'host': hostname, 'ip': sockaddr[0]})
    return sockaddr[0]


def build_or_str(elements, str_format=None):
    """Builds a string of elements joined by 'or'.

    Will join strings with the 'or' word and if a str_format is provided it
    will be used to format the resulting joined string.
    If there are no elements an empty string will be returned.

    :param elements: Elements we want to join.
    :type elements: String or iterable of strings.
    :param str_format: String to use to format the response.
    :type str_format: String.
    """
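    # e.g. build_or_str(['fast', 'cheap'], 'valid options are %s') returns
    # 'valid options are fast or cheap' (illustrative).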
    if not elements:
        return ''

    if not isinstance(elements, six.string_types):
        elements = _(' or ').join(elements)

    if str_format:
        return str_format % elements
    return elements


def calculate_virtual_free_capacity(total_capacity,
                                    free_capacity,
                                    provisioned_capacity,
                                    thin_provisioning_support,
                                    max_over_subscription_ratio,
                                    reserved_percentage,
                                    thin):
    """Calculate the virtual free capacity based on thin provisioning support.

    :param total_capacity:  total_capacity_gb of a host_state or pool.
    :param free_capacity:   free_capacity_gb of a host_state or pool.
    :param provisioned_capacity:    provisioned_capacity_gb of a host_state
                                    or pool.
    :param thin_provisioning_support:   thin_provisioning_support of
                                        a host_state or a pool.
    :param max_over_subscription_ratio: max_over_subscription_ratio of
                                        a host_state or a pool
    :param reserved_percentage: reserved_percentage of a host_state or
                                a pool.
    :param thin: whether volume to be provisioned is thin
    :returns: the calculated virtual free capacity.
    """
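    # Worked example (illustrative): total=100, free=30, provisioned=150,
    # max_over_subscription_ratio=2.0, reserved_percentage=10, thin=True:
    #     100 * 2.0 - 150 - floor(100 * 0.10) = 40 GB of virtual free space.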

    total = float(total_capacity)
    reserved = float(reserved_percentage) / 100

    if thin and thin_provisioning_support:
        free = (total * max_over_subscription_ratio
                - provisioned_capacity
                - math.floor(total * reserved))
    else:
        # Calculate how much free space is left after taking into
        # account the reserved space.
        free = free_capacity - math.floor(total * reserved)
    return free


def calculate_max_over_subscription_ratio(capability,
                                          global_max_over_subscription_ratio):
    # provisioned_capacity_gb is the apparent total capacity of
    # all the volumes created on a backend, which is greater than
    # or equal to allocated_capacity_gb, the apparent total capacity
    # of the volumes created on that backend through Cinder.
    # allocated_capacity_gb is used as the default for
    # provisioned_capacity_gb if the latter is not reported.
    allocated_capacity_gb = capability.get('allocated_capacity_gb', 0)
    provisioned_capacity_gb = capability.get('provisioned_capacity_gb',
                                             allocated_capacity_gb)
    thin_provisioning_support = capability.get('thin_provisioning_support',
                                               False)
    total_capacity_gb = capability.get('total_capacity_gb', 0)
    free_capacity_gb = capability.get('free_capacity_gb', 0)
    pool_name = capability.get('pool_name',
                               capability.get('volume_backend_name'))

    # If thin provisioning is not supported the capacity filter will not use
    # the value we return, no matter what it is.
    if not thin_provisioning_support:
        LOG.debug("Trying to retrieve max_over_subscription_ratio from a "
                  "service that does not support thin provisioning")
        return 1.0

    # Again, if total or free capacity is infinite or unknown, the capacity
    # filter will not use the max_over_subscription_ratio at all. So it does
    # not matter what we return here.
    if ((total_capacity_gb in INFINITE_UNKNOWN_VALUES) or
            (free_capacity_gb in INFINITE_UNKNOWN_VALUES)):
        return 1.0

    max_over_subscription_ratio = (capability.get(
        'max_over_subscription_ratio') or global_max_over_subscription_ratio)

    # We only calculate the automatic max_over_subscription_ratio (mosr)
    # when the global or driver conf is set to auto and
    # provisioned_capacity_gb is not 0. When auto is set and
    # provisioned_capacity_gb is 0, we use the default value 20.0.
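    # For example (illustrative): provisioned_capacity_gb=300,
    # total_capacity_gb=500 and free_capacity_gb=200 give used_capacity=300
    # and mosr = 1 + 300 / (300 + 1), roughly 2.0.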
    if max_over_subscription_ratio == 'auto':
        if provisioned_capacity_gb != 0:
            used_capacity = total_capacity_gb - free_capacity_gb
            LOG.debug("Calculating max_over_subscription_ratio for "
                      "pool %s: provisioned_capacity_gb=%s, "
                      "used_capacity=%s",
                      pool_name, provisioned_capacity_gb, used_capacity)
            max_over_subscription_ratio = 1 + (
                float(provisioned_capacity_gb) / (used_capacity + 1))
        else:
            max_over_subscription_ratio = INITIAL_AUTO_MOSR

        LOG.info("Auto max_over_subscription_ratio for pool %s is "
                 "%s", pool_name, max_over_subscription_ratio)
    else:
        max_over_subscription_ratio = float(max_over_subscription_ratio)

    return max_over_subscription_ratio


def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range.

    :param value: the value of the integer
    :param name: the name of the integer
    :param min_value: the minimum allowed value of the integer
    :param max_value: the maximum allowed value of the integer
    :returns: integer
    """
    try:
        value = strutils.validate_integer(value, name, min_value, max_value)
        return value
    except ValueError as e:
        raise webob.exc.HTTPBadRequest(explanation=six.text_type(e))


def validate_dictionary_string_length(specs):
    """Check the length of each key and value of dictionary."""
    if not isinstance(specs, dict):
        msg = _('specs must be a dictionary.')
        raise exception.InvalidInput(reason=msg)

    for key, value in specs.items():
        if key is not None:
            check_string_length(key, 'Key "%s"' % key,
                                min_length=1, max_length=255)

        if value is not None:
            check_string_length(value, 'Value for key "%s"' % key,
                                min_length=0, max_length=255)


def service_expired_time(with_timezone=False):
    return (timeutils.utcnow(with_timezone=with_timezone) -
            datetime.timedelta(seconds=CONF.service_down_time))


class DoNothing(str):
    """Class that literally does nothing.

    We inherit from str in case it's called with json.dumps.
    """
    def __call__(self, *args, **kwargs):
        return self

    def __getattr__(self, name):
        return self


DO_NOTHING = DoNothing()


def notifications_enabled(conf):
    """Check if oslo notifications are enabled."""
    notifications_driver = set(conf.oslo_messaging_notifications.driver)
    return notifications_driver and notifications_driver != {'noop'}


def if_notifications_enabled(f):
    """Calls decorated method only if notifications are enabled."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if notifications_enabled(CONF):
            return f(*args, **kwargs)
        return DO_NOTHING
    return wrapped


LOG_LEVELS = ('INFO', 'WARNING', 'ERROR', 'DEBUG')


def get_log_method(level_string):
    level_string = level_string or ''
    upper_level_string = level_string.upper()
    if upper_level_string not in LOG_LEVELS:
        raise exception.InvalidInput(
            reason=_('%s is not a valid log level.') % level_string)
    return getattr(logging, upper_level_string)


def set_log_levels(prefix, level_string):
    level = get_log_method(level_string)
    prefix = prefix or ''

    for k, v in logging.get_loggers().items():
        if k and k.startswith(prefix):
            v.logger.setLevel(level)


def get_log_levels(prefix):
    prefix = prefix or ''
    return {k: logging.logging.getLevelName(v.logger.getEffectiveLevel())
            for k, v in logging.get_loggers().items()
            if k and k.startswith(prefix)}


def paths_normcase_equal(path_a, path_b):
    return os.path.normcase(path_a) == os.path.normcase(path_b)