1# Copyright (c) 2012 NetApp, Inc.
2# Copyright (c) 2014 Red Hat, Inc.
3# All Rights Reserved.
4#
5#    Licensed under the Apache License, Version 2.0 (the "License"); you may
6#    not use this file except in compliance with the License. You may obtain
7#    a copy of the License at
8#
9#         http://www.apache.org/licenses/LICENSE-2.0
10#
11#    Unless required by applicable law or agreed to in writing, software
12#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14#    License for the specific language governing permissions and limitations
15#    under the License.
16
17import collections
18import errno
19import hashlib
20import inspect
21import json
22import math
23import os
24import re
25import shutil
26import time
27
28from oslo_config import cfg
29from oslo_log import log as logging
30from oslo_utils import units
31import six
32
33from cinder import compute
34from cinder import coordination
35from cinder import db
36from cinder import exception
37from cinder.i18n import _
38from cinder.image import image_utils
39from cinder import objects
40from cinder.objects import fields
41from cinder import utils
42from cinder.volume import configuration
43from cinder.volume import driver
44from cinder.volume import utils as volume_utils
45
46LOG = logging.getLogger(__name__)
47
48
# Options describing how to reach, authenticate against, and mount the
# backing NAS system; shared by all drivers derived from RemoteFSDriver.
nas_opts = [
    cfg.StrOpt('nas_host',
               default='',
               help='IP address or Hostname of NAS system.'),
    cfg.StrOpt('nas_login',
               default='admin',
               help='User name to connect to NAS system.'),
    cfg.StrOpt('nas_password',
               default='',
               help='Password to connect to NAS system.',
               secret=True),
    cfg.PortOpt('nas_ssh_port',
                default=22,
                help='SSH port to use to connect to NAS system.'),
    cfg.StrOpt('nas_private_key',
               default='',
               help='Filename of private key to use for SSH authentication.'),
    cfg.StrOpt('nas_secure_file_operations',
               default='auto',
               help=('Allow network-attached storage systems to operate in a '
                     'secure environment where root level access is not '
                     'permitted. If set to False, access is as the root user '
                     'and insecure. If set to True, access is not as root. '
                     'If set to auto, a check is done to determine if this is '
                     'a new installation: True is used if so, otherwise '
                     'False. Default is auto.')),
    cfg.StrOpt('nas_secure_file_permissions',
               default='auto',
               help=('Set more secure file permissions on network-attached '
                     'storage volume files to restrict broad other/world '
                     'access. If set to False, volumes are created with open '
                     'permissions. If set to True, volumes are created with '
                     'permissions for the cinder user and group (660). If '
                     'set to auto, a check is done to determine if '
                     'this is a new installation: True is used if so, '
                     'otherwise False. Default is auto.')),
    cfg.StrOpt('nas_share_path',
               default='',
               help=('Path to the share to use for storing Cinder volumes. '
                     'For example:  "/srv/export1" for an NFS server export '
                     'available at 10.0.5.10:/srv/export1 .')),
    cfg.StrOpt('nas_mount_options',
               help=('Options used to mount the storage backend file system '
                     'where Cinder volumes are stored.')),
]

# Options controlling how volume files are created on the share.
volume_opts = [
    cfg.StrOpt('nas_volume_prov_type',
               default='thin',
               choices=['thin', 'thick'],
               help=('Provisioning type that will be used when '
                     'creating volumes.')),
]

# Register both option groups so they are visible in the shared
# (backend-defaults) configuration group.
CONF = cfg.CONF
CONF.register_opts(nas_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
106
107
# TODO(bluex): remove when drivers stop using it
def locked_volume_id_operation(f, external=False):
    """Lock decorator for volume operations.

    Serializes the decorated driver method on a lock named after the
    volume id, preventing concurrent operations from modifying the same
    volume. The wrapped method must take either a 'volume' or a
    'snapshot' argument (for snapshots, the parent volume's id is used).
    """

    def lvo_inner1(inst, *args, **kwargs):
        call_args = inspect.getcallargs(f, inst, *args, **kwargs)

        volume = call_args.get('volume')
        snapshot = call_args.get('snapshot')
        if volume:
            volume_id = volume.id
        elif snapshot:
            volume_id = snapshot.volume.id
        else:
            raise exception.VolumeBackendAPIException(
                data=_('The decorated method must accept either a volume or '
                       'a snapshot object'))

        lock_name = '%s-%s' % (inst.driver_prefix, volume_id)

        @utils.synchronized(lock_name, external=external)
        def lvo_inner2():
            return f(inst, *args, **kwargs)
        return lvo_inner2()
    return lvo_inner1
138
139
class RemoteFSDriver(driver.BaseVD):
    """Common base for drivers that work like NFS."""

    # Connection type reported in initialize_connection(); set by concrete
    # drivers (e.g. 'nfs').
    driver_volume_type = None
    # Prefix used to look up driver-specific config options such as
    # '<prefix>_shares_config', '<prefix>_sparsed_volumes' and
    # '<prefix>_qcow2_volumes'.
    driver_prefix = 'remotefs'
    volume_backend_name = None
    vendor_name = 'Open Source'
    # Shares must look like 'address:/export'.
    SHARE_FORMAT_REGEX = r'.+:/.+'

    # We let the drivers inheriting this specify
    # whether thin provisioning is supported or not.
    _thin_provisioning_support = False
    _thick_provisioning_support = False

    def __init__(self, *args, **kwargs):
        """Initialize share bookkeeping and register the NAS options."""
        super(RemoteFSDriver, self).__init__(*args, **kwargs)
        # Mapping of share address -> mount options string (or None),
        # populated by _load_shares_config().
        self.shares = {}
        # Shares successfully mounted by _ensure_shares_mounted().
        self._mounted_shares = []
        self._execute_as_root = True
        # NOTE(review): kwargs is forwarded to super() above *before* this
        # pop, so the base class also receives 'is_vol_db_empty' — presumably
        # it tolerates unknown kwargs; confirm against driver.BaseVD.
        self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
        self._supports_encryption = False

        if self.configuration:
            self.configuration.append_config_values(nas_opts)
            self.configuration.append_config_values(volume_opts)
165
166    def check_for_setup_error(self):
167        """Just to override parent behavior."""
168        pass
169
170    @utils.trace
171    def initialize_connection(self, volume, connector):
172        """Allow connection to connector and return connection info.
173
174        :param volume: volume reference
175        :param connector: connector reference
176        """
177        data = {'export': volume.provider_location,
178                'name': volume.name}
179        if volume.provider_location in self.shares:
180            data['options'] = self.shares[volume.provider_location]
181        return {
182            'driver_volume_type': self.driver_volume_type,
183            'data': data,
184            'mount_point_base': self._get_mount_point_base()
185        }
186
187    def do_setup(self, context):
188        """Any initialization the volume driver does while starting."""
189        super(RemoteFSDriver, self).do_setup(context)
190
191        # Validate the settings for our secure file options.
192        self.configuration.nas_secure_file_permissions = \
193            self.configuration.nas_secure_file_permissions.lower()
194        self.configuration.nas_secure_file_operations = \
195            self.configuration.nas_secure_file_operations.lower()
196        valid_secure_opts = ['auto', 'true', 'false']
197        secure_options = {'nas_secure_file_permissions':
198                          self.configuration.nas_secure_file_permissions,
199                          'nas_secure_file_operations':
200                          self.configuration.nas_secure_file_operations}
201
202        LOG.debug('NAS config: %s', secure_options)
203        for opt_name, opt_value in secure_options.items():
204            if opt_value not in valid_secure_opts:
205                err_parms = {'name': opt_name, 'value': opt_value}
206                msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
207                        "'auto', 'true', or 'false'") % err_parms
208                LOG.error(msg)
209                raise exception.InvalidConfigurationValue(msg)
210
211    def _get_provisioned_capacity(self):
212        """Returns the provisioned capacity.
213
214        Get the sum of sizes of volumes, snapshots and any other
215        files on the mountpoint.
216        """
217        provisioned_size = 0.0
218        for share in self.shares.keys():
219            mount_path = self._get_mount_point_for_share(share)
220            out, _ = self._execute('du', '-k', mount_path,
221                                   run_as_root=self._execute_as_root)
222            provisioned_size += int(out.split()[0]) * 1024
223        return round(provisioned_size / units.Gi, 2)
224
225    def _get_mount_point_base(self):
226        """Returns the mount point base for the remote fs.
227
228           This method facilitates returning mount point base
229           for the specific remote fs. Override this method
230           in the respective driver to return the entry to be
231           used while attach/detach using brick in cinder.
232           If not overridden then it returns None without
233           raising exception to continue working for cases
234           when not used with brick.
235        """
236        LOG.debug("Driver specific implementation needs to return"
237                  " mount_point_base.")
238        return None
239
240    @staticmethod
241    def _validate_state(current_state,
242                        acceptable_states,
243                        obj_description='volume',
244                        invalid_exc=exception.InvalidVolume):
245        if current_state not in acceptable_states:
246            message = _('Invalid %(obj_description)s state. '
247                        'Acceptable states for this operation: '
248                        '%(acceptable_states)s. '
249                        'Current %(obj_description)s state: '
250                        '%(current_state)s.')
251            raise invalid_exc(
252                message=message %
253                dict(obj_description=obj_description,
254                     acceptable_states=acceptable_states,
255                     current_state=current_state))
256
257    @utils.trace
258    def create_volume(self, volume):
259        """Creates a volume.
260
261        :param volume: volume reference
262        :returns: provider_location update dict for database
263        """
264
265        if volume.encryption_key_id and not self._supports_encryption:
266            message = _("Encryption is not yet supported.")
267            raise exception.VolumeDriverException(message=message)
268
269        LOG.debug('Creating volume %(vol)s', {'vol': volume.id})
270        self._ensure_shares_mounted()
271
272        volume.provider_location = self._find_share(volume)
273
274        LOG.info('casted to %s', volume.provider_location)
275
276        self._do_create_volume(volume)
277
278        return {'provider_location': volume.provider_location}
279
280    def _do_create_volume(self, volume):
281        """Create a volume on given remote share.
282
283        :param volume: volume reference
284        """
285        volume_path = self.local_path(volume)
286        volume_size = volume.size
287
288        if getattr(self.configuration,
289                   self.driver_prefix + '_qcow2_volumes', False):
290            # QCOW2 volumes are inherently sparse, so this setting
291            # will override the _sparsed_volumes setting.
292            self._create_qcow2_file(volume_path, volume_size)
293        elif getattr(self.configuration,
294                     self.driver_prefix + '_sparsed_volumes', False):
295            self._create_sparsed_file(volume_path, volume_size)
296        else:
297            self._create_regular_file(volume_path, volume_size)
298
299        self._set_rw_permissions(volume_path)
300
301    def _ensure_shares_mounted(self):
302        """Look for remote shares in the flags and mount them locally."""
303        mounted_shares = []
304
305        self._load_shares_config(getattr(self.configuration,
306                                         self.driver_prefix +
307                                         '_shares_config'))
308
309        for share in self.shares.keys():
310            try:
311                self._ensure_share_mounted(share)
312                mounted_shares.append(share)
313            except Exception as exc:
314                LOG.error('Exception during mounting %s', exc)
315
316        self._mounted_shares = mounted_shares
317
318        LOG.debug('Available shares %s', self._mounted_shares)
319
320    @utils.trace
321    def delete_volume(self, volume):
322        """Deletes a logical volume.
323
324        :param volume: volume reference
325        """
326
327        LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s',
328                  {'vol': volume.id, 'loc': volume.provider_location})
329        if not volume.provider_location:
330            LOG.warning('Volume %s does not have '
331                        'provider_location specified, '
332                        'skipping', volume.name)
333            return
334
335        self._ensure_share_mounted(volume.provider_location)
336
337        mounted_path = self.local_path(volume)
338
339        self._delete(mounted_path)
340
341    def ensure_export(self, ctx, volume):
342        """Synchronously recreates an export for a logical volume."""
343        self._ensure_share_mounted(volume.provider_location)
344
345    def create_export(self, ctx, volume, connector):
346        """Exports the volume.
347
348        Can optionally return a dictionary of changes
349        to the volume object to be persisted.
350        """
351        pass
352
353    def remove_export(self, ctx, volume):
354        """Removes an export for a logical volume."""
355        pass
356
357    def delete_snapshot(self, snapshot):
358        """Delete snapshot.
359
360        Do nothing for this driver, but allow manager to handle deletion
361        of snapshot in error state.
362        """
363        pass
364
365    def _delete(self, path):
366        # Note(lpetrut): this method is needed in order to provide
367        # interoperability with Windows as it will be overridden.
368        self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
369
370    def _create_sparsed_file(self, path, size):
371        """Creates a sparse file of a given size in GiB."""
372        self._execute('truncate', '-s', '%sG' % size,
373                      path, run_as_root=self._execute_as_root)
374
375    def _create_regular_file(self, path, size):
376        """Creates a regular file of given size in GiB."""
377
378        block_size_mb = 1
379        block_count = size * units.Gi // (block_size_mb * units.Mi)
380
381        self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
382                      'bs=%dM' % block_size_mb,
383                      'count=%d' % block_count,
384                      run_as_root=self._execute_as_root)
385
386    def _create_qcow2_file(self, path, size_gb):
387        """Creates a QCOW2 file of a given size in GiB."""
388
389        self._execute('qemu-img', 'create', '-f', 'qcow2',
390                      '-o', 'preallocation=metadata',
391                      path, str(size_gb * units.Gi),
392                      run_as_root=self._execute_as_root)
393
394    def _set_rw_permissions(self, path):
395        """Sets access permissions for given NFS path.
396
397        Volume file permissions are set based upon the value of
398        secure_file_permissions: 'true' sets secure access permissions and
399        'false' sets more open (insecure) access permissions.
400
401        :param path: the volume file path.
402        """
403        if self.configuration.nas_secure_file_permissions == 'true':
404            permissions = '660'
405            LOG.debug('File path %(path)s is being set with permissions: '
406                      '%(permissions)s',
407                      {'path': path, 'permissions': permissions})
408        else:
409            permissions = 'ugo+rw'
410            LOG.warning('%(path)s is being set with open permissions: '
411                        '%(perm)s', {'path': path, 'perm': permissions})
412
413        self._execute('chmod', permissions, path,
414                      run_as_root=self._execute_as_root)
415
416    def _set_rw_permissions_for_all(self, path):
417        """Sets 666 permissions for the path."""
418        self._execute('chmod', 'ugo+rw', path,
419                      run_as_root=self._execute_as_root)
420
421    def _set_rw_permissions_for_owner(self, path):
422        """Sets read-write permissions to the owner for the path."""
423        self._execute('chmod', 'u+rw', path,
424                      run_as_root=self._execute_as_root)
425
426    def local_path(self, volume):
427        """Get volume path (mounted locally fs path) for given volume.
428
429        :param volume: volume reference
430        """
431        remotefs_share = volume.provider_location
432        return os.path.join(self._get_mount_point_for_share(remotefs_share),
433                            volume.name)
434
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume.

        :param context: request context
        :param volume: destination volume reference
        :param image_service: glance image service handle
        :param image_id: id of the image to fetch
        :raises ImageUnacceptable: if the resulting virtual size does not
            match the requested volume size
        """

        # Fetch and convert the image directly onto the volume file.
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume),
                                 self.configuration.volume_dd_blocksize,
                                 size=volume.size,
                                 run_as_root=self._execute_as_root)

        # NOTE (leseb): Set the virtual size of the image
        # the raw conversion overwrote the destination file
        # (which had the correct size)
        # with the fetched glance image size,
        # thus the initial 'size' parameter is not honored
        # this sets the size to the one asked in the first place by the user
        # and then verify the final virtual size
        image_utils.resize_image(self.local_path(volume), volume.size,
                                 run_as_root=self._execute_as_root)

        # Final sanity check: the virtual size (in GiB) must match what
        # the user requested.
        data = image_utils.qemu_img_info(self.local_path(volume),
                                         run_as_root=self._execute_as_root)
        virt_size = data.virtual_size // units.Gi
        if virt_size != volume.size:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("Expected volume size was %d") % volume.size)
                + (_(" but size is now %d") % virt_size))
464
465    def copy_volume_to_image(self, context, volume, image_service, image_meta):
466        """Copy the volume to the specified image."""
467        image_utils.upload_volume(context,
468                                  image_service,
469                                  image_meta,
470                                  self.local_path(volume),
471                                  run_as_root=self._execute_as_root)
472
473    def _read_config_file(self, config_file):
474        # Returns list of lines in file
475        with open(config_file) as f:
476            return f.readlines()
477
478    def _load_shares_config(self, share_file=None):
479        self.shares = {}
480
481        if all((self.configuration.nas_host,
482                self.configuration.nas_share_path)):
483            LOG.debug('Using nas_host and nas_share_path configuration.')
484
485            nas_host = self.configuration.nas_host
486            nas_share_path = self.configuration.nas_share_path
487
488            share_address = '%s:%s' % (nas_host, nas_share_path)
489
490            if not re.match(self.SHARE_FORMAT_REGEX, share_address):
491                msg = (_("Share %s ignored due to invalid format. Must "
492                         "be of form address:/export. Please check the "
493                         "nas_host and nas_share_path settings."),
494                       share_address)
495                raise exception.InvalidConfigurationValue(msg)
496
497            self.shares[share_address] = self.configuration.nas_mount_options
498
499        elif share_file is not None:
500            LOG.debug('Loading shares from %s.', share_file)
501
502            for share in self._read_config_file(share_file):
503                # A configuration line may be either:
504                #  host:/vol_name
505                # or
506                #  host:/vol_name -o options=123,rw --other
507                if not share.strip():
508                    # Skip blank or whitespace-only lines
509                    continue
510                if share.startswith('#'):
511                    continue
512
513                share_info = share.split(' ', 1)
514                # results in share_info =
515                #  [ 'address:/vol', '-o options=123,rw --other' ]
516
517                share_address = share_info[0].strip()
518                # Replace \040 with a space, to support paths with spaces
519                share_address = share_address.replace("\\040", " ")
520                share_opts = None
521                if len(share_info) > 1:
522                    share_opts = share_info[1].strip()
523
524                if not re.match(self.SHARE_FORMAT_REGEX, share_address):
525                    LOG.error("Share %s ignored due to invalid format. "
526                              "Must be of form address:/export.",
527                              share_address)
528                    continue
529
530                self.shares[share_address] = share_opts
531
532        LOG.debug("shares loaded: %s", self.shares)
533
534    def _get_mount_point_for_share(self, path):
535        raise NotImplementedError()
536
537    def terminate_connection(self, volume, connector, **kwargs):
538        """Disallow connection from connector."""
539        pass
540
541    def get_volume_stats(self, refresh=False):
542        """Get volume stats.
543
544        If 'refresh' is True, update the stats first.
545        """
546        if refresh or not self._stats:
547            self._update_volume_stats()
548
549        return self._stats
550
551    def _update_volume_stats(self):
552        """Retrieve stats info from volume group."""
553
554        data = {}
555        backend_name = self.configuration.safe_get('volume_backend_name')
556        data['volume_backend_name'] = backend_name or self.volume_backend_name
557        data['vendor_name'] = 'Open Source'
558        data['driver_version'] = self.get_version()
559        data['storage_protocol'] = self.driver_volume_type
560
561        self._ensure_shares_mounted()
562
563        global_capacity = 0
564        global_free = 0
565        for share in self._mounted_shares:
566            capacity, free, used = self._get_capacity_info(share)
567            global_capacity += capacity
568            global_free += free
569
570        data['total_capacity_gb'] = global_capacity / float(units.Gi)
571        data['free_capacity_gb'] = global_free / float(units.Gi)
572        data['reserved_percentage'] = self.configuration.reserved_percentage
573        data['QoS_support'] = False
574        self._stats = data
575
576    def _get_capacity_info(self, share):
577        raise NotImplementedError()
578
579    def _find_share(self, volume):
580        raise NotImplementedError()
581
582    def _ensure_share_mounted(self, share):
583        raise NotImplementedError()
584
585    def secure_file_operations_enabled(self):
586        """Determine if driver is operating in Secure File Operations mode.
587
588        The Cinder Volume driver needs to query if this driver is operating
589        in a secure file mode; check our nas_secure_file_operations flag.
590        """
591        if self.configuration.nas_secure_file_operations == 'true':
592            return True
593        return False
594
595    def set_nas_security_options(self, is_new_cinder_install):
596        """Determine the setting to use for Secure NAS options.
597
598        This method must be overridden by child wishing to use secure
599        NAS file operations. This base method will set the NAS security
600        options to false.
601        """
602        doc_html = ("https://docs.openstack.org/cinder/latest/admin"
603                    "/blockstorage-nfs-backend.html")
604        self.configuration.nas_secure_file_operations = 'false'
605        LOG.warning("The NAS file operations will be run as root: "
606                    "allowing root level access at the storage backend. "
607                    "This is considered an insecure NAS environment. "
608                    "Please see %s for information on a secure NAS "
609                    "configuration.",
610                    doc_html)
611        self.configuration.nas_secure_file_permissions = 'false'
612        LOG.warning("The NAS file permissions mode will be 666 (allowing "
613                    "other/world read & write access). This is considered "
614                    "an insecure NAS environment. Please see %s for "
615                    "information on a secure NFS configuration.",
616                    doc_html)
617
618    def _determine_nas_security_option_setting(self, nas_option, mount_point,
619                                               is_new_cinder_install):
620        """Determine NAS security option setting when 'auto' is assigned.
621
622        This method determines the final 'true'/'false' setting of an NAS
623        security option when the default value of 'auto' has been detected.
624        If the nas option isn't 'auto' then its current value is used.
625
626        :param nas_option: The NAS security option value loaded from config.
627        :param mount_point: Mount where indicator file is written.
628        :param is_new_cinder_install: boolean for new Cinder installation.
629        :return string: 'true' or 'false' for new option setting.
630        """
631        if nas_option == 'auto':
632            # For auto detection, we first check to see if we have been
633            # through this process before by checking for the existence of
634            # the Cinder secure environment indicator file.
635            file_name = '.cinderSecureEnvIndicator'
636            file_path = os.path.join(mount_point, file_name)
637            if os.path.isfile(file_path):
638                nas_option = 'true'
639                LOG.info('Cinder secure environment '
640                         'indicator file exists.')
641            else:
642                # The indicator file does not exist. If it is a new
643                # installation, set to 'true' and create the indicator file.
644                if is_new_cinder_install:
645                    nas_option = 'true'
646                    try:
647                        with open(file_path, 'w') as fh:
648                            fh.write('Detector file for Cinder secure '
649                                     'environment usage.\n')
650                            fh.write('Do not delete this file.\n')
651
652                        # Set the permissions on our special marker file to
653                        # protect from accidental removal (owner write only).
654                        self._execute('chmod', '640', file_path,
655                                      run_as_root=self._execute_as_root)
656                        LOG.info('New Cinder secure environment indicator'
657                                 ' file created at path %s.', file_path)
658                    except IOError as err:
659                        LOG.error('Failed to created Cinder secure '
660                                  'environment indicator file: %s',
661                                  err)
662                        if err.errno == errno.EACCES:
663                            LOG.warning('Reverting to non-secure mode. Adjust '
664                                        'permissions at %s to allow the '
665                                        'cinder volume service write access '
666                                        'to use secure mode.',
667                                        mount_point)
668                            nas_option = 'false'
669                else:
670                    # For existing installs, we default to 'false'. The
671                    # admin can always set the option at the driver config.
672                    nas_option = 'false'
673
674        return nas_option
675
676
class RemoteFSSnapDriverBase(RemoteFSDriver):
    """Base class for remotefs drivers implementing qcow2 snapshots.

       Driver must implement:
         _local_volume_dir(self, volume)
    """

    # Filename extensions accepted when validating a backing file in
    # _qemu_img_info_base(); empty means no extension is allowed.
    _VALID_IMAGE_EXTENSIONS = []
    # The following flag may be overridden by the concrete drivers in order
    # to avoid using temporary volume snapshots when creating volume clones,
    # when possible.

    _always_use_temp_snap_when_cloning = True

    def __init__(self, *args, **kwargs):
        """Initialize snapshot-capable remotefs driver state."""
        # NOTE(review): these start as None and are presumably assigned by
        # the concrete driver's setup code — confirm in subclasses.
        self._remotefsclient = None
        self.base = None
        # Nova client handle; created in do_setup().
        self._nova = None
        super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs)
696
697    def do_setup(self, context):
698        super(RemoteFSSnapDriverBase, self).do_setup(context)
699
700        self._nova = compute.API()
701
702    def snapshot_revert_use_temp_snapshot(self):
703        # Considering that RemoteFS based drivers use COW images
704        # for storing snapshots, having chains of such images,
705        # creating a backup snapshot when reverting one is not
706        # actutally helpful.
707        return False
708
709    def _local_volume_dir(self, volume):
710        share = volume.provider_location
711        local_dir = self._get_mount_point_for_share(share)
712        return local_dir
713
714    def _local_path_volume(self, volume):
715        path_to_disk = os.path.join(
716            self._local_volume_dir(volume),
717            volume.name)
718
719        return path_to_disk
720
721    def _get_new_snap_path(self, snapshot):
722        vol_path = self.local_path(snapshot.volume)
723        snap_path = '%s.%s' % (vol_path, snapshot.id)
724        return snap_path
725
726    def _local_path_volume_info(self, volume):
727        return '%s%s' % (self.local_path(volume), '.info')
728
729    def _read_file(self, filename):
730        """This method is to make it easier to stub out code for testing.
731
732        Returns a string representing the contents of the file.
733        """
734
735        with open(filename, 'r') as f:
736            return f.read()
737
738    def _write_info_file(self, info_path, snap_info):
739        if 'active' not in snap_info.keys():
740            msg = _("'active' must be present when writing snap_info.")
741            raise exception.RemoteFSException(msg)
742
743        if not (os.path.exists(info_path) or os.name == 'nt'):
744            # We're not managing file permissions on Windows.
745            # Plus, 'truncate' is not available.
746            self._execute('truncate', "-s0", info_path,
747                          run_as_root=self._execute_as_root)
748            self._set_rw_permissions(info_path)
749
750        with open(info_path, 'w') as f:
751            json.dump(snap_info, f, indent=1, sort_keys=True)
752
    def _qemu_img_info_base(self, path, volume_name, basedir,
                            force_share=False,
                            run_as_root=False):
        """Sanitize image_utils' qemu_img_info.

        This code expects to deal only with relative filenames.

        :param path: path of the image file to inspect
        :param volume_name: name of the volume the image belongs to;
            used to validate the backing file name
        :param basedir: mount point base used in the expected backing
            file path pattern
        :param force_share: passed through to qemu_img_info
        :param run_as_root: run qemu-img as root; forced on when the
            driver itself requires root for image operations
        :returns: img info object with image/backing_file reduced to
            basenames
        :raises RemoteFSInvalidBackingFile: if the backing file does not
            match the expected pattern for this volume
        """

        run_as_root = run_as_root or self._execute_as_root

        info = image_utils.qemu_img_info(path,
                                         force_share=force_share,
                                         run_as_root=run_as_root)
        if info.image:
            info.image = os.path.basename(info.image)
        if info.backing_file:
            if self._VALID_IMAGE_EXTENSIONS:
                valid_ext = r'(\.(%s))?' % '|'.join(
                    self._VALID_IMAGE_EXTENSIONS)
            else:
                valid_ext = ''

            # Only accept backing files that live under this driver's
            # mount area and are named after this volume (optionally
            # with a snapshot/tmp-snap suffix and a known extension);
            # anything else is rejected as an invalid backing file.
            backing_file_template = \
                "(%(basedir)s/[0-9a-f]+/)?%" \
                "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % {
                    'basedir': basedir,
                    'volname': volume_name,
                    'valid_ext': valid_ext,
                }
            if not re.match(backing_file_template, info.backing_file,
                            re.IGNORECASE):
                raise exception.RemoteFSInvalidBackingFile(
                    path=path, backing_file=info.backing_file)

            info.backing_file = os.path.basename(info.backing_file)

        return info
790
791    def _qemu_img_info(self, path, volume_name):
792        raise NotImplementedError()
793
794    def _img_commit(self, path):
795        # TODO(eharney): this is not using the correct permissions for
796        # NFS snapshots
797        #  It needs to run as root for volumes attached to instances, but
798        #  does not when in secure mode.
799        self._execute('qemu-img', 'commit', '-d', path,
800                      run_as_root=self._execute_as_root)
801        self._delete(path)
802
803    def _rebase_img(self, image, backing_file, volume_format):
804        # qemu-img create must run as root, because it reads from the
805        # backing file, which will be owned by qemu:qemu if attached to an
806        # instance.
807        # TODO(erlon): Sanity check this.
808        self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
809                      '-F', volume_format, run_as_root=self._execute_as_root)
810
811    def _read_info_file(self, info_path, empty_if_missing=False):
812        """Return dict of snapshot information.
813
814           :param: info_path: path to file
815           :param: empty_if_missing: True=return empty dict if no file
816        """
817
818        if not os.path.exists(info_path):
819            if empty_if_missing is True:
820                return {}
821
822        return json.loads(self._read_file(info_path))
823
824    def _get_higher_image_path(self, snapshot):
825        volume = snapshot.volume
826        info_path = self._local_path_volume_info(volume)
827        snap_info = self._read_info_file(info_path)
828
829        snapshot_file = snap_info[snapshot.id]
830        active_file = self.get_active_image_from_info(volume)
831        active_file_path = os.path.join(self._local_volume_dir(volume),
832                                        active_file)
833        backing_chain = self._get_backing_chain_for_path(
834            volume, active_file_path)
835        higher_file = next((os.path.basename(f['filename'])
836                            for f in backing_chain
837                            if utils.paths_normcase_equal(
838                                f.get('backing-filename', ''),
839                                snapshot_file)),
840                           None)
841        return higher_file
842
843    def _get_backing_chain_for_path(self, volume, path):
844        """Returns list of dicts containing backing-chain information.
845
846        Includes 'filename', and 'backing-filename' for each
847        applicable entry.
848
849        Consider converting this to use --backing-chain and --output=json
850        when environment supports qemu-img 1.5.0.
851
852        :param volume: volume reference
853        :param path: path to image file at top of chain
854
855        """
856
857        output = []
858
859        info = self._qemu_img_info(path, volume.name)
860        new_info = {}
861        new_info['filename'] = os.path.basename(path)
862        new_info['backing-filename'] = info.backing_file
863
864        output.append(new_info)
865
866        while new_info['backing-filename']:
867            filename = new_info['backing-filename']
868            path = os.path.join(self._local_volume_dir(volume), filename)
869            info = self._qemu_img_info(path, volume.name)
870            backing_filename = info.backing_file
871            new_info = {}
872            new_info['filename'] = filename
873            new_info['backing-filename'] = backing_filename
874
875            output.append(new_info)
876
877        return output
878
879    def _get_hash_str(self, base_str):
880        """Return a string that represents hash of base_str.
881
882        Returns string in a hex format.
883        """
884        if isinstance(base_str, six.text_type):
885            base_str = base_str.encode('utf-8')
886        return hashlib.md5(base_str).hexdigest()
887
888    def _get_mount_point_for_share(self, share):
889        """Return mount point for share.
890
891        :param share: example 172.18.194.100:/var/fs
892        """
893        return self._remotefsclient.get_mount_point(share)
894
895    def _get_available_capacity(self, share):
896        """Calculate available space on the share.
897
898        :param share: example 172.18.194.100:/var/fs
899        """
900        mount_point = self._get_mount_point_for_share(share)
901
902        out, _ = self._execute('df', '-k', mount_point,
903                               run_as_root=self._execute_as_root)
904        out = out.splitlines()[1]
905
906        size = int(out.split()[1]) * 1024
907        available = int(out.split()[3]) * 1024
908
909        return available, size
910
911    def _get_capacity_info(self, remotefs_share):
912        available, size = self._get_available_capacity(remotefs_share)
913        return size, available, size - available
914
915    def _get_mount_point_base(self):
916        return self.base
917
    def _copy_volume_to_image(self, context, volume, image_service,
                              image_meta):
        """Copy the volume to the specified image.

        :param context: request context
        :param volume: volume whose active image is uploaded
        :param image_service: image service client
        :param image_meta: target image metadata; its 'id' is used in
            the temporary file name
        """

        # If snapshots exist, flatten to a temporary image, and upload it

        active_file = self.get_active_image_from_info(volume)
        active_file_path = os.path.join(self._local_volume_dir(volume),
                                        active_file)
        info = self._qemu_img_info(active_file_path, volume.name)
        backing_file = info.backing_file

        root_file_fmt = info.file_format

        tmp_params = {
            'prefix': '%s.temp_image.%s' % (volume.id, image_meta['id']),
            'suffix': '.img'
        }
        with image_utils.temporary_file(**tmp_params) as temp_path:
            if backing_file or (root_file_fmt != 'raw'):
                # Convert due to snapshots
                # or volume data not being stored in raw format
                #  (upload_volume assumes raw format input)
                image_utils.convert_image(active_file_path, temp_path, 'raw',
                                          run_as_root=self._execute_as_root)
                upload_path = temp_path
            else:
                upload_path = active_file_path

            image_utils.upload_volume(context,
                                      image_service,
                                      image_meta,
                                      upload_path,
                                      run_as_root=self._execute_as_root)
952
953    def get_active_image_from_info(self, volume):
954        """Returns filename of the active image from the info file."""
955
956        info_file = self._local_path_volume_info(volume)
957
958        snap_info = self._read_info_file(info_file, empty_if_missing=True)
959
960        if not snap_info:
961            # No info file = no snapshots exist
962            vol_path = os.path.basename(self.local_path(volume))
963            return vol_path
964
965        return snap_info['active']
966
967    def _local_path_active_image(self, volume):
968        active_fname = self.get_active_image_from_info(volume)
969        vol_dir = self._local_volume_dir(volume)
970
971        active_fpath = os.path.join(vol_dir, active_fname)
972        return active_fpath
973
974    def _snapshots_exist(self, volume):
975        if not volume.provider_location:
976            return False
977
978        active_fpath = self._local_path_active_image(volume)
979        base_vol_path = self.local_path(volume)
980
981        return not utils.paths_normcase_equal(active_fpath, base_vol_path)
982
983    def _is_volume_attached(self, volume):
984        return volume.attach_status == fields.VolumeAttachStatus.ATTACHED
985
    def _create_cloned_volume(self, volume, src_vref, context):
        """Clone src_vref into the new volume.

        If the source has snapshots (or the driver always requires it),
        a temporary snapshot is created and the new volume is copied
        from it; otherwise the base image file is copied directly.

        :param volume: destination volume object
        :param src_vref: source volume object
        :param context: request context
        :returns: model update dict carrying the clone's
            provider_location
        """
        LOG.info('Cloning volume %(src)s to volume %(dst)s',
                 {'src': src_vref.id,
                  'dst': volume.id})

        acceptable_states = ['available', 'backing-up', 'downloading']
        self._validate_state(src_vref.status,
                             acceptable_states,
                             obj_description='source volume')

        volume_name = CONF.volume_name_template % volume.id

        # Create a lightweight stand-in for the destination volume; it
        # carries the source's share location so the copy helpers write
        # to the right place under the new volume's name.
        vol_attrs = ['provider_location', 'size', 'id', 'name', 'status',
                     'volume_type', 'metadata']
        Volume = collections.namedtuple('Volume', vol_attrs)
        volume_info = Volume(provider_location=src_vref.provider_location,
                             size=src_vref.size,
                             id=volume.id,
                             name=volume_name,
                             status=src_vref.status,
                             volume_type=src_vref.volume_type,
                             metadata=src_vref.metadata)

        if (self._always_use_temp_snap_when_cloning or
                self._snapshots_exist(src_vref)):
            # A real (DB-backed) temporary snapshot is needed so the
            # source keeps receiving writes safely while we copy.
            kwargs = {
                'volume_id': src_vref.id,
                'user_id': context.user_id,
                'project_id': context.project_id,
                'status': fields.SnapshotStatus.CREATING,
                'progress': '0%',
                'volume_size': src_vref.size,
                'display_name': 'tmp-snap-%s' % src_vref.id,
                'display_description': None,
                'volume_type_id': src_vref.volume_type_id,
                'encryption_key_id': src_vref.encryption_key_id,
            }
            temp_snapshot = objects.Snapshot(context=context,
                                             **kwargs)
            temp_snapshot.create()

            self._create_snapshot(temp_snapshot)
            try:
                self._copy_volume_from_snapshot(temp_snapshot,
                                                volume_info,
                                                volume.size)
                # remove temp snapshot after the cloning is done
                temp_snapshot.status = fields.SnapshotStatus.DELETING
                temp_snapshot.context = context.elevated()
                temp_snapshot.save()
            finally:
                # Clean up the temp snapshot even if the copy failed.
                self._delete_snapshot(temp_snapshot)
                temp_snapshot.destroy()
        else:
            # No snapshots: a plain file copy of the base image is safe.
            self._copy_volume_image(self.local_path(src_vref),
                                    self.local_path(volume_info))
            self._extend_volume(volume_info, volume.size)

        return {'provider_location': src_vref.provider_location}
1046
1047    def _copy_volume_image(self, src_path, dest_path):
1048        shutil.copyfile(src_path, dest_path)
1049        self._set_rw_permissions(dest_path)
1050
1051    def _delete_stale_snapshot(self, snapshot):
1052        info_path = self._local_path_volume_info(snapshot.volume)
1053        snap_info = self._read_info_file(info_path)
1054
1055        snapshot_file = snap_info[snapshot.id]
1056        active_file = self.get_active_image_from_info(snapshot.volume)
1057        snapshot_path = os.path.join(
1058            self._local_volume_dir(snapshot.volume), snapshot_file)
1059        if utils.paths_normcase_equal(snapshot_file, active_file):
1060            return
1061
1062        LOG.info('Deleting stale snapshot: %s', snapshot.id)
1063        self._delete(snapshot_path)
1064        del(snap_info[snapshot.id])
1065        self._write_info_file(info_path, snap_info)
1066
    def _delete_snapshot(self, snapshot):
        """Delete a snapshot.

        If volume status is 'available', delete snapshot here in Cinder
        using qemu-img.

        If volume status is 'in-use', calculate what qcow2 files need to
        merge, and call to Nova to perform this operation.

        :param snapshot: snapshot object to delete
        :raises: InvalidVolume if status not acceptable
        :raises: RemoteFSException(msg) if operation fails
        :returns: None

        """
        LOG.debug('Deleting %(type)s snapshot %(snap)s of volume %(vol)s',
                  {'snap': snapshot.id, 'vol': snapshot.volume.id,
                   'type': ('online'
                            if self._is_volume_attached(snapshot.volume)
                            else 'offline')})

        volume_status = snapshot.volume.status
        acceptable_states = ['available', 'in-use', 'backing-up', 'deleting',
                             'downloading']
        self._validate_state(volume_status, acceptable_states)

        vol_path = self._local_volume_dir(snapshot.volume)

        # Determine the true snapshot file for this snapshot
        # based on the .info file
        info_path = self._local_path_volume_info(snapshot.volume)
        snap_info = self._read_info_file(info_path, empty_if_missing=True)

        if snapshot.id not in snap_info:
            # If snapshot info file is present, but snapshot record does not
            # exist, do not attempt to delete.
            # (This happens, for example, if snapshot_create failed due to lack
            # of permission to write to the share.)
            LOG.info('Snapshot record for %s is not present, allowing '
                     'snapshot_delete to proceed.', snapshot.id)
            return

        snapshot_file = snap_info[snapshot.id]
        LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
        snapshot_path = os.path.join(
            self._local_volume_dir(snapshot.volume),
            snapshot_file)

        snapshot_path_img_info = self._qemu_img_info(
            snapshot_path,
            snapshot.volume.name)

        base_file = snapshot_path_img_info.backing_file
        if base_file is None:
            # There should always be at least the original volume
            # file as base.
            LOG.warning('No backing file found for %s, allowing '
                        'snapshot to be deleted.', snapshot_path)

            # Snapshot may be stale, so just delete it and update the
            # info file instead of blocking
            return self._delete_stale_snapshot(snapshot)

        base_path = os.path.join(vol_path, base_file)
        base_file_img_info = self._qemu_img_info(base_path,
                                                 snapshot.volume.name)

        # Find what file has this as its backing file
        active_file = self.get_active_image_from_info(snapshot.volume)

        if self._is_volume_attached(snapshot.volume):
            # Online delete
            context = snapshot._context

            new_base_file = base_file_img_info.backing_file

            # Find the snapshot id that owns the base file; None means
            # the base is the original volume file (oldest snapshot).
            base_id = None
            for key, value in snap_info.items():
                if utils.paths_normcase_equal(value,
                                              base_file) and key != 'active':
                    base_id = key
                    break
            if base_id is None:
                # This means we are deleting the oldest snapshot
                LOG.debug('No %(base_id)s found for %(file)s',
                          {'base_id': 'base_id', 'file': snapshot_file})

            online_delete_info = {
                'active_file': active_file,
                'snapshot_file': snapshot_file,
                'base_file': base_file,
                'base_id': base_id,
                'new_base_file': new_base_file
            }

            # Nova handles the actual merge for attached volumes.
            return self._delete_snapshot_online(context,
                                                snapshot,
                                                online_delete_info)

        if utils.paths_normcase_equal(snapshot_file, active_file):
            # There is no top file
            #      T0       |        T1         |
            #     base      |   snapshot_file   | None
            # (guaranteed to|  (being deleted,  |
            #    exist)     |  committed down)  |

            self._img_commit(snapshot_path)
            # Active file has changed
            snap_info['active'] = base_file
        else:
            #      T0        |      T1         |     T2         |      T3
            #     base       |  snapshot_file  |  higher_file   | highest_file
            # (guaranteed to | (being deleted, | (guaranteed to |  (may exist)
            #   exist, not   | committed down) |  exist, needs  |
            #   used here)   |                 |   ptr update)  |

            # This file is guaranteed to exist since we aren't operating on
            # the active file.
            higher_file = self._get_higher_image_path(snapshot)
            if higher_file is None:
                msg = _('No file found with %s as backing file.') %\
                    snapshot_file
                raise exception.RemoteFSException(msg)

            higher_id = next((i for i in snap_info
                              if utils.paths_normcase_equal(snap_info[i],
                                                            higher_file)
                              and i != 'active'),
                             None)
            if higher_id is None:
                msg = _('No snap found with %s as backing file.') %\
                    higher_file
                raise exception.RemoteFSException(msg)

            self._img_commit(snapshot_path)

            # Repoint the overlaying image at the deleted file's base.
            higher_file_path = os.path.join(vol_path, higher_file)
            base_file_fmt = base_file_img_info.file_format
            self._rebase_img(higher_file_path, base_file, base_file_fmt)

        # Remove snapshot_file from info
        del(snap_info[snapshot.id])
        self._write_info_file(info_path, snap_info)
1209
1210    def _create_volume_from_snapshot(self, volume, snapshot):
1211        """Creates a volume from a snapshot.
1212
1213        Snapshot must not be the active snapshot. (offline)
1214        """
1215
1216        LOG.debug('Creating volume %(vol)s from snapshot %(snap)s',
1217                  {'vol': volume.id, 'snap': snapshot.id})
1218
1219        if snapshot.status != 'available':
1220            msg = _('Snapshot status must be "available" to clone. '
1221                    'But is: %(status)s') % {'status': snapshot.status}
1222
1223            raise exception.InvalidSnapshot(msg)
1224
1225        self._ensure_shares_mounted()
1226
1227        volume.provider_location = self._find_share(volume)
1228
1229        self._do_create_volume(volume)
1230
1231        self._copy_volume_from_snapshot(snapshot,
1232                                        volume,
1233                                        volume.size)
1234
1235        return {'provider_location': volume.provider_location}
1236
1237    def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
1238        raise NotImplementedError()
1239
    def _do_create_snapshot(self, snapshot, backing_filename,
                            new_snap_path):
        """Create a QCOW2 file backed by another file.

        :param snapshot: snapshot reference
        :param backing_filename: filename of file that will back the
            new qcow2 file
        :param new_snap_path: filename of new qcow2 file
        """
        backing_path_full_path = os.path.join(
            self._local_volume_dir(snapshot.volume),
            backing_filename)

        info = self._qemu_img_info(backing_path_full_path,
                                   snapshot.volume.name)
        backing_fmt = info.file_format

        # Create the overlay pointing at the full backing path so
        # qemu-img can locate the backing image during creation.
        command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
                   'backing_file=%s,backing_fmt=%s' %
                   (backing_path_full_path, backing_fmt),
                   new_snap_path,
                   "%dG" % snapshot.volume.size]
        self._execute(*command, run_as_root=self._execute_as_root)

        # Metadata-only rebase ('-u'): rewrite the backing reference to
        # the bare filename so the chain stays relative to the mount.
        command = ['qemu-img', 'rebase', '-u',
                   '-b', backing_filename,
                   '-F', backing_fmt,
                   new_snap_path]

        # qemu-img rebase must run as root for the same reasons as above
        self._execute(*command, run_as_root=self._execute_as_root)

        self._set_rw_permissions(new_snap_path)

        # if in secure mode, chown new file
        if self.secure_file_operations_enabled():
            ref_file = backing_path_full_path
            log_msg = 'Setting permissions: %(file)s -> %(user)s:%(group)s' % {
                'file': ref_file, 'user': os.stat(ref_file).st_uid,
                'group': os.stat(ref_file).st_gid}
            LOG.debug(log_msg)
            command = ['chown',
                       '--reference=%s' % ref_file,
                       new_snap_path]
            self._execute(*command, run_as_root=self._execute_as_root)
1285
1286    def _create_snapshot(self, snapshot):
1287        """Create a snapshot.
1288
1289        If volume is attached, call to Nova to create snapshot, providing a
1290        qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
1291        responsible for transitioning the VM between them and handling live
1292        transfers of data between files as required.
1293
1294        If volume is detached, create locally with qemu-img. Cinder handles
1295        manipulation of qcow2 files.
1296
1297        A file named volume-<uuid>.info is stored with the volume
1298        data and is a JSON table which contains a mapping between
1299        Cinder snapshot UUIDs and filenames, as these associations
1300        will change as snapshots are deleted.
1301
1302
1303        Basic snapshot operation:
1304
1305        1. Initial volume file:
1306            volume-1234
1307
1308        2. Snapshot created:
1309            volume-1234  <- volume-1234.aaaa
1310
1311            volume-1234.aaaa becomes the new "active" disk image.
1312            If the volume is not attached, this filename will be used to
1313            attach the volume to a VM at volume-attach time.
1314            If the volume is attached, the VM will switch to this file as
1315            part of the snapshot process.
1316
1317            Note that volume-1234.aaaa represents changes after snapshot
1318            'aaaa' was created.  So the data for snapshot 'aaaa' is actually
1319            in the backing file(s) of volume-1234.aaaa.
1320
1321            This file has a qcow2 header recording the fact that volume-1234 is
1322            its backing file.  Delta changes since the snapshot was created are
1323            stored in this file, and the backing file (volume-1234) does not
1324            change.
1325
1326            info file: { 'active': 'volume-1234.aaaa',
1327                         'aaaa':   'volume-1234.aaaa' }
1328
1329        3. Second snapshot created:
1330            volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
1331
1332            volume-1234.bbbb now becomes the "active" disk image, recording
1333            changes made to the volume.
1334
1335            info file: { 'active': 'volume-1234.bbbb',  (* changed!)
1336                         'aaaa':   'volume-1234.aaaa',
1337                         'bbbb':   'volume-1234.bbbb' } (* added!)
1338
1339        4. Snapshot deletion when volume is attached ('in-use' state):
1340
1341            * When first snapshot is deleted, Cinder calls Nova for online
1342              snapshot deletion. Nova deletes snapshot with id "aaaa" and
1343              makes snapshot with id "bbbb" point to the base image.
1344              Snapshot with id "bbbb" is the active image.
1345
1346              volume-1234 <- volume-1234.bbbb
1347
1348              info file: { 'active': 'volume-1234.bbbb',
1349                           'bbbb':   'volume-1234.bbbb'
1350                         }
1351
1352             * When second snapshot is deleted, Cinder calls Nova for online
1353               snapshot deletion. Nova deletes snapshot with id "bbbb" by
1354               pulling volume-1234's data into volume-1234.bbbb. This
1355               (logically) removes snapshot with id "bbbb" and the active
1356               file remains the same.
1357
1358               volume-1234.bbbb
1359
1360               info file: { 'active': 'volume-1234.bbbb' }
1361
1362           TODO (deepakcs): Change this once Nova supports blockCommit for
1363                            in-use volumes.
1364
1365        5. Snapshot deletion when volume is detached ('available' state):
1366
1367            * When first snapshot is deleted, Cinder does the snapshot
1368              deletion. volume-1234.aaaa is removed from the snapshot chain.
1369              The data from it is merged into its parent.
1370
1371              volume-1234.bbbb is rebased, having volume-1234 as its new
1372              parent.
1373
1374              volume-1234 <- volume-1234.bbbb
1375
1376              info file: { 'active': 'volume-1234.bbbb',
1377                           'bbbb':   'volume-1234.bbbb'
1378                         }
1379
1380            * When second snapshot is deleted, Cinder does the snapshot
1381              deletion. volume-1234.aaaa is removed from the snapshot chain.
1382              The base image, volume-1234 becomes the active image for this
1383              volume again.
1384
1385              volume-1234
1386
1387              info file: { 'active': 'volume-1234' }  (* changed!)
1388        """
1389
1390        LOG.debug('Creating %(type)s snapshot %(snap)s of volume %(vol)s',
1391                  {'snap': snapshot.id, 'vol': snapshot.volume.id,
1392                   'type': ('online'
1393                            if self._is_volume_attached(snapshot.volume)
1394                            else 'offline')})
1395
1396        status = snapshot.volume.status
1397
1398        acceptable_states = ['available', 'in-use', 'backing-up']
1399        if snapshot.id.startswith('tmp-snap-'):
1400            # This is an internal volume snapshot. In order to support
1401            # image caching, we'll allow creating/deleting such snapshots
1402            # while having volumes in 'downloading' state.
1403            acceptable_states.append('downloading')
1404
1405        self._validate_state(status, acceptable_states)
1406
1407        info_path = self._local_path_volume_info(snapshot.volume)
1408        snap_info = self._read_info_file(info_path, empty_if_missing=True)
1409        backing_filename = self.get_active_image_from_info(
1410            snapshot.volume)
1411        new_snap_path = self._get_new_snap_path(snapshot)
1412
1413        if self._is_volume_attached(snapshot.volume):
1414            self._create_snapshot_online(snapshot,
1415                                         backing_filename,
1416                                         new_snap_path)
1417        else:
1418            self._do_create_snapshot(snapshot,
1419                                     backing_filename,
1420                                     new_snap_path)
1421
1422        snap_info['active'] = os.path.basename(new_snap_path)
1423        snap_info[snapshot.id] = os.path.basename(new_snap_path)
1424        self._write_info_file(info_path, snap_info)
1425
    def _create_snapshot_online(self, snapshot, backing_filename,
                                new_snap_path):
        """Create a snapshot of an attached volume with Nova's help.

        The new qcow2 overlay is created locally, then Nova is asked to
        switch the instance to it. The snapshot's DB record is polled
        until Nova reports completion (progress == '90%'), an error, or
        a timeout.

        :param snapshot: snapshot reference
        :param backing_filename: current active image that will back
            the new overlay
        :param new_snap_path: path of the new qcow2 overlay
        :raises RemoteFSException: on Nova error or timeout
        :raises RemoteFSConcurrentRequest: if the snapshot is asked to
            be deleted while waiting
        """
        # Perform online snapshot via Nova
        self._do_create_snapshot(snapshot,
                                 backing_filename,
                                 new_snap_path)

        connection_info = {
            'type': 'qcow2',
            'new_file': os.path.basename(new_snap_path),
            'snapshot_id': snapshot.id
        }

        try:
            result = self._nova.create_volume_snapshot(
                snapshot.obj_context,
                snapshot.volume_id,
                connection_info)
            LOG.debug('nova call result: %s', result)
        except Exception:
            LOG.exception('Call to Nova to create snapshot failed')
            raise

        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 600
        while True:
            s = db.snapshot_get(snapshot.obj_context, snapshot.id)

            LOG.debug('Status of snapshot %(id)s is now %(status)s',
                      {'id': snapshot['id'],
                       'status': s['status']})

            if s['status'] == fields.SnapshotStatus.CREATING:
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break

                time.sleep(increment)
                seconds_elapsed += increment
            elif s['status'] == fields.SnapshotStatus.ERROR:

                msg = _('Nova returned "error" status '
                        'while creating snapshot.')
                raise exception.RemoteFSException(msg)

            elif (s['status'] == fields.SnapshotStatus.DELETING or
                  s['status'] == fields.SnapshotStatus.ERROR_DELETING):
                msg = _('Snapshot %(id)s has been asked to be deleted while '
                        'waiting for it to become available. Perhaps a '
                        'concurrent request was made.') % {'id':
                                                           snapshot.id}
                raise exception.RemoteFSConcurrentRequest(msg)

            # Back off polling as the wait gets longer.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10

            if seconds_elapsed > timeout:
                msg = _('Timed out while waiting for Nova update '
                        'for creation of snapshot %s.') % snapshot.id
                raise exception.RemoteFSException(msg)
1494
1495    def _delete_snapshot_online(self, context, snapshot, info):
1496        # Update info over the course of this method
1497        # active file never changes
1498        info_path = self._local_path_volume_info(snapshot.volume)
1499        snap_info = self._read_info_file(info_path)
1500
1501        if utils.paths_normcase_equal(info['active_file'],
1502                                      info['snapshot_file']):
1503            # blockRebase/Pull base into active
1504            # info['base'] => snapshot_file
1505
1506            file_to_delete = info['base_file']
1507            if info['base_id'] is None:
1508                # Passing base=none to blockRebase ensures that
1509                # libvirt blanks out the qcow2 backing file pointer
1510                new_base = None
1511            else:
1512                new_base = info['new_base_file']
1513                snap_info[info['base_id']] = info['snapshot_file']
1514
1515            delete_info = {'file_to_merge': new_base,
1516                           'merge_target_file': None,  # current
1517                           'type': 'qcow2',
1518                           'volume_id': snapshot.volume.id}
1519
1520            del(snap_info[snapshot.id])
1521        else:
1522            # blockCommit snapshot into base
1523            # info['base'] <= snapshot_file
1524            # delete record of snapshot
1525            file_to_delete = info['snapshot_file']
1526
1527            delete_info = {'file_to_merge': info['snapshot_file'],
1528                           'merge_target_file': info['base_file'],
1529                           'type': 'qcow2',
1530                           'volume_id': snapshot.volume.id}
1531
1532            del(snap_info[snapshot.id])
1533
1534        self._nova_assisted_vol_snap_delete(context, snapshot, delete_info)
1535
1536        # Write info file updated above
1537        self._write_info_file(info_path, snap_info)
1538
1539        # Delete stale file
1540        path_to_delete = os.path.join(
1541            self._local_volume_dir(snapshot.volume), file_to_delete)
1542        self._delete(path_to_delete)
1543
1544    def _nova_assisted_vol_snap_delete(self, context, snapshot, delete_info):
1545        try:
1546            self._nova.delete_volume_snapshot(
1547                context,
1548                snapshot.id,
1549                delete_info)
1550        except Exception:
1551            LOG.exception('Call to Nova delete snapshot failed')
1552            raise
1553
1554        # Loop and wait for result
1555        # Nova will call Cinderclient to update the status in the database
1556        # An update of progress = '90%' means that Nova is done
1557        seconds_elapsed = 0
1558        increment = 1
1559        timeout = 7200
1560        while True:
1561            s = db.snapshot_get(context, snapshot.id)
1562
1563            if s['status'] == fields.SnapshotStatus.DELETING:
1564                if s['progress'] == '90%':
1565                    # Nova tasks completed successfully
1566                    break
1567                else:
1568                    LOG.debug('status of snapshot %s is still "deleting"... '
1569                              'waiting', snapshot.id)
1570                    time.sleep(increment)
1571                    seconds_elapsed += increment
1572            else:
1573                msg = _('Unable to delete snapshot %(id)s, '
1574                        'status: %(status)s.') % {'id': snapshot.id,
1575                                                  'status': s['status']}
1576                raise exception.RemoteFSException(msg)
1577
1578            if 10 < seconds_elapsed <= 20:
1579                increment = 2
1580            elif 20 < seconds_elapsed <= 60:
1581                increment = 5
1582            elif 60 < seconds_elapsed:
1583                increment = 10
1584
1585            if seconds_elapsed > timeout:
1586                msg = _('Timed out while waiting for Nova update '
1587                        'for deletion of snapshot %(id)s.') %\
1588                    {'id': snapshot.id}
1589                raise exception.RemoteFSException(msg)
1590
1591    def _extend_volume(self, volume, size_gb):
1592        raise NotImplementedError()
1593
1594    def _revert_to_snapshot(self, context, volume, snapshot):
1595        raise NotImplementedError()
1596
1597
class RemoteFSSnapDriver(RemoteFSSnapDriverBase):
    """Snapshot-capable RemoteFS driver using per-volume-id locking.

    Every public operation below delegates to the base-class implementation
    under the ``locked_volume_id_operation`` decorator, which (as its name
    indicates) serializes operations on the same volume id — presumably
    within this process only; see the decorator's definition to confirm
    lock scope.
    """

    @locked_volume_id_operation
    def create_snapshot(self, snapshot):
        """Apply locking to the create snapshot operation."""

        return self._create_snapshot(snapshot)

    @locked_volume_id_operation
    def delete_snapshot(self, snapshot):
        """Apply locking to the delete snapshot operation."""

        return self._delete_snapshot(snapshot)

    @locked_volume_id_operation
    def create_volume_from_snapshot(self, volume, snapshot):
        """Apply locking to creating a volume from a snapshot."""
        return self._create_volume_from_snapshot(volume, snapshot)

    @locked_volume_id_operation
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""

        return self._create_cloned_volume(volume, src_vref,
                                          src_vref.obj_context)

    @locked_volume_id_operation
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""

        return self._copy_volume_to_image(context, volume, image_service,
                                          image_meta)

    @locked_volume_id_operation
    def extend_volume(self, volume, size_gb):
        """Apply locking to the extend volume operation."""
        return self._extend_volume(volume, size_gb)

    @locked_volume_id_operation
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert to specified snapshot."""

        return self._revert_to_snapshot(context, volume, snapshot)
1638
1639
class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase):
    """Snapshot-capable RemoteFS driver using distributed locking.

    Same delegation pattern as ``RemoteFSSnapDriver``, but each operation
    is guarded by ``cinder.coordination.synchronized`` with a lock named
    '{driver_prefix}-{volume id}', so concurrent operations on the same
    volume are serialized across services sharing the coordination backend.
    """

    @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
    def create_snapshot(self, snapshot):
        """Apply locking to the create snapshot operation."""

        return self._create_snapshot(snapshot)

    @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
    def delete_snapshot(self, snapshot):
        """Apply locking to the delete snapshot operation."""

        return self._delete_snapshot(snapshot)

    @coordination.synchronized('{self.driver_prefix}-{volume.id}')
    def create_volume_from_snapshot(self, volume, snapshot):
        """Apply locking to creating a volume from a snapshot."""
        return self._create_volume_from_snapshot(volume, snapshot)

    @coordination.synchronized('{self.driver_prefix}-{volume.id}')
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""

        return self._create_cloned_volume(volume, src_vref,
                                          src_vref.obj_context)

    @coordination.synchronized('{self.driver_prefix}-{volume.id}')
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""

        return self._copy_volume_to_image(context, volume, image_service,
                                          image_meta)

    @coordination.synchronized('{self.driver_prefix}-{volume.id}')
    def extend_volume(self, volume, size_gb):
        """Apply locking to the extend volume operation."""
        return self._extend_volume(volume, size_gb)

    @coordination.synchronized('{self.driver_prefix}-{volume.id}')
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert to specified snapshot."""

        return self._revert_to_snapshot(context, volume, snapshot)
1680
1681
class RemoteFSPoolMixin(object):
    """Drivers inheriting this will report each share as a pool."""

    def _find_share(self, volume):
        # Pool selection was already done by the scheduler; map the chosen
        # pool name back to its backing share.
        return self._get_share_from_pool_name(
            self._get_pool_name_from_volume(volume))

    def _get_pool_name_from_volume(self, volume):
        """Extract the pool component from the volume's host string."""
        return volume_utils.extract_host(volume['host'], level='pool')

    def _get_pool_name_from_share(self, share):
        # To be implemented by drivers using pools.
        raise NotImplementedError()

    def _get_share_from_pool_name(self, pool_name):
        # To be implemented by drivers using pools.
        raise NotImplementedError()

    def _update_volume_stats(self):
        """Refresh self._stats, reporting each mounted share as a pool."""
        backend = self.configuration.safe_get('volume_backend_name')
        data = {
            'volume_backend_name': backend or self.volume_backend_name,
            'vendor_name': self.vendor_name,
            'driver_version': self.get_version(),
            'storage_protocol': self.driver_volume_type,
        }

        self._ensure_shares_mounted()

        pools = []
        gib = float(units.Gi)
        for share in self._mounted_shares:
            total, free, used = self._get_capacity_info(share)

            pools.append({
                'pool_name': self._get_pool_name_from_share(share),
                'total_capacity_gb': total / gib,
                'free_capacity_gb': free / gib,
                'provisioned_capacity_gb': used / gib,
                'allocated_capacity_gb': (total - free) / gib,
                'reserved_percentage':
                    self.configuration.reserved_percentage,
                'max_over_subscription_ratio':
                    self.configuration.max_over_subscription_ratio,
                'thin_provisioning_support':
                    self._thin_provisioning_support,
                'thick_provisioning_support':
                    self._thick_provisioning_support,
                'QoS_support': False,
            })

        # Backend-level totals are zeroed: capacity is reported per pool.
        data['total_capacity_gb'] = 0
        data['free_capacity_gb'] = 0
        data['pools'] = pools

        self._stats = data
1743
1744
class RevertToSnapshotMixin(object):

    def _revert_to_snapshot(self, context, volume, snapshot):
        """Revert a volume to its latest snapshot.

        The volume must be in an acceptable (non-attached) state, and only
        the most recent snapshot — the one backing the active image — may
        be used.
        """
        self._validate_state(snapshot.volume.status,
                             ['available', 'reverting'])

        LOG.debug('Reverting volume %(vol)s to snapshot %(snap)s',
                  {'vol': snapshot.volume.id, 'snap': snapshot.id})

        snap_info = self._read_info_file(
            self._local_path_volume_info(snapshot.volume))
        snapshot_file = snap_info[snapshot.id]
        active_file = snap_info['active']

        # Only the latest snapshot (the active image) can be reverted to.
        if not utils.paths_normcase_equal(snapshot_file, active_file):
            msg = _("Could not revert volume '%(volume_id)s' to snapshot "
                    "'%(snapshot_id)s' as it does not "
                    "appear to be the latest snapshot. Current active "
                    "image: %(active_file)s.")
            raise exception.InvalidSnapshot(
                msg % {'snapshot_id': snapshot.id,
                       'active_file': active_file,
                       'volume_id': volume.id})

        vol_dir = self._local_volume_dir(snapshot.volume)
        snapshot_path = os.path.join(vol_dir, snapshot_file)
        backing_filename = self._qemu_img_info(
            snapshot_path, volume.name).backing_file

        # Recreating the top image of the chain effectively reverts the
        # volume to the latest snapshot. This workflow should work with
        # most (if not all) drivers inheriting this class.
        self._delete(snapshot_path)
        self._do_create_snapshot(snapshot, backing_filename, snapshot_path)
1788
1789
class RemoteFSManageableVolumesMixin(object):
    """Adds manage/unmanage and listing of manageable volumes.

    Implements the ``manage_existing*`` / ``get_manageable_volumes``
    driver interface by locating image files on the currently mounted
    shares and importing them in place (no data copy).
    """

    # Image formats accepted when importing an existing file as a volume.
    _SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2']
    # Optional compiled regex; when set, only matching file names are
    # considered when scanning shares for manageable volumes.
    _MANAGEABLE_IMAGE_RE = None

    def _get_manageable_vol_location(self, existing_ref):
        """Resolve an existing-volume reference against the mounted shares.

        :param existing_ref: dict that must contain 'source-name', the
            remote path (share path + file path) of the image.
        :returns: dict with 'share', 'mountpoint', 'vol_local_path' and
            'vol_remote_path' keys.
        :raises ManageExistingInvalidReference: if 'source-name' is
            missing or the file is not found on any mounted share.
        """
        if 'source-name' not in existing_ref:
            reason = _('The existing volume reference '
                       'must contain "source-name".')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=reason)

        # Case/normpath-normalized so the prefix match below is robust to
        # trailing slashes and case differences on case-insensitive hosts.
        vol_remote_path = os.path.normcase(
            os.path.normpath(existing_ref['source-name']))

        for mounted_share in self._mounted_shares:
            # We don't currently attempt to resolve hostnames. This could
            # be troublesome for some distributed shares, which may have
            # hostnames resolving to multiple addresses.
            norm_share = os.path.normcase(os.path.normpath(mounted_share))
            head, match, share_rel_path = vol_remote_path.partition(norm_share)
            if not (match and share_rel_path.startswith(os.path.sep)):
                continue

            mountpoint = self._get_mount_point_for_share(mounted_share)
            vol_local_path = os.path.join(mountpoint,
                                          share_rel_path.lstrip(os.path.sep))

            LOG.debug("Found mounted share referenced by %s.",
                      vol_remote_path)

            if os.path.isfile(vol_local_path):
                LOG.debug("Found volume %(path)s on share %(share)s.",
                          dict(path=vol_local_path, share=mounted_share))
                return dict(share=mounted_share,
                            mountpoint=mountpoint,
                            vol_local_path=vol_local_path,
                            vol_remote_path=vol_remote_path)
            else:
                # The share matched but the file is absent; no other share
                # can match, so stop looking.
                LOG.error("Could not find volume %s on the "
                          "specified share.", vol_remote_path)
                break

        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=_('Volume not found.'))

    def _get_managed_vol_expected_path(self, volume, volume_location):
        """Return the local path the image must have once managed.

        This may be overridden by the drivers.
        """
        return os.path.join(volume_location['mountpoint'],
                            volume.name)

    def _is_volume_manageable(self, volume_path, already_managed=False):
        """Check whether the image at ``volume_path`` can be managed.

        :returns: tuple ``(manageable, reason)`` where ``reason`` is a
            human-readable explanation when ``manageable`` is False,
            otherwise None.
        """
        unmanageable_reason = None

        if already_managed:
            return False, _('Volume already managed.')

        try:
            img_info = self._qemu_img_info(volume_path, volume_name=None)
        except exception.RemoteFSInvalidBackingFile:
            return False, _("Backing file present.")
        except Exception:
            return False, _("Failed to open image.")

        # We're double checking as some drivers do not validate backing
        # files through '_qemu_img_info'.
        if img_info.backing_file:
            return False, _("Backing file present.")

        if img_info.file_format not in self._SUPPORTED_IMAGE_FORMATS:
            unmanageable_reason = _(
                "Unsupported image format: '%s'.") % img_info.file_format
            return False, unmanageable_reason

        return True, None

    def manage_existing(self, volume, existing_ref):
        """Manage an existing image file as a Cinder volume.

        The file is validated, given rw permissions and, if needed,
        renamed in place to the expected volume file name.

        :returns: model update dict carrying the share as
            'provider_location'.
        :raises ManageExistingInvalidReference: if the referenced file
            cannot be found or is not manageable.
        """
        LOG.info('Managing volume %(volume_id)s with ref %(ref)s',
                 {'volume_id': volume.id, 'ref': existing_ref})

        vol_location = self._get_manageable_vol_location(existing_ref)
        vol_local_path = vol_location['vol_local_path']

        manageable, unmanageable_reason = self._is_volume_manageable(
            vol_local_path)

        if not manageable:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=unmanageable_reason)

        expected_vol_path = self._get_managed_vol_expected_path(
            volume, vol_location)

        self._set_rw_permissions(vol_local_path)

        # This should be the last thing we do.
        if expected_vol_path != vol_local_path:
            LOG.info("Renaming imported volume image %(src)s to %(dest)s",
                     dict(src=vol_location['vol_local_path'],
                          dest=expected_vol_path))
            os.rename(vol_location['vol_local_path'],
                      expected_vol_path)

        return {'provider_location': vol_location['share']}

    def _get_rounded_manageable_image_size(self, image_path):
        """Return the image's virtual size in GiB, rounded up."""
        image_size = image_utils.qemu_img_info(
            image_path, run_as_root=self._execute_as_root).virtual_size
        return int(math.ceil(float(image_size) / units.Gi))

    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size (GiB) of the volume referenced for managing."""
        vol_location = self._get_manageable_vol_location(existing_ref)
        volume_path = vol_location['vol_local_path']
        return self._get_rounded_manageable_image_size(volume_path)

    def unmanage(self, volume):
        """Remove the volume from Cinder management; the file is kept."""
        pass

    def _get_manageable_volume(self, share, volume_path, managed_volume=None):
        """Build the manageable-volume dict for one image file.

        :param managed_volume: the Cinder volume object if this image is
            already managed, else None.
        """
        manageable, unmanageable_reason = self._is_volume_manageable(
            volume_path, already_managed=managed_volume is not None)
        size_gb = None
        if managed_volume:
            # We may not be able to query in-use images.
            size_gb = managed_volume.size
        else:
            try:
                size_gb = self._get_rounded_manageable_image_size(volume_path)
            except Exception:
                manageable = False
                unmanageable_reason = (unmanageable_reason or
                                       _("Failed to get size."))

        mountpoint = self._get_mount_point_for_share(share)
        norm_mountpoint = os.path.normcase(os.path.normpath(mountpoint))
        norm_vol_path = os.path.normcase(os.path.normpath(volume_path))

        # Map the local path back to a remote-style reference and use
        # forward slashes. NOTE(review): str.replace swaps every occurrence
        # of the mountpoint string, not just a leading prefix.
        ref = norm_vol_path.replace(norm_mountpoint, share).replace('\\', '/')
        manageable_volume = {
            'reference': {'source-name': ref},
            'size': size_gb,
            'safe_to_manage': manageable,
            'reason_not_safe': unmanageable_reason,
            'cinder_id': managed_volume.id if managed_volume else None,
            'extra_info': None,
        }
        return manageable_volume

    def _get_share_manageable_volumes(self, share, managed_volumes):
        """Walk a share's mountpoint collecting manageable-volume dicts.

        Files failing inspection are logged and skipped rather than
        aborting the whole scan.
        """
        manageable_volumes = []
        mount_path = self._get_mount_point_for_share(share)

        for dir_path, dir_names, file_names in os.walk(mount_path):
            for file_name in file_names:
                file_name = os.path.normcase(file_name)
                img_path = os.path.join(dir_path, file_name)
                # In the future, we may have the regex filtering images
                # as a config option.
                if (not self._MANAGEABLE_IMAGE_RE or
                        self._MANAGEABLE_IMAGE_RE.match(file_name)):
                    # An image whose stem matches an existing volume name
                    # is treated as already managed.
                    managed_volume = managed_volumes.get(
                        os.path.splitext(file_name)[0])
                    try:
                        manageable_volume = self._get_manageable_volume(
                            share, img_path, managed_volume)
                        manageable_volumes.append(manageable_volume)
                    except Exception as exc:
                        LOG.error(
                            "Failed to get manageable volume info: "
                            "'%(image_path)s'. Exception: %(exc)s.",
                            dict(image_path=img_path, exc=exc))
        return manageable_volumes

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """List volumes on the backend available for management by Cinder.

        Scans every mounted share; per-share failures are logged and
        skipped. Results are paginated per the driver API contract.
        """
        manageable_volumes = []
        managed_volumes = {vol.name: vol for vol in cinder_volumes}

        for share in self._mounted_shares:
            try:
                manageable_volumes += self._get_share_manageable_volumes(
                    share, managed_volumes)
            except Exception as exc:
                LOG.error("Failed to get manageable volumes for "
                          "share %(share)s. Exception: %(exc)s.",
                          dict(share=share, exc=exc))

        return volume_utils.paginate_entries_list(
            manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)
1978