1# Copyright (C) 2015 Pure Storage, Inc.
2#
3#    Licensed under the Apache License, Version 2.0 (the "License"); you may
4#    not use this file except in compliance with the License. You may obtain
5#    a copy of the License at
6#
7#         http://www.apache.org/licenses/LICENSE-2.0
8#
9#    Unless required by applicable law or agreed to in writing, software
10#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12#    License for the specific language governing permissions and limitations
13#    under the License.
14
15from pytz import timezone
16import six
17
18from oslo_config import cfg
19from oslo_log import log as logging
20from oslo_utils import timeutils
21
22from cinder import objects
23from cinder import rpc
24from cinder import utils
25
26CONF = cfg.CONF
27
28LOG = logging.getLogger(__name__)
29
30
class ImageVolumeCache(object):
    """Manages the image-volume cache for a volume backend.

    Cache entries themselves are persisted through the database API; this
    class wraps those calls, enforces the configured limits (maximum total
    size in GB and maximum entry count, where 0 means unlimited) and emits
    'image_volume_cache.*' notifications for hits, misses and evictions.
    """

    def __init__(self, db, volume_api, max_cache_size_gb=0,
                 max_cache_size_count=0):
        """:param db: database API to persist cache entries with
        :param volume_api: volume API used to delete cached volumes
        :param max_cache_size_gb: maximum total cache size in GB; 0 means
                                  unlimited
        :param max_cache_size_count: maximum number of cache entries; 0
                                     means unlimited
        """
        self.db = db
        self.volume_api = volume_api
        self.max_cache_size_gb = int(max_cache_size_gb)
        self.max_cache_size_count = int(max_cache_size_count)
        self.notifier = rpc.get_notifier('volume', CONF.host)

    def get_by_image_volume(self, context, volume_id):
        """Return the cache entry backed by the given volume id, if any."""
        return self.db.image_volume_cache_get_by_volume_id(context, volume_id)

    def evict(self, context, cache_entry):
        """Remove a cache entry and notify listeners of the eviction."""
        LOG.debug('Evicting image cache entry: %(entry)s.',
                  {'entry': self._entry_to_str(cache_entry)})
        self.db.image_volume_cache_delete(context, cache_entry['volume_id'])
        self._notify_cache_eviction(context, cache_entry['image_id'],
                                    cache_entry['host'])

    @staticmethod
    def _get_query_filters(volume_ref):
        """Build DB query filters scoped to the volume's cluster or host."""
        if volume_ref.is_clustered:
            return {'cluster_name': volume_ref.cluster_name}
        return {'host': volume_ref.host}

    def get_entry(self, context, volume_ref, image_id, image_meta):
        """Look up a cache entry for an image on the volume's service.

        A stale entry (image updated since it was cached) is deleted and
        treated as a miss. Emits a hit or miss notification accordingly.

        :returns: the cache entry dict, or None on a miss
        """
        cache_entry = self.db.image_volume_cache_get_and_update_last_used(
            context,
            image_id,
            **self._get_query_filters(volume_ref)
        )

        if cache_entry:
            LOG.debug('Found image-volume cache entry: %(entry)s.',
                      {'entry': self._entry_to_str(cache_entry)})

            if self._should_update_entry(cache_entry, image_meta):
                LOG.debug('Image-volume cache entry is out-dated, evicting: '
                          '%(entry)s.',
                          {'entry': self._entry_to_str(cache_entry)})
                # Deleting the backing volume evicts the cache entry too.
                self._delete_image_volume(context, cache_entry)
                cache_entry = None

        if cache_entry:
            self._notify_cache_hit(context, cache_entry['image_id'],
                                   cache_entry['host'])
        else:
            self._notify_cache_miss(context, image_id,
                                    volume_ref['host'])
        return cache_entry

    def create_cache_entry(self, context, volume_ref, image_id, image_meta):
        """Create a new cache entry for an image.

        This assumes that the volume described by volume_ref has already been
        created and is in an available state.
        """
        LOG.debug('Creating new image-volume cache entry for image '
                  '%(image_id)s on %(service)s',
                  {'image_id': image_id,
                   'service': volume_ref.service_topic_queue})

        # When we are creating an image from a volume the updated_at field
        # will be a unicode representation of the datetime. In that case
        # we just need to parse it into one. If it is an actual datetime
        # we want to just grab it as a UTC naive datetime.
        image_updated_at = image_meta['updated_at']
        if isinstance(image_updated_at, six.string_types):
            image_updated_at = timeutils.parse_strtime(image_updated_at)
        else:
            image_updated_at = image_updated_at.astimezone(timezone('UTC'))

        cache_entry = self.db.image_volume_cache_create(
            context,
            volume_ref.host,
            volume_ref.cluster_name,
            image_id,
            # Stored naive; compared as UTC in _should_update_entry.
            image_updated_at.replace(tzinfo=None),
            volume_ref.id,
            volume_ref.size
        )

        LOG.debug('New image-volume cache entry created: %(entry)s.',
                  {'entry': self._entry_to_str(cache_entry)})
        return cache_entry

    def ensure_space(self, context, volume):
        """Makes room for a volume cache entry.

        Evicts least-recently-used entries until the new volume fits
        within the configured GB and count limits.

        Returns True if successful, False otherwise.
        """

        # Check to see if the cache is actually limited.
        if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:
            return True

        # Make sure that we can potentially fit the image in the cache
        # and bail out before evicting everything else to try and make
        # room for it.
        if (self.max_cache_size_gb != 0 and
                volume.size > self.max_cache_size_gb):
            return False

        # Assume the entries are ordered by most recently used to least used.
        entries = self.db.image_volume_cache_get_all(
            context,
            **self._get_query_filters(volume))

        # Include the entry we intend to create in the running totals.
        current_count = len(entries) + 1
        current_size = sum(entry['size'] for entry in entries) + volume.size

        LOG.debug('Image-volume cache for %(service)s current_size (GB) = '
                  '%(size_gb)s (max = %(max_gb)s), current count = %(count)s '
                  '(max = %(max_count)s).',
                  {'service': volume.service_topic_queue,
                   'size_gb': current_size,
                   'max_gb': self.max_cache_size_gb,
                   'count': current_count,
                   'max_count': self.max_cache_size_count})

        while (((current_size > self.max_cache_size_gb and
                 self.max_cache_size_gb > 0)
                or (current_count > self.max_cache_size_count and
                    self.max_cache_size_count > 0))
               and len(entries)):
            # Least recently used entry is at the end of the list.
            entry = entries.pop()
            LOG.debug('Reclaiming image-volume cache space; removing cache '
                      'entry %(entry)s.', {'entry': self._entry_to_str(entry)})
            self._delete_image_volume(context, entry)
            current_size -= entry['size']
            current_count -= 1
            LOG.debug('Image-volume cache for %(service)s new size (GB) = '
                      '%(size_gb)s, new count = %(count)s.',
                      {'service': volume.service_topic_queue,
                       'size_gb': current_size,
                       'count': current_count})

        # It is only possible to not free up enough gb, we will always be able
        # to free enough count. This is because 0 means unlimited which means
        # it is guaranteed to be >0 if limited, and we can always delete down
        # to 0.
        if (self.max_cache_size_gb > 0 and
                current_size > self.max_cache_size_gb):
            LOG.warning('Image-volume cache for %(service)s does '
                        'not have enough space (GB).',
                        {'service': volume.service_topic_queue})
            return False

        return True

    @utils.if_notifications_enabled
    def _notify_cache_hit(self, context, image_id, host):
        self._notify_cache_action(context, image_id, host, 'hit')

    @utils.if_notifications_enabled
    def _notify_cache_miss(self, context, image_id, host):
        self._notify_cache_action(context, image_id, host, 'miss')

    @utils.if_notifications_enabled
    def _notify_cache_eviction(self, context, image_id, host):
        self._notify_cache_action(context, image_id, host, 'evict')

    @utils.if_notifications_enabled
    def _notify_cache_action(self, context, image_id, host, action):
        """Emit an 'image_volume_cache.<action>' notification."""
        data = {
            'image_id': image_id,
            'host': host,
        }
        LOG.debug('ImageVolumeCache notification: action=%(action)s'
                  ' data=%(data)s.', {'action': action, 'data': data})
        self.notifier.info(context, 'image_volume_cache.%s' % action, data)

    def _delete_image_volume(self, context, cache_entry):
        """Delete a volume and remove cache entry."""
        volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])

        # Delete will evict the cache entry.
        self.volume_api.delete(context, volume)

    def _should_update_entry(self, cache_entry, image_meta):
        """Return True if the cached image data is stale.

        Compares the image's updated_at timestamp against the (naive,
        UTC) timestamp stored with the cache entry.
        """
        image_updated_utc = (image_meta['updated_at']
                             .astimezone(timezone('UTC')))
        cache_updated_utc = (cache_entry['image_updated_at']
                             .replace(tzinfo=timezone('UTC')))

        LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, '
                  'requested image updated_at = %(image_utc)s.',
                  {'entry_utc': six.text_type(cache_updated_utc),
                   'image_utc': six.text_type(image_updated_utc)})

        return image_updated_utc != cache_updated_utc

    def _entry_to_str(self, cache_entry):
        """Render a cache entry as a loggable string."""
        return six.text_type({
            'id': cache_entry['id'],
            'image_id': cache_entry['image_id'],
            'volume_id': cache_entry['volume_id'],
            'host': cache_entry['host'],
            'size': cache_entry['size'],
            'image_updated_at': cache_entry['image_updated_at'],
            'last_used': cache_entry['last_used'],
        })