#    Copyright 2015 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Cinder common internal object model"""

import contextlib
import datetime

from oslo_log import log as logging
from oslo_utils import versionutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields

from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects


LOG = logging.getLogger('object')
obj_make_list = base.obj_make_list


class CinderObjectVersionsHistory(dict):
    """Helper class that maintains objects version history.

    Current state of object versions is aggregated in a single version number
    that explicitly identifies a set of object versions. That way a service
    is able to report what objects it supports using a single string and all
    the newer services will know exactly what that means for a single object.
    """

    def __init__(self):
        super(CinderObjectVersionsHistory, self).__init__()
        # NOTE(dulek): This is our pre-history and a starting point - Liberty.
        # We want Mitaka to be able to talk to Liberty services, so we need
        # to handle backporting to these object versions (although I don't
        # expect we've made a lot of incompatible changes inside the
        # objects).
        #
        # If an object doesn't exist in Liberty, the RPC API compatibility
        # layer shouldn't send it or should convert it to a dictionary.
        #
        # Please note that we do not need to add similar entries for each
        # release. Liberty is here just for historical reasons.
        self.versions = ['liberty']
        self['liberty'] = {
            'Backup': '1.1',
            'BackupImport': '1.1',
            'BackupList': '1.0',
            'ConsistencyGroup': '1.1',
            'ConsistencyGroupList': '1.0',
            'Service': '1.0',
            'ServiceList': '1.0',
            'Snapshot': '1.0',
            'SnapshotList': '1.0',
            'Volume': '1.1',
            'VolumeAttachment': '1.0',
            'VolumeAttachmentList': '1.0',
            'VolumeList': '1.1',
            'VolumeType': '1.0',
            'VolumeTypeList': '1.0',
        }

    def get_current(self):
        return self.versions[-1]

    def get_current_versions(self):
        return self[self.get_current()]

    def add(self, ver, updates):
        if ver in self.versions:
            msg = 'Version %s already exists in history.' % ver
            raise exception.ProgrammingError(reason=msg)

        self[ver] = self[self.get_current()].copy()
        self.versions.append(ver)
        self[ver].update(updates)


OBJ_VERSIONS = CinderObjectVersionsHistory()
# NOTE(dulek): You should add a new version here each time you bump a version
# of any object. As a second parameter you need to specify only what changed.
#
# When dropping backward compatibility with an OpenStack release we can
# rework this and remove some history while keeping the versions order.
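#
# For example (illustrative), once the entries below have been added:
#
#     OBJ_VERSIONS.get_current()       # -> '1.35'
#     OBJ_VERSIONS['1.1']['Service']   # -> '1.2'
#     OBJ_VERSIONS['1.1']['Snapshot']  # -> '1.0' (inherited from 'liberty')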
OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3',
                         'CGSnapshot': '1.0', 'CGSnapshotList': '1.0',
                         'ConsistencyGroup': '1.2',
                         'ConsistencyGroupList': '1.1', 'Service': '1.1',
                         'Volume': '1.3', 'VolumeTypeList': '1.1'})
OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})
OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'})
OBJ_VERSIONS.add('1.3', {'Service': '1.3'})
OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'})
OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'})
OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0',
                         'QualityOfServiceSpecsList': '1.0',
                         'VolumeType': '1.2'})
OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
                         'Service': '1.4', 'Volume': '1.4',
                         'ConsistencyGroup': '1.3'})
OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})
OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'})
OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5',
                          'RequestSpec': '1.1', 'VolumeProperties': '1.1'})
OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0',
                          'Group': '1.1'})
OBJ_VERSIONS.add('1.12', {'VolumeType': '1.3'})
OBJ_VERSIONS.add('1.13', {'CleanupRequest': '1.0'})
OBJ_VERSIONS.add('1.14', {'VolumeAttachmentList': '1.1'})
OBJ_VERSIONS.add('1.15', {'Volume': '1.6', 'Snapshot': '1.2'})
OBJ_VERSIONS.add('1.16', {'BackupDeviceInfo': '1.0'})
OBJ_VERSIONS.add('1.17', {'VolumeAttachment': '1.1'})
OBJ_VERSIONS.add('1.18', {'Snapshot': '1.3'})
OBJ_VERSIONS.add('1.19', {'ConsistencyGroup': '1.4', 'CGSnapshot': '1.1'})
OBJ_VERSIONS.add('1.20', {'Cluster': '1.1'})
OBJ_VERSIONS.add('1.21', {'ManageableSnapshot': '1.0',
                          'ManageableVolume': '1.0',
                          'ManageableVolumeList': '1.0',
                          'ManageableSnapshotList': '1.0'})
OBJ_VERSIONS.add('1.22', {'Snapshot': '1.4'})
OBJ_VERSIONS.add('1.23', {'VolumeAttachment': '1.2'})
OBJ_VERSIONS.add('1.24', {'LogLevel': '1.0', 'LogLevelList': '1.0'})
OBJ_VERSIONS.add('1.25', {'Group': '1.2'})
OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'})
OBJ_VERSIONS.add('1.27', {'Backup': '1.5', 'BackupImport': '1.5'})
OBJ_VERSIONS.add('1.28', {'Service': '1.5'})
OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})
OBJ_VERSIONS.add('1.31', {'Volume': '1.7'})
OBJ_VERSIONS.add('1.32', {'RequestSpec': '1.3'})
OBJ_VERSIONS.add('1.33', {'Volume': '1.8'})
OBJ_VERSIONS.add('1.34', {'VolumeAttachment': '1.3'})
OBJ_VERSIONS.add('1.35', {'Backup': '1.6', 'BackupImport': '1.6'})


class CinderObjectRegistry(base.VersionedObjectRegistry):
    def registration_hook(self, cls, index):
        """Hook called when registering a class.

        This method takes care of adding the class to the cinder.objects
        namespace.

        If the class being registered has a method called
        cinder_ovo_cls_init, it will be called to support class
        initialization. This is convenient for all persistent classes that
        need to register their models.
        """
        setattr(objects, cls.obj_name(), cls)

        # If the registered class has a callable initialization method,
        # call it.
        if callable(getattr(cls, 'cinder_ovo_cls_init', None)):
            cls.cinder_ovo_cls_init()


class CinderObject(base.VersionedObject):
    # NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
    # cinder, and other objects can exist on the same bus and be
    # distinguished from one another.
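    #
    # For example (illustrative; the serial keys come from
    # oslo.versionedobjects), a serialized Cinder object looks like:
    #
    #     {'versioned_object.namespace': 'cinder',
    #      'versioned_object.name': 'Volume',
    #      'versioned_object.version': '...',
    #      'versioned_object.data': {...}}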
    OBJ_PROJECT_NAMESPACE = 'cinder'

    def cinder_obj_get_changes(self):
        """Returns a dict of changed fields with tz unaware datetimes.

        Any timezone aware datetime field will be converted to UTC timezone
        and returned as timezone unaware datetime.

        This will allow us to pass these fields directly to a db update
        method as they can't have timezone information.
        """
        # Get dirtied/changed fields
        changes = self.obj_get_changes()

        # Look for datetime objects that contain timezone information
        for k, v in changes.items():
            if isinstance(v, datetime.datetime) and v.tzinfo:
                # Remove timezone information and adjust the time according
                # to the timezone information's offset.
                changes[k] = v.replace(tzinfo=None) - v.utcoffset()

        # Return modified dict
        return changes

    def obj_make_compatible(self, primitive, target_version):
        _log_backport(self, target_version)
        super(CinderObject, self).obj_make_compatible(primitive,
                                                      target_version)

    def __contains__(self, name):
        # We're using obj_extra_fields to provide aliases for some fields
        # while in a transition period. This override is to make these
        # aliases pass "'foo' in obj" tests.
        return name in self.obj_extra_fields or super(CinderObject,
                                                      self).__contains__(name)


class CinderObjectDictCompat(base.VersionedObjectDictCompat):
    """Mix-in to provide dictionary key access compatibility.

    If an object needs to support attribute access using
    dictionary items instead of object attributes, inherit
    from this class. This should only be used as a temporary
    measure until all callers are converted to use modern
    attribute access.

    NOTE(berrange) This class will eventually be deleted.
    """

    def get(self, key, value=base._NotSpecifiedSentinel):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        if key not in self.obj_fields:
            # NOTE(jdg): There are a number of places where we rely on the
            # old dictionary version and do a get(xxx, None).
            # The following preserves that compatibility but in the future
            # we'll remove this shim altogether, so don't rely on it.
            LOG.debug('Cinder object %(object_name)s has no '
                      'attribute named: %(attribute_name)s',
                      {'object_name': self.__class__.__name__,
                       'attribute_name': key})
            return None
        if (value != base._NotSpecifiedSentinel and
                key not in self.obj_extra_fields and
                not self.obj_attr_is_set(key)):
            return value
        else:
            try:
                return getattr(self, key)
            except (exception.ObjectActionError, NotImplementedError):
                # Raised when we haven't set a value for a non-lazy-loadable
                # attribute, but to mimic typical dict 'get' behavior we
                # should still return None.
                return None


class CinderPersistentObject(object):
    """Mixin class for Persistent objects.

    This adds the fields that we use in common for all persistent objects.
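
    For example (illustrative; `context` and `volume_id` are placeholders),
    any registered persistent object such as objects.Volume gets these
    fields in addition to its own:

        vol = objects.Volume.get_by_id(context, volume_id)
        vol.created_at    # DateTimeField, nullable
        vol.deleted       # BooleanField, defaults to False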
252 """ 253 OPTIONAL_FIELDS = [] 254 255 Not = db.Not 256 Case = db.Case 257 258 fields = { 259 'created_at': fields.DateTimeField(nullable=True), 260 'updated_at': fields.DateTimeField(nullable=True), 261 'deleted_at': fields.DateTimeField(nullable=True), 262 'deleted': fields.BooleanField(default=False, 263 nullable=True), 264 } 265 266 @classmethod 267 def cinder_ovo_cls_init(cls): 268 """This method is called on OVO registration and sets the DB model.""" 269 # Persistent Versioned Objects Classes should have a DB model, and if 270 # they don't, then we have a problem and we must raise an exception on 271 # registration. 272 try: 273 cls.model = db.get_model_for_versioned_object(cls) 274 except (ImportError, AttributeError): 275 msg = _("Couldn't find ORM model for Persistent Versioned " 276 "Object %s.") % cls.obj_name() 277 raise exception.ProgrammingError(reason=msg) 278 279 @contextlib.contextmanager 280 def obj_as_admin(self): 281 """Context manager to make an object call as an admin. 282 283 This temporarily modifies the context embedded in an object to 284 be elevated() and restores it after the call completes. Example 285 usage: 286 287 with obj.obj_as_admin(): 288 obj.save() 289 """ 290 if self._context is None: 291 raise exception.OrphanedObjectError(method='obj_as_admin', 292 objtype=self.obj_name()) 293 294 original_context = self._context 295 self._context = self._context.elevated() 296 try: 297 yield 298 finally: 299 self._context = original_context 300 301 @contextlib.contextmanager 302 def as_read_deleted(self, mode='yes'): 303 """Context manager to make OVO with modified read deleted context. 304 305 This temporarily modifies the context embedded in an object to 306 have a different `read_deleted` parameter. 307 308 Parameter mode accepts most of the same parameters as our `model_query` 309 DB method. We support 'yes', 'no', and 'only'. 310 311 usage: 312 313 with obj.as_read_deleted(): 314 obj.refresh() 315 if obj.status = 'deleted': 316 ... 317 """ 318 if self._context is None: 319 raise exception.OrphanedObjectError(method='as_read_deleted', 320 objtype=self.obj_name()) 321 322 original_mode = self._context.read_deleted 323 self._context.read_deleted = mode 324 try: 325 yield 326 finally: 327 self._context.read_deleted = original_mode 328 329 @classmethod 330 def _get_expected_attrs(cls, context, *args, **kwargs): 331 return None 332 333 @classmethod 334 def get_by_id(cls, context, id, *args, **kwargs): 335 # To get by id we need to have a model and for the model to 336 # have an id field 337 if 'id' not in cls.fields: 338 msg = (_('VersionedObject %s cannot retrieve object by id.') % 339 (cls.obj_name())) 340 raise NotImplementedError(msg) 341 342 orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs) 343 # We pass parameters because fields to expect may depend on them 344 expected_attrs = cls._get_expected_attrs(context, *args, **kwargs) 345 kargs = {} 346 if expected_attrs: 347 kargs = {'expected_attrs': expected_attrs} 348 return cls._from_db_object(context, cls(context), orm_obj, **kargs) 349 350 def update_single_status_where(self, new_status, 351 expected_status, filters=()): 352 values = {'status': new_status} 353 expected_status = {'status': expected_status} 354 return self.conditional_update(values, expected_status, filters) 355 356 def conditional_update(self, values, expected_values=None, filters=(), 357 save_all=False, session=None, reflect_changes=True, 358 order=None): 359 """Compare-and-swap update. 

        A conditional object update that, unlike a normal update, will SAVE
        the contents of the update to the DB.

        Update will only occur in the DB and the object if conditions are
        met.

        If no expected_values are passed in we will default to making sure
        that all fields have not been changed in the DB. Since we cannot know
        the original value in the DB for dirty fields in the object those
        will be excluded.

        We have 4 different condition types we can use in expected_values:

        - Equality: {'status': 'available'}
        - Inequality: {'status': vol_obj.Not('deleting')}
        - In range: {'status': ['available', 'error']}
        - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}

        The method accepts additional filters, which can be anything that can
        be passed to a sqlalchemy query's filter method, for example:

        .. code-block:: python

            [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]

        We can select values based on conditions using Case objects in the
        'values' argument. For example:

        .. code-block:: python

            has_snapshot_filter = sql.exists().where(
                models.Snapshot.volume_id == models.Volume.id)
            case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                      else_='no-snapshot')
            volume.conditional_update({'status': case_values},
                                      {'status': 'available'})

        We can also use DB fields (through the model class attributes) in
        the values we set, for example to store the previous status in the
        corresponding field even though we don't know which of the allowed
        values is currently in the DB:

        .. code-block:: python

            volume.conditional_update({'status': 'deleting',
                                       'previous_status': volume.model.status},
                                      {'status': ('available', 'error')})

        :param values: Dictionary of key-values to update in the DB.
        :param expected_values: Dictionary of conditions that must be met for
                                the update to be executed.
        :param filters: Iterable with additional filters.
        :param save_all: Object may have changes that are not in the DB; this
                         will say whether we want those changes saved as well.
        :param session: Session to use for the update.
        :param reflect_changes: If we want changes made in the database to be
                                reflected in the versioned object. This may
                                mean in some cases that we have to reload the
                                object from the database.
        :param order: Specific order of fields in which to update the values.
        :returns: Number of db rows that were updated, which can be used as a
                  boolean, since it will be 0 if we couldn't update the DB
                  and 1 if we could, because we are using the unique index
                  id.
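
        Since the return value is usable as a boolean, a typical
        compare-and-swap call looks like this (an illustrative sketch; the
        exception raised here is just an example):

        .. code-block:: python

            if not volume.conditional_update({'status': 'deleting'},
                                             {'status': 'available'}):
                msg = _('Volume status was modified concurrently.')
                raise exception.InvalidVolume(reason=msg)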
421 """ 422 if 'id' not in self.fields: 423 msg = (_('VersionedObject %s does not support conditional update.') 424 % (self.obj_name())) 425 raise NotImplementedError(msg) 426 427 # If no conditions are set we will require object in DB to be unchanged 428 if expected_values is None: 429 changes = self.obj_what_changed() 430 431 expected = {key: getattr(self, key) 432 for key in self.fields.keys() 433 if self.obj_attr_is_set(key) and key not in changes and 434 key not in self.OPTIONAL_FIELDS} 435 else: 436 # Set the id in expected_values to limit conditional update to only 437 # change this object 438 expected = expected_values.copy() 439 expected['id'] = self.id 440 441 # If we want to save any additional changes the object has besides the 442 # ones referred in values 443 if save_all: 444 changes = self.cinder_obj_get_changes() 445 changes.update(values) 446 values = changes 447 448 result = db.conditional_update(self._context, self.model, values, 449 expected, filters, order=order) 450 451 # If we were able to update the DB then we need to update this object 452 # as well to reflect new DB contents and clear the object's dirty flags 453 # for those fields. 454 if result and reflect_changes: 455 # If we have used a Case, a db field or an expression in values we 456 # don't know which value was used, so we need to read the object 457 # back from the DB 458 if any(isinstance(v, self.Case) or db.is_orm_value(v) 459 for v in values.values()): 460 # Read back object from DB 461 obj = type(self).get_by_id(self._context, self.id) 462 db_values = obj.obj_to_primitive()['versioned_object.data'] 463 # Only update fields were changes were requested 464 values = {field: db_values[field] 465 for field, value in values.items()} 466 467 # NOTE(geguileo): We don't use update method because our objects 468 # will eventually move away from VersionedObjectDictCompat 469 for key, value in values.items(): 470 setattr(self, key, value) 471 self.obj_reset_changes(values.keys()) 472 return result 473 474 def refresh(self): 475 # To refresh we need to have a model and for the model to have an id 476 # field 477 if 'id' not in self.fields: 478 msg = (_('VersionedObject %s cannot retrieve object by id.') % 479 (self.obj_name())) 480 raise NotImplementedError(msg) 481 482 current = self.get_by_id(self._context, self.id) 483 484 # Copy contents retrieved from the DB into self 485 my_data = vars(self) 486 my_data.clear() 487 my_data.update(vars(current)) 488 489 @classmethod 490 def exists(cls, context, id_): 491 return db.resource_exists(context, cls.model, id_) 492 493 494class CinderComparableObject(base.ComparableVersionedObject): 495 def __eq__(self, obj): 496 if hasattr(obj, 'obj_to_primitive'): 497 return self.obj_to_primitive() == obj.obj_to_primitive() 498 return False 499 500 def __ne__(self, other): 501 return not self.__eq__(other) 502 503 504class ObjectListBase(base.ObjectListBase): 505 def obj_make_compatible(self, primitive, target_version): 506 _log_backport(self, target_version) 507 super(ObjectListBase, self).obj_make_compatible(primitive, 508 target_version) 509 510 511class ClusteredObject(object): 512 @property 513 def service_topic_queue(self): 514 return self.cluster_name or self.host 515 516 @property 517 def is_clustered(self): 518 return bool(self.cluster_name) 519 520 def assert_not_frozen(self): 521 ctxt = self._context.elevated() 522 if db.is_backend_frozen(ctxt, self.host, self.cluster_name): 523 msg = _('Modification operations are not allowed on frozen ' 524 'storage backends.') 525 
            raise exception.InvalidInput(reason=msg)


class CinderObjectSerializer(base.VersionedObjectSerializer):
    OBJ_BASE_CLASS = CinderObject

    def __init__(self, version_cap=None):
        super(CinderObjectSerializer, self).__init__()
        self.version_cap = version_cap

        # NOTE(geguileo): During upgrades we will use a manifest to ensure
        # that all objects are properly backported. This allows us to
        # properly backport child objects to the right version even if the
        # parent version has not been bumped.
        if not version_cap or version_cap == OBJ_VERSIONS.get_current():
            self.manifest = None
        else:
            if version_cap not in OBJ_VERSIONS:
                raise exception.CappedVersionUnknown(version=version_cap)
            self.manifest = OBJ_VERSIONS[version_cap]

    def _get_capped_obj_version(self, obj):
        objname = obj.obj_name()
        version_dict = OBJ_VERSIONS.get(self.version_cap, {})
        version_cap = version_dict.get(objname, None)

        if version_cap:
            cap_tuple = versionutils.convert_version_to_tuple(version_cap)
            obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION)
            if cap_tuple > obj_tuple:
                # NOTE(dulek): Do not set the version cap higher than the
                # actual object version, as we don't support "forwardporting"
                # of objects. If a service receives an object that's too old
                # it should handle it explicitly.
                version_cap = None

        return version_cap

    def serialize_entity(self, context, entity):
        if isinstance(entity, (tuple, list, set, dict)):
            entity = self._process_iterable(context, self.serialize_entity,
                                            entity)
        elif (hasattr(entity, 'obj_to_primitive') and
                callable(entity.obj_to_primitive)):
            # NOTE(dulek): Backport outgoing object to the capped version.
            backport_ver = self._get_capped_obj_version(entity)
            entity = entity.obj_to_primitive(backport_ver, self.manifest)
        return entity


def _log_backport(ovo, target_version):
    """Log backported versioned objects."""
    if target_version and target_version != ovo.VERSION:
        LOG.debug('Backporting %(obj_name)s from version %(src_vers)s '
                  'to version %(dst_vers)s',
                  {'obj_name': ovo.obj_name(),
                   'src_vers': ovo.VERSION,
                   'dst_vers': target_version})
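

# NOTE: Illustrative usage of CinderObjectSerializer (the actual RPC wiring
# lives outside this module; `pinned_version` and `volume` are placeholders):
#
#     serializer = CinderObjectSerializer(version_cap=pinned_version)
#     primitive = serializer.serialize_entity(context, volume)
#
# Any object newer than what `pinned_version` maps to in OBJ_VERSIONS is
# backported via obj_to_primitive() before being put on the wire.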