# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import base64
import datetime
import functools
import operator
import threading
import time
import types  # noqa

import iso8601

from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack.cloud import exc
from openstack.cloud import meta
from openstack import exceptions
from openstack import proxy
from openstack import utils


class ComputeCloudMixin(_normalize.Normalizer):

    def __init__(self):
        self._servers = None
        self._servers_time = 0
        self._servers_lock = threading.Lock()

    @property
    def _compute_region(self):
        # This is only used in exception messages. Can we get rid of it?
        return self.config.get_region_name('compute')

    def get_flavor_name(self, flavor_id):
        flavor = self.get_flavor(flavor_id, get_extra=False)
        if flavor:
            return flavor['name']
        return None

    def get_flavor_by_ram(self, ram, include=None, get_extra=True):
        """Get a flavor based on amount of RAM available.

        Finds the flavor with the least amount of RAM that is at least
        as much as the specified amount. If `include` is given, further
        filter based on matching flavor name.

        :param int ram: Minimum amount of RAM.
        :param string include: If given, will return a flavor whose name
            contains this string as a substring.
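
        Example (a minimal usage sketch; assumes ``cloud`` is an
        ``openstack.connection.Connection`` from ``openstack.connect()``
        and the flavor-name substring is illustrative)::

            flavor = cloud.get_flavor_by_ram(ram=4096, include='general')
            print(flavor['name'], flavor['ram'])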
        """
        flavors = self.list_flavors(get_extra=get_extra)
        for flavor in sorted(flavors, key=operator.itemgetter('ram')):
            if (flavor['ram'] >= ram
                    and (not include or include in flavor['name'])):
                return flavor
        raise exc.OpenStackCloudException(
            "Could not find a flavor with {ram} and '{include}'".format(
                ram=ram, include=include))

    @_utils.cache_on_arguments()
    def _nova_extensions(self):
        extensions = set()
        data = proxy._json_response(
            self.compute.get('/extensions'),
            error_message="Error fetching extension list for nova")

        for extension in self._get_and_munchify('extensions', data):
            extensions.add(extension['alias'])
        return extensions

    def _has_nova_extension(self, extension_name):
        return extension_name in self._nova_extensions()

    def search_keypairs(self, name_or_id=None, filters=None):
        keypairs = self.list_keypairs(
            filters=filters if isinstance(filters, dict) else None
        )
        return _utils._filter_list(keypairs, name_or_id, filters)

    def search_flavors(self, name_or_id=None, filters=None, get_extra=True):
        flavors = self.list_flavors(get_extra=get_extra)
        return _utils._filter_list(flavors, name_or_id, filters)

    def search_servers(
            self, name_or_id=None, filters=None, detailed=False,
            all_projects=False, bare=False):
        servers = self.list_servers(
            detailed=detailed, all_projects=all_projects, bare=bare)
        return _utils._filter_list(servers, name_or_id, filters)

    def search_server_groups(self, name_or_id=None, filters=None):
        """Search server groups.

        :param name_or_id: server group name or ID.
        :param filters: a dict containing additional filters to use.

        :returns: a list of dicts containing the server groups

        :raises: ``OpenStackCloudException``: if something goes wrong during
            the OpenStack API call.
        """
        server_groups = self.list_server_groups()
        return _utils._filter_list(server_groups, name_or_id, filters)

    def list_keypairs(self, filters=None):
        """List all available keypairs.

        :returns: A list of ``munch.Munch`` containing keypair info.

        """
        if not filters:
            filters = {}
        return list(self.compute.keypairs(allow_unknown_params=True,
                                          **filters))

    @_utils.cache_on_arguments()
    def list_availability_zone_names(self, unavailable=False):
        """List names of availability zones.

        :param bool unavailable: Whether or not to include unavailable zones
                                 in the output. Defaults to False.

        :returns: A list of availability zone names, or an empty list if the
                  list could not be fetched.
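
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``)::

            for name in cloud.list_availability_zone_names():
                print(name)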
        """
        try:
            zones = self.compute.availability_zones()
            ret = []
            for zone in zones:
                if zone.state['available'] or unavailable:
                    ret.append(zone.name)
            return ret
        except exceptions.SDKException:
            self.log.debug(
                "Availability zone list could not be fetched",
                exc_info=True)
            return []

    @_utils.cache_on_arguments()
    def list_flavors(self, get_extra=False):
        """List all available flavors.

        :param get_extra: Whether or not to fetch extra specs for each flavor.
                          Defaults to False. The default behavior can be
                          overridden in clouds.yaml by setting
                          openstack.cloud.get_extra_specs.
        :returns: A list of flavor ``munch.Munch``.

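        Example (illustrative; ``cloud`` is assumed to be an
        ``openstack.connection.Connection`` from ``openstack.connect()``)::

            for flavor in cloud.list_flavors(get_extra=True):
                print(flavor['name'], flavor['ram'], flavor['vcpus'])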
        """
        data = self.compute.flavors(details=True)
        flavors = []

        for flavor in data:
            if not flavor.extra_specs and get_extra:
                flavor.fetch_extra_specs(self.compute)
            flavors.append(flavor._to_munch(original_names=False))
        return flavors

    def list_server_security_groups(self, server):
        """List all security groups associated with the given server.

        :returns: A list of security group ``munch.Munch``.
        """

        # Don't even try if we're a cloud that doesn't have them
        if not self._has_secgroups():
            return []

        server = self.compute.get_server(server)

        server.fetch_security_groups(self.compute)

        return self._normalize_secgroups(server.security_groups)

    def _get_server_security_groups(self, server, security_groups):
        if not self._has_secgroups():
            raise exc.OpenStackCloudUnavailableFeature(
                "Unavailable feature: security groups"
            )

        if not isinstance(server, dict):
            server_name_or_id = server
            server = self.get_server(server_name_or_id, bare=True)

            if server is None:
                self.log.debug('Server %s not found', server_name_or_id)
                return None, None

        if not isinstance(security_groups, (list, tuple)):
            security_groups = [security_groups]

        sec_group_objs = []

        for sg in security_groups:
            if not isinstance(sg, dict):
                sg_name_or_id = sg
                sg = self.get_security_group(sg_name_or_id)

                if sg is None:
                    self.log.debug('Security group %s not found for adding',
                                   sg_name_or_id)

                    return None, None

            sec_group_objs.append(sg)

        return server, sec_group_objs

    def add_server_security_groups(self, server, security_groups):
        """Add security groups to a server.

        Add existing security groups to an existing server. If the security
        groups are already present on the server this will continue
        unaffected.

        :returns: False if server or security groups are undefined, True
            otherwise.

        :raises: ``OpenStackCloudException``, on operation error.
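
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the server and
        security group names are placeholders)::

            cloud.add_server_security_groups('my-server', ['my-secgroup'])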
        """
        server, security_groups = self._get_server_security_groups(
            server, security_groups)

        if not (server and security_groups):
            return False

        for sg in security_groups:
            self.compute.add_security_group_to_server(server, sg)

        return True

    def remove_server_security_groups(self, server, security_groups):
        """Remove security groups from a server

        Remove existing security groups from an existing server. If the
        security groups are not present on the server this will continue
        unaffected.

        :returns: False if server or security groups are undefined, True
            otherwise.

        :raises: ``OpenStackCloudException``, on operation error.
        """
        server, security_groups = self._get_server_security_groups(
            server, security_groups)

        if not (server and security_groups):
            return False

        ret = True

        for sg in security_groups:
            try:
                self.compute.remove_security_group_from_server(server, sg)

            except exceptions.ResourceNotFound:
                # NOTE(jamielennox): Is this ok? If we remove something that
                # isn't present should we just conclude job done or is that an
                # error? Nova returns ok if you try to add a group twice.
                self.log.debug(
                    "The security group %s was not present on server %s so "
                    "no action was performed", sg.name, server.name)
                ret = False

        return ret

    def list_servers(self, detailed=False, all_projects=False, bare=False,
                     filters=None):
        """List all available servers.

        :param detailed: Whether or not to add detailed additional information.
                         Defaults to False.
        :param all_projects: Whether to list servers from all projects or just
                             the current auth scoped project.
        :param bare: Whether to skip adding any additional information to the
                     server record. Defaults to False, meaning the addresses
                     dict will be populated as needed from neutron. Setting
                     to True implies detailed = False.
        :param filters: Additional query parameters passed to the API server.

        :returns: A list of server ``munch.Munch``.
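
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``)::

            for server in cloud.list_servers(filters={'status': 'ACTIVE'}):
                print(server['id'], server['name'])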

        """
        # If pushdown filters are specified and we do not have batched caching
        # enabled, bypass local caching and push down the filters.
        if filters and self._SERVER_AGE == 0:
            return self._list_servers(
                detailed=detailed,
                all_projects=all_projects,
                bare=bare,
                filters=filters,
            )

        if (time.time() - self._servers_time) >= self._SERVER_AGE:
            # Since we're using cached data anyway, we don't need to
            # have more than one thread actually submit the list
            # servers task.  Let the first one submit it while holding
            # a lock, and the non-blocking acquire method will cause
            # subsequent threads to just skip this and use the old
            # data until it succeeds.
            # Initially when we never got data, block to retrieve some data.
            first_run = self._servers is None
            if self._servers_lock.acquire(first_run):
                try:
                    if not (first_run and self._servers is not None):
                        self._servers = self._list_servers(
                            detailed=detailed,
                            all_projects=all_projects,
                            bare=bare)
                        self._servers_time = time.time()
                finally:
                    self._servers_lock.release()
        # Wrap the return with filter_list so that if filters were passed
        # but we were batching/caching and thus always fetching the whole
        # list from the cloud, we still return a filtered list.
        return _utils._filter_list(self._servers, None, filters)

    def _list_servers(self, detailed=False, all_projects=False, bare=False,
                      filters=None):
        filters = filters or {}
        servers = [
            # TODO(mordred) Add original_names=False here and update the
            # normalize file for server. Then, just remove the normalize call
            # and the to_munch call.
            self._normalize_server(server._to_munch())
            for server in self.compute.servers(
                all_projects=all_projects, allow_unknown_params=True,
                **filters)]
        return [
            self._expand_server(server, detailed, bare)
            for server in servers
        ]

    def list_server_groups(self):
        """List all available server groups.

        :returns: A list of server group dicts.

        """
        return list(self.compute.server_groups())

    def get_compute_limits(self, name_or_id=None):
        """Get compute limits for a project

        :param name_or_id: (optional) project name or ID to get limits for
                           if different from the current project
        :raises: OpenStackCloudException if it's not a valid project

        :returns: Munch object with the limits
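
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``)::

            limits = cloud.get_compute_limits()
            print(limits)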
        """
        params = {}
        project_id = None
        error_msg = "Failed to get limits"
        if name_or_id:
            proj = self.get_project(name_or_id)
            if not proj:
                raise exc.OpenStackCloudException("project does not exist")
            project_id = proj.id
            params['tenant_id'] = project_id
            error_msg = "{msg} for the project: {project} ".format(
                msg=error_msg, project=name_or_id)

        data = proxy._json_response(
            self.compute.get('/limits', params=params),
            error_message=error_msg)
        limits = self._get_and_munchify('limits', data)
        return self._normalize_compute_limits(limits, project_id=project_id)

    def get_keypair(self, name_or_id, filters=None):
        """Get a keypair by name or ID.

        :param name_or_id: Name or ID of the keypair.
        :param filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'last_name': 'Smith',
                  'other': {
                      'gender': 'Female'
                  }
                }

            OR
            A string containing a jmespath expression for further filtering.
            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"

        :returns: A keypair ``munch.Munch`` or None if no matching keypair is
                  found.
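
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the keypair name is
        a placeholder)::

            keypair = cloud.get_keypair('my-keypair')
            if keypair:
                print(keypair['public_key'])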
        """
        return _utils._get_entity(self, 'keypair', name_or_id, filters)

    def get_flavor(self, name_or_id, filters=None, get_extra=True):
        """Get a flavor by name or ID.

        :param name_or_id: Name or ID of the flavor.
        :param filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'last_name': 'Smith',
                  'other': {
                      'gender': 'Female'
                  }
                }

            OR
            A string containing a jmespath expression for further filtering.
            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
        :param get_extra:
             Whether or not the list_flavors call should get the extra flavor
             specs.

        :returns: A flavor ``munch.Munch`` or None if no matching flavor is
            found.

        """
        if not filters:
            filters = {}
        flavor = self.compute.find_flavor(
            name_or_id, get_extra_specs=get_extra, **filters)
        if flavor:
            return flavor._to_munch(original_names=False)

    def get_flavor_by_id(self, id, get_extra=False):
        """Get a flavor by ID.

        :param id: ID of the flavor.
        :param get_extra:
             Whether or not the list_flavors call should get the extra flavor
             specs.
        :returns: A flavor ``munch.Munch``.
        """
        flavor = self.compute.get_flavor(id, get_extra_specs=get_extra)
        return flavor._to_munch(original_names=False)

    def get_server_console(self, server, length=None):
        """Get the console log for a server.

        :param server: The server to fetch the console log for. Can be either
                       a server dict or the Name or ID of the server.
        :param int length: The number of lines you would like to retrieve from
                           the end of the log. (optional, defaults to all)

        :returns: A string containing the text of the console log or an
                  empty string if the cloud does not support console logs.
        :raises: OpenStackCloudException if an invalid server argument is given
                 or if something else unforeseen happens
        """

        if not isinstance(server, dict):
            server = self.get_server(server, bare=True)

        if not server:
            raise exc.OpenStackCloudException(
                "Console log requested for invalid server")

        try:
            return self._get_server_console_output(server['id'], length)
        except exc.OpenStackCloudBadRequest:
            return ""

    def _get_server_console_output(self, server_id, length=None):
        output = self.compute.get_server_console_output(
            server=server_id,
            length=length
        )
        if 'output' in output:
            return output['output']

    def get_server(
            self, name_or_id=None, filters=None, detailed=False, bare=False,
            all_projects=False):
        """Get a server by name or ID.

        :param name_or_id: Name or ID of the server.
        :param filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'last_name': 'Smith',
                  'other': {
                      'gender': 'Female'
                  }
                }

            OR
            A string containing a jmespath expression for further filtering.
            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
        :param detailed: Whether or not to add detailed additional information.
                         Defaults to False.
        :param bare: Whether to skip adding any additional information to the
                     server record. Defaults to False, meaning the addresses
                     dict will be populated as needed from neutron. Setting
                     to True implies detailed = False.
        :param all_projects: Whether to get server from all projects or just
                             the current auth scoped project.

        :returns: A server ``munch.Munch`` or None if no matching server is
                  found.
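
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the server name is
        a placeholder)::

            server = cloud.get_server('my-server', detailed=True)
            if server:
                print(server['id'], server['status'])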

        """
        searchfunc = functools.partial(self.search_servers,
                                       detailed=detailed, bare=True,
                                       all_projects=all_projects)
        server = _utils._get_entity(self, searchfunc, name_or_id, filters)
        return self._expand_server(server, detailed, bare)

    def _expand_server(self, server, detailed, bare):
        if bare or not server:
            return server
        elif detailed:
            return meta.get_hostvars_from_server(self, server)
        else:
            return meta.add_server_interfaces(self, server)

    def get_server_by_id(self, id):
        """Get a server by ID.

        :param id: ID of the server.

        :returns: A server dict or None if no matching server is found.
        """
        try:
            data = proxy._json_response(
                self.compute.get('/servers/{id}'.format(id=id)))
            server = self._get_and_munchify('server', data)
            return meta.add_server_interfaces(
                self, self._normalize_server(server))
        except exceptions.ResourceNotFound:
            return None

    def get_server_group(self, name_or_id=None, filters=None):
        """Get a server group by name or ID.

        :param name_or_id: Name or ID of the server group.
        :param filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'policy': 'affinity',
                }

            OR
            A string containing a jmespath expression for further filtering.
            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"

        :returns: A server group dict or None if no matching server group
                  is found.

        """
        return _utils._get_entity(self, 'server_group', name_or_id,
                                  filters)

    def create_keypair(self, name, public_key=None):
        """Create a new keypair.

        :param name: Name of the keypair being created.
        :param public_key: Public key for the new keypair.

        :raises: OpenStackCloudException on operation error.
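
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the name and path
        are placeholders)::

            with open('/path/to/id_rsa.pub') as f:
                keypair = cloud.create_keypair('my-keypair', f.read())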
        """
        keypair = {
            'name': name,
        }
        if public_key:
            keypair['public_key'] = public_key
        return self.compute.create_keypair(**keypair)

    def delete_keypair(self, name):
        """Delete a keypair.

        :param name: Name of the keypair to delete.

        :returns: True if delete succeeded, False otherwise.

        :raises: OpenStackCloudException on operation error.
        """
        try:
            self.compute.delete_keypair(name, ignore_missing=False)
        except exceptions.ResourceNotFound:
            self.log.debug("Keypair %s not found for deleting", name)
            return False
        return True

    def create_image_snapshot(
            self, name, server, wait=False, timeout=3600, **metadata):
        """Create an image by snapshotting an existing server.

        .. note::
            On most clouds this is a cold snapshot - meaning that the server
            in question will be shut down before taking the snapshot. It is
            possible that it's a live snapshot - but there is no way to know
            as a user, so caveat emptor.

        :param name: Name of the image to be created
        :param server: Server name or ID or dict representing the server
                       to be snapshotted
        :param wait: If true, waits for image to be created.
        :param timeout: Seconds to wait for image creation. None is forever.
        :param metadata: Metadata to give newly-created image entity

        :returns: A ``munch.Munch`` of the Image object

        :raises: OpenStackCloudException if there are problems uploading
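
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; names are
        placeholders)::

            image = cloud.create_image_snapshot(
                'my-snapshot', 'my-server', wait=True, timeout=1800)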
        """
        if not isinstance(server, dict):
            server_obj = self.get_server(server, bare=True)
            if not server_obj:
                raise exc.OpenStackCloudException(
                    "Server {server} could not be found and therefore"
                    " could not be snapshotted.".format(server=server))
            server = server_obj
        image = self.compute.create_server_image(
            server, name=name, metadata=metadata, wait=wait, timeout=timeout)
        return image

    def get_server_id(self, name_or_id):
        server = self.get_server(name_or_id, bare=True)
        if server:
            return server['id']
        return None

    def get_server_private_ip(self, server):
        return meta.get_server_private_ip(server, self)

    def get_server_public_ip(self, server):
        return meta.get_server_external_ipv4(self, server)

    def get_server_meta(self, server):
        # TODO(mordred) remove once ansible has moved to Inventory interface
        server_vars = meta.get_hostvars_from_server(self, server)
        groups = meta.get_groups_from_server(self, server, server_vars)
        return dict(server_vars=server_vars, groups=groups)

    @_utils.valid_kwargs(
        'meta', 'files', 'userdata',
        'reservation_id', 'return_raw', 'min_count',
        'max_count', 'security_groups', 'key_name',
        'availability_zone', 'block_device_mapping',
        'block_device_mapping_v2', 'nics', 'scheduler_hints',
        'config_drive', 'admin_pass', 'disk_config')
    def create_server(
            self, name, image=None, flavor=None,
            auto_ip=True, ips=None, ip_pool=None,
            root_volume=None, terminate_volume=False,
            wait=False, timeout=180, reuse_ips=True,
            network=None, boot_from_volume=False, volume_size='50',
            boot_volume=None, volumes=None, nat_destination=None,
            group=None,
            **kwargs):
        """Create a virtual server instance.

        :param name: Something to name the server.
        :param image: Image dict, name or ID to boot with. image is required
                      unless boot_volume is given.
        :param flavor: Flavor dict, name or ID to boot onto.
        :param auto_ip: Whether to take actions to find a routable IP for
                        the server. (defaults to True)
        :param ips: List of IPs to attach to the server (defaults to None)
        :param ip_pool: Name of the network or floating IP pool to get an
                        address from. (defaults to None)
        :param root_volume: Name or ID of a volume to boot from
                            (defaults to None - deprecated, use boot_volume)
        :param boot_volume: Name or ID of a volume to boot from
                            (defaults to None)
        :param terminate_volume: If booting from a volume, whether it should
                                 be deleted when the server is destroyed.
                                 (defaults to False)
        :param volumes: (optional) A list of volumes to attach to the server
        :param meta: (optional) A dict of arbitrary key/value metadata to
                     store for this server. Both keys and values must be
                     <=255 characters.
        :param files: (optional, deprecated) A dict of files to overwrite
                      on the server upon boot. Keys are file names (i.e.
                      ``/etc/passwd``) and values
                      are the file contents (either as a string or as a
                      file-like object). A maximum of five entries is allowed,
                      and each file must be 10k or less.
        :param reservation_id: a UUID for the set of servers being requested.
        :param min_count: (optional extension) The minimum number of
                          servers to launch.
        :param max_count: (optional extension) The maximum number of
                          servers to launch.
        :param security_groups: A list of security group names
        :param userdata: User data to pass to be exposed by the metadata
                      server; this can be a file-like object or a string.
        :param key_name: (optional extension) name of previously created
                      keypair to inject into the instance.
        :param availability_zone: Name of the availability zone for instance
                                  placement.
        :param block_device_mapping: (optional) A dict of block
                      device mappings for this server.
        :param block_device_mapping_v2: (optional) A dict of block
                      device mappings for this server.
        :param nics:  (optional extension) an ordered list of nics to be
                      added to this server, with information about
                      connected networks, fixed IPs, port etc.
        :param scheduler_hints: (optional extension) arbitrary key-value pairs
                            specified by the client to help boot an instance
        :param config_drive: (optional extension) value for config drive
                            either boolean, or volume-id
        :param disk_config: (optional extension) control how the disk is
                            partitioned when the server is created.  possible
                            values are 'AUTO' or 'MANUAL'.
        :param admin_pass: (optional extension) add a user supplied admin
                           password.
        :param wait: (optional) Wait for the address to appear as assigned
                     to the server. Defaults to False.
        :param timeout: (optional) Seconds to wait, defaults to 180.
                        See the ``wait`` parameter.
        :param reuse_ips: (optional) Whether to attempt to reuse pre-existing
                                     floating ips should a floating IP be
                                     needed (defaults to True)
        :param network: (optional) Network dict or name or ID to attach the
                        server to.  Mutually exclusive with the nics parameter.
                        Can also be a list of network names or IDs or
                        network dicts.
        :param boot_from_volume: Whether to boot from volume. 'boot_volume'
                                 implies True, but boot_from_volume=True with
                                 no boot_volume is valid and will create a
                                 volume from the image and use that.
        :param volume_size: When booting an image from volume, how big should
                            the created volume be? Defaults to 50.
        :param nat_destination: Which network should a created floating IP
                                be attached to, if it's not possible to
                                infer from the cloud's configuration.
                                (Optional, defaults to None)
        :param group: ServerGroup dict, name or id to boot the server in.
                      If a group is provided in both scheduler_hints and in
                      the group param, the group param will win.
                      (Optional, defaults to None)
        :returns: A ``munch.Munch`` representing the created server.
        :raises: OpenStackCloudException on operation error.
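
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the image, flavor,
        network and keypair names are placeholders)::

            server = cloud.create_server(
                'my-server',
                image='ubuntu-20.04',
                flavor='m1.small',
                network='private',
                key_name='my-keypair',
                wait=True,
                timeout=600,
            )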
        """
        # TODO(shade) Image is optional but flavor is not - yet flavor comes
        # after image in the argument list. Doh.
        if not flavor:
            raise TypeError(
                "create_server() missing 1 required argument: 'flavor'")
        if not image and not boot_volume:
            raise TypeError(
                "create_server() requires either 'image' or 'boot_volume'")

        microversion = None
        server_json = {'server': kwargs}

        # TODO(mordred) Add support for description starting in 2.19
        security_groups = kwargs.get('security_groups', [])
        if security_groups and not isinstance(kwargs['security_groups'], list):
            security_groups = [security_groups]
        if security_groups:
            kwargs['security_groups'] = []
            for sec_group in security_groups:
                kwargs['security_groups'].append(dict(name=sec_group))
        if 'userdata' in kwargs:
            user_data = kwargs.pop('userdata')
            if user_data:
                kwargs['user_data'] = self._encode_server_userdata(user_data)
        for (desired, given) in (
                ('OS-DCF:diskConfig', 'disk_config'),
                ('config_drive', 'config_drive'),
                ('key_name', 'key_name'),
                ('metadata', 'meta'),
                ('adminPass', 'admin_pass')):
            value = kwargs.pop(given, None)
            if value:
                kwargs[desired] = value

        hints = kwargs.pop('scheduler_hints', {})
        if group:
            group_obj = self.get_server_group(group)
            if not group_obj:
                raise exc.OpenStackCloudException(
                    "Server Group {group} was requested but was not found"
                    " on the cloud".format(group=group))
            hints['group'] = group_obj['id']
        if hints:
            server_json['os:scheduler_hints'] = hints
        kwargs.setdefault('max_count', 1)
        kwargs.setdefault('min_count', 1)

        if 'nics' in kwargs and not isinstance(kwargs['nics'], list):
            if isinstance(kwargs['nics'], dict):
                # Be nice and help the user out
                kwargs['nics'] = [kwargs['nics']]
            else:
                raise exc.OpenStackCloudException(
                    'nics parameter to create_server takes a list of dicts.'
                    ' Got: {nics}'.format(nics=kwargs['nics']))

        if network and ('nics' not in kwargs or not kwargs['nics']):
            nics = []
            if not isinstance(network, list):
                network = [network]
            for net_name in network:
                if isinstance(net_name, dict) and 'id' in net_name:
                    network_obj = net_name
                else:
                    network_obj = self.get_network(name_or_id=net_name)
                if not network_obj:
                    raise exc.OpenStackCloudException(
                        'Network {network} is not a valid network in'
                        ' {cloud}:{region}'.format(
                            network=network,
                            cloud=self.name, region=self._compute_region))
                nics.append({'net-id': network_obj['id']})

            kwargs['nics'] = nics
        if not network and ('nics' not in kwargs or not kwargs['nics']):
            default_network = self.get_default_network()
            if default_network:
                kwargs['nics'] = [{'net-id': default_network['id']}]

        networks = []
        for nic in kwargs.pop('nics', []):
            net = {}
            if 'net-id' in nic:
                # TODO(mordred) Make sure this is in uuid format
                net['uuid'] = nic.pop('net-id')
                # If there's a net-id, ignore net-name
                nic.pop('net-name', None)
            elif 'net-name' in nic:
                net_name = nic.pop('net-name')
                nic_net = self.get_network(net_name)
                if not nic_net:
                    raise exc.OpenStackCloudException(
                        "Requested network {net} could not be found.".format(
                            net=net_name))
                net['uuid'] = nic_net['id']
            for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'):
                fixed_ip = nic.pop(ip_key, None)
                if fixed_ip and net.get('fixed_ip'):
                    raise exc.OpenStackCloudException(
                        "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip"
                        " may be given")
                if fixed_ip:
                    net['fixed_ip'] = fixed_ip
            for key in ('port', 'port-id'):
                if key in nic:
                    net['port'] = nic.pop(key)
            # A tag supported only in server microversion 2.32-2.36 or >= 2.42
            # Bumping the version to 2.42 to support the 'tag' implementation
            if 'tag' in nic:
                microversion = utils.pick_microversion(self.compute, '2.42')
                net['tag'] = nic.pop('tag')
            if nic:
                raise exc.OpenStackCloudException(
                    "Additional unsupported keys given for server network"
                    " creation: {keys}".format(keys=nic.keys()))
            networks.append(net)
        if networks:
            kwargs['networks'] = networks

        if image:
            if isinstance(image, dict):
                kwargs['imageRef'] = image['id']
            else:
                kwargs['imageRef'] = self.get_image(image).id
        if isinstance(flavor, dict):
            kwargs['flavorRef'] = flavor['id']
        else:
            kwargs['flavorRef'] = self.get_flavor(flavor, get_extra=False).id

        if volumes is None:
            volumes = []

        # nova cli calls this boot_volume. Let's be the same
        if root_volume and not boot_volume:
            boot_volume = root_volume

        kwargs = self._get_boot_from_volume_kwargs(
            image=image, boot_from_volume=boot_from_volume,
            boot_volume=boot_volume, volume_size=str(volume_size),
            terminate_volume=terminate_volume,
            volumes=volumes, kwargs=kwargs)

        kwargs['name'] = name
        endpoint = '/servers'
        # TODO(mordred) We're only testing this in functional tests. We need
        # to add unit tests for this too.
        if 'block_device_mapping_v2' in kwargs:
            endpoint = '/os-volumes_boot'
        with _utils.shade_exceptions("Error in creating instance"):
            data = proxy._json_response(
                self.compute.post(endpoint, json=server_json,
                                  microversion=microversion))
            server = self._get_and_munchify('server', data)
            admin_pass = server.get('adminPass') or kwargs.get('admin_pass')
            if not wait:
                # This is a direct get call to skip the list_servers
                # cache which has absolutely no chance of containing the
                # new server.
                # Only do this if we're not going to wait for the server
                # to complete booting, because the only reason we do it
                # is to get a server record that is the return value from
                # get/list rather than the return value of create. If we're
                # going to do the wait loop below, this is a waste of a call
                server = self.get_server_by_id(server.id)
                if server.status == 'ERROR':
                    raise exc.OpenStackCloudCreateException(
                        resource='server', resource_id=server.id)

        if wait:
            server = self.wait_for_server(
                server,
                auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
                reuse=reuse_ips, timeout=timeout,
                nat_destination=nat_destination,
            )

        server.adminPass = admin_pass
        return server

    def _get_boot_from_volume_kwargs(
            self, image, boot_from_volume, boot_volume, volume_size,
            terminate_volume, volumes, kwargs):
        """Return block device mappings

        :param image: Image dict, name or id to boot with.

        """
        # TODO(mordred) We're only testing this in functional tests. We need
        # to add unit tests for this too.
        if boot_volume or boot_from_volume or volumes:
            kwargs.setdefault('block_device_mapping_v2', [])
        else:
            return kwargs

        # If we have boot_from_volume but no root volume, then we're
        # booting an image from volume
        if boot_volume:
            volume = self.get_volume(boot_volume)
            if not volume:
                raise exc.OpenStackCloudException(
                    'Volume {boot_volume} is not a valid volume'
                    ' in {cloud}:{region}'.format(
                        boot_volume=boot_volume,
                        cloud=self.name, region=self._compute_region))
            block_mapping = {
                'boot_index': '0',
                'delete_on_termination': terminate_volume,
                'destination_type': 'volume',
                'uuid': volume['id'],
                'source_type': 'volume',
            }
            kwargs['block_device_mapping_v2'].append(block_mapping)
            kwargs['imageRef'] = ''
        elif boot_from_volume:

            if isinstance(image, dict):
                image_obj = image
            else:
                image_obj = self.get_image(image)
            if not image_obj:
                raise exc.OpenStackCloudException(
                    'Image {image} is not a valid image in'
                    ' {cloud}:{region}'.format(
                        image=image,
                        cloud=self.name, region=self._compute_region))

            block_mapping = {
                'boot_index': '0',
                'delete_on_termination': terminate_volume,
                'destination_type': 'volume',
                'uuid': image_obj['id'],
                'source_type': 'image',
                'volume_size': volume_size,
            }
            kwargs['imageRef'] = ''
            kwargs['block_device_mapping_v2'].append(block_mapping)
        if volumes and kwargs['imageRef']:
            # If we're attaching volumes on boot but booting from an image,
            # we need to specify that in the BDM.
            block_mapping = {
                u'boot_index': 0,
                u'delete_on_termination': True,
                u'destination_type': u'local',
                u'source_type': u'image',
                u'uuid': kwargs['imageRef'],
            }
            kwargs['block_device_mapping_v2'].append(block_mapping)
        for volume in volumes:
            volume_obj = self.get_volume(volume)
            if not volume_obj:
                raise exc.OpenStackCloudException(
                    'Volume {volume} is not a valid volume'
                    ' in {cloud}:{region}'.format(
                        volume=volume,
                        cloud=self.name, region=self._compute_region))
            block_mapping = {
                'boot_index': '-1',
                'delete_on_termination': False,
                'destination_type': 'volume',
                'uuid': volume_obj['id'],
                'source_type': 'volume',
            }
            kwargs['block_device_mapping_v2'].append(block_mapping)
        if boot_volume or boot_from_volume or volumes:
            self.list_volumes.invalidate(self)
        return kwargs

    def wait_for_server(
            self, server, auto_ip=True, ips=None, ip_pool=None,
            reuse=True, timeout=180, nat_destination=None):
        """Wait for a server to reach ACTIVE status."""
        # server = self.compute.wait_for_server(
        #     server=server, interval=self._SERVER_AGE or 2, wait=timeout
        # )
        server_id = server['id']
        timeout_message = "Timeout waiting for the server to come up."
        start_time = time.time()

        # There is no point in iterating faster than the list_servers cache
        for count in utils.iterate_timeout(
                timeout,
                timeout_message,
                # if _SERVER_AGE is 0 we still want to wait a bit
                # to be friendly with the server.
                wait=self._SERVER_AGE or 2):
            try:
                # Use the get_server call so that the list_servers
                # cache can be leveraged
                server = self.get_server(server_id)
            except Exception:
                continue
            if not server:
                continue

            # We have more work to do, but the details of that are
            # hidden from the user. So, calculate remaining timeout
            # and pass it down into the IP stack.
            remaining_timeout = timeout - int(time.time() - start_time)
            if remaining_timeout <= 0:
                raise exc.OpenStackCloudTimeout(timeout_message)

            server = self.get_active_server(
                server=server, reuse=reuse,
                auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
                wait=True, timeout=remaining_timeout,
                nat_destination=nat_destination)

            if server is not None and server['status'] == 'ACTIVE':
                return server

    def get_active_server(
            self, server, auto_ip=True, ips=None, ip_pool=None,
            reuse=True, wait=False, timeout=180, nat_destination=None):

        if server['status'] == 'ERROR':
            if 'fault' in server and 'message' in server['fault']:
                raise exc.OpenStackCloudException(
                    "Error in creating the server."
                    " Compute service reports fault: {reason}".format(
                        reason=server['fault']['message']),
                    extra_data=dict(server=server))

            raise exc.OpenStackCloudException(
                "Error in creating the server"
                " (no further information available)",
                extra_data=dict(server=server))

        if server['status'] == 'ACTIVE':
            if 'addresses' in server and server['addresses']:
                return self.add_ips_to_server(
                    server, auto_ip, ips, ip_pool, reuse=reuse,
                    nat_destination=nat_destination,
                    wait=wait, timeout=timeout)

            self.log.debug(
                'Server %(server)s reached ACTIVE state without'
                ' being allocated an IP address.'
                ' Deleting server.', {'server': server['id']})
            try:
                self._delete_server(
                    server=server, wait=wait, timeout=timeout)
            except Exception as e:
                raise exc.OpenStackCloudException(
                    'Server reached ACTIVE state without being'
                    ' allocated an IP address AND then could not'
                    ' be deleted: {0}'.format(e),
                    extra_data=dict(server=server))
            raise exc.OpenStackCloudException(
                'Server reached ACTIVE state without being'
                ' allocated an IP address.',
                extra_data=dict(server=server))
        return None

    def rebuild_server(self, server_id, image_id, admin_pass=None,
                       detailed=False, bare=False,
                       wait=False, timeout=180):
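        """Rebuild a server using a given image.

        :param server_id: ID of the server to rebuild.
        :param image_id: ID of the image to rebuild the server with.
        :param admin_pass: (optional) Admin password to set on the rebuilt
                           server.
        :param detailed: Whether or not to add detailed additional
                         information. Defaults to False.
        :param bare: Whether to skip adding any additional information to
                     the server record. Defaults to False.
        :param wait: If true, waits for the server to become ACTIVE again
                     after the rebuild.
        :param timeout: Seconds to wait for the rebuild. Defaults to 180.

        :returns: A server ``munch.Munch``.

        :raises: OpenStackCloudException on operation error.
        """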
        kwargs = {}
        if image_id:
            kwargs['imageRef'] = image_id
        if admin_pass:
            kwargs['adminPass'] = admin_pass

        data = proxy._json_response(
            self.compute.post(
                '/servers/{server_id}/action'.format(server_id=server_id),
                json={'rebuild': kwargs}),
            error_message="Error in rebuilding instance")
        server = self._get_and_munchify('server', data)
        if not wait:
            return self._expand_server(
                self._normalize_server(server), bare=bare, detailed=detailed)

        admin_pass = server.get('adminPass') or admin_pass
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for server {0} to "
                "rebuild.".format(server_id),
                wait=self._SERVER_AGE):
            try:
                server = self.get_server(server_id, bare=True)
            except Exception:
                continue
            if not server:
                continue

            if server['status'] == 'ERROR':
                raise exc.OpenStackCloudException(
                    "Error in rebuilding the server",
                    extra_data=dict(server=server))

            if server['status'] == 'ACTIVE':
                server.adminPass = admin_pass
                break

        return self._expand_server(server, detailed=detailed, bare=bare)

    def set_server_metadata(self, name_or_id, metadata):
        """Set metadata in a server instance.

        :param str name_or_id: The name or ID of the server instance
            to update.
        :param dict metadata: A dictionary with the key=value pairs
            to set in the server instance. It only updates the key=value
            pairs provided. Existing ones will remain untouched.

        :raises: OpenStackCloudException on operation error.
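
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; names and values
        are placeholders)::

            cloud.set_server_metadata('my-server', {'group': 'clients'})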
        """
        server = self.get_server(name_or_id, bare=True)
        if not server:
            raise exc.OpenStackCloudException(
                'Invalid Server {server}'.format(server=name_or_id))

        self.compute.set_server_metadata(server=server.id, **metadata)

    def delete_server_metadata(self, name_or_id, metadata_keys):
        """Delete metadata from a server instance.

        :param str name_or_id: The name or ID of the server instance
            to update.
        :param metadata_keys: A list with the keys to be deleted
            from the server instance.

        :raises: OpenStackCloudException on operation error.
        """
        server = self.get_server(name_or_id, bare=True)
        if not server:
            raise exc.OpenStackCloudException(
                'Invalid Server {server}'.format(server=name_or_id))

        self.compute.delete_server_metadata(server=server.id,
                                            keys=metadata_keys)

    def delete_server(
            self, name_or_id, wait=False, timeout=180, delete_ips=False,
            delete_ip_retry=1):
        """Delete a server instance.

        :param name_or_id: name or ID of the server to delete
        :param bool wait: If true, waits for server to be deleted.
        :param int timeout: Seconds to wait for server deletion.
        :param bool delete_ips: If true, deletes any floating IPs
            associated with the instance.
        :param int delete_ip_retry: Number of times to retry deleting
            any floating ips, should the first try be unsuccessful.

        :returns: True if delete succeeded, False if the server does
            not exist.

        :raises: OpenStackCloudException on operation error.
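
        Example (illustrative; ``cloud`` is assumed to be a
        ``Connection`` from ``openstack.connect()``; the server name is
        a placeholder)::

            cloud.delete_server('my-server', wait=True, delete_ips=True)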
1201        """
1202        # If delete_ips is True, we need the server to not be bare.
1203        server = self.get_server(name_or_id, bare=True)
1204        if not server:
1205            return False
1206
1207        # This portion of the code is intentionally left as a separate
1208        # private method in order to avoid an unnecessary API call to get
1209        # a server we already have.
1210        return self._delete_server(
1211            server, wait=wait, timeout=timeout, delete_ips=delete_ips,
1212            delete_ip_retry=delete_ip_retry)
1213
1214    def _delete_server_floating_ips(self, server, delete_ip_retry):
1215        # Does the server have floating ips in its
1216        # addresses dict? If not, skip this.
1217        server_floats = meta.find_nova_interfaces(
1218            server['addresses'], ext_tag='floating')
1219        for fip in server_floats:
1220            try:
1221                ip = self.get_floating_ip(id=None, filters={
1222                    'floating_ip_address': fip['addr']})
1223            except exc.OpenStackCloudURINotFound:
1224                # We're deleting. If it doesn't exist - awesome
1225                # NOTE(mordred) If the cloud is a nova FIP cloud but
1226                #               floating_ip_source is set to neutron, this
1227                #               can lead to a FIP leak.
1228                continue
1229            if not ip:
1230                continue
1231            deleted = self.delete_floating_ip(
1232                ip['id'], retry=delete_ip_retry)
1233            if not deleted:
1234                raise exc.OpenStackCloudException(
1235                    "Tried to delete floating ip {floating_ip}"
1236                    " associated with server {id} but there was"
1237                    " an error deleting it. Not deleting server.".format(
1238                        floating_ip=ip['floating_ip_address'],
1239                        id=server['id']))
1240
    def _delete_server(
            self, server, wait=False, timeout=180, delete_ips=False,
            delete_ip_retry=1):
        if not server:
            return False

        if delete_ips and self._has_floating_ips():
            self._delete_server_floating_ips(server, delete_ip_retry)

        try:
            proxy._json_response(
                self.compute.delete(
                    '/servers/{id}'.format(id=server['id'])),
                error_message="Error in deleting server")
        except exc.OpenStackCloudURINotFound:
            return False

        if not wait:
            return True

        # If the server has volume attachments, or if it has booted
        # from volume, deleting it will change volume state so we will
        # need to invalidate the cache. Avoid the extra API call if
        # caching is not enabled.
        reset_volume_cache = False
        if (self.cache_enabled
                and self.has_service('volume')
                and self.get_volumes(server)):
            reset_volume_cache = True

        for count in utils.iterate_timeout(
                timeout,
                "Timed out waiting for server to get deleted.",
                # if _SERVER_AGE is 0 we still want to wait a bit
                # to be friendly with the server.
                wait=self._SERVER_AGE or 2):
            with _utils.shade_exceptions("Error in deleting server"):
                server = self.get_server(server['id'], bare=True)
                if not server:
                    break

        if reset_volume_cache:
            self.list_volumes.invalidate(self)

        # Reset the list servers cache time so that the next list server
        # call gets a new list
        self._servers_time = self._servers_time - self._SERVER_AGE
        return True

    @_utils.valid_kwargs(
        'name', 'description')
    def update_server(self, name_or_id, detailed=False, bare=False, **kwargs):
        """Update a server.

        :param name_or_id: Name or ID of the server to be updated.
        :param detailed: Whether or not to add detailed additional
                         information. Defaults to False.
        :param bare: Whether to skip adding any additional information to the
                     server record. Defaults to False, meaning the addresses
                     dict will be populated as needed from neutron. Setting
                     to True implies detailed = False.
        :param name: New name for the server
        :param description: New description for the server

        :returns: a dictionary representing the updated server.

        :raises: OpenStackCloudException on operation error.
        """
        server = self.get_server(name_or_id=name_or_id, bare=True)
        if server is None:
            raise exc.OpenStackCloudException(
                "failed to find server '{server}'".format(server=name_or_id))

        data = proxy._json_response(
            self.compute.put(
                '/servers/{server_id}'.format(server_id=server['id']),
                json={'server': kwargs}),
            error_message="Error updating server {0}".format(name_or_id))
        server = self._normalize_server(
            self._get_and_munchify('server', data))
        return self._expand_server(server, bare=bare, detailed=detailed)

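    # Illustrative usage of ``update_server`` (sketch; names are
    # placeholders, reusing the ``cloud`` connection from the
    # ``delete_server`` example above). Only the fields accepted by
    # ``valid_kwargs`` -- ``name`` and ``description`` -- can be updated.
    #
    #     server = cloud.update_server(
    #         'old-name', name='new-name', description='renamed by tooling')
    #     print(server['id'], server['name'])
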
    def create_server_group(self, name, policies=None, policy=None):
        """Create a new server group.

        :param name: Name of the server group being created
        :param policies: List of policies for the server group.
        :param policy: A single policy for the server group. Newer compute
            API microversions expect this in place of ``policies``.

        :returns: a dict representing the new server group.

        :raises: OpenStackCloudException on operation error.
        """
        sg_attrs = {
            'name': name
        }
        if policies:
            sg_attrs['policies'] = policies
        if policy:
            sg_attrs['policy'] = policy
        return self.compute.create_server_group(
            **sg_attrs
        )

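    # Illustrative usage of ``create_server_group`` (sketch). Whether to pass
    # ``policies`` or the single ``policy`` argument depends on the compute
    # API microversion the cloud exposes; both forms are shown.
    #
    #     group = cloud.create_server_group(
    #         'ha-group', policies=['anti-affinity'])
    #     # or, on newer microversions:
    #     group = cloud.create_server_group('ha-group', policy='anti-affinity')
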
    def delete_server_group(self, name_or_id):
        """Delete a server group.

        :param name_or_id: Name or ID of the server group to delete

        :returns: True if delete succeeded, False otherwise

        :raises: OpenStackCloudException on operation error.
        """
        server_group = self.get_server_group(name_or_id)
        if not server_group:
            self.log.debug("Server group %s not found for deleting",
                           name_or_id)
            return False

        self.compute.delete_server_group(server_group, ignore_missing=False)
        return True

    def create_flavor(self, name, ram, vcpus, disk, flavorid="auto",
                      ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
        """Create a new flavor.

        :param name: Descriptive name of the flavor
        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional)
        :param ephemeral: Ephemeral space size in GB
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public

        :returns: A ``munch.Munch`` describing the new flavor.

        :raises: OpenStackCloudException on operation error.
        """
        attrs = {
            'disk': disk,
            'ephemeral': ephemeral,
            'id': flavorid,
            'is_public': is_public,
            'name': name,
            'ram': ram,
            'rxtx_factor': rxtx_factor,
            'swap': swap,
            'vcpus': vcpus,
        }
        if flavorid == 'auto':
            attrs['id'] = None

        flavor = self.compute.create_flavor(**attrs)

        return flavor._to_munch(original_names=False)

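    # Illustrative usage of ``create_flavor`` (sketch; the values are
    # examples only). With the default ``flavorid='auto'`` the compute
    # service picks an ID for the new flavor.
    #
    #     flavor = cloud.create_flavor(
    #         name='m1.example', ram=2048, vcpus=2, disk=20, is_public=False)
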
    def delete_flavor(self, name_or_id):
        """Delete a flavor

        :param name_or_id: ID or name of the flavor to delete.

        :returns: True if delete succeeded, False otherwise.

        :raises: OpenStackCloudException on operation error.
        """
        try:
            flavor = self.compute.find_flavor(name_or_id)
            if not flavor:
                self.log.debug(
                    "Flavor %s not found for deleting", name_or_id)
                return False
            self.compute.delete_flavor(flavor)
            return True
        except exceptions.SDKException:
            raise exc.OpenStackCloudException(
                "Unable to delete flavor {name}".format(name=name_or_id))

    def set_flavor_specs(self, flavor_id, extra_specs):
        """Add extra specs to a flavor

        :param string flavor_id: ID of the flavor to update.
        :param dict extra_specs: Dictionary of key-value pairs.

        :raises: OpenStackCloudException on operation error.
        :raises: OpenStackCloudResourceNotFound if flavor ID is not found.
        """
        self.compute.create_flavor_extra_specs(flavor_id, extra_specs)

    def unset_flavor_specs(self, flavor_id, keys):
        """Delete extra specs from a flavor

        :param string flavor_id: ID of the flavor to update.
        :param keys: List of spec keys to delete.

        :raises: OpenStackCloudException on operation error.
        :raises: OpenStackCloudResourceNotFound if flavor ID is not found.
        """
        for key in keys:
            self.compute.delete_flavor_extra_specs_property(flavor_id, key)

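    # Illustrative usage of ``set_flavor_specs`` and ``unset_flavor_specs``
    # (sketch). The extra-spec key shown is one common scheduler hint, not a
    # fixed list, and ``flavor`` is assumed to come from ``create_flavor``.
    #
    #     cloud.set_flavor_specs(flavor['id'], {'hw:cpu_policy': 'dedicated'})
    #     cloud.unset_flavor_specs(flavor['id'], ['hw:cpu_policy'])
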
    def add_flavor_access(self, flavor_id, project_id):
        """Grant access to a private flavor for a project/tenant.

        :param string flavor_id: ID of the private flavor.
        :param string project_id: ID of the project/tenant.

        :raises: OpenStackCloudException on operation error.
        """
        self.compute.flavor_add_tenant_access(flavor_id, project_id)

    def remove_flavor_access(self, flavor_id, project_id):
        """Revoke access from a private flavor for a project/tenant.

        :param string flavor_id: ID of the private flavor.
        :param string project_id: ID of the project/tenant.

        :raises: OpenStackCloudException on operation error.
        """
        self.compute.flavor_remove_tenant_access(flavor_id, project_id)

    def list_flavor_access(self, flavor_id):
        """List access from a private flavor for a project/tenant.

        :param string flavor_id: ID of the private flavor.

        :returns: a list of ``munch.Munch`` containing the access description

        :raises: OpenStackCloudException on operation error.
        """
        access = self.compute.get_flavor_access(flavor_id)
        return _utils.normalize_flavor_accesses(access)

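    # Illustrative flow for private-flavor access management (sketch; the
    # flavor and project objects are placeholders).
    #
    #     cloud.add_flavor_access(flavor['id'], project['id'])
    #     for access in cloud.list_flavor_access(flavor['id']):
    #         print(access)
    #     cloud.remove_flavor_access(flavor['id'], project['id'])
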
    def list_hypervisors(self, filters=None):
        """List all hypervisors

        :param filters: Optional dict of additional query parameters passed
            through to the compute service.

        :returns: A list of hypervisor ``munch.Munch``.
        """
        if not filters:
            filters = {}

        return list(self.compute.hypervisors(
            details=True,
            allow_unknown_params=True,
            **filters))

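    # Illustrative usage of ``list_hypervisors`` (sketch). Any filter keys
    # are passed straight through to the compute service.
    #
    #     for hv in cloud.list_hypervisors():
    #         print(hv['hypervisor_hostname'], hv['state'])
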
    def search_aggregates(self, name_or_id=None, filters=None):
        """Search host aggregates.

        :param name_or_id: aggregate name or id.
        :param filters: a dict containing additional filters to use.

        :returns: a list of dicts containing the aggregates

        :raises: ``OpenStackCloudException``: if something goes wrong during
            the OpenStack API call.
        """
        aggregates = self.list_aggregates()
        return _utils._filter_list(aggregates, name_or_id, filters)

    def list_aggregates(self, filters=None):
        """List all available host aggregates.

        :param filters: Optional dict of additional query parameters passed
            through to the compute service.

        :returns: A list of aggregate dicts.
        """
        if not filters:
            filters = {}
        return list(
            self.compute.aggregates(allow_unknown_params=True, **filters))

    def get_aggregate(self, name_or_id, filters=None):
        """Get an aggregate by name or ID.

        :param name_or_id: Name or ID of the aggregate.
        :param dict filters:
            A dictionary of meta data to use for further filtering. Elements
            of this dictionary may, themselves, be dictionaries. Example::

                {
                  'availability_zone': 'nova',
                  'metadata': {
                      'cpu_allocation_ratio': '1.0'
                  }
                }

            Note that this implementation matches on name or ID only; the
            ``filters`` argument is accepted for interface compatibility but
            is not currently applied.

        :returns: An aggregate dict or None if no matching aggregate is
                  found.
        """
        aggregate = self.compute.find_aggregate(
            name_or_id, ignore_missing=True)
        if aggregate:
            return aggregate._to_munch()

    def create_aggregate(self, name, availability_zone=None):
        """Create a new host aggregate.

        :param name: Name of the host aggregate being created
        :param availability_zone: Availability zone to assign hosts

        :returns: a dict representing the new host aggregate.

        :raises: OpenStackCloudException on operation error.
        """
        return self.compute.create_aggregate(
            name=name,
            availability_zone=availability_zone
        )

    @_utils.valid_kwargs('name', 'availability_zone')
    def update_aggregate(self, name_or_id, **kwargs):
        """Update a host aggregate.

        :param name_or_id: Name or ID of the aggregate being updated.
        :param name: New aggregate name
        :param availability_zone: Availability zone to assign to hosts

        :returns: a dict representing the updated host aggregate.

        :raises: OpenStackCloudException on operation error.
        """
        aggregate = self.get_aggregate(name_or_id)
        if not aggregate:
            raise exc.OpenStackCloudException(
                "Host aggregate %s not found." % name_or_id)

        return self.compute.update_aggregate(aggregate, **kwargs)

    def delete_aggregate(self, name_or_id):
        """Delete a host aggregate.

        :param name_or_id: Name or ID of the host aggregate to delete.

        :returns: True if delete succeeded, False otherwise.

        :raises: OpenStackCloudException on operation error.
        """
        if (
            isinstance(name_or_id, (str, bytes))
            and not name_or_id.isdigit()
        ):
            aggregate = self.get_aggregate(name_or_id)
            if not aggregate:
                self.log.debug(
                    "Aggregate %s not found for deleting", name_or_id)
                return False
            name_or_id = aggregate.id
        try:
            self.compute.delete_aggregate(name_or_id, ignore_missing=False)
            return True
        except exceptions.ResourceNotFound:
            self.log.debug("Aggregate %s not found for deleting", name_or_id)
            return False

    def set_aggregate_metadata(self, name_or_id, metadata):
        """Set aggregate metadata, replacing the existing metadata.

        :param name_or_id: Name of the host aggregate to update
        :param metadata: Dict containing metadata to replace (Use
                {'key': None} to remove a key)

        :returns: a dict representing the new host aggregate.

        :raises: OpenStackCloudException on operation error.
        """
        aggregate = self.get_aggregate(name_or_id)
        if not aggregate:
            raise exc.OpenStackCloudException(
                "Host aggregate %s not found." % name_or_id)

        return self.compute.set_aggregate_metadata(aggregate, metadata)

    def add_host_to_aggregate(self, name_or_id, host_name):
        """Add a host to an aggregate.

        :param name_or_id: Name or ID of the host aggregate.
        :param host_name: Host to add.

        :raises: OpenStackCloudException on operation error.
        """
        aggregate = self.get_aggregate(name_or_id)
        if not aggregate:
            raise exc.OpenStackCloudException(
                "Host aggregate %s not found." % name_or_id)

        return self.compute.add_host_to_aggregate(aggregate, host_name)

    def remove_host_from_aggregate(self, name_or_id, host_name):
        """Remove a host from an aggregate.

        :param name_or_id: Name or ID of the host aggregate.
        :param host_name: Host to remove.

        :raises: OpenStackCloudException on operation error.
        """
        aggregate = self.get_aggregate(name_or_id)
        if not aggregate:
            raise exc.OpenStackCloudException(
                "Host aggregate %s not found." % name_or_id)

        return self.compute.remove_host_from_aggregate(aggregate, host_name)

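    # Illustrative host-aggregate workflow using the methods above (sketch;
    # the aggregate name, zone and host name are placeholders).
    #
    #     agg = cloud.create_aggregate('rack1', availability_zone='zone-a')
    #     cloud.set_aggregate_metadata('rack1', {'pinned': 'true'})
    #     cloud.add_host_to_aggregate('rack1', 'compute-01')
    #     ...
    #     cloud.remove_host_from_aggregate('rack1', 'compute-01')
    #     cloud.delete_aggregate('rack1')
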
    def set_compute_quotas(self, name_or_id, **kwargs):
        """Set a quota in a project

        :param name_or_id: project name or id
        :param kwargs: key/value pairs of quota name and quota value

        :raises: OpenStackCloudException if the resource to set the
            quota does not exist.
        """

        proj = self.get_project(name_or_id)
        if not proj:
            raise exc.OpenStackCloudException("project does not exist")

        # compute_quotas = {key: val for key, val in kwargs.items()
        #                  if key in quota.COMPUTE_QUOTAS}
        # TODO(ghe): Manage volume and network quotas
        # network_quotas = {key: val for key, val in kwargs.items()
        #                  if key in quota.NETWORK_QUOTAS}
        # volume_quotas = {key: val for key, val in kwargs.items()
        #                 if key in quota.VOLUME_QUOTAS}

        kwargs['force'] = True
        proxy._json_response(
            self.compute.put(
                '/os-quota-sets/{project}'.format(project=proj.id),
                json={'quota_set': kwargs}),
            error_message="No valid quota or resource")

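    # Illustrative usage of ``set_compute_quotas`` and ``get_compute_quotas``
    # (sketch). The keyword names map onto nova quota-set fields; the ones
    # shown are examples, and the project name is a placeholder.
    #
    #     cloud.set_compute_quotas('demo-project', instances=20, cores=40)
    #     print(cloud.get_compute_quotas('demo-project'))
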
    def get_compute_quotas(self, name_or_id):
        """Get quota for a project

        :param name_or_id: project name or id
        :raises: OpenStackCloudException if it's not a valid project

        :returns: Munch object with the quotas
        """
        proj = self.get_project(name_or_id)
        if not proj:
            raise exc.OpenStackCloudException("project does not exist")
        data = proxy._json_response(
            self.compute.get(
                '/os-quota-sets/{project}'.format(project=proj.id)))
        return self._get_and_munchify('quota_set', data)

    def delete_compute_quotas(self, name_or_id):
        """Delete quota for a project

        :param name_or_id: project name or id
        :raises: OpenStackCloudException if it's not a valid project or the
                 nova client call failed

        :returns: dict with the quotas
        """
        proj = self.get_project(name_or_id)
        if not proj:
            raise exc.OpenStackCloudException("project does not exist")
        return proxy._json_response(
            self.compute.delete(
                '/os-quota-sets/{project}'.format(project=proj.id)))

    def get_compute_usage(self, name_or_id, start=None, end=None):
        """Get usage for a specific project

        :param name_or_id: project name or id
        :param start: :class:`datetime.datetime` or string. Start date in
                      UTC. Defaults to 2010-07-06T12:00:00Z (the date the
                      OpenStack project was started).
        :param end: :class:`datetime.datetime` or string. End date in UTC.
                    Defaults to now.
        :raises: OpenStackCloudException if it's not a valid project

        :returns: Munch object with the usage
        """

        def parse_date(date):
            try:
                return iso8601.parse_date(date)
            except iso8601.iso8601.ParseError:
                # Yes. This is an exception mask. However, iso8601 is an
                # implementation detail - and the error message is actually
                # less informative.
                raise exc.OpenStackCloudException(
                    "Date given, {date}, is invalid. Please pass in a date"
                    " string in ISO 8601 format -"
                    " YYYY-MM-DDTHH:MM:SS".format(
                        date=date))

        def parse_datetime_for_nova(date):
            # Must strip tzinfo from the date - it breaks Nova. Also,
            # Nova is expecting this in UTC. If someone passes in an
            # ISO 8601 date string or a datetime with timezone data attached,
            # strip the timezone data but apply the offset math first so that
            # the user's well-formed, perfectly valid date will be used
            # correctly.
            offset = date.utcoffset()
            if offset:
                date = date - offset
            return date.replace(tzinfo=None)

        if not start:
            start = parse_date('2010-07-06')
        elif not isinstance(start, datetime.datetime):
            start = parse_date(start)
        if not end:
            end = datetime.datetime.utcnow()
        elif not isinstance(end, datetime.datetime):
            end = parse_date(end)

        start = parse_datetime_for_nova(start)
        end = parse_datetime_for_nova(end)

        proj = self.get_project(name_or_id)
        if not proj:
            raise exc.OpenStackCloudException(
                "project does not exist: {name}".format(name=name_or_id))

        data = proxy._json_response(
            self.compute.get(
                '/os-simple-tenant-usage/{project}'.format(project=proj.id),
                params=dict(start=start.isoformat(), end=end.isoformat())),
            error_message="Unable to get usage for project: {name}".format(
                name=proj.id))
        return self._normalize_compute_usage(
            self._get_and_munchify('tenant_usage', data))

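    # Illustrative usage of ``get_compute_usage`` (sketch; project name and
    # dates are placeholders). Dates may be ISO 8601 strings or ``datetime``
    # objects; timezone offsets are folded into UTC before the request.
    #
    #     usage = cloud.get_compute_usage(
    #         'demo-project', start='2023-01-01', end='2023-02-01')
    #     print(usage['total_hours'])
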
    def _encode_server_userdata(self, userdata):
        # Accept file-like objects as well as raw strings/bytes.
        if hasattr(userdata, 'read'):
            userdata = userdata.read()

        # Bytes are used as-is; anything else must be a string that we can
        # encode to bytes before base64-encoding it.
        if not isinstance(userdata, bytes):
            if not isinstance(userdata, str):
                raise TypeError("%s can't be encoded" % type(userdata))
            userdata = userdata.encode('utf-8', 'strict')

        # base64-encode the bytes and return them as a utf-8 string for
        # the REST API.
        return base64.b64encode(userdata).decode('utf-8')

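    # Sketch of what the internal helper ``_encode_server_userdata``
    # produces (illustrative only): a cloud-init style script becomes a
    # base64 string suitable for a server create request's user data.
    #
    #     encoded = cloud._encode_server_userdata('#!/bin/sh\necho hello\n')
    #     # encoded == 'IyEvYmluL3NoCmVjaG8gaGVsbG8K'
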
    def get_openstack_vars(self, server):
        return meta.get_hostvars_from_server(self, server)

    def _expand_server_vars(self, server):
        # Used by nodepool
        # TODO(mordred) remove after these make it into what we
        # actually want the API to be.
        return meta.expand_server_vars(self, server)