1# Copyright 2012 United States Government as represented by the
2# Administrator of the National Aeronautics and Space Administration.
3# All Rights Reserved.
4#
5# Copyright 2012 OpenStack Foundation
6# Copyright 2012 Nebula, Inc.
7# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
8#
9#    Licensed under the Apache License, Version 2.0 (the "License"); you may
10#    not use this file except in compliance with the License. You may obtain
11#    a copy of the License at
12#
13#         http://www.apache.org/licenses/LICENSE-2.0
14#
15#    Unless required by applicable law or agreed to in writing, software
16#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
17#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
18#    License for the specific language governing permissions and limitations
19#    under the License.
20
21import collections
22import logging
23from operator import attrgetter
24
25from django.utils.translation import ugettext_lazy as _
26
27from novaclient import api_versions
28from novaclient import exceptions as nova_exceptions
29from novaclient.v2 import instance_action as nova_instance_action
30from novaclient.v2 import servers as nova_servers
31
32from horizon import exceptions as horizon_exceptions
33from horizon.utils import memoized
34
35from openstack_dashboard.api import _nova
36from openstack_dashboard.api import base
37from openstack_dashboard.api import cinder
38from openstack_dashboard.contrib.developer.profiler import api as profiler
39from openstack_dashboard.utils import settings as utils
40
41LOG = logging.getLogger(__name__)
42
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'

# Re-export helpers from the internal _nova module so existing callers can
# keep importing them from this module.
get_microversion = _nova.get_microversion
server_get = _nova.server_get
Server = _nova.Server
51
52
def is_feature_available(request, features):
    """Return True when a microversion supporting ``features`` is found."""
    microversion = get_microversion(request, features)
    return bool(microversion)
55
56
class VolumeMultiattachNotSupported(horizon_exceptions.HorizonException):
    """Raised when attaching a multiattach volume is not supported.

    See instance_volume_attach(); surfaced to the client as HTTP 400.
    """
    status_code = 400
59
60
class VNCConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.

    Returned by the novaclient.servers.get_vnc_console method.
    """
    # Only the console URL and its type are exposed.
    _attrs = ['url', 'type']
67
68
class SPICEConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.

    Returned by the novaclient.servers.get_spice_console method.
    """
    # Only the console URL and its type are exposed.
    _attrs = ['url', 'type']
75
76
class RDPConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.

    Returned by the novaclient.servers.get_rdp_console method.
    """
    # Only the console URL and its type are exposed.
    _attrs = ['url', 'type']
83
84
class SerialConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.

    Returned by the novaclient.servers.get_serial_console method.
    """
    # Only the console URL and its type are exposed.
    _attrs = ['url', 'type']
91
92
class MKSConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.

    Returned by the novaclient.servers.get_mks_console method.
    """
    # Only the console URL and its type are exposed.
    _attrs = ['url', 'type']
99
100
class Hypervisor(base.APIDictWrapper):
    """Simple wrapper around novaclient.hypervisors.Hypervisor."""

    _attrs = ['manager', '_loaded', '_info', 'hypervisor_hostname', 'id',
              'servers']

    @property
    def servers(self):
        """Servers reported for this hypervisor; [] when none are present."""
        # The underlying dict only carries a 'servers' entry when the
        # hypervisor actually hosts instances, so default to an empty list.
        try:
            return self._apidict.servers
        except Exception:
            return []
117
118
class NovaUsage(base.APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py."""

    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']

    def _active(self):
        """Yield usage records of servers that have not ended yet."""
        return (s for s in self.server_usages if s['ended_at'] is None)

    def get_summary(self):
        """Return a dict summarizing current counts and cumulative hours."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': self.vcpus,
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
            'memory_mb_hours': self.memory_mb_hours,
        }

    @property
    def total_active_instances(self):
        return sum(1 for _ in self._active())

    @property
    def vcpus(self):
        return sum(s['vcpus'] for s in self._active())

    @property
    def vcpu_hours(self):
        # Missing totals default to 0.
        return getattr(self, "total_vcpus_usage", 0)

    @property
    def local_gb(self):
        return sum(s['local_gb'] for s in self._active())

    @property
    def memory_mb(self):
        return sum(s['memory_mb'] for s in self._active())

    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)

    @property
    def memory_mb_hours(self):
        return getattr(self, "total_memory_mb_usage", 0)
165
166
class FlavorExtraSpec(object):
    """A single extra-spec (key/value) entry attached to a flavor."""

    def __init__(self, flavor_id, key, val):
        self.flavor_id = flavor_id
        # The key doubles as the row identifier in dashboard tables.
        self.id = key
        self.key = key
        self.value = val
173
174
class QuotaSet(base.QuotaSet):

    # We don't support nova-network, so we exclude nova-network related
    # quota fields from the response.
    ignore_quotas = {
        "floating_ips",
        "fixed_ips",
        "security_groups",
        "security_group_rules",
    }
185
186
def upgrade_api(request, client, version):
    """Upgrade the nova API client to the specified version if possible.

    Returns the original client unchanged when ``version`` falls outside
    the server's supported microversion range.
    """
    requested = api_versions.APIVersion(version)
    min_ver, max_ver = api_versions._get_server_version_range(client)
    if min_ver <= requested <= max_ver:
        client = _nova.novaclient(request, version)
    return client
194
195
@profiler.trace
def server_vnc_console(request, instance_id, console_type='novnc'):
    """Fetch a VNC console for the instance and wrap it."""
    client = _nova.novaclient(request)
    data = client.servers.get_vnc_console(instance_id, console_type)
    return VNCConsole(data['console'])
201
202
@profiler.trace
def server_spice_console(request, instance_id, console_type='spice-html5'):
    """Fetch a SPICE console for the instance and wrap it."""
    client = _nova.novaclient(request)
    data = client.servers.get_spice_console(instance_id, console_type)
    return SPICEConsole(data['console'])
208
209
@profiler.trace
def server_rdp_console(request, instance_id, console_type='rdp-html5'):
    """Fetch an RDP console for the instance and wrap it."""
    client = _nova.novaclient(request)
    data = client.servers.get_rdp_console(instance_id, console_type)
    return RDPConsole(data['console'])
215
216
@profiler.trace
def server_serial_console(request, instance_id, console_type='serial'):
    """Fetch a serial console for the instance and wrap it."""
    client = _nova.novaclient(request)
    data = client.servers.get_serial_console(instance_id, console_type)
    return SerialConsole(data['console'])
222
223
@profiler.trace
def server_mks_console(request, instance_id, console_type='mks'):
    """Fetch an MKS console; requires the remote_console_mks microversion."""
    microver = get_microversion(request, "remote_console_mks")
    client = _nova.novaclient(request, microver)
    data = client.servers.get_mks_console(instance_id, console_type)
    # Note: MKS responses use 'remote_console', not 'console'.
    return MKSConsole(data['remote_console'])
230
231
@profiler.trace
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
                  ephemeral=0, swap=0, metadata=None, is_public=True,
                  rxtx_factor=1):
    """Create a flavor, optionally attaching extra-spec metadata."""
    client = _nova.novaclient(request)
    flavor = client.flavors.create(
        name, memory, vcpu, disk, flavorid=flavorid, ephemeral=ephemeral,
        swap=swap, is_public=is_public, rxtx_factor=rxtx_factor)
    if metadata:
        flavor_extra_set(request, flavor.id, metadata)
    return flavor
245
246
@profiler.trace
def flavor_delete(request, flavor_id):
    """Delete the flavor identified by ``flavor_id``."""
    client = _nova.novaclient(request)
    client.flavors.delete(flavor_id)
250
251
@profiler.trace
def flavor_get(request, flavor_id, get_extras=False):
    """Fetch a single flavor, optionally populating its extra specs."""
    client = _nova.novaclient(request)
    flavor = client.flavors.get(flavor_id)
    if get_extras:
        flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
    return flavor
258
259
@profiler.trace
@memoized.memoized
def flavor_list(request, is_public=True, get_extras=False):
    """Get the list of available instance sizes (flavors)."""
    client = _nova.novaclient(request)
    flavors = client.flavors.list(is_public=is_public)
    if not get_extras:
        return flavors
    for flavor in flavors:
        flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
    return flavors
269
270
@profiler.trace
def update_pagination(entities, page_size, marker, reversed_order=False):
    """Trim a page-size+1 listing and compute next/prev page flags.

    Returns (entities, has_more_data, has_prev_data). ``entities`` is
    mutated in place: the sentinel extra row is popped and, when the
    listing was fetched in reverse, the original order is restored.
    """
    has_more_data = False
    has_prev_data = False
    if len(entities) > page_size:
        # The extra sentinel row was returned, so a next page exists.
        entities.pop()
        has_more_data = True
        has_prev_data = marker is not None
    elif reversed_order and marker is not None:
        # First page, reached by paging backwards.
        has_more_data = True
    elif marker is not None:
        # Last page.
        has_prev_data = True

    if reversed_order:
        # Restore the original ordering here.
        entities.reverse()

    return entities, has_more_data, has_prev_data
291
292
@profiler.trace
@memoized.memoized
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    """Get the list of available instance sizes (flavors)."""
    has_more_data = False
    has_prev_data = False
    client = _nova.novaclient(request)

    if not paginate:
        flavors = client.flavors.list(is_public=is_public)
    else:
        if reversed_order:
            # Walk the listing in the opposite direction for "prev" pages.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        # Fetch one extra record so update_pagination can detect more pages.
        flavors = client.flavors.list(is_public=is_public,
                                      marker=marker,
                                      limit=page_size + 1,
                                      sort_key=sort_key,
                                      sort_dir=sort_dir)
        flavors, has_more_data, has_prev_data = update_pagination(
            flavors, page_size, marker, reversed_order)

    if get_extras:
        for flavor in flavors:
            flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)

    return (flavors, has_more_data, has_prev_data)
321
322
@profiler.trace
@memoized.memoized
def flavor_access_list(request, flavor=None):
    """Get the list of access instance sizes (flavors)."""
    client = _nova.novaclient(request)
    return client.flavor_access.list(flavor=flavor)
328
329
@profiler.trace
def add_tenant_to_flavor(request, flavor, tenant):
    """Add a tenant to the given flavor access list."""
    client = _nova.novaclient(request)
    return client.flavor_access.add_tenant_access(flavor=flavor,
                                                  tenant=tenant)
335
336
@profiler.trace
def remove_tenant_from_flavor(request, flavor, tenant):
    """Remove a tenant from the given flavor access list."""
    client = _nova.novaclient(request)
    return client.flavor_access.remove_tenant_access(flavor=flavor,
                                                     tenant=tenant)
342
343
@profiler.trace
def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
    """Get flavor extra specs, raw or wrapped as FlavorExtraSpec objects."""
    if flavor is None:
        flavor = _nova.novaclient(request).flavors.get(flavor_id)
    extras = flavor.get_keys()
    if raw:
        return extras
    return [FlavorExtraSpec(flavor_id, k, v) for k, v in extras.items()]
354
355
@profiler.trace
def flavor_extra_delete(request, flavor_id, keys):
    """Unset the flavor extra spec keys."""
    client = _nova.novaclient(request)
    flavor = client.flavors.get(flavor_id)
    return flavor.unset_keys(keys)
361
362
@profiler.trace
def flavor_extra_set(request, flavor_id, metadata):
    """Set the flavor extra spec keys.

    Returns None without touching the API when ``metadata`` is empty,
    since there is no way to delete keys through this call anyway.
    """
    # Check before fetching the flavor to avoid a useless API round-trip.
    if not metadata:
        return None
    flavor = _nova.novaclient(request).flavors.get(flavor_id)
    return flavor.set_keys(metadata)
370
371
@profiler.trace
def snapshot_create(request, instance_id, name):
    """Create a snapshot image of the given instance."""
    client = _nova.novaclient(request)
    return client.servers.create_image(instance_id, name)
375
376
@profiler.trace
def keypair_create(request, name, key_type='ssh'):
    """Create a new keypair; key types need the key_types microversion."""
    microversion = get_microversion(request, 'key_types')
    client = _nova.novaclient(request, microversion)
    return client.keypairs.create(name, key_type=key_type)
382
383
@profiler.trace
def keypair_import(request, name, public_key, key_type='ssh'):
    """Import an existing public key as a new keypair."""
    microversion = get_microversion(request, 'key_types')
    client = _nova.novaclient(request, microversion)
    return client.keypairs.create(name, public_key, key_type)
389
390
@profiler.trace
def keypair_delete(request, name):
    """Delete the keypair with the given name."""
    client = _nova.novaclient(request)
    client.keypairs.delete(name)
394
395
@profiler.trace
def keypair_list(request):
    """List keypairs, including key types when the microversion allows."""
    microversion = get_microversion(request, 'key_type_list')
    client = _nova.novaclient(request, microversion)
    return client.keypairs.list()
400
401
@profiler.trace
def keypair_get(request, name):
    """Fetch a single keypair by name."""
    client = _nova.novaclient(request)
    return client.keypairs.get(name)
405
406
@profiler.trace
def server_create(request, name, image, flavor, key_name, user_data,
                  security_groups, block_device_mapping=None,
                  block_device_mapping_v2=None, nics=None,
                  availability_zone=None, instance_count=1, admin_pass=None,
                  disk_config=None, config_drive=None, meta=None,
                  scheduler_hints=None, description=None):
    """Launch one or more servers and return the first as a Server wrapper.

    ``instance_count`` is passed to nova as ``min_count``. ``description``
    is only forwarded when not None (it requires the instance_description
    microversion negotiated below).
    """
    microversion = get_microversion(request, ("instance_description",
                                              "auto_allocated_network"))
    nova_client = _nova.novaclient(request, version=microversion)

    # NOTE(amotoki): Handling auto allocated network
    # Nova API 2.37 or later, it accepts a special string 'auto' for nics
    # which means nova uses a network that is available for a current project
    # if one exists and otherwise it creates a network automatically.
    # This special handling is processed here as JS side assumes 'nics'
    # is a list and it is easiest to handle it here.
    if nics:
        is_auto_allocate = any(nic.get('net-id') == '__auto_allocate__'
                               for nic in nics)
        if is_auto_allocate:
            nics = 'auto'

    # Only send 'description' when provided, so older APIs are not broken.
    kwargs = {}
    if description is not None:
        kwargs['description'] = description

    return Server(nova_client.servers.create(
        name.strip(), image, flavor, userdata=user_data,
        security_groups=security_groups,
        key_name=key_name, block_device_mapping=block_device_mapping,
        block_device_mapping_v2=block_device_mapping_v2,
        nics=nics, availability_zone=availability_zone,
        min_count=instance_count, admin_pass=admin_pass,
        disk_config=disk_config, config_drive=config_drive,
        meta=meta, scheduler_hints=scheduler_hints, **kwargs), request)
443
444
@profiler.trace
def server_delete(request, instance_id):
    """Delete a server and record its id in the session.

    The session is available and consistent for the current view among
    Horizon django servers even in a load-balancing setup, so only the
    view listing the servers will recognize it as its own DeleteInstance
    action. Note that dict is passed by reference in python. Quote from
    django's developer manual: "You can read it and write to
    request.session at any point in your view. You can edit it multiple
    times."
    """
    _nova.novaclient(request).servers.delete(instance_id)
    request.session['server_deleted'] = instance_id
456
457
def get_novaclient_with_locked_status(request):
    """Return a nova client that exposes the 'locked' server attribute."""
    microversion = get_microversion(request, "locked_attribute")
    return _nova.novaclient(request, version=microversion)
461
462
@profiler.trace
def server_list_paged(request,
                      search_opts=None,
                      detailed=True,
                      sort_dir="desc"):
    """List servers with optional marker-based pagination.

    Returns a (servers, has_more_data, has_prev_data) tuple. Pagination is
    only applied when search_opts contains a truthy 'paginate' key.
    """
    has_more_data = False
    has_prev_data = False
    nova_client = get_novaclient_with_locked_status(request)
    page_size = utils.get_page_size(request)
    search_opts = {} if search_opts is None else search_opts
    marker = search_opts.get('marker', None)

    # Non-admin listings are scoped to the user's own project.
    if not search_opts.get('all_tenants', False):
        search_opts['project_id'] = request.user.tenant_id

    if search_opts.pop('paginate', False):
        reversed_order = sort_dir == "asc"
        LOG.debug("Notify received on deleted server: %r",
                  ('server_deleted' in request.session))
        # A server deleted via this session may have been the page marker;
        # remember it so an empty listing below can be retried from an edge.
        deleted = request.session.pop('server_deleted',
                                      None)
        view_marker = 'possibly_deleted' if deleted and marker else 'ok'
        search_opts['marker'] = deleted if deleted else marker
        # Fetch one extra row so update_pagination can detect a next page.
        search_opts['limit'] = page_size + 1
        # NOTE(amotoki): It looks like the 'sort_keys' must be unique to make
        # the pagination in the nova API works as expected. Multiple servers
        # can have a same 'created_at' as its resolution is a second.
        # To ensure the uniqueness we add 'uuid' to the sort keys.
        # 'display_name' is added before 'uuid' to list servers in the
        # alphabetical order.
        sort_keys = ['created_at', 'display_name', 'uuid']

        servers = [Server(s, request)
                   for s in nova_client.servers.list(detailed, search_opts,
                                                     sort_keys=sort_keys,
                                                     sort_dirs=[sort_dir] * 3)]

        if view_marker == 'possibly_deleted':
            # Empty result with a possibly-deleted marker: retry in
            # descending order first, then ascending if still empty.
            if not servers:
                view_marker = 'head_deleted'
                reversed_order = False
                servers = [Server(s, request)
                           for s in
                           nova_client.servers.list(detailed,
                                                    search_opts,
                                                    sort_keys=sort_keys,
                                                    sort_dirs=['desc'] * 3)]
            if not servers:
                view_marker = 'tail_deleted'
                reversed_order = True
                servers = [Server(s, request)
                           for s in
                           nova_client.servers.list(detailed,
                                                    search_opts,
                                                    sort_keys=sort_keys,
                                                    sort_dirs=['asc'] * 3)]
        (servers, has_more_data, has_prev_data) = update_pagination(
            servers, page_size, marker, reversed_order)
        # At the head/tail edges there is nothing before/after this page.
        has_prev_data = (False
                         if view_marker == 'head_deleted'
                         else has_prev_data)
        has_more_data = (False
                         if view_marker == 'tail_deleted'
                         else has_more_data)
    else:
        servers = [Server(s, request)
                   for s in nova_client.servers.list(detailed, search_opts)]
    return (servers, has_more_data, has_prev_data)
531
532
@profiler.trace
def server_list(request, search_opts=None, detailed=True):
    """List servers; returns a (servers, has_more_data) tuple."""
    servers, has_more_data, _has_prev = server_list_paged(
        request, search_opts, detailed)
    return (servers, has_more_data)
539
540
@profiler.trace
def server_console_output(request, instance_id, tail_length=None):
    """Gets console output of an instance."""
    client = _nova.novaclient(request)
    return client.servers.get_console_output(instance_id,
                                             length=tail_length)
546
547
@profiler.trace
def server_pause(request, instance_id):
    """Pause the given server."""
    client = _nova.novaclient(request)
    client.servers.pause(instance_id)
551
552
@profiler.trace
def server_unpause(request, instance_id):
    """Unpause the given server."""
    client = _nova.novaclient(request)
    client.servers.unpause(instance_id)
556
557
@profiler.trace
def server_suspend(request, instance_id):
    """Suspend the given server."""
    client = _nova.novaclient(request)
    client.servers.suspend(instance_id)
561
562
@profiler.trace
def server_resume(request, instance_id):
    """Resume the given suspended server."""
    client = _nova.novaclient(request)
    client.servers.resume(instance_id)
566
567
@profiler.trace
def server_shelve(request, instance_id):
    """Shelve the given server."""
    client = _nova.novaclient(request)
    client.servers.shelve(instance_id)
571
572
@profiler.trace
def server_unshelve(request, instance_id):
    """Unshelve the given server."""
    client = _nova.novaclient(request)
    client.servers.unshelve(instance_id)
576
577
@profiler.trace
def server_reboot(request, instance_id, soft_reboot=False):
    """Reboot a server; hard reboot unless ``soft_reboot`` is True."""
    if soft_reboot:
        hardness = nova_servers.REBOOT_SOFT
    else:
        hardness = nova_servers.REBOOT_HARD
    _nova.novaclient(request).servers.reboot(instance_id, hardness)
584
585
@profiler.trace
def server_rebuild(request, instance_id, image_id, password=None,
                   disk_config=None, description=None):
    """Rebuild a server from an image, optionally updating its description."""
    # 'description' is only forwarded when truthy.
    kwargs = {'description': description} if description else {}
    nc = _nova.get_novaclient_with_instance_desc(request)
    return nc.servers.rebuild(instance_id, image_id, password, disk_config,
                              **kwargs)
595
596
@profiler.trace
def server_update(request, instance_id, name, description=None):
    """Update a server's name (stripped) and description."""
    nc = _nova.get_novaclient_with_instance_desc(request)
    return nc.servers.update(instance_id, name=name.strip(),
                             description=description)
602
603
@profiler.trace
def server_migrate(request, instance_id):
    """Cold-migrate the given server."""
    client = _nova.novaclient(request)
    client.servers.migrate(instance_id)
607
608
@profiler.trace
def server_live_migrate(request, instance_id, host, block_migration=False,
                        disk_over_commit=False):
    """Live-migrate the given server to ``host``."""
    client = _nova.novaclient(request)
    client.servers.live_migrate(instance_id, host, block_migration,
                                disk_over_commit)
615
616
@profiler.trace
def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
    """Resize the given server to a new flavor."""
    client = _nova.novaclient(request)
    client.servers.resize(instance_id, flavor, disk_config, **kwargs)
621
622
@profiler.trace
def server_confirm_resize(request, instance_id):
    """Confirm a pending resize of the given server."""
    client = _nova.novaclient(request)
    client.servers.confirm_resize(instance_id)
626
627
@profiler.trace
def server_revert_resize(request, instance_id):
    """Revert a pending resize of the given server."""
    client = _nova.novaclient(request)
    client.servers.revert_resize(instance_id)
631
632
@profiler.trace
def server_start(request, instance_id):
    """Start the given stopped server."""
    client = _nova.novaclient(request)
    client.servers.start(instance_id)
636
637
@profiler.trace
def server_stop(request, instance_id):
    """Stop the given server."""
    client = _nova.novaclient(request)
    client.servers.stop(instance_id)
641
642
@profiler.trace
def server_lock(request, instance_id):
    """Lock the given server against state-changing operations."""
    microversion = get_microversion(request, "locked_attribute")
    client = _nova.novaclient(request, version=microversion)
    client.servers.lock(instance_id)
647
648
@profiler.trace
def server_unlock(request, instance_id):
    """Unlock the given server."""
    microversion = get_microversion(request, "locked_attribute")
    client = _nova.novaclient(request, version=microversion)
    client.servers.unlock(instance_id)
653
654
@profiler.trace
def server_metadata_update(request, instance_id, metadata):
    """Set metadata key/value pairs on the given server."""
    client = _nova.novaclient(request)
    client.servers.set_meta(instance_id, metadata)
658
659
@profiler.trace
def server_metadata_delete(request, instance_id, keys):
    """Delete the given metadata keys from the server."""
    client = _nova.novaclient(request)
    client.servers.delete_meta(instance_id, keys)
663
664
@profiler.trace
def server_rescue(request, instance_id, password=None, image=None):
    """Put the given server into rescue mode."""
    client = _nova.novaclient(request)
    client.servers.rescue(instance_id, password=password, image=image)
670
671
@profiler.trace
def server_unrescue(request, instance_id):
    """Take the given server out of rescue mode."""
    client = _nova.novaclient(request)
    client.servers.unrescue(instance_id)
675
676
@profiler.trace
def tenant_quota_get(request, tenant_id):
    """Fetch a tenant's quota set, filtered of nova-network quotas."""
    quotas = _nova.novaclient(request).quotas.get(tenant_id)
    return QuotaSet(quotas)
680
681
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
    """Update a tenant's quota; a no-op when no values are given."""
    if not kwargs:
        return
    _nova.novaclient(request).quotas.update(tenant_id, **kwargs)
686
687
@profiler.trace
def default_quota_get(request, tenant_id):
    """Fetch the default quota set, filtered of nova-network quotas."""
    defaults = _nova.novaclient(request).quotas.defaults(tenant_id)
    return QuotaSet(defaults)
691
692
@profiler.trace
def default_quota_update(request, **kwargs):
    """Update the default quota class with the given values."""
    client = _nova.novaclient(request)
    client.quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
697
698
def _get_usage_marker(usage):
    """Return the pagination marker of a usage: its last instance_id."""
    server_usages = getattr(usage, 'server_usages', None)
    if not server_usages:
        return None
    return server_usages[-1].get('instance_id')
704
705
def _get_usage_list_marker(usage_list):
    """Return the marker of the final usage in the list, or None."""
    if not usage_list:
        return None
    return _get_usage_marker(usage_list[-1])
711
712
def _merge_usage(usage, next_usage):
    """Fold next_usage's server usages and totals into usage in place."""
    usage.server_usages.extend(next_usage.server_usages)
    for attr in ('total_hours', 'total_memory_mb_usage',
                 'total_vcpus_usage', 'total_local_gb_usage'):
        setattr(usage, attr, getattr(usage, attr) + getattr(next_usage, attr))
719
720
def _merge_usage_list(usages, next_usage_list):
    """Merge a page of usages into the dict keyed by tenant id."""
    for next_usage in next_usage_list:
        existing = usages.get(next_usage.tenant_id)
        if existing is None:
            usages[next_usage.tenant_id] = next_usage
        else:
            _merge_usage(existing, next_usage)
727
728
@profiler.trace
def usage_get(request, tenant_id, start, end):
    """Get one tenant's usage between start and end, merging paged results.

    Returns a NovaUsage wrapper around the (possibly merged) usage.
    """
    client = upgrade_api(request, _nova.novaclient(request), '2.40')
    usage = client.usage.get(tenant_id, start, end)
    if client.api_version >= api_versions.APIVersion('2.40'):
        # If the number of instances used to calculate the usage is greater
        # than max_limit, the usage will be split across multiple requests
        # and the responses will need to be merged back together.
        marker = _get_usage_marker(usage)
        while marker:
            next_usage = client.usage.get(tenant_id, start, end, marker=marker)
            marker = _get_usage_marker(next_usage)
            if marker:
                # A page that yields no marker carried no server usages,
                # so only pages that produced a marker are merged.
                _merge_usage(usage, next_usage)
    return NovaUsage(usage)
744
745
@profiler.trace
def usage_list(request, start, end):
    """Get per-tenant usage between start and end, merging paged results.

    Returns a list of NovaUsage wrappers, one per tenant.
    """
    client = upgrade_api(request, _nova.novaclient(request), '2.40')
    usage_list = client.usage.list(start, end, True)
    if client.api_version >= api_versions.APIVersion('2.40'):
        # If the number of instances used to calculate the usage is greater
        # than max_limit, the usage will be split across multiple requests
        # and the responses will need to be merged back together.
        usages = collections.OrderedDict()
        _merge_usage_list(usages, usage_list)
        marker = _get_usage_list_marker(usage_list)
        while marker:
            next_usage_list = client.usage.list(start, end, True,
                                                marker=marker)
            marker = _get_usage_list_marker(next_usage_list)
            if marker:
                # Pages yielding no marker carried no server usages.
                _merge_usage_list(usages, next_usage_list)
        usage_list = usages.values()
    return [NovaUsage(u) for u in usage_list]
765
766
@profiler.trace
def get_password(request, instance_id, private_key=None):
    """Get the instance's password, decrypted with the given private key."""
    client = _nova.novaclient(request)
    return client.servers.get_password(instance_id, private_key)
771
772
@profiler.trace
def instance_volume_attach(request, volume_id, instance_id, device):
    """Attach a volume to an instance.

    Multiattach volumes require the 'multiattach' microversion; raise
    VolumeMultiattachNotSupported when it is unavailable.
    """
    volume = cinder.volume_get(request, volume_id)
    if not volume.multiattach:
        client = _nova.novaclient(request)
    else:
        version = get_microversion(request, 'multiattach')
        if not version:
            raise VolumeMultiattachNotSupported(
                _('Multiattach volumes are not yet supported.'))
        client = _nova.novaclient(request, version)
    return client.volumes.create_server_volume(
        instance_id, volume_id, device)
788
789
@profiler.trace
def instance_volume_detach(request, instance_id, att_id):
    """Detach the volume attachment ``att_id`` from an instance."""
    client = _nova.novaclient(request)
    return client.volumes.delete_server_volume(instance_id, att_id)
794
795
@profiler.trace
def instance_volumes_list(request, instance_id):
    """List volumes attached to an instance, with names resolved via cinder."""
    volumes = _nova.novaclient(request).volumes.get_server_volumes(instance_id)

    # Build the cinder client once instead of once per attached volume.
    cinder_client = cinder.cinderclient(request)
    for volume in volumes:
        volume_data = cinder_client.volumes.get(volume.id)
        volume.name = cinder.Volume(volume_data).name

    return volumes
805
806
@profiler.trace
def hypervisor_list(request):
    """List all hypervisors."""
    client = _nova.novaclient(request)
    return client.hypervisors.list()
810
811
@profiler.trace
def hypervisor_stats(request):
    """Fetch aggregate hypervisor statistics."""
    client = _nova.novaclient(request)
    return client.hypervisors.statistics()
815
816
@profiler.trace
def hypervisor_search(request, query, servers=True):
    """Search hypervisors by hostname, optionally including their servers."""
    client = _nova.novaclient(request)
    return client.hypervisors.search(query, servers)
820
821
@profiler.trace
def evacuate_host(request, host, target=None, on_shared_storage=False):
    """Evacuate every server on ``host``, optionally to ``target``.

    Failures are collected per server and re-raised at the end as a single
    ClientException listing the servers that could not be evacuated.
    Returns True when every evacuation call succeeded.
    """
    # TODO(jmolle) This should be change for nova atomic api host_evacuate
    hypervisors = _nova.novaclient(request).hypervisors.search(host, True)
    response = []
    err_code = None
    for hypervisor in hypervisors:
        hyper = Hypervisor(hypervisor)
        # if hypervisor doesn't have servers, the attribute is not present
        for server in hyper.servers:
            try:
                _nova.novaclient(request).servers.evacuate(server['uuid'],
                                                           target,
                                                           on_shared_storage)
            except nova_exceptions.ClientException as err:
                # Record the failure but keep evacuating remaining servers.
                err_code = err.code
                msg = _("Name: %(name)s ID: %(uuid)s")
                msg = msg % {'name': server['name'], 'uuid': server['uuid']}
                response.append(msg)

    if err_code:
        msg = _('Failed to evacuate instances: %s') % ', '.join(response)
        raise nova_exceptions.ClientException(err_code, msg)

    return True
847
848
@profiler.trace
def migrate_host(request, host, live_migrate=False, disk_over_commit=False,
                 block_migration=False):
    """Migrate every server off the given hypervisor host.

    Live migration is only attempted for servers in ACTIVE or PAUSED
    status; all others fall back to cold migration. Failures are collected
    per server and re-raised at the end as a single ClientException.
    Returns True when every migration call succeeded.
    """
    nc = _nova.novaclient(request)
    hypervisors = nc.hypervisors.search(host, True)
    response = []
    err_code = None
    for hyper in hypervisors:
        # if hypervisor doesn't have servers, the attribute is not present
        for server in getattr(hyper, "servers", []):
            try:
                if live_migrate:
                    instance = server_get(request, server['uuid'])

                    # Checking that instance can be live-migrated
                    if instance.status in ["ACTIVE", "PAUSED"]:
                        nc.servers.live_migrate(
                            server['uuid'],
                            None,
                            block_migration,
                            disk_over_commit
                        )
                    else:
                        nc.servers.migrate(server['uuid'])
                else:
                    nc.servers.migrate(server['uuid'])
            except nova_exceptions.ClientException as err:
                # Record the failure but keep migrating remaining servers.
                err_code = err.code
                msg = _("Name: %(name)s ID: %(uuid)s")
                msg = msg % {'name': server['name'], 'uuid': server['uuid']}
                response.append(msg)

    if err_code:
        msg = _('Failed to migrate instances: %s') % ', '.join(response)
        raise nova_exceptions.ClientException(err_code, msg)

    return True
885
886
@profiler.trace
def tenant_absolute_limits(request, reserved=False, tenant_id=None):
    """Return nova absolute limits as a ``{name: value}`` dict.

    Negative "total...Used" values are normalized to 0 and other negative
    values (unlimited quotas) are mapped to ``float("inf")``.
    """
    # Nova does not allow to specify tenant_id for non-admin users
    # even if tenant_id matches a tenant_id of the user.
    if tenant_id == request.user.tenant_id:
        tenant_id = None
    absolute = _nova.novaclient(request).limits.get(
        reserved=reserved, tenant_id=tenant_id).absolute
    results = {}
    for item in absolute:
        if item.value >= 0:
            results[item.name] = item.value
        elif item.name.startswith('total') and item.name.endswith('Used'):
            # Workaround for nova bug 1370867: absolute_limits reports
            # negative values for total.*Used instead of 0.
            results[item.name] = 0
        else:
            # -1 is nova's marker for an unlimited quota.
            results[item.name] = float("inf")
    return results
909
910
@profiler.trace
def availability_zone_list(request, detailed=False):
    """Return availability zones ordered by zone name."""
    zone_manager = _nova.novaclient(request).availability_zones
    return sorted(zone_manager.list(detailed=detailed),
                  key=attrgetter('zoneName'))
917
918
@profiler.trace
def server_group_list(request):
    """List the server groups visible to the current user."""
    compute = _nova.novaclient(request)
    return compute.server_groups.list()
922
923
@profiler.trace
def server_group_create(request, **kwargs):
    """Create a server group.

    Uses the microversion that supports soft (anti-)affinity policies
    when the deployment provides it.
    """
    version = get_microversion(request, "servergroup_soft_policies")
    client = _nova.novaclient(request, version=version)
    return client.server_groups.create(**kwargs)
929
930
@profiler.trace
def server_group_delete(request, servergroup_id):
    """Delete the server group with the given ID."""
    compute = _nova.novaclient(request)
    compute.server_groups.delete(servergroup_id)
934
935
@profiler.trace
def server_group_get(request, servergroup_id):
    """Fetch a single server group.

    Uses the microversion that exposes member/user info when available.
    """
    version = get_microversion(request, "servergroup_user_info")
    client = _nova.novaclient(request, version=version)
    return client.server_groups.get(servergroup_id)
941
942
@profiler.trace
def service_list(request, binary=None):
    """List nova services, optionally filtered by binary name."""
    compute = _nova.novaclient(request)
    return compute.services.list(binary=binary)
946
947
@profiler.trace
def service_enable(request, host, binary):
    """Enable the given nova service binary on a host."""
    services = _nova.novaclient(request).services
    return services.enable(host, binary)
951
952
@profiler.trace
def service_disable(request, host, binary, reason=None):
    """Disable a nova service on a host, logging ``reason`` if given."""
    services = _nova.novaclient(request).services
    if not reason:
        return services.disable(host, binary)
    return services.disable_log_reason(host, binary, reason)
959
960
@profiler.trace
def aggregate_details_list(request):
    """Return every host aggregate together with its full details."""
    client = _nova.novaclient(request)
    return [client.aggregates.get_details(aggregate.id)
            for aggregate in client.aggregates.list()]
968
969
@profiler.trace
def aggregate_create(request, name, availability_zone=None):
    """Create a host aggregate, optionally bound to an availability zone."""
    aggregates = _nova.novaclient(request).aggregates
    return aggregates.create(name, availability_zone)
973
974
@profiler.trace
def aggregate_delete(request, aggregate_id):
    """Delete the host aggregate with the given ID."""
    aggregates = _nova.novaclient(request).aggregates
    return aggregates.delete(aggregate_id)
978
979
@profiler.trace
def aggregate_get(request, aggregate_id):
    """Fetch a single host aggregate by ID."""
    aggregates = _nova.novaclient(request).aggregates
    return aggregates.get(aggregate_id)
983
984
@profiler.trace
def aggregate_update(request, aggregate_id, values):
    """Update a host aggregate's name and/or availability zone.

    :param values: dict of fields to update (e.g. name, availability_zone)
    :returns: the updated aggregate — returned for consistency with the
        other aggregate_* helpers (the previous version discarded it,
        so existing callers that ignore the result are unaffected)
    """
    return _nova.novaclient(request).aggregates.update(aggregate_id, values)
988
989
@profiler.trace
def aggregate_set_metadata(request, aggregate_id, metadata):
    """Set metadata key/value pairs on a host aggregate."""
    aggregates = _nova.novaclient(request).aggregates
    return aggregates.set_metadata(aggregate_id, metadata)
994
995
@profiler.trace
def add_host_to_aggregate(request, aggregate_id, host):
    """Add a compute host to the given aggregate."""
    aggregates = _nova.novaclient(request).aggregates
    aggregates.add_host(aggregate_id, host)
999
1000
@profiler.trace
def remove_host_from_aggregate(request, aggregate_id, host):
    """Remove a compute host from the given aggregate."""
    aggregates = _nova.novaclient(request).aggregates
    aggregates.remove_host(aggregate_id, host)
1004
1005
@profiler.trace
def interface_attach(request,
                     server, port_id=None, net_id=None, fixed_ip=None):
    """Attach a network interface (by port, network, or fixed IP) to a server."""
    servers_api = _nova.novaclient(request).servers
    return servers_api.interface_attach(server, port_id, net_id, fixed_ip)
1011
1012
@profiler.trace
def interface_detach(request, server, port_id):
    """Detach the interface identified by ``port_id`` from a server."""
    servers_api = _nova.novaclient(request).servers
    return servers_api.interface_detach(server, port_id)
1016
1017
@profiler.trace
def can_set_server_password():
    """Whether the configured hypervisor supports setting an admin password."""
    feature = utils.get_dict_config('OPENSTACK_HYPERVISOR_FEATURES',
                                    'can_set_password')
    return feature
1022
1023
@profiler.trace
def instance_action_list(request, instance_id):
    """List the recorded instance actions for ``instance_id``."""
    manager = nova_instance_action.InstanceActionManager(
        _nova.novaclient(request))
    return manager.list(instance_id)
1028
1029
@profiler.trace
def can_set_mount_point():
    """Return the Hypervisor's capability of setting mount points."""
    feature = utils.get_dict_config('OPENSTACK_HYPERVISOR_FEATURES',
                                    'can_set_mount_point')
    return feature
1035
1036
@profiler.trace
def requires_keypair():
    """Whether the configured hypervisor requires a keypair at launch."""
    feature = utils.get_dict_config('OPENSTACK_HYPERVISOR_FEATURES',
                                    'requires_keypair')
    return feature
1041
1042
# Traced for consistency with the other hypervisor-feature helpers above
# (can_set_mount_point, requires_keypair), which all carry @profiler.trace.
@profiler.trace
def can_set_quotas():
    """Whether quota management is enabled in the hypervisor features config."""
    return utils.get_dict_config('OPENSTACK_HYPERVISOR_FEATURES',
                                 'enable_quotas')