snf-cyclades-app/synnefo/logic/backend.py @ 99af08a4


# Copyright 2011 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.

import json

from django.conf import settings
from django.db import transaction
from datetime import datetime

from synnefo.db.models import (Backend, VirtualMachine, Network,
                               BackendNetwork, BACKEND_STATUSES,
                               pooled_rapi_client, BridgePoolTable,
                               MacPrefixPoolTable, VirtualMachineDiagnostic)
from synnefo.logic import utils
from synnefo import quotas
from synnefo.api.util import release_resource

from logging import getLogger
log = getLogger(__name__)


_firewall_tags = {
    'ENABLED': settings.GANETI_FIREWALL_ENABLED_TAG,
    'DISABLED': settings.GANETI_FIREWALL_DISABLED_TAG,
    'PROTECTED': settings.GANETI_FIREWALL_PROTECTED_TAG}

_reverse_tags = dict((v.split(':')[3], k) for k, v in _firewall_tags.items())


@transaction.commit_on_success
def process_op_status(vm, etime, jobid, opcode, status, logmsg):
    """Process a job progress notification from the backend

    Process an incoming message from the backend (currently Ganeti).
    Job notifications with a terminating status (success, error, or canceled)
    also update the operating state of the VM.

    """
    # See #1492, #1031, #1111 why this line has been removed
    #if (opcode not in [x[0] for x in VirtualMachine.BACKEND_OPCODES] or
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise VirtualMachine.InvalidBackendMsgError(opcode, status)

    vm.backendjobid = jobid
    vm.backendjobstatus = status
    vm.backendopcode = opcode
    vm.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = VirtualMachine.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        vm.operstate = state_for_success

    # Special case: if OP_INSTANCE_CREATE fails --> ERROR
    if opcode == 'OP_INSTANCE_CREATE' and status in ('canceled', 'error'):
        vm.operstate = 'ERROR'
        vm.backendtime = etime
    elif opcode == 'OP_INSTANCE_REMOVE':
        # Set the deleted flag explicitly, to cater for admin-initiated
        # removals.
        # Special case: OP_INSTANCE_REMOVE fails for machines in ERROR,
        # when no instance exists at the Ganeti backend.
        # See ticket #799 for all the details.
        #
        if status == 'success' or (status == 'error' and
                                   vm.operstate == 'ERROR'):
            release_instance_nics(vm)
            vm.nics.all().delete()
            vm.deleted = True
            vm.operstate = state_for_success
            vm.backendtime = etime
            # Issue and accept commission to Quotaholder
            quotas.issue_and_accept_commission(vm, delete=True)

    # Update backendtime only for jobs that have been successfully completed,
    # since only these jobs update the state of the VM. Otherwise a "race
    # condition" may occur when a successful job (e.g. OP_INSTANCE_REMOVE)
    # completes before an error job and the messages arrive in reversed order.
    if status == 'success':
        vm.backendtime = etime

    vm.save()


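# Illustrative call (hypothetical values), roughly how the dispatcher might
# invoke the handler above for a terminating job notification:
#
#   process_op_status(vm, etime=datetime.now(), jobid=12345,
#                     opcode="OP_INSTANCE_STARTUP", status="success",
#                     logmsg="")

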
@transaction.commit_on_success
def process_net_status(vm, etime, nics):
    """Process a net status notification from the backend

    Process an incoming message from the Ganeti backend,
    detailing the NIC configuration of a VM instance.

    Update the state of the VM in the DB accordingly.
    """

    ganeti_nics = process_ganeti_nics(nics)
    if not nics_changed(vm.nics.order_by('index'), ganeti_nics):
        log.debug("NICs for VM %s have not changed", vm)
        return

    release_instance_nics(vm)

    for nic in ganeti_nics:
        ipv4 = nic.get('ipv4', '')
        net = nic['network']
        if ipv4:
            net.reserve_address(ipv4)

        nic['dirty'] = False
        vm.nics.create(**nic)
        # Dummy save the network, because the UI uses changed-since for VMs
        # and Networks in order to show the VM NICs
        net.save()

    vm.backendtime = etime
    vm.save()


def process_ganeti_nics(ganeti_nics):
    """Process NIC dict from ganeti hooks."""
    new_nics = []
    for i, new_nic in enumerate(ganeti_nics):
        network = new_nic.get('network', '')
        n = str(network)
        pk = utils.id_from_network_name(n)

        net = Network.objects.get(pk=pk)

        # Get the new nic info
        mac = new_nic.get('mac', '')
        ipv4 = new_nic.get('ip', '')
        ipv6 = new_nic.get('ipv6', '')

        firewall = new_nic.get('firewall', '')
        firewall_profile = _reverse_tags.get(firewall, '')
        if not firewall_profile and net.public:
            firewall_profile = settings.DEFAULT_FIREWALL_PROFILE

        nic = {
            'index': i,
            'network': net,
            'mac': mac,
            'ipv4': ipv4,
            'ipv6': ipv6,
            'firewall_profile': firewall_profile,
            'state': 'ACTIVE'}

        new_nics.append(nic)
    return new_nics


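# Illustrative input/output sketch for process_ganeti_nics(), assuming a
# private network. The network name format below ("snf-net-<id>") is an
# assumption about utils.id_from_network_name(); the dict keys are the ones
# read above:
#
#   ganeti_nics = [{'network': 'snf-net-42', 'mac': 'aa:00:00:10:20:30',
#                   'ip': '10.0.0.2', 'ipv6': '', 'firewall': ''}]
#   process_ganeti_nics(ganeti_nics)
#   # -> [{'index': 0, 'network': <Network 42>, 'mac': 'aa:00:00:10:20:30',
#   #      'ipv4': '10.0.0.2', 'ipv6': '', 'firewall_profile': '',
#   #      'state': 'ACTIVE'}]

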
def nics_changed(old_nics, new_nics):
    """Return True if NICs have changed in any way."""
    if len(old_nics) != len(new_nics):
        return True
    for old_nic, new_nic in zip(old_nics, new_nics):
        if not (old_nic.ipv4 == new_nic['ipv4'] and
                old_nic.ipv6 == new_nic['ipv6'] and
                old_nic.mac == new_nic['mac'] and
                old_nic.firewall_profile == new_nic['firewall_profile'] and
                old_nic.index == new_nic['index'] and
                old_nic.network == new_nic['network']):
            return True
    return False


def release_instance_nics(vm):
    for nic in vm.nics.all():
        net = nic.network
        if nic.ipv4:
            net.release_address(nic.ipv4)
        nic.delete()
        net.save()


@transaction.commit_on_success
def process_network_status(back_network, etime, jobid, opcode, status, logmsg):
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode
    back_network.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = BackendNetwork.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        back_network.operstate = state_for_success

    if status in ('canceled', 'error') and opcode == 'OP_NETWORK_ADD':
        back_network.operstate = 'ERROR'
        back_network.backendtime = etime

    if opcode == 'OP_NETWORK_REMOVE':
        if status == 'success' or (status == 'error' and
                                   back_network.operstate == 'ERROR'):
            back_network.operstate = state_for_success
            back_network.deleted = True
            back_network.backendtime = etime

    if status == 'success':
        back_network.backendtime = etime
    back_network.save()
    # Also update the state of the parent Network
    update_network_state(back_network.network)


def update_network_state(network):
    """Update the state of a Network based on BackendNetwork states.

    Update the state of a Network based on the operstate of the networks in
    the backends in which the network exists.

    The state of the network is:
    * ACTIVE: If it is 'ACTIVE' in at least one backend.
    * DELETED: If it is 'DELETED' in all backends that have been created.

    This function also releases the resources (MAC prefix or bridge) and the
    quotas for the network.

    """
    if network.deleted:
        # Network has already been deleted. Just assert that its state is
        # also DELETED.
        if not network.state == "DELETED":
            network.state = "DELETED"
            network.save()
        return

    backend_states = [s.operstate for s in network.backend_networks.all()]
    if not backend_states:
        if network.state != "ACTIVE":
            network.state = "ACTIVE"
            network.save()
        return

    # Network is deleted when all BackendNetworks go to "DELETED" operstate
    deleted = all(state == 'DELETED' for state in backend_states)

    # Release the resources on the deletion of the Network
    if deleted:
        log.info("Network %r deleted. Releasing link %r mac_prefix %r",
                 network.id, network.link, network.mac_prefix)
        network.deleted = True
        network.state = "DELETED"
        if network.mac_prefix:
            if network.FLAVORS[network.flavor]["mac_prefix"] == "pool":
                release_resource(res_type="mac_prefix",
                                 value=network.mac_prefix)
        if network.link:
            if network.FLAVORS[network.flavor]["link"] == "pool":
                release_resource(res_type="bridge", value=network.link)

        # Issue commission
        if network.userid:
            quotas.issue_and_accept_commission(network, delete=True)
        elif not network.public:
            log.warning("Network %s does not have an owner!", network.id)
    network.save()


@transaction.commit_on_success
def process_network_modify(back_network, etime, jobid, opcode, status,
                           add_reserved_ips, remove_reserved_ips):
    assert (opcode == "OP_NETWORK_SET_PARAMS")
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode

    if add_reserved_ips or remove_reserved_ips:
        net = back_network.network
        pool = net.get_pool()
        if add_reserved_ips:
            for ip in add_reserved_ips:
                pool.reserve(ip, external=True)
        if remove_reserved_ips:
            for ip in remove_reserved_ips:
                pool.put(ip, external=True)
        pool.save()

    if status == 'success':
        back_network.backendtime = etime
    back_network.save()


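# Illustrative sketch (hypothetical values): a job that externally reserves
# 10.0.0.10 and releases 10.0.0.11 on the network's IP pool would be
# processed as:
#
#   process_network_modify(back_network, etime, jobid,
#                          "OP_NETWORK_SET_PARAMS", "success",
#                          add_reserved_ips=["10.0.0.10"],
#                          remove_reserved_ips=["10.0.0.11"])

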
@transaction.commit_on_success
def process_create_progress(vm, etime, progress):

    percentage = int(progress)

    # The percentage may exceed 100%, due to the way
    # snf-image:copy-progress tracks bytes read by image handling processes
    percentage = 100 if percentage > 100 else percentage
    if percentage < 0:
        raise ValueError("Percentage cannot be negative")

    # FIXME: log a warning here, see #1033
#   if last_update > percentage:
#       raise ValueError("Build percentage should increase monotonically " \
#                        "(old = %d, new = %d)" % (last_update, percentage))

    # This assumes that no message of type 'ganeti-create-progress' is going to
    # arrive once OP_INSTANCE_CREATE has succeeded for a Ganeti instance and
    # the instance is STARTED.  What if the two messages are processed by two
    # separate dispatcher threads, and the 'ganeti-op-status' message for
    # successful creation gets processed before the 'ganeti-create-progress'
    # message? [vkoukis]
    #
    #if not vm.operstate == 'BUILD':
    #    raise VirtualMachine.IllegalState("VM is not in building state")

    vm.buildpercentage = percentage
    vm.backendtime = etime
    vm.save()


def create_instance_diagnostic(vm, message, source, level="DEBUG", etime=None,
                               details=None):
    """
    Create virtual machine instance diagnostic entry.

    :param vm: VirtualMachine instance to create diagnostic for.
    :param message: Diagnostic message.
    :param source: Diagnostic source identifier (e.g. image-helper).
    :param level: Diagnostic level (`DEBUG`, `INFO`, `WARNING`, `ERROR`).
    :param etime: The time the message occurred (if available).
    :param details: Additional details or debug information.
    """
    VirtualMachineDiagnostic.objects.create_for_vm(vm, level, source=source,
                                                   source_date=etime,
                                                   message=message,
                                                   details=details)


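# Illustrative usage (hypothetical values), based on the signature above:
#
#   create_instance_diagnostic(vm, "Image deployment finished",
#                              source="image-helper", level="INFO")

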
def create_instance(vm, public_nic, flavor, image):
    """`image` is a dictionary which should contain the keys:
            'backend_id', 'format' and 'metadata'

        metadata value should be a dictionary.
    """

    # Handle arguments to CreateInstance() as a dictionary,
    # initialize it based on a deployment-specific value.
    # This enables the administrator to override deployment-specific
    # arguments, such as the disk template to use, the name of the os
    # provider and hypervisor-specific parameters at will (see Synnefo #785,
    # #835).
    #
    kw = settings.GANETI_CREATEINSTANCE_KWARGS
    kw['mode'] = 'create'
    kw['name'] = vm.backend_vm_id
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS

    kw['disk_template'] = flavor.disk_template
    kw['disks'] = [{"size": flavor.disk * 1024}]
    provider = flavor.disk_provider
    if provider:
        kw['disks'][0]['provider'] = provider

        if provider == 'vlmc':
            kw['disks'][0]['origin'] = flavor.disk_origin

    kw['nics'] = [public_nic]
    if settings.GANETI_USE_HOTPLUG:
        kw['hotplug'] = True
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['os'] = settings.GANETI_OS_PROVIDER
    kw['ip_check'] = False
    kw['name_check'] = False

    # Do not specify a node explicitly; have
    # Ganeti use an iallocator instead
    #kw['pnode'] = rapi.GetNodes()[0]

    kw['dry_run'] = settings.TEST

    kw['beparams'] = {
        'auto_balance': True,
        'vcpus': flavor.cpu,
        'memory': flavor.ram}

    kw['osparams'] = {
        'config_url': vm.config_url,
        # Store image id and format to Ganeti
        'img_id': image['backend_id'],
        'img_format': image['format']}

    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['hvparams'] = dict(serial_console=False)

    log.debug("Creating instance %s", utils.hide_pass(kw))
    with pooled_rapi_client(vm) as client:
        return client.CreateInstance(**kw)


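# Illustrative shape of the deployment-specific setting consumed above
# (assumed values; the actual OS definition name and extra parameters are
# deployment-dependent):
#
#   GANETI_CREATEINSTANCE_KWARGS = {
#       'os': 'snf-image+default',
#       'hvparams': {'serial_console': False},
#       'wait_for_sync': False}

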
def delete_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.DeleteInstance(vm.backend_vm_id, dry_run=settings.TEST)


def reboot_instance(vm, reboot_type):
    assert reboot_type in ('soft', 'hard')
    with pooled_rapi_client(vm) as client:
        return client.RebootInstance(vm.backend_vm_id, reboot_type,
                                     dry_run=settings.TEST)


def startup_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.StartupInstance(vm.backend_vm_id, dry_run=settings.TEST)


def shutdown_instance(vm):
    with pooled_rapi_client(vm) as client:
        return client.ShutdownInstance(vm.backend_vm_id, dry_run=settings.TEST)


def get_instance_console(vm):
    # RAPI GetInstanceConsole() returns endpoints to the vnc_bind_address,
    # which is a cluster-wide setting, either 0.0.0.0 or 127.0.0.1, and pretty
    # useless (see #783).
    #
    # Until this is fixed on the Ganeti side, construct a console info reply
    # directly.
    #
    # WARNING: This assumes that VNC runs on port network_port on
    #          the instance's primary node, and is probably
    #          hypervisor-specific.
    #
    log.debug("Getting console for vm %s", vm)

    console = {}
    console['kind'] = 'vnc'

    with pooled_rapi_client(vm) as client:
        i = client.GetInstance(vm.backend_vm_id)

    if i['hvparams']['serial_console']:
        raise Exception("hv parameter serial_console cannot be true")
    console['host'] = i['pnode']
    console['port'] = i['network_port']

    return console


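# Illustrative return value (hypothetical host and port), as constructed
# above from the instance's primary node and network_port:
#
#   {'kind': 'vnc', 'host': 'node1.example.com', 'port': 11000}

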
def get_instance_info(vm):
    with pooled_rapi_client(vm) as client:
        return client.GetInstanceInfo(vm.backend_vm_id)


def create_network(network, backend, connect=True):
    """Create a network in a Ganeti backend"""
    log.debug("Creating network %s in backend %s", network, backend)

    job_id = _create_network(network, backend)

    if connect:
        job_ids = connect_network(network, backend, depends=[job_id])
        return job_ids
    else:
        return [job_id]


def _create_network(network, backend):
    """Create a network."""

    network_type = 'public' if network.public else 'private'

    tags = network.backend_tag
    if network.dhcp:
        tags.append('nfdhcpd')

    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    try:
        bn = BackendNetwork.objects.get(network=network, backend=backend)
        mac_prefix = bn.mac_prefix
    except BackendNetwork.DoesNotExist:
        raise Exception("BackendNetwork for network '%s' in backend '%s'"
                        " does not exist" % (network.id, backend.id))

    with pooled_rapi_client(backend) as client:
        return client.CreateNetwork(network_name=network.backend_id,
                                    network=network.subnet,
                                    network6=network.subnet6,
                                    gateway=network.gateway,
                                    gateway6=network.gateway6,
                                    network_type=network_type,
                                    mac_prefix=mac_prefix,
                                    conflicts_check=conflicts_check,
                                    tags=tags)


def connect_network(network, backend, depends=[], group=None):
    """Connect a network to nodegroups."""
    log.debug("Connecting network %s to backend %s", network, backend)

    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    depends = [[job, ["success", "error", "canceled"]] for job in depends]
    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        for group in groups:
            job_id = client.ConnectNetwork(network.backend_id, group,
                                           network.mode, network.link,
                                           conflicts_check,
                                           depends=depends)
            job_ids.append(job_id)
    return job_ids


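# Illustrative sketch of the job-dependency format built above: for
# depends=[12345] (a hypothetical job id), the list passed to Ganeti is
#
#   [[12345, ["success", "error", "canceled"]]]
#
# i.e. the dependent job runs once job 12345 reaches any terminating status.

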
def delete_network(network, backend, disconnect=True):
    log.debug("Deleting network %s from backend %s", network, backend)

    depends = []
    if disconnect:
        depends = disconnect_network(network, backend)
    _delete_network(network, backend, depends=depends)


def _delete_network(network, backend, depends=[]):
    depends = [[job, ["success", "error", "canceled"]] for job in depends]
    with pooled_rapi_client(backend) as client:
        return client.DeleteNetwork(network.backend_id, depends)


def disconnect_network(network, backend, group=None):
    log.debug("Disconnecting network %s from backend %s", network, backend)

    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        for group in groups:
            job_id = client.DisconnectNetwork(network.backend_id, group)
            job_ids.append(job_id)
    return job_ids


def connect_to_network(vm, network, address=None):
    backend = vm.backend
    bnet, created = BackendNetwork.objects.get_or_create(backend=backend,
                                                         network=network)
    depend_jobs = []
    if bnet.operstate != "ACTIVE":
        depend_jobs = create_network(network, backend, connect=True)

    depends = [[job, ["success", "error", "canceled"]] for job in depend_jobs]

    nic = {'ip': address, 'network': network.backend_id}

    log.debug("Connecting vm %s to network %s(%s)", vm, network, address)

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=[('add', nic)],
                                     hotplug=settings.GANETI_USE_HOTPLUG,
                                     depends=depends,
                                     dry_run=settings.TEST)


def disconnect_from_network(vm, nic):
    op = [('remove', nic.index, {})]

    log.debug("Removing nic of VM %s, with index %s", vm, str(nic.index))

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=op,
                                     hotplug=settings.GANETI_USE_HOTPLUG,
                                     dry_run=settings.TEST)


def set_firewall_profile(vm, profile):
    try:
        tag = _firewall_tags[profile]
    except KeyError:
        raise ValueError("Unsupported firewall profile: %s" % profile)

    log.debug("Setting tag of VM %s to %s", vm, profile)

    with pooled_rapi_client(vm) as client:
        # Delete all firewall tags
        for t in _firewall_tags.values():
            client.DeleteInstanceTags(vm.backend_vm_id, [t],
                                      dry_run=settings.TEST)

        client.AddInstanceTags(vm.backend_vm_id, [tag], dry_run=settings.TEST)

        # XXX NOP ModifyInstance call to force process_net_status to run
        # on the dispatcher
        os_name = settings.GANETI_CREATEINSTANCE_KWARGS['os']
        client.ModifyInstance(vm.backend_vm_id,
                              os_name=os_name)


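# Note on the tag format: _reverse_tags (defined at the top of this module)
# maps the fourth colon-separated field of each firewall tag back to a
# profile, so the GANETI_FIREWALL_*_TAG settings are assumed to be
# colon-separated values whose fourth field names the profile, e.g.
# (hypothetical value) 'synnefo:network:0:protected'.

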
def get_ganeti_instances(backend=None, bulk=False):
    instances = []
    for backend in get_backends(backend):
        with pooled_rapi_client(backend) as client:
            instances.append(client.GetInstances(bulk=bulk))

    return reduce(list.__add__, instances, [])


def get_ganeti_nodes(backend=None, bulk=False):
    nodes = []
    for backend in get_backends(backend):
        with pooled_rapi_client(backend) as client:
            nodes.append(client.GetNodes(bulk=bulk))

    return reduce(list.__add__, nodes, [])


def get_ganeti_jobs(backend=None, bulk=False):
    jobs = []
    for backend in get_backends(backend):
        with pooled_rapi_client(backend) as client:
            jobs.append(client.GetJobs(bulk=bulk))
    return reduce(list.__add__, jobs, [])

##
##
##


def get_backends(backend=None):
    if backend:
        if backend.offline:
            return []
        return [backend]
    return Backend.objects.filter(offline=False)


def get_physical_resources(backend):
    """ Get the physical resources of a backend.

    Get the resources of a backend as reported by the backend (not the db).

    """
    nodes = get_ganeti_nodes(backend, bulk=True)
    attr = ['mfree', 'mtotal', 'dfree', 'dtotal', 'pinst_cnt', 'ctotal']
    res = {}
    for a in attr:
        res[a] = 0
    for n in nodes:
        # Filter out drained, offline and non-vm_capable nodes, since they
        # will not take part in the vm allocation process
        can_host_vms = n['vm_capable'] and not (n['drained'] or n['offline'])
        if can_host_vms and n['cnodes']:
            for a in attr:
                res[a] += int(n[a])
    return res


def update_resources(backend, resources=None):
    """ Update the state of the backend resources in the db.

    """

    if not resources:
        resources = get_physical_resources(backend)

    backend.mfree = resources['mfree']
    backend.mtotal = resources['mtotal']
    backend.dfree = resources['dfree']
    backend.dtotal = resources['dtotal']
    backend.pinst_cnt = resources['pinst_cnt']
    backend.ctotal = resources['ctotal']
    backend.updated = datetime.now()
    backend.save()


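# Illustrative return value of get_physical_resources() (hypothetical
# numbers; Ganeti reports the memory and disk figures in MiB):
#
#   {'mfree': 24576, 'mtotal': 65536, 'dfree': 409600, 'dtotal': 921600,
#    'pinst_cnt': 13, 'ctotal': 32}

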
def get_memory_from_instances(backend):
    """ Get the memory that is used by instances.

    Get the used memory of a backend. Note: this is different from
    the real memory used, due to kvm's memory de-duplication.

    """
    with pooled_rapi_client(backend) as client:
        instances = client.GetInstances(bulk=True)
    mem = 0
    for i in instances:
        mem += i['oper_ram']
    return mem

##
## Synchronized operations for reconciliation
##


def create_network_synced(network, backend):
    result = _create_network_synced(network, backend)
    if result[0] != 'success':
        return result
    result = connect_network_synced(network, backend)
    return result


def _create_network_synced(network, backend):
    with pooled_rapi_client(backend) as client:
        job = _create_network(network, backend)
        result = wait_for_job(client, job)
    return result


def connect_network_synced(network, backend):
    with pooled_rapi_client(backend) as client:
        for group in client.GetGroups():
            job = client.ConnectNetwork(network.backend_id, group,
                                        network.mode, network.link)
            result = wait_for_job(client, job)
            if result[0] != 'success':
                return result

    return result


def wait_for_job(client, jobid):
    result = client.WaitForJobChange(jobid, ['status', 'opresult'], None, None)
    status = result['job_info'][0]
    while status not in ['success', 'error', 'canceled']:
        result = client.WaitForJobChange(jobid, ['status', 'opresult'],
                                         [result], None)
        status = result['job_info'][0]

    if status == 'success':
        return (status, None)
    else:
        error = result['job_info'][1]
        return (status, error)
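

# Illustrative usage of wait_for_job() (hypothetical job id), mirroring the
# synced helpers above:
#
#   with pooled_rapi_client(backend) as client:
#       status, error = wait_for_job(client, 12345)
#       if status != 'success':
#           log.error("Job failed: %s", error)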