Statistics
| Branch: | Tag: | Revision:

root / snf-cyclades-app / synnefo / logic / backend.py @ ca4d59e3

History | View | Annotate | Download (27.4 kB)

1
# Copyright 2011 GRNET S.A. All rights reserved.
2
#
3
# Redistribution and use in source and binary forms, with or
4
# without modification, are permitted provided that the following
5
# conditions are met:
6
#
7
#   1. Redistributions of source code must retain the above
8
#      copyright notice, this list of conditions and the following
9
#      disclaimer.
10
#
11
#   2. Redistributions in binary form must reproduce the above
12
#      copyright notice, this list of conditions and the following
13
#      disclaimer in the documentation and/or other materials
14
#      provided with the distribution.
15
#
16
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
17
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
20
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27
# POSSIBILITY OF SUCH DAMAGE.
28
#
29
# The views and conclusions contained in the software and
30
# documentation are those of the authors and should not be
31
# interpreted as representing official policies, either expressed
32
# or implied, of GRNET S.A.
33
from django.conf import settings
34
from django.db import transaction
35
from datetime import datetime
36

    
37
from synnefo.db.models import (Backend, VirtualMachine, Network,
38
                               BackendNetwork, BACKEND_STATUSES,
39
                               pooled_rapi_client, VirtualMachineDiagnostic,
40
                               Flavor)
41
from synnefo.logic import utils
42
from synnefo import quotas
43
from synnefo.api.util import release_resource
44
from synnefo.util.mac2eui64 import mac2eui64
45

    
46
from logging import getLogger
47
log = getLogger(__name__)
48

    
49

    
50
_firewall_tags = {
51
    'ENABLED': settings.GANETI_FIREWALL_ENABLED_TAG,
52
    'DISABLED': settings.GANETI_FIREWALL_DISABLED_TAG,
53
    'PROTECTED': settings.GANETI_FIREWALL_PROTECTED_TAG}
54

    
55
_reverse_tags = dict((v.split(':')[3], k) for k, v in _firewall_tags.items())
56

    
57

    
58
@transaction.commit_on_success
def process_op_status(vm, etime, jobid, opcode, status, logmsg, nics=None,
                      beparams=None):
    """Process a job progress notification from the backend.

    Process an incoming message from the backend (currently Ganeti).
    Job notifications with a terminating status (success, error, or canceled)
    also update the operating state of the VM.

    :param vm: the VirtualMachine the job refers to.
    :param etime: time the event occurred at the backend.
    :param jobid: the Ganeti job id.
    :param opcode: the Ganeti opcode (e.g. 'OP_INSTANCE_CREATE').
    :param status: the job status; must be one of BACKEND_STATUSES.
    :param logmsg: the raw job log message.
    :param nics: NIC configuration reported by the backend, if any.
    :param beparams: backend parameters; sent only for OP_INSTANCE_SET_PARAMS.
    :raises VirtualMachine.InvalidBackendMsgError: on an unknown status.
    """
    # See #1492, #1031, #1111 why this line has been removed
    #if (opcode not in [x[0] for x in VirtualMachine.BACKEND_OPCODES] or
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise VirtualMachine.InvalidBackendMsgError(opcode, status)

    # Record the raw job information on the VM.
    vm.backendjobid = jobid
    vm.backendjobstatus = status
    vm.backendopcode = opcode
    vm.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = VirtualMachine.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        vm.operstate = state_for_success

    if status == "success" and nics is not None:
        # Update the NICs of the VM
        _process_net_status(vm, etime, nics)

    if beparams:
        assert(opcode == "OP_INSTANCE_SET_PARAMS"), "'beparams' should exist"\
                                                    " only for SET_PARAMS"
        # VM Resize
        if status == "success":
            # VM has been resized. Change the flavor
            _process_resize(vm, beparams)
            vm.operstate = "STOPPED"
        elif status in ("canceled", "error"):
            # Resize job failed; the instance keeps its old flavor.
            vm.operstate = "STOPPED"
        else:
            # Job still in progress.
            vm.operstate = "RESIZE"

    # Special case: if OP_INSTANCE_CREATE fails --> ERROR
    if opcode == 'OP_INSTANCE_CREATE' and status in ('canceled', 'error'):
        vm.operstate = 'ERROR'
        vm.backendtime = etime
    elif opcode == 'OP_INSTANCE_REMOVE':
        # Set the deleted flag explicitly, to cater for admin-initiated
        # removals.
        # Special case: OP_INSTANCE_REMOVE fails for machines in ERROR,
        # when no instance exists at the Ganeti backend.
        # See ticket #799 for all the details.
        #
        if status == 'success' or (status == 'error' and
                                   vm.operstate == 'ERROR'):
            # Drop all NICs (releasing their addresses) before deletion.
            _process_net_status(vm, etime, nics=[])
            vm.deleted = True
            vm.operstate = state_for_success
            vm.backendtime = etime
            # Issue and accept commission to Quotaholder
            quotas.issue_and_accept_commission(vm, delete=True)

    # Update backendtime only for jobs that have been successfully completed,
    # since only these jobs update the state of the VM. Else a "race condition"
    # may occur when a successful job (e.g. OP_INSTANCE_REMOVE) completes
    # before an error job and messages arrive in reversed order.
    if status == 'success':
        vm.backendtime = etime

    vm.save()
127

    
128

    
129
def _process_resize(vm, beparams):
    """Change flavor of a VirtualMachine based on new beparams.

    Looks up a Flavor matching the new vcpus/ram, keeping the old disk size
    and disk template, and assigns it to the VM.

    :raises Exception: if no matching Flavor exists in the DB.
    """
    old_flavor = vm.flavor
    # NOTE(review): because of the `or old_flavor.cpu` fallback, vcpus can
    # only be None if old_flavor.cpu is None, so the `vcpus is None` test
    # below looks mostly dead -- confirm intended semantics.
    vcpus = beparams.get("vcpus", None) or old_flavor.cpu
    minmem, maxmem = beparams.get("minmem"), beparams.get("maxmem")
    # A resize is expected to always carry minmem == maxmem.
    assert(minmem == maxmem), "Different minmem from maxmem"
    if vcpus is None and maxmem is None:
        # Nothing to change; keep the current flavor.
        return
    ram = maxmem or old_flavor.ram
    try:
        new_flavor = Flavor.objects.get(cpu=vcpus, ram=ram,
                                        disk=old_flavor.disk,
                                        disk_template=old_flavor.disk_template)
    except Flavor.DoesNotExist:
        raise Exception("Can not find flavor for VM")
    vm.flavor = new_flavor
    vm.save()
146

    
147

    
148
@transaction.commit_on_success
def process_net_status(vm, etime, nics):
    """Transactional entry point for NIC synchronisation.

    Runs _process_net_status inside a single DB transaction.
    """
    _process_net_status(vm, etime, nics)
152

    
153

    
154
def _process_net_status(vm, etime, nics):
    """Process a net status notification from the backend.

    Process an incoming message from the Ganeti backend,
    detailing the NIC configuration of a VM instance.

    Update the state of the VM in the DB accordingly.

    :param vm: the VirtualMachine whose NICs are synchronised.
    :param etime: time of the event at the backend.
    :param nics: NIC list as reported by the Ganeti hooks.
    """
    ganeti_nics = process_ganeti_nics(nics)
    if not nics_changed(vm.nics.order_by('index'), ganeti_nics):
        log.debug("NICs for VM %s have not changed", vm)
        return

    # Get X-Lock on backend before getting X-Lock on network IP pools, to
    # guarantee that no deadlock will occur with Backend allocator.
    Backend.objects.select_for_update().get(id=vm.backend_id)

    # Drop the old NICs, returning their IPv4 addresses to the pools.
    release_instance_nics(vm)

    for nic in ganeti_nics:
        ipv4 = nic.get('ipv4', '')
        net = nic['network']
        if ipv4:
            # Mark the address as used in the network's pool.
            net.reserve_address(ipv4)

        nic['dirty'] = False
        vm.nics.create(**nic)
        # Dummy save the network, because UI uses changed-since for VMs
        # and Networks in order to show the VM NICs
        net.save()

    vm.backendtime = etime
    vm.save()
188

    
189

    
190
def process_ganeti_nics(ganeti_nics):
    """Translate NIC dicts from Ganeti hooks into DB-ready NIC dicts."""
    db_nics = []
    for index, hook_nic in enumerate(ganeti_nics):
        # Resolve the DB Network from the Ganeti network name.
        net_name = str(hook_nic.get('network', ''))
        net = Network.objects.get(pk=utils.id_from_network_name(net_name))

        # Extract the new NIC info.
        mac = hook_nic.get('mac', '')
        ipv4 = hook_nic.get('ip', '')
        # Derive the IPv6 address from the MAC when the network has a
        # v6 subnet.
        ipv6 = mac2eui64(mac, net.subnet6) if net.subnet6 else ''

        # Map the Ganeti firewall tag back to a profile name; public
        # networks fall back to the configured default profile.
        profile = _reverse_tags.get(hook_nic.get('firewall', ''), '')
        if not profile and net.public:
            profile = settings.DEFAULT_FIREWALL_PROFILE

        db_nics.append({'index': index,
                        'network': net,
                        'mac': mac,
                        'ipv4': ipv4,
                        'ipv6': ipv6,
                        'firewall_profile': profile,
                        'state': 'ACTIVE'})
    return db_nics
224

    
225

    
226
def nics_changed(old_nics, new_nics):
    """Return True if NICs have changed in any way."""
    if len(old_nics) != len(new_nics):
        return True
    fields = ["ipv4", "ipv6", "mac", "firewall_profile", "index", "network"]
    # Compare each DB NIC (attributes) with its Ganeti counterpart (dict).
    return any(getattr(db_nic, field) != hook_nic[field]
               for db_nic, hook_nic in zip(old_nics, new_nics)
               for field in fields)
236

    
237

    
238
def release_instance_nics(vm):
    """Delete every NIC of a VM, returning used IPv4 addresses to their
    network pools and saving each affected network."""
    for nic in vm.nics.all():
        network = nic.network
        if nic.ipv4:
            network.release_address(nic.ipv4)
        nic.delete()
        network.save()
245

    
246

    
247
@transaction.commit_on_success
def process_network_status(back_network, etime, jobid, opcode, status, logmsg):
    """Process a job notification for a network at a specific backend.

    Records the job info on the BackendNetwork, adjusts its operstate for
    terminating statuses and finally refreshes the state of the parent
    Network.

    :raises Network.InvalidBackendMsgError: on an unknown status.
    """
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    # Record the raw job information.
    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    back_network.backendopcode = opcode
    back_network.backendlogmsg = logmsg

    # Notifications of success change the operating state
    state_for_success = BackendNetwork.OPER_STATE_FROM_OPCODE.get(opcode, None)
    if status == 'success' and state_for_success is not None:
        back_network.operstate = state_for_success

    # A failed OP_NETWORK_ADD leaves the backend network in ERROR.
    if status in ('canceled', 'error') and opcode == 'OP_NETWORK_ADD':
        back_network.operstate = 'ERROR'
        back_network.backendtime = etime

    if opcode == 'OP_NETWORK_REMOVE':
        # Removal is also accepted for networks in ERROR with a failed
        # remove job, when no network exists at the Ganeti backend.
        if status == 'success' or (status == 'error' and
                                   back_network.operstate == 'ERROR'):
            back_network.operstate = state_for_success
            back_network.deleted = True
            back_network.backendtime = etime

    # Update backendtime only for successfully completed jobs.
    if status == 'success':
        back_network.backendtime = etime
    back_network.save()
    # Also you must update the state of the Network!!
    update_network_state(back_network.network)
278

    
279

    
280
def update_network_state(network):
    """Update the state of a Network based on BackendNetwork states.

    Update the state of a Network based on the operstate of the networks in
    the backends that network exists.

    The state of the network is:
    * ACTIVE: If it is 'ACTIVE' in at least one backend.
    * DELETED: If it is 'DELETED' in all backends that have been created.

    This function also releases the resources (MAC prefix or Bridge) and the
    quotas for the network.

    """
    if network.deleted:
        # Network has already been deleted. Just assert that state is also
        # DELETED
        if not network.state == "DELETED":
            network.state = "DELETED"
            network.save()
        return

    backend_states = [s.operstate for s in network.backend_networks.all()]
    if not backend_states and network.action != "DESTROY":
        # No backend networks exist yet and no destroy is pending: the
        # network counts as ACTIVE.
        # Fix: return unconditionally here. Previously the `return` was
        # inside the inner `if`, so a network that was already ACTIVE fell
        # through to the reduce() below with an empty backend_states list,
        # which evaluates to "DELETED" and wrongly deleted the network.
        if network.state != "ACTIVE":
            network.state = "ACTIVE"
            network.save()
        return

    # Network is deleted when all BackendNetworks go to "DELETED" operstate
    deleted = reduce(lambda x, y: x == y and "DELETED", backend_states,
                     "DELETED")

    # Release the resources on the deletion of the Network
    if deleted:
        log.info("Network %r deleted. Releasing link %r mac_prefix %r",
                 network.id, network.mac_prefix, network.link)
        network.deleted = True
        network.state = "DELETED"
        # Only pooled resources are returned to their pools.
        if network.mac_prefix:
            if network.FLAVORS[network.flavor]["mac_prefix"] == "pool":
                release_resource(res_type="mac_prefix",
                                 value=network.mac_prefix)
        if network.link:
            if network.FLAVORS[network.flavor]["link"] == "pool":
                release_resource(res_type="bridge", value=network.link)

        # Issue commission
        if network.userid:
            quotas.issue_and_accept_commission(network, delete=True)
        elif not network.public:
            log.warning("Network %s does not have an owner!", network.id)
    network.save()
333

    
334

    
335
@transaction.commit_on_success
def process_network_modify(back_network, etime, jobid, opcode, status,
                           add_reserved_ips, remove_reserved_ips):
    """Process an OP_NETWORK_SET_PARAMS job notification.

    Records the job info on the BackendNetwork and synchronises the
    network's IP pool with externally reserved/released addresses.

    :raises Network.InvalidBackendMsgError: on an unknown status.
    """
    assert (opcode == "OP_NETWORK_SET_PARAMS")
    if status not in [x[0] for x in BACKEND_STATUSES]:
        raise Network.InvalidBackendMsgError(opcode, status)

    back_network.backendjobid = jobid
    back_network.backendjobstatus = status
    # Fix: record the opcode in 'backendopcode', consistently with
    # process_network_status and process_op_status; the previous code set a
    # plain 'opcode' attribute instead.
    back_network.backendopcode = opcode

    if add_reserved_ips or remove_reserved_ips:
        net = back_network.network
        pool = net.get_pool()
        if add_reserved_ips:
            # Mark externally reserved addresses as used.
            for ip in add_reserved_ips:
                pool.reserve(ip, external=True)
        if remove_reserved_ips:
            # Return externally released addresses to the pool.
            for ip in remove_reserved_ips:
                pool.put(ip, external=True)
        pool.save()

    # Update backendtime only for successfully completed jobs.
    if status == 'success':
        back_network.backendtime = etime
    back_network.save()
360

    
361

    
362
@transaction.commit_on_success
def process_create_progress(vm, etime, progress):
    """Record the build progress of a VM from a create-progress message."""
    percentage = int(progress)
    if percentage < 0:
        raise ValueError("Percentage cannot be negative")
    # The percentage may exceed 100%, due to the way
    # snf-image:copy-progress tracks bytes read by image handling processes.
    percentage = min(percentage, 100)

    # FIXME: log a warning here, see #1033
    # (build percentage should increase monotonically:
    #  old last_update vs. new percentage)

    # This assumes that no message of type 'ganeti-create-progress' is going
    # to arrive once OP_INSTANCE_CREATE has succeeded for a Ganeti instance
    # and the instance is STARTED. What if the two messages are processed by
    # two separate dispatcher threads, and the 'ganeti-op-status' message
    # for successful creation gets processed before the
    # 'ganeti-create-progress' message? [vkoukis]
    #
    #if not vm.operstate == 'BUILD':
    #    raise VirtualMachine.IllegalState("VM is not in building state")

    vm.buildpercentage = percentage
    vm.backendtime = etime
    vm.save()
391

    
392

    
393
def create_instance_diagnostic(vm, message, source, level="DEBUG", etime=None,
                               details=None):
    """Create a diagnostic entry for a virtual machine instance.

    :param vm: VirtualMachine instance to create diagnostic for.
    :param message: Diagnostic message.
    :param source: Diagnostic source identifier (e.g. image-helper).
    :param level: Diagnostic level (`DEBUG`, `INFO`, `WARNING`, `ERROR`).
    :param etime: The time the message occured (if available).
    :param details: Additional details or debug information.
    """
    VirtualMachineDiagnostic.objects.create_for_vm(vm, level,
                                                   source=source,
                                                   source_date=etime,
                                                   message=message,
                                                   details=details)
409

    
410

    
411
def create_instance(vm, public_nic, flavor, image):
    """Create a new instance at the VM's Ganeti backend.

    `image` is a dictionary which should contain the keys:
        'backend_id', 'format' and 'metadata'

    metadata value should be a dictionary.

    :returns: the result of the RAPI CreateInstance call (the job id).
    """
    # Handle arguments to CreateInstance() as a dictionary,
    # initialize it based on a deployment-specific value.
    # This enables the administrator to override deployment-specific
    # arguments, such as the disk template to use, name of os provider
    # and hypervisor-specific parameters at will (see Synnefo #785, #835).
    #
    kw = vm.backend.get_create_params()
    kw['mode'] = 'create'
    kw['name'] = vm.backend_vm_id
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS

    kw['disk_template'] = flavor.disk_template
    # flavor.disk is multiplied by 1024 -- presumably GiB to MiB for
    # Ganeti; confirm against Flavor model.
    kw['disks'] = [{"size": flavor.disk * 1024}]
    provider = flavor.disk_provider
    if provider:
        kw['disks'][0]['provider'] = provider
        kw['disks'][0]['origin'] = flavor.disk_origin

    kw['nics'] = [public_nic]
    if vm.backend.use_hotplug():
        kw['hotplug'] = True
    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['os'] = settings.GANETI_OS_PROVIDER
    kw['ip_check'] = False
    kw['name_check'] = False

    # Do not specify a node explicitly, have
    # Ganeti use an iallocator instead
    #kw['pnode'] = rapi.GetNodes()[0]

    kw['dry_run'] = settings.TEST

    kw['beparams'] = {
        'auto_balance': True,
        'vcpus': flavor.cpu,
        'memory': flavor.ram}

    kw['osparams'] = {
        'config_url': vm.config_url,
        # Store image id and format to Ganeti
        'img_id': image['backend_id'],
        'img_format': image['format']}

    # Defined in settings.GANETI_CREATEINSTANCE_KWARGS
    # kw['hvparams'] = dict(serial_console=False)

    # hide_pass strips sensitive values before logging.
    log.debug("Creating instance %s", utils.hide_pass(kw))
    with pooled_rapi_client(vm) as client:
        return client.CreateInstance(**kw)
467

    
468

    
469
def delete_instance(vm):
    """Remove the instance from its Ganeti backend."""
    with pooled_rapi_client(vm) as rapi:
        return rapi.DeleteInstance(vm.backend_vm_id, dry_run=settings.TEST)
472

    
473

    
474
def reboot_instance(vm, reboot_type):
    """Reboot a Ganeti instance; reboot_type is 'soft' or 'hard'."""
    assert reboot_type in ('soft', 'hard')
    with pooled_rapi_client(vm) as rapi:
        return rapi.RebootInstance(vm.backend_vm_id, reboot_type,
                                   dry_run=settings.TEST)
479

    
480

    
481
def startup_instance(vm):
    """Start a stopped Ganeti instance."""
    with pooled_rapi_client(vm) as rapi:
        return rapi.StartupInstance(vm.backend_vm_id, dry_run=settings.TEST)
484

    
485

    
486
def shutdown_instance(vm):
    """Shut down a running Ganeti instance."""
    with pooled_rapi_client(vm) as rapi:
        return rapi.ShutdownInstance(vm.backend_vm_id, dry_run=settings.TEST)
489

    
490

    
491
def resize_instance(vm, vcpus, memory):
    """Modify the CPU/RAM backend parameters of a Ganeti instance."""
    memory = int(memory)
    # minmem and maxmem are always set to the same value (see
    # _process_resize, which asserts minmem == maxmem).
    beparams = {"vcpus": int(vcpus),
                "minmem": memory,
                "maxmem": memory}
    with pooled_rapi_client(vm) as rapi:
        return rapi.ModifyInstance(vm.backend_vm_id, beparams=beparams)
497

    
498

    
499
def get_instance_console(vm):
    """Build a VNC console info dict for a VM.

    RAPI GetInstanceConsole() returns endpoints to the vnc_bind_address,
    which is a cluster-wide setting, either 0.0.0.0 or 127.0.0.1, and pretty
    useless (see #783). Until this is fixed on the Ganeti side, construct a
    console info reply directly.

    WARNING: This assumes that VNC runs on port network_port on the
    instance's primary node, and is probably hypervisor-specific.
    """
    log.debug("Getting console for vm %s", vm)

    with pooled_rapi_client(vm) as rapi:
        info = rapi.GetInstance(vm.backend_vm_id)

    if vm.backend.hypervisor == "kvm" and info['hvparams']['serial_console']:
        raise Exception("hv parameter serial_console cannot be true")

    return {'kind': 'vnc',
            'host': info['pnode'],
            'port': info['network_port']}
525

    
526

    
527
def get_instance_info(vm):
    """Fetch detailed instance info from the VM's Ganeti backend."""
    with pooled_rapi_client(vm) as rapi:
        return rapi.GetInstanceInfo(vm.backend_vm_id)
530

    
531

    
532
def create_network(network, backend, connect=True):
    """Create a network in a Ganeti backend.

    When `connect` is true, the network is also connected to all nodegroups
    of the backend, with the connect jobs depending on the creation job.

    :returns: the list of submitted Ganeti job ids.
    """
    log.debug("Creating network %s in backend %s", network, backend)

    job_id = _create_network(network, backend)

    if not connect:
        return [job_id]
    return connect_network(network, backend, depends=[job_id])
543

    
544

    
545
def _create_network(network, backend):
    """Create a network at a Ganeti backend, without connecting it.

    :returns: the Ganeti job id of the CreateNetwork job.
    :raises Exception: if no BackendNetwork exists for this (network,
        backend) pair.
    """
    network_type = 'public' if network.public else 'private'

    # Fix: copy the tag list; the old code appended directly to
    # network.backend_tag, mutating the network object as a side effect.
    tags = list(network.backend_tag)
    if network.dhcp:
        tags.append('nfdhcpd')

    # Ganeti conflict checking is enabled only for public networks.
    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    try:
        bn = BackendNetwork.objects.get(network=network, backend=backend)
        mac_prefix = bn.mac_prefix
    except BackendNetwork.DoesNotExist:
        raise Exception("BackendNetwork for network '%s' in backend '%s'"
                        " does not exist" % (network.id, backend.id))

    with pooled_rapi_client(backend) as client:
        return client.CreateNetwork(network_name=network.backend_id,
                                    network=network.subnet,
                                    network6=network.subnet6,
                                    gateway=network.gateway,
                                    gateway6=network.gateway6,
                                    network_type=network_type,
                                    mac_prefix=mac_prefix,
                                    conflicts_check=conflicts_check,
                                    tags=tags)
576

    
577

    
578
def connect_network(network, backend, depends=None, group=None):
    """Connect a network to nodegroups.

    :param depends: job ids the ConnectNetwork jobs should depend on.
    :param group: connect only this nodegroup; by default all groups of the
        backend are connected.
    :returns: the list of submitted Ganeti job ids.
    """
    log.debug("Connecting network %s to backend %s", network, backend)

    # Fix: avoid a mutable default argument for 'depends'.
    if depends is None:
        depends = []

    # Ganeti conflict checking is enabled only for public networks.
    if network.public:
        conflicts_check = True
    else:
        conflicts_check = False

    # A depend job is waited on for any terminating status.
    depends = [[job, ["success", "error", "canceled"]] for job in depends]
    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        # Note: the loop variable is named differently from the 'group'
        # parameter to avoid shadowing it.
        for grp in groups:
            job_id = client.ConnectNetwork(network.backend_id, grp,
                                           network.mode, network.link,
                                           conflicts_check,
                                           depends=depends)
            job_ids.append(job_id)
    return job_ids
598

    
599

    
600
def delete_network(network, backend, disconnect=True):
    """Delete a network from a Ganeti backend, optionally disconnecting it
    from all nodegroups first (the remove job depends on the disconnect
    jobs)."""
    log.debug("Deleting network %s from backend %s", network, backend)

    depends = disconnect_network(network, backend) if disconnect else []
    _delete_network(network, backend, depends=depends)
607

    
608

    
609
def _delete_network(network, backend, depends=None):
    """Submit an OP_NETWORK_REMOVE job, depending on the given job ids.

    :returns: the Ganeti job id of the DeleteNetwork job.
    """
    # Fix: avoid a mutable default argument for 'depends'.
    if depends is None:
        depends = []
    depends = [[job, ["success", "error", "canceled"]] for job in depends]
    with pooled_rapi_client(backend) as client:
        return client.DeleteNetwork(network.backend_id, depends)
613

    
614

    
615
def disconnect_network(network, backend, group=None):
    """Disconnect a network from nodegroups of a backend.

    :param group: disconnect only this nodegroup; by default all groups are
        disconnected.
    :returns: the list of submitted Ganeti job ids.
    """
    # Fix: the log message said "to backend" for a disconnect operation.
    log.debug("Disconnecting network %s from backend %s", network, backend)

    with pooled_rapi_client(backend) as client:
        groups = [group] if group is not None else client.GetGroups()
        job_ids = []
        # Loop variable named differently from the 'group' parameter to
        # avoid shadowing it.
        for grp in groups:
            job_id = client.DisconnectNetwork(network.backend_id, grp)
            job_ids.append(job_id)
    return job_ids
625

    
626

    
627
def connect_to_network(vm, network, address=None):
    """Add a NIC connecting a VM to a network.

    If the network is not yet ACTIVE in the VM's backend, it is first
    created and connected there, and the NIC addition depends on those
    jobs.

    :param address: IPv4 address for the new NIC, or None.
    :returns: the Ganeti job id of the ModifyInstance job.
    """
    backend = vm.backend
    # Lock the Network row to serialize concurrent address/pool operations.
    network = Network.objects.select_for_update().get(id=network.id)
    bnet, created = BackendNetwork.objects.get_or_create(backend=backend,
                                                         network=network)
    depend_jobs = []
    if bnet.operstate != "ACTIVE":
        # Network does not exist (or is not active) in this backend yet:
        # create and connect it first.
        depend_jobs = create_network(network, backend, connect=True)

    # Wait for any terminating status of the prerequisite jobs.
    depends = [[job, ["success", "error", "canceled"]] for job in depend_jobs]

    nic = {'ip': address, 'network': network.backend_id}

    log.debug("Connecting vm %s to network %s(%s)", vm, network, address)

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=[('add',  nic)],
                                     hotplug=vm.backend.use_hotplug(),
                                     depends=depends,
                                     dry_run=settings.TEST)
647

    
648

    
649
def disconnect_from_network(vm, nic):
    """Remove a NIC (addressed by its index) from a Ganeti instance."""
    log.debug("Removing nic of VM %s, with index %s", vm, str(nic.index))

    nic_ops = [('remove', nic.index, {})]

    with pooled_rapi_client(vm) as client:
        return client.ModifyInstance(vm.backend_vm_id, nics=nic_ops,
                                     hotplug=vm.backend.use_hotplug(),
                                     dry_run=settings.TEST)
658

    
659

    
660
def set_firewall_profile(vm, profile):
    """Apply a firewall profile to a VM by re-tagging its Ganeti instance.

    :param profile: one of the keys of _firewall_tags
        ('ENABLED', 'DISABLED', 'PROTECTED').
    :raises ValueError: for an unknown profile.
    """
    try:
        tag = _firewall_tags[profile]
    except KeyError:
        # Fix: corrected typo in the error message ("Unsopported").
        raise ValueError("Unsupported Firewall Profile: %s" % profile)

    log.debug("Setting tag of VM %s to %s", vm, profile)

    with pooled_rapi_client(vm) as client:
        # Delete all firewall tags
        for t in _firewall_tags.values():
            client.DeleteInstanceTags(vm.backend_vm_id, [t],
                                      dry_run=settings.TEST)

        client.AddInstanceTags(vm.backend_vm_id, [tag], dry_run=settings.TEST)

        # XXX NOP ModifyInstance call to force process_net_status to run
        # on the dispatcher
        os_name = settings.GANETI_CREATEINSTANCE_KWARGS['os']
        client.ModifyInstance(vm.backend_vm_id,
                              os_name=os_name)
681

    
682

    
683
def get_instances(backend, bulk=True):
    """List the instances of a Ganeti backend (bulk output by default)."""
    with pooled_rapi_client(backend) as rapi:
        return rapi.GetInstances(bulk=bulk)
686

    
687

    
688
def get_nodes(backend, bulk=True):
    """List the nodes of a Ganeti backend (bulk output by default)."""
    with pooled_rapi_client(backend) as rapi:
        return rapi.GetNodes(bulk=bulk)
691

    
692

    
693
def get_jobs(backend):
    """List the jobs of a Ganeti backend."""
    with pooled_rapi_client(backend) as rapi:
        return rapi.GetJobs()
696

    
697

    
698
def get_physical_resources(backend):
    """Get the physical resources of a backend.

    Get the resources of a backend as reported by the backend (not the db).
    """
    attrs = ['mfree', 'mtotal', 'dfree', 'dtotal', 'pinst_cnt', 'ctotal']
    totals = dict.fromkeys(attrs, 0)
    for node in get_nodes(backend, bulk=True):
        # Filter out drained, offline and not vm_capable nodes since they
        # will not take part in the vm allocation process
        usable = node['vm_capable'] and not (node['drained'] or
                                             node['offline'])
        if usable and node['cnodes']:
            for attr in attrs:
                totals[attr] += int(node[attr])
    return totals
717

    
718

    
719
def update_resources(backend, resources=None):
    """Update the state of the backend resources in db.

    When no resources dict is given, the current physical resources are
    queried from the backend itself.
    """
    if not resources:
        resources = get_physical_resources(backend)

    for field in ('mfree', 'mtotal', 'dfree', 'dtotal', 'pinst_cnt',
                  'ctotal'):
        setattr(backend, field, resources[field])
    backend.updated = datetime.now()
    backend.save()
735

    
736

    
737
def get_memory_from_instances(backend):
    """Get the memory that is used from instances.

    Get the used memory of a backend. Note: This is different for
    the real memory used, due to kvm's memory de-duplication.
    """
    with pooled_rapi_client(backend) as rapi:
        instances = rapi.GetInstances(bulk=True)
    return sum(instance['oper_ram'] for instance in instances)
750

    
751
##
752
## Synchronized operations for reconciliation
753
##
754

    
755

    
756
def create_network_synced(network, backend):
    """Synchronously create and connect a network in a backend.

    Stops early and returns the failing result if creation does not
    succeed; otherwise returns the result of the connect phase.
    """
    result = _create_network_synced(network, backend)
    if result[0] != 'success':
        return result
    return connect_network_synced(network, backend)
762

    
763

    
764
def _create_network_synced(network, backend):
    """Create a network and block until its creation job terminates."""
    with pooled_rapi_client(backend) as client:
        job_id = _create_network(network, backend)
        return wait_for_job(client, job_id)
769

    
770

    
771
def connect_network_synced(network, backend):
    """Connect a network to all nodegroups of a backend, synchronously.

    Waits for each ConnectNetwork job and stops on the first failure.

    :returns: a (status, error) tuple; ('success', None) when every job
        succeeds, or when the backend has no nodegroups at all.
    """
    # Fix: initialize result, so that a backend with no nodegroups does not
    # raise UnboundLocalError at the final return.
    result = ('success', None)
    with pooled_rapi_client(backend) as client:
        for group in client.GetGroups():
            job = client.ConnectNetwork(network.backend_id, group,
                                        network.mode, network.link)
            result = wait_for_job(client, job)
            if result[0] != 'success':
                return result

    return result
781

    
782

    
783
def wait_for_job(client, jobid):
    """Block until a Ganeti job reaches a terminating status.

    :param client: a Ganeti RAPI client.
    :param jobid: the job to poll via WaitForJobChange.
    :returns: (status, None) on success, otherwise (status, error) where
        error is the job's 'opresult' field.
    """
    fields = ['status', 'opresult']
    result = client.WaitForJobChange(jobid, fields, None, None)
    status = result['job_info'][0]
    # Fix: Ganeti reports canceled jobs with status 'canceled' (the same
    # value used in BACKEND_STATUSES checks elsewhere in this module); the
    # previous 'cancel' value made this loop poll a canceled job forever.
    while status not in ['success', 'error', 'canceled']:
        result = client.WaitForJobChange(jobid, fields, [result], None)
        status = result['job_info'][0]

    if status == 'success':
        return (status, None)
    else:
        error = result['job_info'][1]
        return (status, error)