
snf-cyclades-app/synnefo/logic/reconciliation.py @ 6e9255ab


# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
#
"""Business logic for reconciliation

Reconcile the contents of the DB with the actual state of the
Ganeti backend.

Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
RULES:
    R1. Stale servers in DB:
            For any v in D but not in G:
            Set deleted=True.
    R2. Orphan instances in Ganeti:
            For any v in G with deleted=True in D:
            Issue OP_INSTANCE_DESTROY.
    R3. Unsynced operstate:
            For any v whose operating state differs between G and D:
            Set the operating state in D based on the state in G.
In the code, D, G are Python dicts mapping instance ids to operating state.
For D, the operating state is chosen from VirtualMachine.OPER_STATES.
For G, the operating state is "STARTED" if the machine is up, "STOPPED"
otherwise.

"""


from django.core.management import setup_environ
try:
    from synnefo import settings
except ImportError:
    raise Exception("Cannot import settings, make sure PYTHONPATH contains "
                    "the parent directory of the Synnefo Django project.")
setup_environ(settings)


import logging
import itertools
import bitarray
from datetime import datetime, timedelta

from django.db import transaction
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
                               pooled_rapi_client, Network,
                               BackendNetwork)
from synnefo.db.pools import IPPool
from synnefo.logic import utils, backend as backend_mod
from synnefo.logic.rapi import GanetiApiError

logger = logging.getLogger()
logging.basicConfig()

try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60


class BackendReconciler(object):
    def __init__(self, backend, logger, options=None):
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        self.backend.put_client(self.client)

    @transaction.commit_on_success
    def reconcile(self):
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

    def get_build_status(self, db_server):
        job = db_server.backendjobid
        if job is None:
            created = db_server.created
            # Job has not yet been enqueued.
            if self.event_time < created + timedelta(seconds=60):
                return "RUNNING"
            else:
                return "ERROR"
        else:
            updated = db_server.backendtime
            if self.event_time >= updated + timedelta(seconds=60):
                try:
                    job_info = self.client.GetJobStatus(job_id=job)
                    finalized = ["success", "error", "cancelled"]
                    if job_info["status"] == "error":
                        return "ERROR"
                    elif job_info["status"] not in finalized:
                        return "RUNNING"
                    else:
                        return "FINALIZED"
                except GanetiApiError:
                    return "ERROR"
            else:
                self.log.debug("Pending build for server '%s'", db_server.id)
                return "RUNNING"

    def reconcile_stale_servers(self):
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them
        if stale and self.options["fix_stale"]:
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=-0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options["fix_orphans"]:
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        #log = self.log
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)

    def reconcile_building_server(self, db_server):
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options["fix_unsynced"]:
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=-0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options["fix_unsynced"]:
                # If the server is in BUILD state, we first have to reconcile
                # its creation, to avoid issuing wrong quotas
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=-0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options["fix_unsynced_flavors"]:
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=opcode, status='success',
                    beparams=beparams,
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state.
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        if backend_mod.nics_changed(db_nics, gnt_nics_parsed):
            msg = "Found unsynced NICs for server '%s'.\n\t"\
                  "DB: %s\n\tGaneti: %s"
            db_nics_str = ", ".join(map(format_db_nic, db_nics))
            gnt_nics_str = ", ".join(map(format_gnt_nic, gnt_nics_parsed))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options["fix_unsynced_nics"]:
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        pass


def format_db_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" % (nic.index,
           nic.ipv4, nic.network_id, nic.mac, nic.firewall_profile)


def format_gnt_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" %\
           (nic["index"], nic["ipv4"], nic["network"], nic["mac"],
            nic["firewall_profile"])


#
# Networks
#


def get_networks_from_ganeti(backend):
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as c:
        for net in c.GetNetworks(bulk=True):
            if net['name'].startswith(prefix):
                id = utils.id_from_network_name(net['name'])
                networks[id] = net

    return networks


def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    """
    def get_network_groups(group_list):
        groups = set()
        for g in group_list:
            g_name = g.split('(')[0]
            groups.add(g_name)
        return groups

    with pooled_rapi_client(backend) as c:
        groups = set(c.GetGroups())

    hanging = {}
    for id, info in GNets.items():
        group_list = get_network_groups(info['group_list'])
        if group_list != groups:
            hanging[id] = groups - group_list
    return hanging
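
# Illustrative example, based on the split('(') parsing above, which assumes
# Ganeti reports each connection as "<group>(<mode>, <link>)": a network with
# group_list = ["default(bridged, prv0)"] on a cluster whose nodegroups are
# {"default", "extra"} would be reported here as hanging from {"extra"}.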


def get_online_backends():
    return Backend.objects.filter(offline=False)


def get_database_servers(backend):
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return dict([(s.id, s) for s in servers])


def get_ganeti_servers(backend):
    gnt_instances = backend_mod.get_instances(backend)
    # Filter out non-synnefo instances
    snf_backend_prefix = settings.BACKEND_PREFIX_ID
    gnt_instances = filter(lambda i: i["name"].startswith(snf_backend_prefix),
                           gnt_instances)
    gnt_instances = map(parse_gnt_instance, gnt_instances)
    return dict([(i["id"], i) for i in gnt_instances if i["id"] is not None])


def parse_gnt_instance(instance):
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        # Return a dict with a null id, so that get_ganeti_servers() can
        # filter this instance out.
        return {"id": None}

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,  # FIX
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }


def nics_from_instance(i):
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks'])
    # modes = zip(itertools.repeat('mode'), i['nic.modes'])
    # links = zip(itertools.repeat('link'), i['nic.links'])
    # nics = zip(ips,macs,modes,networks,links)
    nics = zip(ips, macs, networks)
    nics = map(lambda x: dict(x), nics)
    #nics = dict(enumerate(nics))
    tags = i["tags"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics
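
# Illustrative example of the firewall tag convention handled above (the tag
# format is inferred from the parsing code; the profile value is made up): an
# instance tag "synnefo:network:0:ENABLED" sets nics[0]['firewall'] = "ENABLED",
# i.e. the firewall profile of the instance's first NIC.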


def disks_from_instance(i):
    return dict([(index, {"size": size})
                 for index, size in enumerate(i["disk.sizes"])])


class NetworkReconciler(object):
    def __init__(self, logger, fix=False, conflicting_ips=False):
        self.log = logger
        self.conflicting_ips = conflicting_ips
        self.fix = fix

    @transaction.commit_on_success
    def reconcile_networks(self):
        # Get models from DB
        backends = Backend.objects.exclude(offline=True)
        networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all Ganeti backends
        ganeti_networks = {}
        ganeti_hanging_networks = {}
        for b in backends:
            g_nets = get_networks_from_ganeti(b)
            ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            ganeti_hanging_networks[b] = g_hanging_nets

        # Perform reconciliation for each network
        for network in networks:
            ip_available_maps = []
            ip_reserved_maps = []
            for bend in backends:
                bnet = get_backend_network(network, bend)
                gnet = ganeti_networks[bend].get(network.id)
                if not bnet:
                    if network.floating_ip_pool:
                        # Network is a floating IP pool and does not exist in
                        # the backend. We need to create it.
                        bnet = self.reconcile_parted_network(network, bend)
                    elif not gnet:
                        # Network exists neither in Ganeti nor in the DB.
                        continue
                    else:
                        # Network exists in Ganeti but not in the DB.
                        if network.action != "DESTROY" and not network.public:
                            bnet = self.reconcile_parted_network(network, bend)
                        else:
                            continue

                if not gnet:
                    # Network does not exist in Ganeti. If the network action
                    # is DESTROY, we have to mark it as deleted in the DB;
                    # otherwise we have to create it in Ganeti.
                    if network.action == "DESTROY":
                        if bnet.operstate != "DELETED":
                            self.reconcile_stale_network(bnet)
                    else:
                        self.reconcile_missing_network(network, bend)
                    # Skip the rest of the reconciliation!
                    continue

                try:
                    hanging_groups = ganeti_hanging_networks[bend][network.id]
                except KeyError:
                    # Network is connected to all nodegroups
                    hanging_groups = []

                if hanging_groups:
                    # CASE-3: Ganeti networks not connected to all nodegroups
                    self.reconcile_hanging_groups(network, bend,
                                                  hanging_groups)
                    continue

                if bnet.operstate != 'ACTIVE':
                    # CASE-4: Unsynced network state. At this point the network
                    # exists and is connected to all nodes, so it must be
                    # active!
                    self.reconcile_unsynced_network(network, bend, bnet)

                # Get Ganeti IP pools
                available_map, reserved_map = get_network_pool(gnet)
                ip_available_maps.append(available_map)
                ip_reserved_maps.append(reserved_map)

            if ip_available_maps or ip_reserved_maps:
                # CASE-5: Unsynced IP pools
                self.reconcile_ip_pools(network, ip_available_maps,
                                        ip_reserved_maps)

            if self.conflicting_ips:
                self.detect_conflicting_ips(network)

        # CASE-6: Orphan networks
        self.reconcile_orphan_networks(networks, ganeti_networks)

    def reconcile_parted_network(self, network, backend):
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                "Reconciliation simulated event")

    def reconcile_ip_pools(self, network, available_maps, reserved_maps):
        available_map = reduce(lambda x, y: x & y, available_maps)
        reserved_map = reduce(lambda x, y: x & y, reserved_maps)

        pool = network.get_pool()
        # Temporarily release unused floating IPs
        temp_pool = network.get_pool()
        used_ips = network.nics.values_list("ipv4", flat=True)
        unused_static_ips = network.floating_ips.exclude(ipv4__in=used_ips)
        map(lambda ip: temp_pool.put(ip.ipv4), unused_static_ips)
        if temp_pool.available != available_map:
            self.log.info("D: Unsynced available map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network,
                          temp_pool.available.to01(),
                          available_map.to01())
            if self.fix:
                pool.available = available_map
                # Re-reserve the unused floating IPs, as they are not included
                # in the available map
                map(lambda ip: pool.reserve(ip.ipv4), unused_static_ips)
                pool.save()
        if pool.reserved != reserved_map:
            self.log.info("D: Unsynced reserved map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network, pool.reserved.to01(),
                          reserved_map.to01())
            if self.fix:
                pool.reserved = reserved_map
                pool.save()

    def detect_conflicting_ips(self, network):
        """Detect NICs that have the same IP in the same network."""
        machine_ips = network.nics.all().values_list('ipv4', 'machine')
        ips = map(lambda x: x[0], machine_ips)
        distinct_ips = set(ips)
        if len(distinct_ips) < len(ips):
            for i in distinct_ips:
                ips.remove(i)
            for i in ips:
                machines = [utils.id_to_instance_name(x[1])
                            for x in machine_ips if x[0] == i]
                self.log.info('D: Conflicting IP:%s Machines: %s',
                              i, ', '.join(machines))

    def reconcile_orphan_networks(self, db_networks, ganeti_networks):
        # Detect Orphan Networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        for back_end, gnt_networks in ganeti_networks.items():
            ganeti_network_ids = set(gnt_networks.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan Networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in the DB!",
                                          net_id)


def get_backend_network(network, backend):
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None


def get_network_pool(gnet):
    """Return available and reserved IP maps.

    Extract the available and reserved IP maps from the network info returned
    by Ganeti.

    """
    converter = IPPool(Foo(gnet['network']))
    a_map = bitarray_from_map(gnet['map'])
    a_map.invert()
    reserved = gnet['external_reservations']
    r_map = a_map.copy()
    r_map.setall(True)
    if reserved:
        for address in reserved.split(','):
            index = converter.value_to_index(address.strip())
            a_map[index] = True
            r_map[index] = False
    return a_map, r_map


def bitarray_from_map(bitmap):
    return bitarray.bitarray(bitmap.replace("X", "1").replace(".", "0"))
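
# For example (assuming Ganeti's map notation, where 'X' marks a used or
# reserved address and '.' a free one):
#
#   >>> bitarray_from_map("XX..X").to01()
#   '11001'
#
# get_network_pool() above inverts this map (so set bits mean the address is
# not in use) and then applies any 'external_reservations' to both the
# available and reserved maps.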


class Foo():
    # Minimal stand-in object providing just the attributes that IPPool
    # expects, so get_network_pool() can use IPPool to convert IP addresses
    # to bitmap indices.
    def __init__(self, subnet):
        self.available_map = ''
        self.reserved_map = ''
        self.size = 0
        self.network = Foo.Foo1(subnet)

    class Foo1():
        def __init__(self, subnet):
            self.subnet = subnet
            self.gateway = None