root / snf-cyclades-app / synnefo / logic / reconciliation.py @ 0c09b1c0
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
#
"""Business logic for reconciliation

Reconcile the contents of the DB with the actual state of the
Ganeti backend.

Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
RULES:
    R1. Stale servers in DB:
            For any v in D but not in G:
            Set deleted=True.
    R2. Orphan instances in Ganeti:
            For any v in G with deleted=True in D:
            Issue OP_INSTANCE_DESTROY.
    R3. Unsynced operstate:
            For any v whose operating state differs between G and D:
            Set the operating state in D based on the state in G.
In the code, D and G are Python dicts mapping instance ids to operating state.
For D, the operating state is chosen from VirtualMachine.OPER_STATES.
For G, the operating state is True if the machine is up, False otherwise.

"""


from django.core.management import setup_environ
from django.conf import settings

import logging
import itertools
import bitarray
from datetime import datetime

from django.db import transaction
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
                               pooled_rapi_client, Network,
                               BackendNetwork)
from synnefo.db.pools import IPPool
from synnefo.logic import utils, backend as backend_mod

logger = logging.getLogger()
logging.basicConfig()

try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60

GANETI_JOB_ERROR = "error"
GANETI_JOBS_PENDING = ["queued", "waiting", "running", "canceling"]
GANETI_JOBS_FINALIZED = ["success", "error", "canceled"]
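# Ganeti job states are grouped above into pending and finalized buckets;
# get_build_status() and reconcile_pending_task() below check a job's status
# against GANETI_JOB_ERROR and GANETI_JOBS_FINALIZED to decide whether it is
# still running, finished, or failed.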


class BackendReconciler(object):
    def __init__(self, backend, logger, options=None):
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        self.backend.put_client(self.client)

    @transaction.commit_on_success
    def reconcile(self):
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.gnt_jobs = get_ganeti_jobs(backend)
        log.debug("Got jobs from Ganeti backend")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

    def get_build_status(self, db_server):
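        """Return the state of the Ganeti job that created db_server.

        The result is "ERROR" if the job failed or is not known to Ganeti,
        "RUNNING" if it has not finished yet, and "FINALIZED" otherwise.
        """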
        job_id = db_server.backendjobid
        if job_id in self.gnt_jobs:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status == GANETI_JOB_ERROR:
                return "ERROR"
            elif gnt_job_status not in GANETI_JOBS_FINALIZED:
                return "RUNNING"
            else:
                return "FINALIZED"
        else:
            return "ERROR"

    def reconcile_stale_servers(self):
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them
        if stale and self.options["fix_stale"]:
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options["fix_orphans"]:
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        #log = self.log
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)
            if db_server.task is not None:
                self.reconcile_pending_task(server_id, db_server)

    def reconcile_building_server(self, db_server):
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options["fix_unsynced"]:
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options["fix_unsynced"]:
                # If the server is in BUILD state, we first have to reconcile
                # its creation, to avoid wrong quotas
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options["fix_unsynced_flavors"]:
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=0,
                    opcode=opcode, status='success',
                    beparams=beparams,
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        if backend_mod.nics_changed(db_nics, gnt_nics_parsed):
            msg = "Found unsynced NICs for server '%s'.\n\t"\
                  "DB: %s\n\tGaneti: %s"
            db_nics_str = ", ".join(map(format_db_nic, db_nics))
            gnt_nics_str = ", ".join(map(format_gnt_nic, gnt_nics_parsed))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options["fix_unsynced_nics"]:
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        pass

    def reconcile_pending_task(self, server_id, db_server):
        job_id = db_server.task_job_id
        pending_task = False
        if job_id not in self.gnt_jobs:
            pending_task = True
        else:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status in GANETI_JOBS_FINALIZED:
                pending_task = True

        if pending_task:
            self.log.info("Found server '%s' with pending task: '%s'",
                          server_id, db_server.task)
            if self.options["fix_pending_tasks"]:
                db_server.task = None
                db_server.task_job_id = None
                db_server.save()
                self.log.info("Cleared pending task for server '%s'",
                              server_id)


def format_db_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" % (nic.index,
           nic.ipv4, nic.network_id, nic.mac, nic.firewall_profile)


def format_gnt_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" %\
           (nic["index"], nic["ipv4"], nic["network"], nic["mac"],
            nic["firewall_profile"])


#
# Networks
#


def get_networks_from_ganeti(backend):
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as c:
        for net in c.GetNetworks(bulk=True):
            if net['name'].startswith(prefix):
                id = utils.id_from_network_name(net['name'])
                networks[id] = net

    return networks


def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    """
    def get_network_groups(group_list):
        groups = set()
        for g in group_list:
            g_name = g.split('(')[0]
            groups.add(g_name)
        return groups

    with pooled_rapi_client(backend) as c:
        groups = set(c.GetGroups())

    hanging = {}
    for id, info in GNets.items():
        group_list = get_network_groups(info['group_list'])
        if group_list != groups:
            hanging[id] = groups - group_list
    return hanging


def get_online_backends():
    return Backend.objects.filter(offline=False)


def get_database_servers(backend):
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return dict([(s.id, s) for s in servers])


def get_ganeti_servers(backend):
    gnt_instances = backend_mod.get_instances(backend)
    # Filter out non-synnefo instances
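    # (Synnefo instances are recognised purely by name: anything starting with
    # BACKEND_PREFIX_ID is ours; e.g. a hypothetical prefix "snf-" would match
    # an instance named "snf-42", which utils.id_from_instance_name() maps to
    # server id 42.)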
    snf_backend_prefix = settings.BACKEND_PREFIX_ID
    gnt_instances = filter(lambda i: i["name"].startswith(snf_backend_prefix),
                           gnt_instances)
    gnt_instances = map(parse_gnt_instance, gnt_instances)
    return dict([(i["id"], i) for i in gnt_instances if i["id"] is not None])


def parse_gnt_instance(instance):
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        # Return a dict with a null id, so get_ganeti_servers() filters it out
        return {"id": None}

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,  # FIX
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }


def nics_from_instance(i):
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks'])
    # modes = zip(itertools.repeat('mode'), i['nic.modes'])
    # links = zip(itertools.repeat('link'), i['nic.links'])
    # nics = zip(ips,macs,modes,networks,links)
    nics = zip(ips, macs, networks)
    nics = map(lambda x: dict(x), nics)
    #nics = dict(enumerate(nics))
    tags = i["tags"]
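    # Firewall profiles are carried as instance tags of the (assumed) form
    # "synnefo:network:<nic index>:<firewall profile>", e.g. a hypothetical
    # tag "synnefo:network:0:protected"; the loop below copies the profile
    # onto the matching NIC.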
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics


def get_ganeti_jobs(backend):
    gnt_jobs = backend_mod.get_jobs(backend)
    return dict([(int(j["id"]), j) for j in gnt_jobs])


def disks_from_instance(i):
    return dict([(index, {"size": size})
                 for index, size in enumerate(i["disk.sizes"])])


class NetworkReconciler(object):
    def __init__(self, logger, fix=False, conflicting_ips=False):
        self.log = logger
        self.conflicting_ips = conflicting_ips
        self.fix = fix

    @transaction.commit_on_success
    def reconcile_networks(self):
        # Get models from DB
        backends = Backend.objects.exclude(offline=True)
        networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all ganeti backends
        ganeti_networks = {}
        ganeti_hanging_networks = {}
        for b in backends:
            g_nets = get_networks_from_ganeti(b)
            ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            ganeti_hanging_networks[b] = g_hanging_nets

        # Perform reconciliation for each network
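        # For every (network, backend) pair the loop below handles, in order:
        # missing or stale DB entries, networks missing from Ganeti, networks
        # not connected to all nodegroups (CASE-3), unsynced network state
        # (CASE-4) and unsynced IP pools (CASE-5), plus, optionally,
        # conflicting IPs; orphan Ganeti networks (CASE-6) are handled once,
        # after the loop.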
        for network in networks:
            ip_available_maps = []
            ip_reserved_maps = []
            for bend in backends:
                bnet = get_backend_network(network, bend)
                gnet = ganeti_networks[bend].get(network.id)
                if not bnet:
                    if network.floating_ip_pool:
                        # Network is a floating IP pool and does not exist in
                        # backend. We need to create it
                        bnet = self.reconcile_parted_network(network, bend)
                    elif not gnet:
                        # Network exists neither in Ganeti nor in the DB.
                        continue
                    else:
                        # Network exists in Ganeti and not in DB.
                        if network.action != "DESTROY" and not network.public:
                            bnet = self.reconcile_parted_network(network, bend)
                        else:
                            continue

                if not gnet:
                    # Network does not exist in Ganeti. If the network action
                    # is DESTROY, we have to mark as deleted in DB, else we
                    # have to create it in Ganeti.
                    if network.action == "DESTROY":
                        if bnet.operstate != "DELETED":
                            self.reconcile_stale_network(bnet)
                    else:
                        self.reconcile_missing_network(network, bend)
                    # Skip the rest of the reconciliation!
                    continue

                try:
                    hanging_groups = ganeti_hanging_networks[bend][network.id]
                except KeyError:
                    # Network is connected to all nodegroups
                    hanging_groups = []

                if hanging_groups:
                    # CASE-3: Ganeti networks not connected to all nodegroups
                    self.reconcile_hanging_groups(network, bend,
                                                  hanging_groups)
                    continue

                if bnet.operstate != 'ACTIVE':
                    # CASE-4: Unsynced network state. At this point the network
                    # exists and is connected to all nodegroups, so it must be
                    # active!
                    self.reconcile_unsynced_network(network, bend, bnet)

                # Get ganeti IP Pools
                available_map, reserved_map = get_network_pool(gnet)
                ip_available_maps.append(available_map)
                ip_reserved_maps.append(reserved_map)

            if ip_available_maps or ip_reserved_maps:
                # CASE-5: Unsynced IP Pools
                self.reconcile_ip_pools(network, ip_available_maps,
                                        ip_reserved_maps)

            if self.conflicting_ips:
                self.detect_conflicting_ips(network)

        # CASE-6: Orphan networks
        self.reconcile_orphan_networks(networks, ganeti_networks)

    def reconcile_parted_network(self, network, backend):
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                "Reconciliation simulated event")

    def reconcile_ip_pools(self, network, available_maps, reserved_maps):
        available_map = reduce(lambda x, y: x & y, available_maps)
        reserved_map = reduce(lambda x, y: x & y, reserved_maps)
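        # Each backend reports its own bitmap for this network; ANDing them
        # keeps only the addresses that every backend agrees are available
        # (respectively reserved), and that combined view is compared against
        # the pool stored in the DB.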

        pool = network.get_pool()
        # Temporarily release unused floating IPs
        temp_pool = network.get_pool()
        used_ips = network.nics.values_list("ipv4", flat=True)
        unused_static_ips = network.floating_ips.exclude(ipv4__in=used_ips)
        map(lambda ip: temp_pool.put(ip.ipv4), unused_static_ips)
        if temp_pool.available != available_map:
            self.log.info("D: Unsynced available map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network,
                          temp_pool.available.to01(),
                          available_map.to01())
            if self.fix:
                pool.available = available_map
                # Re-reserve the unused floating IPs, as they are not included
                # in the available map
                map(lambda ip: pool.reserve(ip.ipv4), unused_static_ips)
                pool.save()
        if pool.reserved != reserved_map:
            self.log.info("D: Unsynced reserved map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network, pool.reserved.to01(),
                          reserved_map.to01())
            if self.fix:
                pool.reserved = reserved_map
                pool.save()

    def detect_conflicting_ips(self, network):
        """Detect NICs that have the same IP in the same network."""
        machine_ips = network.nics.all().values_list('ipv4', 'machine')
        ips = map(lambda x: x[0], machine_ips)
        distinct_ips = set(ips)
        if len(distinct_ips) < len(ips):
            for i in distinct_ips:
                ips.remove(i)
            for i in ips:
                machines = [utils.id_to_instance_name(x[1])
                            for x in machine_ips if x[0] == i]
                self.log.info('D: Conflicting IP:%s Machines: %s',
                              i, ', '.join(machines))

    def reconcile_orphan_networks(self, db_networks, ganeti_networks):
        # Detect Orphan Networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        for back_end, g_networks in ganeti_networks.items():
            ganeti_network_ids = set(g_networks.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan Networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in DB!",
                                          net_id)


def get_backend_network(network, backend):
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None


def get_network_pool(gnet):
    """Return available and reserved IP maps.

    Extract the available and reserved IP map from the info returned by
    Ganeti for a network.

    """
    converter = IPPool(Foo(gnet['network']))
    a_map = bitarray_from_map(gnet['map'])
    a_map.invert()
    reserved = gnet['external_reservations']
    r_map = a_map.copy()
    r_map.setall(True)
    if reserved:
        for address in reserved.split(','):
            index = converter.value_to_index(address.strip())
            a_map[index] = True
            r_map[index] = False
    return a_map, r_map


def bitarray_from_map(bitmap):
    return bitarray.bitarray(bitmap.replace("X", "1").replace(".", "0"))


class Foo():
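    """Dummy stand-in for the pool table row that IPPool normally wraps.

    It carries just the attributes IPPool appears to rely on here
    (available_map, reserved_map, size and network.subnet/gateway), so that
    get_network_pool() can use value_to_index() for address conversion.
    """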
    def __init__(self, subnet):
        self.available_map = ''
        self.reserved_map = ''
        self.size = 0
        self.network = Foo.Foo1(subnet)

    class Foo1():
        def __init__(self, subnet):
            self.subnet = subnet
            self.gateway = None