# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
#
"""Business logic for reconciliation

Reconcile the contents of the DB with the actual state of the
Ganeti backend.

Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
RULES:
    R1. Stale servers in DB:
            For any v in D but not in G:
            Set deleted=True.
    R2. Orphan instances in Ganeti:
            For any v in G with deleted=True in D:
            Issue OP_INSTANCE_REMOVE.
    R3. Unsynced operstate:
            For any v whose operating state differs between G and D:
            Set the operating state in D based on the state in G.
In the code, D and G are Python dicts mapping instance ids to the
corresponding server representation. For D, the operating state is taken
from VirtualMachine.OPER_STATES. For G, the operating state is "STARTED"
if the instance is up and "STOPPED" otherwise.

"""


from django.conf import settings

import logging
import itertools
import bitarray
from datetime import datetime

from django.db import transaction
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
                               pooled_rapi_client, Network,
                               BackendNetwork)
from synnefo.db.pools import IPPool
from synnefo.logic import utils, backend as backend_mod

logger = logging.getLogger()
logging.basicConfig()

try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60

GANETI_JOB_ERROR = "error"
GANETI_JOBS_PENDING = ["queued", "waiting", "running", "canceling"]
GANETI_JOBS_FINALIZED = ["success", "error", "canceled"]


class BackendReconciler(object):
    def __init__(self, backend, logger, options=None):
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        self.backend.put_client(self.client)

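    # Option flags consulted by the steps below; detection and logging always
    # run, while each fix is applied only when the matching flag is set:
    #   fix_stale, fix_orphans, fix_unsynced, fix_unsynced_flavors,
    #   fix_unsynced_nics, fix_pending_tasks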
    @transaction.commit_on_success
    def reconcile(self):
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.gnt_jobs = get_ganeti_jobs(backend)
        log.debug("Got jobs from Ganeti backend")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

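    # Coarse build status derived from the Ganeti job of a BUILD server: a
    # missing job or a job in GANETI_JOB_ERROR maps to "ERROR", a job not yet
    # in a finalized status to "RUNNING", and anything else (success or
    # canceled) to "FINALIZED".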
    def get_build_status(self, db_server):
        job_id = db_server.backendjobid
        if job_id in self.gnt_jobs:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status == GANETI_JOB_ERROR:
                return "ERROR"
            elif gnt_job_status not in GANETI_JOBS_FINALIZED:
                return "RUNNING"
            else:
                return "FINALIZED"
        else:
            return "ERROR"

    def reconcile_stale_servers(self):
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them
        if stale and self.options["fix_stale"]:
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=-0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options["fix_orphans"]:
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        #log = self.log
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)
            if db_server.task is not None:
                self.reconcile_pending_task(server_id, db_server)

    def reconcile_building_server(self, db_server):
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options["fix_unsynced"]:
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=-0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options["fix_unsynced"]:
                # If the server is in BUILD state, its creation must be
                # reconciled first, to avoid wrong quotas
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=-0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options["fix_unsynced_flavors"]:
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=opcode, status='success',
                    beparams=beparams,
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        if backend_mod.nics_changed(db_nics, gnt_nics_parsed):
            msg = "Found unsynced NICs for server '%s'.\n\t"\
                  "DB: %s\n\tGaneti: %s"
            db_nics_str = ", ".join(map(format_db_nic, db_nics))
            gnt_nics_str = ", ".join(map(format_gnt_nic, gnt_nics_parsed))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options["fix_unsynced_nics"]:
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        pass

    def reconcile_pending_task(self, server_id, db_server):
        job_id = db_server.task_job_id
        pending_task = False
        if job_id not in self.gnt_jobs:
            pending_task = True
        else:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status in GANETI_JOBS_FINALIZED:
                pending_task = True

        if pending_task:
            self.log.info("Found server '%s' with pending task: '%s'",
                          server_id, db_server.task)
            if self.options["fix_pending_tasks"]:
                db_server.task = None
                db_server.task_job_id = None
                db_server.save()
                self.log.info("Cleared pending task for server '%s'",
                              server_id)


def format_db_nic(nic):
    return "Index: %s, IP: %s Network: %s MAC: %s Firewall: %s" % (nic.index,
           nic.ipv4, nic.network_id, nic.mac, nic.firewall_profile)


def format_gnt_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" %\
           (nic["index"], nic["ipv4"], nic["network"], nic["mac"],
            nic["firewall_profile"])


#
# Networks
#


def get_networks_from_ganeti(backend):
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as c:
        for net in c.GetNetworks(bulk=True):
            if net['name'].startswith(prefix):
                id = utils.id_from_network_name(net['name'])
                networks[id] = net

    return networks


def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    """
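    # Each entry in a Ganeti network's 'group_list' is assumed to be of the
    # form "groupname(mode, link)", so only the part before '(' is kept as
    # the nodegroup name.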
    def get_network_groups(group_list):
        groups = set()
        for g in group_list:
            g_name = g.split('(')[0]
            groups.add(g_name)
        return groups

    with pooled_rapi_client(backend) as c:
        groups = set(c.GetGroups())

    hanging = {}
    for id, info in GNets.items():
        group_list = get_network_groups(info['group_list'])
        if group_list != groups:
            hanging[id] = groups - group_list
    return hanging


def get_online_backends():
    return Backend.objects.filter(offline=False)


def get_database_servers(backend):
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return dict([(s.id, s) for s in servers])


def get_ganeti_servers(backend):
    gnt_instances = backend_mod.get_instances(backend)
    # Filter out non-synnefo instances
    snf_backend_prefix = settings.BACKEND_PREFIX_ID
    gnt_instances = filter(lambda i: i["name"].startswith(snf_backend_prefix),
                           gnt_instances)
    gnt_instances = map(parse_gnt_instance, gnt_instances)
    return dict([(i["id"], i) for i in gnt_instances if i is not None])


def parse_gnt_instance(instance):
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        return None

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,  # FIX
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }


def nics_from_instance(i):
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks'])
    # modes = zip(itertools.repeat('mode'), i['nic.modes'])
    # links = zip(itertools.repeat('link'), i['nic.links'])
    # nics = zip(ips,macs,modes,networks,links)
    nics = zip(ips, macs, networks)
    nics = map(lambda x: dict(x), nics)
    #nics = dict(enumerate(nics))
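    # Firewall profiles are carried as instance tags; the expected (assumed)
    # format is "synnefo:network:<nic index>:<profile>", e.g. a tag
    # "synnefo:network:0:ENABLED" sets nics[0]['firewall'] = 'ENABLED'.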
    tags = i["tags"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics


def get_ganeti_jobs(backend):
    gnt_jobs = backend_mod.get_jobs(backend)
    return dict([(int(j["id"]), j) for j in gnt_jobs])


def disks_from_instance(i):
    return dict([(index, {"size": size})
                 for index, size in enumerate(i["disk.sizes"])])


class NetworkReconciler(object):
    def __init__(self, logger, fix=False, conflicting_ips=False):
        self.log = logger
        self.conflicting_ips = conflicting_ips
        self.fix = fix

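    # Walk every (network, backend) pair once: "D:" log lines report detected
    # inconsistencies, "F:" log lines report the corresponding fixes, which
    # are applied only when the reconciler was created with fix=True.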
    @transaction.commit_on_success
    def reconcile_networks(self):
        # Get models from DB
        backends = Backend.objects.exclude(offline=True)
        networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all ganeti backends
        ganeti_networks = {}
        ganeti_hanging_networks = {}
        for b in backends:
            g_nets = get_networks_from_ganeti(b)
            ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            ganeti_hanging_networks[b] = g_hanging_nets

        # Perform reconciliation for each network
        for network in networks:
            ip_available_maps = []
            ip_reserved_maps = []
            for bend in backends:
                bnet = get_backend_network(network, bend)
                gnet = ganeti_networks[bend].get(network.id)
                if not bnet:
                    if network.floating_ip_pool:
                        # Network is a floating IP pool and does not exist in
                        # the backend. We need to create it
                        bnet = self.reconcile_parted_network(network, bend)
                    elif not gnet:
                        # Network exists neither in Ganeti nor in the DB.
                        continue
                    else:
                        # Network exists in Ganeti but not in the DB.
                        if network.action != "DESTROY" and not network.public:
                            bnet = self.reconcile_parted_network(network, bend)
                        else:
                            continue

                if not gnet:
                    # Network does not exist in Ganeti. If the network action
                    # is DESTROY, we have to mark it as deleted in the DB;
                    # otherwise we have to create it in Ganeti.
                    if network.action == "DESTROY":
                        if bnet.operstate != "DELETED":
                            self.reconcile_stale_network(bnet)
                    else:
                        self.reconcile_missing_network(network, bend)
                    # Skip the rest of the reconciliation!
                    continue

                try:
                    hanging_groups = ganeti_hanging_networks[bend][network.id]
                except KeyError:
                    # Network is connected to all nodegroups
                    hanging_groups = []

                if hanging_groups:
                    # CASE-3: Ganeti networks not connected to all nodegroups
                    self.reconcile_hanging_groups(network, bend,
                                                  hanging_groups)
                    continue

                if bnet.operstate != 'ACTIVE':
                    # CASE-4: Unsynced network state. At this point the network
                    # exists and is connected to all nodes, so it must be
                    # active!
                    self.reconcile_unsynced_network(network, bend, bnet)

                # Get ganeti IP Pools
                available_map, reserved_map = get_network_pool(gnet)
                ip_available_maps.append(available_map)
                ip_reserved_maps.append(reserved_map)

            if ip_available_maps or ip_reserved_maps:
                # CASE-5: Unsynced IP Pools
                self.reconcile_ip_pools(network, ip_available_maps,
                                        ip_reserved_maps)

            if self.conflicting_ips:
                self.detect_conflicting_ips(network)

        # CASE-6: Orphan networks
        self.reconcile_orphan_networks(networks, ganeti_networks)

    def reconcile_parted_network(self, network, backend):
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                "Reconciliation simulated event")

    def reconcile_ip_pools(self, network, available_maps, reserved_maps):
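        # Intersect the per-backend bitmaps: an address is considered
        # available (or reserved) only if every backend reports it as such.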
        available_map = reduce(lambda x, y: x & y, available_maps)
        reserved_map = reduce(lambda x, y: x & y, reserved_maps)

        pool = network.get_pool()
        # Temporarily release unused floating IPs
        temp_pool = network.get_pool()
        used_ips = network.nics.values_list("ipv4", flat=True)
        unused_static_ips = network.floating_ips.exclude(ipv4__in=used_ips)
        map(lambda ip: temp_pool.put(ip.ipv4), unused_static_ips)
        if temp_pool.available != available_map:
            self.log.info("D: Unsynced available map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network,
                          temp_pool.available.to01(),
                          available_map.to01())
            if self.fix:
                pool.available = available_map
                # Re-reserve unused floating IPs, as they are not included
                # in the available map
                map(lambda ip: pool.reserve(ip.ipv4), unused_static_ips)
                pool.save()
        if pool.reserved != reserved_map:
            self.log.info("D: Unsynced reserved map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network, pool.reserved.to01(),
                          reserved_map.to01())
            if self.fix:
                pool.reserved = reserved_map
                pool.save()

    def detect_conflicting_ips(self, network):
        """Detect NICs that have the same IP in the same network."""
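        # Removing one occurrence of each distinct address from the full list
        # leaves exactly the addresses that appear more than once.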
        machine_ips = network.nics.all().values_list('ipv4', 'machine')
        ips = map(lambda x: x[0], machine_ips)
        distinct_ips = set(ips)
        if len(distinct_ips) < len(ips):
            for i in distinct_ips:
                ips.remove(i)
            for i in ips:
                machines = [utils.id_to_instance_name(x[1])
                            for x in machine_ips if x[0] == i]
                self.log.info('D: Conflicting IP:%s Machines: %s',
                              i, ', '.join(machines))

    def reconcile_orphan_networks(self, db_networks, ganeti_networks):
        # Detect Orphan Networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        for back_end, back_end_networks in ganeti_networks.items():
            ganeti_network_ids = set(back_end_networks.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan Networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in DB!",
                                          net_id)


def get_backend_network(network, backend):
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None


def get_network_pool(gnet):
    """Return available and reserved IP maps.

    Extract the available and reserved IP map from the info returned from
    Ganeti for a network.

    """
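    # Ganeti's 'map' field is assumed to be a string of 'X' (used) and '.'
    # (free) characters; after the invert() below, a set bit in a_map means
    # the address is free. Externally reserved addresses are then flagged as
    # free in a_map but cleared in r_map, which is how the DB pool appears to
    # track external reservations.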
    converter = IPPool(Foo(gnet['network']))
    a_map = bitarray_from_map(gnet['map'])
    a_map.invert()
    reserved = gnet['external_reservations']
    r_map = a_map.copy()
    r_map.setall(True)
    if reserved:
        for address in reserved.split(','):
            index = converter.value_to_index(address.strip())
            a_map[index] = True
            r_map[index] = False
    return a_map, r_map


def bitarray_from_map(bitmap):
    return bitarray.bitarray(bitmap.replace("X", "1").replace(".", "0"))


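# Minimal stand-in objects used by get_network_pool() to drive IPPool with
# data coming from Ganeti instead of a BackendNetwork row; IPPool appears to
# need only the attributes set here (available_map, reserved_map, size and a
# nested network with subnet/gateway), so Foo and Foo1 provide just those.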
class Foo():
    def __init__(self, subnet):
        self.available_map = ''
        self.reserved_map = ''
        self.size = 0
        self.network = Foo.Foo1(subnet)

    class Foo1():
        def __init__(self, subnet):
            self.subnet = subnet
            self.gateway = None