snf-cyclades-app/synnefo/logic/reconciliation.py @ ce55f724


# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
#
"""Business logic for reconciliation

Reconcile the contents of the DB with the actual state of the
Ganeti backend.

Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
RULES:
    R1. Stale servers in DB:
            For any v in D but not in G:
            Set deleted=True.
    R2. Orphan instances in Ganeti:
            For any v in G with deleted=True in D:
            Issue OP_INSTANCE_DESTROY.
    R3. Unsynced operstate:
            For any v whose operating state differs between G and D:
            Set the operating state in D based on the state in G.
In the code, D, G are Python dicts mapping instance ids to operating state.
For D, the operating state is chosen from VirtualMachine.OPER_STATES.
For G, the operating state is True if the machine is up, False otherwise.

"""


from django.core.management import setup_environ
try:
    from synnefo import settings
except ImportError:
    raise Exception("Cannot import settings, make sure PYTHONPATH contains "
                    "the parent directory of the Synnefo Django project.")
setup_environ(settings)


import logging
import itertools
import bitarray
from datetime import datetime

from django.db import transaction
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
                               pooled_rapi_client, Network,
                               BackendNetwork)
from synnefo.db.pools import IPPool
from synnefo.logic import utils, backend as backend_mod

logger = logging.getLogger()
logging.basicConfig()

try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60

GANETI_JOB_ERROR = "error"
GANETI_JOBS_PENDING = ["queued", "waiting", "running", "canceling"]
GANETI_JOBS_FINALIZED = ["success", "error", "canceled"]


class BackendReconciler(object):
    def __init__(self, backend, logger, options=None):
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        self.backend.put_client(self.client)

    @transaction.commit_on_success
    def reconcile(self):
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.gnt_jobs = get_ganeti_jobs(backend)
        log.debug("Got jobs from Ganeti backend")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

    def get_build_status(self, db_server):
        job_id = db_server.backendjobid
        if job_id in self.gnt_jobs:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status == GANETI_JOB_ERROR:
                return "ERROR"
            elif gnt_job_status not in GANETI_JOBS_FINALIZED:
                return "RUNNING"
            else:
                return "FINALIZED"
        else:
            return "ERROR"

    def reconcile_stale_servers(self):
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them
        if stale and self.options["fix_stale"]:
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=-0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options["fix_orphans"]:
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        #log = self.log
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)
            if db_server.task is not None:
                self.reconcile_pending_task(server_id, db_server)

    def reconcile_building_server(self, db_server):
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options["fix_unsynced"]:
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=-0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options["fix_unsynced"]:
                # If the server is in BUILD state, we first have to reconcile
                # its creation, to avoid wrong quotas
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=-0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options["fix_unsynced_flavors"]:
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=opcode, status='success',
                    beparams=beparams,
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        if backend_mod.nics_changed(db_nics, gnt_nics_parsed):
            msg = "Found unsynced NICs for server '%s'.\n\t"\
                  "DB: %s\n\tGaneti: %s"
            db_nics_str = ", ".join(map(format_db_nic, db_nics))
            gnt_nics_str = ", ".join(map(format_gnt_nic, gnt_nics_parsed))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options["fix_unsynced_nics"]:
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        pass

    def reconcile_pending_task(self, server_id, db_server):
        job_id = db_server.task_job_id
        pending_task = False
        if job_id not in self.gnt_jobs:
            pending_task = True
        else:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status in GANETI_JOBS_FINALIZED:
                pending_task = True

        if pending_task:
            self.log.info("Found server '%s' with pending task: '%s'",
                          server_id, db_server.task)
            if self.options["fix_pending_tasks"]:
                db_server.task = None
                db_server.task_job_id = None
                db_server.save()
                self.log.info("Cleared pending task for server '%s'",
                              server_id)


def format_db_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" % (nic.index,
           nic.ipv4, nic.network_id, nic.mac, nic.firewall_profile)


def format_gnt_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" %\
           (nic["index"], nic["ipv4"], nic["network"], nic["mac"],
            nic["firewall_profile"])


#
# Networks
#


def get_networks_from_ganeti(backend):
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as c:
        for net in c.GetNetworks(bulk=True):
            if net['name'].startswith(prefix):
                id = utils.id_from_network_name(net['name'])
                networks[id] = net

    return networks


def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    """
    def get_network_groups(group_list):
        groups = set()
        for g in group_list:
            g_name = g.split('(')[0]
            groups.add(g_name)
        return groups

    with pooled_rapi_client(backend) as c:
        groups = set(c.GetGroups())

    hanging = {}
    for id, info in GNets.items():
        group_list = get_network_groups(info['group_list'])
        if group_list != groups:
            hanging[id] = groups - group_list
    return hanging


def get_online_backends():
    return Backend.objects.filter(offline=False)


def get_database_servers(backend):
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return dict([(s.id, s) for s in servers])


def get_ganeti_servers(backend):
    gnt_instances = backend_mod.get_instances(backend)
    # Filter out non-synnefo instances
    snf_backend_prefix = settings.BACKEND_PREFIX_ID
    gnt_instances = filter(lambda i: i["name"].startswith(snf_backend_prefix),
                           gnt_instances)
    gnt_instances = map(parse_gnt_instance, gnt_instances)
    return dict([(i["id"], i) for i in gnt_instances if i["id"] is not None])


def parse_gnt_instance(instance):
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        return {"id": None}

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,  # FIX
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }


def nics_from_instance(i):
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks'])
    # modes = zip(itertools.repeat('mode'), i['nic.modes'])
    # links = zip(itertools.repeat('link'), i['nic.links'])
    # nics = zip(ips,macs,modes,networks,links)
    nics = zip(ips, macs, networks)
    nics = map(lambda x: dict(x), nics)
    #nics = dict(enumerate(nics))
    tags = i["tags"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics
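# Note (added for clarity, not part of the original module): the tag loop
# above expects firewall information encoded in instance tags of the form
# "synnefo:network:<nic index>:<firewall profile>". A hypothetical example:
#
#     tags = ["synnefo:network:0:ENABLED", "some:other:tag"]
#     # -> nics[0]['firewall'] = "ENABLED"; the unrelated tag is ignored.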


def get_ganeti_jobs(backend):
    gnt_jobs = backend_mod.get_jobs(backend)
    return dict([(int(j["id"]), j) for j in gnt_jobs])


def disks_from_instance(i):
    return dict([(index, {"size": size})
                 for index, size in enumerate(i["disk.sizes"])])


class NetworkReconciler(object):
    def __init__(self, logger, fix=False, conflicting_ips=False):
        self.log = logger
        self.conflicting_ips = conflicting_ips
        self.fix = fix

    @transaction.commit_on_success
    def reconcile_networks(self):
        # Get models from DB
        backends = Backend.objects.exclude(offline=True)
        networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all ganeti backends
        ganeti_networks = {}
        ganeti_hanging_networks = {}
        for b in backends:
            g_nets = get_networks_from_ganeti(b)
            ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            ganeti_hanging_networks[b] = g_hanging_nets

        # Perform reconciliation for each network
        for network in networks:
            ip_available_maps = []
            ip_reserved_maps = []
            for bend in backends:
                bnet = get_backend_network(network, bend)
                gnet = ganeti_networks[bend].get(network.id)
                if not bnet:
                    if network.floating_ip_pool:
                        # Network is a floating IP pool and does not exist in
                        # the backend. We need to create it
                        bnet = self.reconcile_parted_network(network, bend)
                    elif not gnet:
                        # Network exists neither in Ganeti nor in the DB.
                        continue
                    else:
                        # Network exists in Ganeti but not in the DB.
                        if network.action != "DESTROY" and not network.public:
                            bnet = self.reconcile_parted_network(network, bend)
                        else:
                            continue

                if not gnet:
                    # Network does not exist in Ganeti. If the network action
                    # is DESTROY, we have to mark it as deleted in the DB;
                    # otherwise we have to create it in Ganeti.
                    if network.action == "DESTROY":
                        if bnet.operstate != "DELETED":
                            self.reconcile_stale_network(bnet)
                    else:
                        self.reconcile_missing_network(network, bend)
                    # Skip the rest of the reconciliation!
                    continue

                try:
                    hanging_groups = ganeti_hanging_networks[bend][network.id]
                except KeyError:
                    # Network is connected to all nodegroups
                    hanging_groups = []

                if hanging_groups:
                    # CASE-3: Ganeti networks not connected to all nodegroups
                    self.reconcile_hanging_groups(network, bend,
                                                  hanging_groups)
                    continue

                if bnet.operstate != 'ACTIVE':
                    # CASE-4: Unsynced network state. At this point the network
                    # exists and is connected to all nodes, so it must be
                    # active!
                    self.reconcile_unsynced_network(network, bend, bnet)

                # Get ganeti IP Pools
                available_map, reserved_map = get_network_pool(gnet)
                ip_available_maps.append(available_map)
                ip_reserved_maps.append(reserved_map)

            if ip_available_maps or ip_reserved_maps:
                # CASE-5: Unsynced IP Pools
                self.reconcile_ip_pools(network, ip_available_maps,
                                        ip_reserved_maps)

            if self.conflicting_ips:
                self.detect_conflicting_ips(network)

        # CASE-6: Orphan networks
        self.reconcile_orphan_networks(networks, ganeti_networks)

    def reconcile_parted_network(self, network, backend):
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                "Reconciliation simulated event")

    def reconcile_ip_pools(self, network, available_maps, reserved_maps):
        available_map = reduce(lambda x, y: x & y, available_maps)
        reserved_map = reduce(lambda x, y: x & y, reserved_maps)

        pool = network.get_pool()
        # Temporarily release unused floating IPs
        temp_pool = network.get_pool()
        used_ips = network.nics.values_list("ipv4", flat=True)
        unused_static_ips = network.floating_ips.exclude(ipv4__in=used_ips)
        map(lambda ip: temp_pool.put(ip.ipv4), unused_static_ips)
        if temp_pool.available != available_map:
            self.log.info("D: Unsynced available map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network,
                          temp_pool.available.to01(),
                          available_map.to01())
            if self.fix:
                pool.available = available_map
                # Release unused floating IPs, as they are not included in the
                # available map
                map(lambda ip: pool.reserve(ip.ipv4), unused_static_ips)
                pool.save()
        if pool.reserved != reserved_map:
            self.log.info("D: Unsynced reserved map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network, pool.reserved.to01(),
                          reserved_map.to01())
            if self.fix:
                pool.reserved = reserved_map
                pool.save()

    def detect_conflicting_ips(self, network):
        """Detect NICs that have the same IP in the same network."""
        machine_ips = network.nics.all().values_list('ipv4', 'machine')
        ips = map(lambda x: x[0], machine_ips)
        distinct_ips = set(ips)
        if len(distinct_ips) < len(ips):
            for i in distinct_ips:
                ips.remove(i)
            for i in ips:
                machines = [utils.id_to_instance_name(x[1])
                            for x in machine_ips if x[0] == i]
                self.log.info('D: Conflicting IP: %s Machines: %s',
                              i, ', '.join(machines))

    def reconcile_orphan_networks(self, db_networks, ganeti_networks):
        # Detect orphan networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        for back_end, g_nets in ganeti_networks.items():
            ganeti_network_ids = set(g_nets.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in DB!",
                                          net_id)


def get_backend_network(network, backend):
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None


def get_network_pool(gnet):
    """Return available and reserved IP maps.

    Extract the available and reserved IP maps from the info returned by
    Ganeti for a network.

    """
    converter = IPPool(Foo(gnet['network']))
    a_map = bitarray_from_map(gnet['map'])
    a_map.invert()
    reserved = gnet['external_reservations']
    r_map = a_map.copy()
    r_map.setall(True)
    if reserved:
        for address in reserved.split(','):
            index = converter.value_to_index(address.strip())
            a_map[index] = True
            r_map[index] = False
    return a_map, r_map
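# Illustrative note (added for clarity, not part of the original module):
# bitarray_from_map() below simply maps "X" -> 1 and "." -> 0 in the map
# string that Ganeti reports for a network, e.g.
#
#     bitarray_from_map("X..X")   # -> bitarray('1001')
#
# get_network_pool() above then inverts that bitmap and flips the bits of any
# addresses listed in the comma-separated 'external_reservations' field before
# returning the (available, reserved) pair.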


def bitarray_from_map(bitmap):
    return bitarray.bitarray(bitmap.replace("X", "1").replace(".", "0"))


class Foo():
    def __init__(self, subnet):
        self.available_map = ''
        self.reserved_map = ''
        self.size = 0
        self.network = Foo.Foo1(subnet)

    class Foo1():
        def __init__(self, subnet):
            self.subnet = subnet
            self.gateway = None