# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#   1. Redistributions of source code must retain the above
#      copyright notice, this list of conditions and the following
#      disclaimer.
#
#   2. Redistributions in binary form must reproduce the above
#      copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials
#      provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
#
"""Business logic for reconciliation

Reconcile the contents of the DB with the actual state of the
Ganeti backend.

Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
RULES:
    R1. Stale servers in DB:
            For any v in D but not in G:
            Set deleted=True.
    R2. Orphan instances in Ganeti:
            For any v in G with deleted=True in D:
            Issue OP_INSTANCE_REMOVE.
    R3. Unsynced operstate:
            For any v whose operating state differs between G and D:
            Set the operating state in D based on the state in G.
In the code, D, G are Python dicts mapping instance ids to operating state.
For D, the operating state is chosen from VirtualMachine.OPER_STATES.
For G, the operating state is "STARTED" if the machine is up, "STOPPED"
otherwise.

"""


from django.conf import settings

import logging
import itertools
import bitarray
from datetime import datetime

from django.db import transaction
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
                               pooled_rapi_client, Network,
                               BackendNetwork)
from synnefo.db.pools import IPPool
from synnefo.logic import utils, backend as backend_mod

logger = logging.getLogger()
logging.basicConfig()

try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60

GANETI_JOB_ERROR = "error"
GANETI_JOBS_PENDING = ["queued", "waiting", "running", "canceling"]
GANETI_JOBS_FINALIZED = ["success", "error", "canceled"]
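

# A minimal, illustrative sketch of the D/G classification described in the
# module docstring above. It is not used by the reconcilers below (they
# operate on full Django model objects and Ganeti data); the ids and states
# are made up for the example.
def _classification_example():
    # D: DB view, instance id -> operstate (from VirtualMachine.OPER_STATES)
    D = {1: "STARTED", 2: "STOPPED", 3: "STARTED"}
    # G: Ganeti view, instance id -> operating state
    G = {2: "STARTED", 3: "STARTED", 4: "STARTED"}

    stale = set(D) - set(G)                     # R1 -> set([1])
    orphans = set(G) - set(D)                   # R2 -> set([4])
    unsynced = set(k for k in set(D) & set(G)
                   if D[k] != G[k])             # R3 -> set([2])
    return stale, orphans, unsynced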


class BackendReconciler(object):
    def __init__(self, backend, logger, options=None):
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        self.backend.put_client(self.client)

    @transaction.commit_on_success
    def reconcile(self):
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.gnt_jobs = get_ganeti_jobs(backend)
        log.debug("Got jobs from Ganeti backend")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

    def get_build_status(self, db_server):
        job_id = db_server.backendjobid
        if job_id in self.gnt_jobs:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status == GANETI_JOB_ERROR:
                return "ERROR"
            elif gnt_job_status not in GANETI_JOBS_FINALIZED:
                return "RUNNING"
            else:
                return "FINALIZED"
        else:
            return "ERROR"

    def reconcile_stale_servers(self):
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            elif (db_server.operstate == "ERROR" and
                  db_server.action != "DESTROY"):
                # Servers in ERROR state are stale only if the user has
                # asked to destroy them.
                pass
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them
        if stale and self.options["fix_stale"]:
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=-0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options["fix_orphans"]:
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        #log = self.log
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)
            if db_server.task is not None:
                self.reconcile_pending_task(server_id, db_server)

    def reconcile_building_server(self, db_server):
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options["fix_unsynced"]:
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=-0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options["fix_unsynced"]:
                # If the server is in BUILD state, first reconcile its
                # creation, to avoid wrong quotas.
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=-0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options["fix_unsynced_flavors"]:
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=-0,
                    opcode=opcode, status='success',
                    beparams=beparams,
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state.
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        if backend_mod.nics_changed(db_nics, gnt_nics_parsed):
            msg = "Found unsynced NICs for server '%s'.\n\t"\
                  "DB: %s\n\tGaneti: %s"
            db_nics_str = ", ".join(map(format_db_nic, db_nics))
            gnt_nics_str = ", ".join(map(format_gnt_nic, gnt_nics_parsed))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options["fix_unsynced_nics"]:
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        pass

    def reconcile_pending_task(self, server_id, db_server):
        job_id = db_server.task_job_id
        pending_task = False
        if job_id not in self.gnt_jobs:
            pending_task = True
        else:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status in GANETI_JOBS_FINALIZED:
                pending_task = True

        if pending_task:
            self.log.info("Found server '%s' with pending task: '%s'",
                          server_id, db_server.task)
            if self.options["fix_pending_tasks"]:
                db_server.task = None
                db_server.task_job_id = None
                db_server.save()
                self.log.info("Cleared pending task for server '%s'",
                              server_id)


def format_db_nic(nic):
    return "Index: %s, IP: %s Network: %s MAC: %s Firewall: %s" % (nic.index,
           nic.ipv4, nic.network_id, nic.mac, nic.firewall_profile)


def format_gnt_nic(nic):
    return "Index: %s IP: %s Network: %s MAC: %s Firewall: %s" %\
           (nic["index"], nic["ipv4"], nic["network"], nic["mac"],
            nic["firewall_profile"])


#
# Networks
#


def get_networks_from_ganeti(backend):
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as c:
        for net in c.GetNetworks(bulk=True):
            if net['name'].startswith(prefix):
                id = utils.id_from_network_name(net['name'])
                networks[id] = net

    return networks


def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    """
    def get_network_groups(group_list):
        groups = set()
        for g in group_list:
            g_name = g.split('(')[0]
            groups.add(g_name)
        return groups

    with pooled_rapi_client(backend) as c:
        groups = set(c.GetGroups())

    hanging = {}
    for id, info in GNets.items():
        group_list = get_network_groups(info['group_list'])
        if group_list != groups:
            hanging[id] = groups - group_list
    return hanging
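

# Illustrative sketch of what hanging_networks() computes, not part of the
# original module. The group_list entries follow the "<group>(<mode>, <link>)"
# format that get_network_groups() assumes; all names are made up.
def _hanging_networks_example():
    cluster_groups = set(["default", "rack2"])       # as from GetGroups()
    gnets = {42: {"group_list": ["default(bridged, prv0)"]},
             43: {"group_list": ["default(bridged, prv0)",
                                 "rack2(bridged, prv0)"]}}
    hanging = {}
    for net_id, info in gnets.items():
        connected = set(g.split('(')[0] for g in info["group_list"])
        if connected != cluster_groups:
            hanging[net_id] = cluster_groups - connected
    return hanging   # {42: set(["rack2"])}: network 42 misses group "rack2"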


def get_online_backends():
    return Backend.objects.filter(offline=False)


def get_database_servers(backend):
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return dict([(s.id, s) for s in servers])


def get_ganeti_servers(backend):
    gnt_instances = backend_mod.get_instances(backend)
    # Filter out non-synnefo instances
    snf_backend_prefix = settings.BACKEND_PREFIX_ID
    gnt_instances = filter(lambda i: i["name"].startswith(snf_backend_prefix),
                           gnt_instances)
    gnt_instances = map(parse_gnt_instance, gnt_instances)
    return dict([(i["id"], i) for i in gnt_instances if i["id"] is not None])


def parse_gnt_instance(instance):
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        # Return a dict with a null id, since get_ganeti_servers() filters
        # the parsed instances on their "id" key.
        return {"id": None}

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,  # FIX
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }


def nics_from_instance(i):
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks'])
    # modes = zip(itertools.repeat('mode'), i['nic.modes'])
    # links = zip(itertools.repeat('link'), i['nic.links'])
    # nics = zip(ips,macs,modes,networks,links)
    nics = zip(ips, macs, networks)
    nics = map(lambda x: dict(x), nics)
    #nics = dict(enumerate(nics))
    tags = i["tags"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics
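

# Illustrative sketch of the firewall tag handling in nics_from_instance(),
# not part of the original module: instance tags of the form
# "synnefo:network:<nic index>:<firewall profile>" attach a firewall profile
# to the NIC at that index. The NIC and tag values are made up.
def _firewall_tag_example():
    nics = [{"ip": "192.0.2.10", "mac": "aa:00:00:00:00:01", "network": "net"}]
    tags = ["synnefo:network:0:ENABLED", "some:other:tag"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"] and len(t) == 4:
            nics[int(t[2])]["firewall"] = t[3]
    return nics   # [{..., "firewall": "ENABLED"}]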


def get_ganeti_jobs(backend):
    gnt_jobs = backend_mod.get_jobs(backend)
    return dict([(int(j["id"]), j) for j in gnt_jobs])


def disks_from_instance(i):
    return dict([(index, {"size": size})
                 for index, size in enumerate(i["disk.sizes"])])


class NetworkReconciler(object):
    def __init__(self, logger, fix=False, conflicting_ips=False):
        self.log = logger
        self.conflicting_ips = conflicting_ips
        self.fix = fix

    @transaction.commit_on_success
    def reconcile_networks(self):
        # Get models from DB
        backends = Backend.objects.exclude(offline=True)
        networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all Ganeti backends
        ganeti_networks = {}
        ganeti_hanging_networks = {}
        for b in backends:
            g_nets = get_networks_from_ganeti(b)
            ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            ganeti_hanging_networks[b] = g_hanging_nets

        # Perform reconciliation for each network
        for network in networks:
            ip_available_maps = []
            ip_reserved_maps = []
            for bend in backends:
                bnet = get_backend_network(network, bend)
                gnet = ganeti_networks[bend].get(network.id)
                if not bnet:
                    if network.floating_ip_pool:
                        # Network is a floating IP pool and does not exist in
                        # the backend. We need to create it.
                        bnet = self.reconcile_parted_network(network, bend)
                    elif not gnet:
                        # Network exists neither in Ganeti nor in the DB.
                        continue
                    else:
                        # Network exists in Ganeti but not in the DB.
                        if network.action != "DESTROY" and not network.public:
                            bnet = self.reconcile_parted_network(network, bend)
                        else:
                            continue

                if not gnet:
                    # Network does not exist in Ganeti. If the network action
                    # is DESTROY, we have to mark it as deleted in the DB;
                    # otherwise we have to create it in Ganeti.
                    if network.action == "DESTROY":
                        if bnet.operstate != "DELETED":
                            self.reconcile_stale_network(bnet)
                    else:
                        self.reconcile_missing_network(network, bend)
                    # Skip the rest of the reconciliation!
                    continue

                try:
                    hanging_groups = ganeti_hanging_networks[bend][network.id]
                except KeyError:
                    # Network is connected to all nodegroups
                    hanging_groups = []

                if hanging_groups:
                    # CASE-3: Ganeti networks not connected to all nodegroups
                    self.reconcile_hanging_groups(network, bend,
                                                  hanging_groups)
                    continue

                if bnet.operstate != 'ACTIVE':
                    # CASE-4: Unsynced network state. At this point the network
                    # exists and is connected to all nodes, so it must be
                    # active!
                    self.reconcile_unsynced_network(network, bend, bnet)

                # Get Ganeti IP pools
                available_map, reserved_map = get_network_pool(gnet)
                ip_available_maps.append(available_map)
                ip_reserved_maps.append(reserved_map)

            if ip_available_maps or ip_reserved_maps:
                # CASE-5: Unsynced IP pools
                self.reconcile_ip_pools(network, ip_available_maps,
                                        ip_reserved_maps)

            if self.conflicting_ips:
                self.detect_conflicting_ips(network)

        # CASE-6: Orphan networks
        self.reconcile_orphan_networks(networks, ganeti_networks)

    def reconcile_parted_network(self, network, backend):
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                "Reconciliation simulated event")

    def reconcile_ip_pools(self, network, available_maps, reserved_maps):
        available_map = reduce(lambda x, y: x & y, available_maps)
        reserved_map = reduce(lambda x, y: x & y, reserved_maps)

        pool = network.get_pool()
        # Temporarily release unused floating IPs
        temp_pool = network.get_pool()
        used_ips = network.nics.values_list("ipv4", flat=True)
        unused_static_ips = network.floating_ips.exclude(ipv4__in=used_ips)
        map(lambda ip: temp_pool.put(ip.ipv4), unused_static_ips)
        if temp_pool.available != available_map:
            self.log.info("D: Unsynced available map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network,
                          temp_pool.available.to01(),
                          available_map.to01())
            if self.fix:
                pool.available = available_map
                # Re-reserve the unused floating IPs, as they are not
                # included in the Ganeti available map
                map(lambda ip: pool.reserve(ip.ipv4), unused_static_ips)
                pool.save()
        if pool.reserved != reserved_map:
            self.log.info("D: Unsynced reserved map of network %s:\n"
                          "\tDB: %r\n\tGB: %r", network, pool.reserved.to01(),
                          reserved_map.to01())
            if self.fix:
                pool.reserved = reserved_map
                pool.save()

    def detect_conflicting_ips(self, network):
        """Detect NICs that have the same IP in the same network."""
        machine_ips = network.nics.all().values_list('ipv4', 'machine')
        ips = map(lambda x: x[0], machine_ips)
        distinct_ips = set(ips)
        if len(distinct_ips) < len(ips):
            for i in distinct_ips:
                ips.remove(i)
            for i in ips:
                machines = [utils.id_to_instance_name(x[1])
                            for x in machine_ips if x[0] == i]
                self.log.info('D: Conflicting IP:%s Machines: %s',
                              i, ', '.join(machines))

    def reconcile_orphan_networks(self, db_networks, ganeti_networks):
        # Detect orphan networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        for back_end, b_nets in ganeti_networks.items():
            ganeti_network_ids = set(b_nets.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan Networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in DB!",
                                          net_id)


def get_backend_network(network, backend):
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None


def get_network_pool(gnet):
    """Return available and reserved IP maps.

    Extract the available and reserved IP map from the info returned by
    Ganeti for a network.

    """
    converter = IPPool(Foo(gnet['network']))
    a_map = bitarray_from_map(gnet['map'])
    a_map.invert()
    reserved = gnet['external_reservations']
    r_map = a_map.copy()
    r_map.setall(True)
    if reserved:
        for address in reserved.split(','):
            index = converter.value_to_index(address.strip())
            a_map[index] = True
            r_map[index] = False
    return a_map, r_map


def bitarray_from_map(bitmap):
    return bitarray.bitarray(bitmap.replace("X", "1").replace(".", "0"))
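

# Illustrative sketch of the map conversion above, not part of the original
# module. In the Ganeti map string an "X" marks a used/reserved address and a
# "." a free one (the only two characters bitarray_from_map() handles);
# get_network_pool() then inverts the result so that set bits mean "available"
# before applying the external reservations. The map string is made up.
def _bitmap_example():
    gnt_map = "XX..X."
    used = bitarray_from_map(gnt_map)    # bitarray('110010')
    available = used.copy()
    available.invert()                   # bitarray('001101')
    return used, available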


# Minimal stand-in for the pool object that IPPool expects, used by
# get_network_pool() only to convert the addresses of a Ganeti subnet to
# bitmap indexes via IPPool.value_to_index().
class Foo():
    def __init__(self, subnet):
        self.available_map = ''
        self.reserved_map = ''
        self.size = 0
        self.network = Foo.Foo1(subnet)

    class Foo1():
        def __init__(self, subnet):
            self.subnet = subnet
            self.gateway = None