Statistics
| Branch: | Tag: | Revision:

root / snf-cyclades-app / synnefo / logic / reconciliation.py @ e6fbada1

History | View | Annotate | Download (30.1 kB)

1
# -*- coding: utf-8 -*-
2
#
3
# Copyright 2011-2013 GRNET S.A. All rights reserved.
4
#
5
# Redistribution and use in source and binary forms, with or
6
# without modification, are permitted provided that the following
7
# conditions are met:
8
#
9
#   1. Redistributions of source code must retain the above
10
#      copyright notice, this list of conditions and the following
11
#      disclaimer.
12
#
13
#   2. Redistributions in binary form must reproduce the above
14
#      copyright notice, this list of conditions and the following
15
#      disclaimer in the documentation and/or other materials
16
#      provided with the distribution.
17
#
18
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
19
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
22
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
25
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
# POSSIBILITY OF SUCH DAMAGE.
30
#
31
# The views and conclusions contained in the software and
32
# documentation are those of the authors and should not be
33
# interpreted as representing official policies, either expressed
34
# or implied, of GRNET S.A.
35
#
36
"""Business logic for reconciliation
37

38
Reconcile the contents of the DB with the actual state of the
39
Ganeti backend.
40

41
Let D be the set of VMs in the DB, G the set of VMs in Ganeti.
42
RULES:
43
    R1. Stale servers in DB:
44
            For any v in D but not in G:
45
            Set deleted=True.
46
    R2. Orphan instances in Ganeti:
47
            For any v in G with deleted=True in D:
48
            Issue OP_INSTANCE_DESTROY.
49
    R3. Unsynced operstate:
50
            For any v whose operating state differs between D and G:
51
            Set the operating state in D based on the state in G.
52
In the code, D, G are Python dicts mapping instance ids to operating state.
53
For D, the operating state is chosen from VirtualMachine.OPER_STATES.
54
For G, the operating state is True if the machine is up, False otherwise.
55

56
"""
57

    
58

    
59
from django.conf import settings
60

    
61
import logging
62
import itertools
63
import bitarray
64
from datetime import datetime
65

    
66
from django.db import transaction
67
from synnefo.db.models import (Backend, VirtualMachine, Flavor,
68
                               pooled_rapi_client, Network,
69
                               BackendNetwork, BridgePoolTable,
70
                               MacPrefixPoolTable)
71
from synnefo.db import pools
72
from synnefo.logic import utils, backend as backend_mod
73

    
74
# Module-level logger.  NOTE(review): getLogger() with no name returns the
# *root* logger; getLogger(__name__) would be more conventional, but the
# current behavior is kept as-is.
logger = logging.getLogger()
logging.basicConfig()

# Interval (in seconds) between reconciliation checks; can be overridden
# with the RECONCILIATION_CHECK_INTERVAL Django setting.
try:
    CHECK_INTERVAL = settings.RECONCILIATION_CHECK_INTERVAL
except AttributeError:
    CHECK_INTERVAL = 60

# Ganeti job status values, as reported by the Ganeti RAPI.
GANETI_JOB_ERROR = "error"
GANETI_JOBS_PENDING = ["queued", "waiting", "running", "canceling"]
GANETI_JOBS_FINALIZED = ["success", "error", "canceled"]
85

    
86

    
87
class BackendReconciler(object):
    """Reconcile the servers of one Ganeti backend with the Cyclades DB.

    Detects and (depending on the *options* flags) fixes:
      * stale servers: present in the DB but missing from Ganeti (R1)
      * orphan servers: present in Ganeti but missing from the DB (R2)
      * unsynced servers: present in both, with diverging operstate,
        flavor, NICs or pending tasks (R3)
    """

    def __init__(self, backend, logger, options=None):
        # backend: a db Backend model instance
        # logger: a logging.Logger-like object
        # options: dict of "fix_*" booleans; a missing key means "report only"
        self.backend = backend
        self.log = logger
        self.client = backend.get_client()
        if options is None:
            self.options = {}
        else:
            self.options = options

    def close(self):
        """Return the RAPI client to the backend's client pool."""
        self.backend.put_client(self.client)

    @transaction.commit_on_success
    def reconcile(self):
        """Run all reconciliation checks for this backend."""
        log = self.log
        backend = self.backend
        log.debug("Reconciling backend %s", backend)

        self.db_servers = get_database_servers(backend)
        self.db_servers_keys = set(self.db_servers.keys())
        log.debug("Got servers info from database.")

        self.gnt_servers = get_ganeti_servers(backend)
        self.gnt_servers_keys = set(self.gnt_servers.keys())
        log.debug("Got servers info from Ganeti backend.")

        self.gnt_jobs = get_ganeti_jobs(backend)
        log.debug("Got jobs from Ganeti backend")

        self.event_time = datetime.now()

        self.stale_servers = self.reconcile_stale_servers()
        self.orphan_servers = self.reconcile_orphan_servers()
        self.unsynced_servers = self.reconcile_unsynced_servers()
        self.close()

    def get_build_status(self, db_server):
        """Classify the build job of *db_server*.

        Returns "ERROR", "RUNNING" or "FINALIZED".  A job id that is
        unknown to Ganeti is treated as "ERROR", since the build can no
        longer be tracked.
        """
        job_id = db_server.backendjobid
        if job_id in self.gnt_jobs:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status == GANETI_JOB_ERROR:
                return "ERROR"
            elif gnt_job_status not in GANETI_JOBS_FINALIZED:
                return "RUNNING"
            else:
                return "FINALIZED"
        else:
            return "ERROR"

    def reconcile_stale_servers(self):
        """Detect (and optionally delete) DB servers missing from Ganeti."""
        # Detect stale servers
        stale = []
        stale_keys = self.db_servers_keys - self.gnt_servers_keys
        for server_id in stale_keys:
            db_server = self.db_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "ERROR":
                    # Special handling of BUILD errors
                    self.reconcile_building_server(db_server)
                elif build_status != "RUNNING":
                    stale.append(server_id)
            elif (db_server.operstate == "ERROR" and
                  db_server.action != "DESTROY"):
                # Servers at building ERROR are stale only if the user has
                # asked to destroy them.
                pass
            else:
                stale.append(server_id)

        # Report them
        if stale:
            self.log.info("Found stale servers %s at backend %s",
                          ", ".join(map(str, stale)), self.backend)
        else:
            self.log.debug("No stale servers at backend %s", self.backend)

        # Fix them by simulating a successful removal event.
        # NOTE: .get() keeps the default options={} usable (the original
        # subscript raised KeyError when a flag was absent).
        if stale and self.options.get("fix_stale", False):
            for server_id in stale:
                db_server = self.db_servers[server_id]
                backend_mod.process_op_status(
                    vm=db_server,
                    etime=self.event_time,
                    jobid=0,
                    opcode='OP_INSTANCE_REMOVE', status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti removal for stale servers.")

    def reconcile_orphan_servers(self):
        """Detect (and optionally destroy) Ganeti instances with no DB row."""
        orphans = self.gnt_servers_keys - self.db_servers_keys
        if orphans:
            self.log.info("Found orphan servers %s at backend %s",
                          ", ".join(map(str, orphans)), self.backend)
        else:
            self.log.debug("No orphan servers at backend %s", self.backend)

        if orphans and self.options.get("fix_orphans", False):
            for server_id in orphans:
                server_name = utils.id_to_instance_name(server_id)
                self.client.DeleteInstance(server_name)
            self.log.debug("Issued OP_INSTANCE_REMOVE for orphan servers.")

    def reconcile_unsynced_servers(self):
        """Reconcile operstate/flavor/NICs/tasks of servers in both sets."""
        for server_id in self.db_servers_keys & self.gnt_servers_keys:
            db_server = self.db_servers[server_id]
            gnt_server = self.gnt_servers[server_id]
            if db_server.operstate == "BUILD":
                build_status = self.get_build_status(db_server)
                if build_status == "RUNNING":
                    # Do not reconcile building VMs
                    continue
                elif build_status == "ERROR":
                    # Special handling of build errors
                    self.reconcile_building_server(db_server)
                    continue

            self.reconcile_unsynced_operstate(server_id, db_server,
                                              gnt_server)
            self.reconcile_unsynced_flavor(server_id, db_server,
                                           gnt_server)
            self.reconcile_unsynced_nics(server_id, db_server, gnt_server)
            self.reconcile_unsynced_disks(server_id, db_server, gnt_server)
            if db_server.task is not None:
                self.reconcile_pending_task(server_id, db_server)

    def reconcile_building_server(self, db_server):
        """Mark a DB-BUILD server whose creation job failed as ERROR."""
        self.log.info("Server '%s' is BUILD in DB, but 'ERROR' in Ganeti.",
                      db_server.id)
        if self.options.get("fix_unsynced", False):
            fix_opcode = "OP_INSTANCE_CREATE"
            backend_mod.process_op_status(
                vm=db_server,
                etime=self.event_time,
                jobid=0,
                opcode=fix_opcode, status='error',
                logmsg='Reconciliation: simulated Ganeti event')
            self.log.debug("Simulated Ganeti error build event for"
                           " server '%s'", db_server.id)

    def reconcile_unsynced_operstate(self, server_id, db_server, gnt_server):
        """Sync the DB operstate with Ganeti's STARTED/STOPPED state."""
        if db_server.operstate != gnt_server["state"]:
            self.log.info("Server '%s' is '%s' in DB and '%s' in Ganeti.",
                          server_id, db_server.operstate, gnt_server["state"])
            if self.options.get("fix_unsynced", False):
                # If server is in building state, you will have first to
                # reconcile its creation, to avoid wrong quotas
                if db_server.operstate == "BUILD":
                    backend_mod.process_op_status(
                        vm=db_server, etime=self.event_time, jobid=0,
                        opcode="OP_INSTANCE_CREATE", status='success',
                        logmsg='Reconciliation: simulated Ganeti event')
                fix_opcode = "OP_INSTANCE_STARTUP"\
                    if gnt_server["state"] == "STARTED"\
                    else "OP_INSTANCE_SHUTDOWN"
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=0,
                    opcode=fix_opcode, status='success',
                    logmsg='Reconciliation: simulated Ganeti event')
                self.log.debug("Simulated Ganeti state event for server '%s'",
                               server_id)

    def reconcile_unsynced_flavor(self, server_id, db_server, gnt_server):
        """Sync the DB flavor (RAM/vCPUs) with the values Ganeti reports."""
        db_flavor = db_server.flavor
        gnt_flavor = gnt_server["flavor"]
        if (db_flavor.ram != gnt_flavor["ram"] or
           db_flavor.cpu != gnt_flavor["vcpus"]):
            try:
                gnt_flavor = Flavor.objects.get(
                    ram=gnt_flavor["ram"],
                    cpu=gnt_flavor["vcpus"],
                    disk=db_flavor.disk,
                    disk_template=db_flavor.disk_template)
            except Flavor.DoesNotExist:
                self.log.warning("Server '%s' has unknown flavor.", server_id)
                return

            self.log.info("Server '%s' has flavor '%s' in DB and '%s' in"
                          " Ganeti", server_id, db_flavor, gnt_flavor)
            if self.options.get("fix_unsynced_flavors", False):
                old_state = db_server.operstate
                opcode = "OP_INSTANCE_SET_PARAMS"
                beparams = {"vcpus": gnt_flavor.cpu,
                            "minmem": gnt_flavor.ram,
                            "maxmem": gnt_flavor.ram}
                backend_mod.process_op_status(
                    vm=db_server, etime=self.event_time, jobid=0,
                    opcode=opcode, status='success',
                    job_fields={"beparams": beparams},
                    logmsg='Reconciliation: simulated Ganeti event')
                # process_op_status with beparams will set the vmstate to
                # shutdown. Fix this by returning it to the old state.
                vm = VirtualMachine.objects.get(pk=server_id)
                vm.operstate = old_state
                vm.save()
                self.log.debug("Simulated Ganeti flavor event for server '%s'",
                               server_id)

    def reconcile_unsynced_nics(self, server_id, db_server, gnt_server):
        """Sync the server's DB NICs with the NICs reported by Ganeti."""
        db_nics = db_server.nics.order_by("index")
        gnt_nics = gnt_server["nics"]
        gnt_nics_parsed = backend_mod.process_ganeti_nics(gnt_nics)
        nics_changed = len(db_nics) != len(gnt_nics)
        for db_nic, gnt_nic in zip(db_nics, sorted(gnt_nics_parsed.items())):
            gnt_nic_id, gnt_nic = gnt_nic
            if (db_nic.id == gnt_nic_id) and\
               backend_mod.nics_are_equal(db_nic, gnt_nic):
                continue
            else:
                nics_changed = True
                break
        if nics_changed:
            msg = "Found unsynced NICs for server '%s'.\n"\
                  "\tDB:\n\t\t%s\n\tGaneti:\n\t\t%s"
            db_nics_str = "\n\t\t".join(map(format_db_nic, db_nics))
            gnt_nics_str = "\n\t\t".join(map(format_gnt_nic,
                                         gnt_nics_parsed.items()))
            self.log.info(msg, server_id, db_nics_str, gnt_nics_str)
            if self.options.get("fix_unsynced_nics", False):
                backend_mod.process_net_status(vm=db_server,
                                               etime=self.event_time,
                                               nics=gnt_nics)

    def reconcile_unsynced_disks(self, server_id, db_server, gnt_server):
        # Disk reconciliation is not implemented yet.
        pass

    def reconcile_pending_task(self, server_id, db_server):
        """Clear a server's pending task if its job finished or vanished."""
        job_id = db_server.task_job_id
        pending_task = False
        if job_id not in self.gnt_jobs:
            pending_task = True
        else:
            gnt_job_status = self.gnt_jobs[job_id]["status"]
            if gnt_job_status in GANETI_JOBS_FINALIZED:
                pending_task = True

        if pending_task:
            self.log.info("Found server '%s' with pending task: '%s'",
                          server_id, db_server.task)
            if self.options.get("fix_pending_tasks", False):
                db_server.task = None
                db_server.task_job_id = None
                db_server.save()
                # Log string fixed: original was missing the closing quote.
                self.log.info("Cleared pending task for server '%s'",
                              server_id)
333

    
334

    
335
# One-line-per-NIC template used by format_db_nic/format_gnt_nic.
# Expands to: "ID: %s\tState: %s\tIP: %s\tNetwork: %s\tMAC: %s\tFirewall: %s"
NIC_MSG = ": %s\t".join(["ID", "State", "IP", "Network", "MAC", "Firewall"])\
    + ": %s"
337

    
338

    
339
def format_db_nic(nic):
    """Render a DB NIC model as a single NIC_MSG log line."""
    fields = (nic.id, nic.state, nic.ipv4, nic.network_id, nic.mac,
              nic.firewall_profile)
    return NIC_MSG % fields
342

    
343

    
344
def format_gnt_nic(nic):
    """Render a (name, info-dict) Ganeti NIC pair as a NIC_MSG log line."""
    nic_name, info = nic
    fields = (nic_name, info["state"], info["ipv4"], info["network"],
              info["mac"], info["firewall_profile"])
    return NIC_MSG % fields
348

    
349

    
350
#
351
# Networks
352
#
353

    
354

    
355
def get_networks_from_ganeti(backend):
    """Return the synnefo networks of *backend*, keyed by network id.

    Only networks whose name carries the synnefo prefix are included.
    """
    prefix = settings.BACKEND_PREFIX_ID + 'net-'

    networks = {}
    with pooled_rapi_client(backend) as client:
        for net in client.GetNetworks(bulk=True):
            name = net['name']
            if not name.startswith(prefix):
                continue
            networks[utils.id_from_network_name(name)] = net

    return networks
366

    
367

    
368
def hanging_networks(backend, GNets):
    """Get networks that are not connected to all Nodegroups.

    Returns a dict mapping each such network id to the set of group
    names it is missing from.
    """
    def connected_groups(group_list):
        # Each entry of group_list is a (name, mode, link) tuple.
        return set(name for (name, _mode, _link) in group_list)

    with pooled_rapi_client(backend) as client:
        all_groups = set(client.GetGroups())

    hanging = {}
    for net_id, info in GNets.items():
        net_groups = connected_groups(info['group_list'])
        if net_groups != all_groups:
            hanging[net_id] = all_groups - net_groups
    return hanging
387

    
388

    
389
def get_online_backends():
    """Return the queryset of Ganeti backends not marked as offline."""
    online = Backend.objects.filter(offline=False)
    return online
391

    
392

    
393
def get_database_servers(backend):
    """Return the backend's non-deleted VMs from the DB, keyed by id."""
    servers = backend.virtual_machines.select_related("nics", "flavor")\
                                      .filter(deleted=False)
    return {server.id: server for server in servers}
397

    
398

    
399
def get_ganeti_servers(backend):
    """Return the synnefo instances of *backend*, parsed and keyed by id.

    Non-synnefo instances (names without the backend prefix) and
    instances whose name could not be parsed to an id are skipped.
    """
    prefix = settings.BACKEND_PREFIX_ID
    parsed = [parse_gnt_instance(instance)
              for instance in backend_mod.get_instances(backend)
              if instance["name"].startswith(prefix)]
    return {i["id"]: i for i in parsed if i["id"] is not None}
407

    
408

    
409
def parse_gnt_instance(instance):
    """Parse a Ganeti instance dict into the reconciler's server format.

    Always returns a dict carrying at least an "id" key; "id" is None
    for instances whose name cannot be mapped to a synnefo id, so that
    callers can filter them with ``i["id"] is not None``.
    """
    try:
        instance_id = utils.id_from_instance_name(instance['name'])
    except Exception:
        logger.error("Ignoring instance with malformed name %s",
                     instance['name'])
        # Bug fix: the original returned a (None, None) tuple here, which
        # made callers' i["id"] lookups raise TypeError.
        return {"id": None}

    beparams = instance["beparams"]

    vcpus = beparams["vcpus"]
    ram = beparams["maxmem"]
    # Map Ganeti's boolean oper_state to the DB operstate vocabulary.
    state = instance["oper_state"] and "STARTED" or "STOPPED"

    return {
        "id": instance_id,
        "state": state,
        "updated": datetime.fromtimestamp(instance["mtime"]),
        "disks": disks_from_instance(instance),
        "nics": nics_from_instance(instance),
        "flavor": {"vcpus": vcpus,
                   "ram": ram},
        "tags": instance["tags"]
    }
433

    
434

    
435
def nics_from_instance(i):
    """Build the list of NIC dicts for a Ganeti instance *i*.

    Each NIC dict carries 'ip', 'name', 'mac' and 'network' keys taken
    from the instance's parallel bulk fields.  NICs referenced by a
    "synnefo:network:<index>:<profile>" tag additionally get a
    'firewall' key with the profile value.
    """
    ips = zip(itertools.repeat('ip'), i['nic.ips'])
    names = zip(itertools.repeat('name'), i['nic.names'])
    macs = zip(itertools.repeat('mac'), i['nic.macs'])
    networks = zip(itertools.repeat('network'), i['nic.networks.names'])
    # Pair the parallel field lists into one dict per NIC.  The explicit
    # list() makes the result indexable below even where zip/map are lazy
    # iterators (Python 3).
    nics = [dict(fields) for fields in zip(ips, names, macs, networks)]
    tags = i["tags"]
    for tag in tags:
        t = tag.split(":")
        if t[0:2] == ["synnefo", "network"]:
            if len(t) != 4:
                logger.error("Malformed synnefo tag %s", tag)
                continue
            try:
                index = int(t[2])
                nics[index]['firewall'] = t[3]
            except ValueError:
                logger.error("Malformed synnefo tag %s", tag)
            except IndexError:
                # int(t[2]) succeeded, so `index` is bound here.
                logger.error("Found tag %s for non-existent NIC %d",
                             tag, index)
    return nics
462

    
463

    
464
def get_ganeti_jobs(backend):
    """Return the backend's Ganeti jobs, keyed by integer job id."""
    jobs = backend_mod.get_jobs(backend)
    return {int(job["id"]): job for job in jobs}
467

    
468

    
469
def disks_from_instance(i):
    """Map each disk index of instance *i* to a dict with its size."""
    sizes = i["disk.sizes"]
    return {idx: {"size": disk_size}
            for idx, disk_size in enumerate(sizes)}
472

    
473

    
474
class NetworkReconciler(object):
    """Reconcile DB Networks/BackendNetworks with Ganeti networks."""

    def __init__(self, logger, fix=False):
        self.log = logger
        self.fix = fix  # when False, inconsistencies are only reported

    @transaction.commit_on_success
    def reconcile_networks(self):
        """Reconcile all non-deleted networks against all online backends."""
        # Get models from DB
        self.backends = Backend.objects.exclude(offline=True)
        self.networks = Network.objects.filter(deleted=False)

        self.event_time = datetime.now()

        # Get info from all ganeti backends
        self.ganeti_networks = {}
        self.ganeti_hanging_networks = {}
        for b in self.backends:
            g_nets = get_networks_from_ganeti(b)
            self.ganeti_networks[b] = g_nets
            g_hanging_nets = hanging_networks(b, g_nets)
            self.ganeti_hanging_networks[b] = g_hanging_nets

        self._reconcile_orphan_networks()

        for network in self.networks:
            self._reconcile_network(network)

    @transaction.commit_on_success
    def _reconcile_network(self, network):
        """Reconcile a network with corresponding Ganeti networks.

        Reconcile a Network and the associated BackendNetworks with the
        corresponding Ganeti networks in all Ganeti backends.
        """
        network_ip_pool = network.get_pool()  # X-Lock on IP Pool
        for bend in self.backends:
            bnet = get_backend_network(network, bend)
            gnet = self.ganeti_networks[bend].get(network.id)
            if not bnet:
                if network.floating_ip_pool:
                    # Network is a floating IP pool and does not exist in
                    # backend. We need to create it
                    bnet = self.reconcile_parted_network(network, bend)
                elif not gnet:
                    # Network exists neither in Ganeti nor in the DB.
                    continue
                else:
                    # Network exists in Ganeti and not in DB.
                    if network.action != "DESTROY" and not network.public:
                        bnet = self.reconcile_parted_network(network, bend)
                    else:
                        continue
                if bnet is None:
                    # Bug fix: without --fix, reconcile_parted_network
                    # creates no DB entry and returns None; nothing more
                    # can be checked for this backend (the original code
                    # crashed on bnet.operstate below).
                    continue

            if not gnet:
                # Network does not exist in Ganeti. If the network action
                # is DESTROY, we have to mark as deleted in DB, else we
                # have to create it in Ganeti.
                if network.action == "DESTROY":
                    if bnet.operstate != "DELETED":
                        self.reconcile_stale_network(bnet)
                else:
                    self.reconcile_missing_network(network, bend)
                # Skip rest reconciliation!
                continue

            try:
                hanging_groups = self.ganeti_hanging_networks[bend][network.id]
            except KeyError:
                # Network is connected to all nodegroups
                hanging_groups = []

            if hanging_groups:
                # CASE-3: Ganeti networks not connected to all nodegroups
                self.reconcile_hanging_groups(network, bend,
                                              hanging_groups)
                continue

            if bnet.operstate != 'ACTIVE':
                # CASE-4: Unsynced network state. At this point the network
                # exists and is connected to all nodes so it must be
                # active!
                self.reconcile_unsynced_network(network, bend, bnet)

            # Check that externally reserved IPs of the network in Ganeti are
            # also externally reserved to the IP pool
            externally_reserved = gnet['external_reservations']
            if externally_reserved:
                for ip in externally_reserved.split(","):
                    ip = ip.strip()
                    if not network_ip_pool.is_reserved(ip):
                        msg = ("D: IP '%s' is reserved for network '%s' in"
                               " backend '%s' but not in DB.")
                        self.log.info(msg, ip, network, bend)
                        if self.fix:
                            network_ip_pool.reserve(ip, external=True)
                            network_ip_pool.save()
                            self.log.info("F: Reserved IP '%s'", ip)

    def reconcile_parted_network(self, network, backend):
        """Create the missing BackendNetwork DB entry (when fixing).

        Returns the created BackendNetwork, or None when self.fix is
        False (nothing is created in that case).
        """
        self.log.info("D: Missing DB entry for network %s in backend %s",
                      network, backend)
        if self.fix:
            network.create_backend_network(backend)
            self.log.info("F: Created DB entry")
            bnet = get_backend_network(network, backend)
            return bnet

    def reconcile_stale_network(self, backend_network):
        """Simulate OP_NETWORK_REMOVE for a stale DB BackendNetwork."""
        self.log.info("D: Stale DB entry for network %s in backend %s",
                      backend_network.network, backend_network.backend)
        if self.fix:
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_REMOVE",
                "success",
                "Reconciliation simulated event")
            self.log.info("F: Reconciled event: OP_NETWORK_REMOVE")

    def reconcile_missing_network(self, network, backend):
        """Create the missing network in the Ganeti backend (when fixing)."""
        self.log.info("D: Missing Ganeti network %s in backend %s",
                      network, backend)
        if self.fix:
            backend_mod.create_network(network, backend)
            self.log.info("F: Issued OP_NETWORK_CONNECT")

    def reconcile_hanging_groups(self, network, backend, hanging_groups):
        """Connect the network to every nodegroup it is missing from."""
        self.log.info('D: Network %s in backend %s is not connected to '
                      'the following groups:', network, backend)
        self.log.info('-  ' + '\n-  '.join(hanging_groups))
        if self.fix:
            for group in hanging_groups:
                self.log.info('F: Connecting network %s to nodegroup %s',
                              network, group)
                backend_mod.connect_network(network, backend, depends=[],
                                            group=group)

    def reconcile_unsynced_network(self, network, backend, backend_network):
        """Simulate a successful OP_NETWORK_CONNECT to activate the net."""
        self.log.info("D: Unsynced network %s in backend %s", network, backend)
        if self.fix:
            self.log.info("F: Issuing OP_NETWORK_CONNECT")
            backend_mod.process_network_status(
                backend_network, self.event_time, 0,
                "OP_NETWORK_CONNECT",
                "success",
                # Log string fixed: was "simulated eventd".
                "Reconciliation simulated event")

    def _reconcile_orphan_networks(self):
        """Delete Ganeti networks that have no non-deleted DB entry."""
        db_networks = self.networks
        # Detect Orphan Networks in Ganeti
        db_network_ids = set([net.id for net in db_networks])
        # Use a distinct loop-variable name; the original shadowed the
        # self.ganeti_networks dict it was iterating over.
        for back_end, backend_nets in self.ganeti_networks.items():
            ganeti_network_ids = set(backend_nets.keys())
            orphans = ganeti_network_ids - db_network_ids

            if len(orphans) > 0:
                self.log.info('D: Orphan Networks in backend %s:',
                              back_end.clustername)
                self.log.info('-  ' + '\n-  '.join([str(o) for o in orphans]))
                if self.fix:
                    for net_id in orphans:
                        self.log.info('Disconnecting and deleting network %d',
                                      net_id)
                        try:
                            network = Network.objects.get(id=net_id)
                            backend_mod.delete_network(network,
                                                       backend=back_end)
                        except Network.DoesNotExist:
                            self.log.info("No entry for network %s in DB!!",
                                          net_id)
645

    
646

    
647
def get_backend_network(network, backend):
    """Return the BackendNetwork of *network* in *backend*, or None.

    None is returned when no BackendNetwork row exists for the
    (network, backend) pair.
    """
    try:
        return BackendNetwork.objects.get(network=network, backend=backend)
    except BackendNetwork.DoesNotExist:
        return None
652

    
653

    
654
class PoolReconciler(object):
    """Check (and optionally repair) the bridge, MAC-prefix and per-network
    IP pools against the values actually in use by networks and NICs.
    """

    def __init__(self, logger, fix=False):
        self.log = logger
        self.fix = fix

    def reconcile(self):
        """Run every pool consistency check."""
        self.reconcile_bridges()
        self.reconcile_mac_prefixes()
        for net in Network.objects.filter(deleted=False):
            self.reconcile_ip_pool(net)

    @transaction.commit_on_success
    def reconcile_bridges(self):
        """Verify the bridge pool against PHYSICAL_VLAN networks."""
        vlan_networks = Network.objects.filter(deleted=False,
                                               flavor="PHYSICAL_VLAN")
        check_unique_values(objects=vlan_networks, field='link',
                            logger=self.log)
        try:
            bridge_pool = BridgePoolTable.get_pool()
        except pools.EmptyPool:
            self.log.info("There is no available pool for bridges.")
            return

        links_in_use = set(vlan_networks.values_list('link', flat=True))
        check_pool_consistent(pool=bridge_pool,
                              pool_class=pools.BridgePool,
                              used_values=links_in_use,
                              fix=self.fix, logger=self.log)

    @transaction.commit_on_success
    def reconcile_mac_prefixes(self):
        """Verify the MAC-prefix pool against MAC_FILTERED networks."""
        mac_networks = Network.objects.filter(deleted=False,
                                              flavor="MAC_FILTERED")
        check_unique_values(objects=mac_networks, field='mac_prefix',
                            logger=self.log)
        try:
            prefix_pool = MacPrefixPoolTable.get_pool()
        except pools.EmptyPool:
            self.log.info("There is no available pool for MAC prefixes.")
            return

        prefixes_in_use = set(mac_networks.values_list('mac_prefix',
                                                       flat=True))
        check_pool_consistent(pool=prefix_pool,
                              pool_class=pools.MacPrefixPool,
                              used_values=prefixes_in_use,
                              fix=self.fix, logger=self.log)

    @transaction.commit_on_success
    def reconcile_ip_pool(self, network):
        """Verify *network*'s IP pool against its NICs and floating IPs."""
        # Every NIC of the network must have a unique IPv4 address.
        nics = network.nics.filter(ipv4__isnull=False)
        check_unique_values(objects=nics, field='ipv4', logger=self.log)

        # The same must hold for the network's floating IPs.
        floating_ips = network.floating_ips.filter(deleted=False)
        check_unique_values(objects=floating_ips, field='ipv4',
                            logger=self.log)

        # First get(lock) the IP pool of the network to prevent new NICs
        # from being created.
        ip_pool = network.get_pool()
        ips_in_use = set(nics.values_list("ipv4", flat=True)) | \
            set(floating_ips.values_list("ipv4", flat=True))

        check_pool_consistent(pool=ip_pool,
                              pool_class=pools.IPPool,
                              used_values=ips_in_use,
                              fix=self.fix, logger=self.log)
718

    
719

    
720
def check_unique_values(objects, field, logger):
    """Check that *field* is unique across the *objects* queryset.

    Logs each duplicated value (once) together with the objects using it
    and returns False; returns True when all values are unique.
    """
    used_values = list(objects.values_list(field, flat=True))
    if len(used_values) == len(set(used_values)):
        logger.debug("Values for field '%s' are unique.", field)
        return True

    # Single pass dedupe: the original used list.count() inside a loop
    # (O(n^2)) and logged each duplicate value multiple times.
    seen = set()
    duplicate_values = set()
    for value in used_values:
        if value in seen:
            duplicate_values.add(value)
        seen.add(value)

    for value in duplicate_values:
        filter_args = {field: value}
        using_objects = objects.filter(**filter_args)
        msg = "Value '%s' is used as %s for more than one object: %s"
        logger.error(msg, value, field, ",".join(map(str, using_objects)))
    return False
732

    
733

    
734
def check_pool_consistent(pool, pool_class, used_values, fix, logger):
    """Check that *pool*'s availability bitmap matches *used_values*.

    Builds a fresh pool of the same class, reserves every used value in
    it, and compares the resulting bitmap with the stored one.  Each
    mismatching value is logged; when *fix* is true the stored map is
    replaced with the recomputed one and saved.
    """
    dummy_pool = create_empty_pool(pool, pool_class)
    # Plain loop instead of the original side-effect list comprehension.
    for value in used_values:
        dummy_pool.reserve(value)
    if dummy_pool.available != pool.available:
        # XOR of the two bitmaps has a '1' at every inconsistent index.
        pool_diff = dummy_pool.available ^ pool.available
        for index in pool_diff.itersearch(bitarray.bitarray("1")):
            value = pool.index_to_value(int(index))
            msg = "%s is inconsistent! Value '%s' is %s but should be %s."
            value1 = pool.is_available(value) and "available" or "unavailable"
            value2 = dummy_pool.is_available(value) and "available"\
                or "unavailable"
            logger.error(msg, pool, value, value1, value2)
        if fix:
            pool.available = dummy_pool.available
            pool.save()
            logger.info("Fixed available map of pool '%s'", pool)
751

    
752

    
753
def create_empty_pool(pool, pool_class):
    """Return a fresh *pool_class* instance backed by *pool*'s table row,
    with its available/reserved maps cleared.
    """
    table_row = pool.pool_table
    table_row.available_map = ""
    table_row.reserved_map = ""
    return pool_class(table_row)