root / lib / config.py @ 013da361

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""

import os
import random
import logging
import time

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer


_config_lock = locking.SharedLock()


def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))


class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    self._ec_reserved = {}

  def Reserved(self, resource):
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource: %s." %
                                    (resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type.

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource
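
# Usage sketch (illustrative only, not part of the original module): the
# manager is driven with a caller-supplied generator callback, e.g.:
#
#   trm = TemporaryReservationManager()
#   new_id = trm.Generate(existing=set(["id-1", "id-2"]),
#                         generate_one_fn=utils.NewUUID,
#                         ec_id="job-42")
#   # ... once the job finishes (or fails), release its reservations:
#   trm.DropECReservations("job-42")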


class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    self._temporary_drbds = {}
    self._temporary_macs = set()
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name
    self._last_cluster_serial = -1
    self._OpenConfig()

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs and mac not in self._temporary_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    self._temporary_macs.add(mac)
    return mac
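
  # Example output (illustrative): with mac_prefix "aa:00:00" this yields
  # strings like "aa:00:00:3f:9a:01"; the value is parked in _temporary_macs
  # so concurrent jobs can't pick it before the instance is actually added.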

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster; it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    return mac in all_macs or mac in self._temporary_macs

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    all_secrets = self._AllDRBDSecrets()
    retries = 64
    while retries > 0:
      secret = utils.GenerateSecret()
      if secret not in all_secrets:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique DRBD secret")
    return secret

  def _AllLVs(self):
    """Compute the list of all LVs.

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids)
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
    return existing

  def _GenerateUniqueID(self):
    """Generate a unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @rtype: string
    @return: the unique id

    """
    existing = self._AllIDs(include_temporary=True)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id is not None and unique_id not in existing:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    """
    return self._GenerateUniqueID()

  def _CleanupTemporaryIDs(self):
    """Clean up the _temporary_ids structure.

    """
    existing = self._AllIDs(include_temporary=False)
    self._temporary_ids = self._temporary_ids - existing

  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def helper(disk, result):
      """Recursively gather secrets from this disk."""
      if disk.dev_type == constants.DT_DRBD8:
        result.append(disk.logical_id[5])
      if disk.children:
        for child in disk.children:
          helper(child, result)

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        helper(disk, result)

    return result
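
  # Layout reminder (see _UnlockedSetDiskID below): a DRBD8 logical_id is
  # the tuple (pnode, snode, port, pminor, sminor, secret), so index 5 read
  # by helper() above is the shared secret.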

  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs.

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

  def _UnlockedVerifyConfig(self):
    """Verify function.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = ", ".join(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node in data.nodes.values():
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # drbd minors check
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    ips = { data.cluster.master_ip: ["cluster_ip"] }
    def _helper(ip, name):
      if ip in ips:
        ips[ip].append(name)
      else:
        ips[ip] = [name]

    for node in data.nodes.values():
      _helper(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _helper(node.secondary_ip, "node:%s/secondary" % node.name)

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, ", ".join(owners)))
    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device doesn't know node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)
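
  # Worked example (hypothetical data): for logical_id
  #   ("node1", "node2", 11000, 0, 1, "secret")
  # calling SetDiskID(disk, "node1") sets physical_id to
  #   (node1_secondary_ip, 11000, node2_secondary_ip, 11000, 0, "secret")
  # i.e. local endpoint first, then the remote one, then the local minor.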

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
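
  # Sketch of the port lifecycle (illustrative): callers pair the two
  # operations, returning the port to the pool when the consumer goes away:
  #
  #   port = cfg.AllocatePort()   # from the pool, or past highest_used_port
  #   # ... use the port for a DRBD device ...
  #   cfg.AddTcpUdpPort(port)     # release it back into the pool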

  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty dict), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates
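
  # Shape of the result (illustrative values): for two nodes and one DRBD
  # instance using minor 0 on both, this returns
  #   ({"node1": {0: "inst1"}, "node2": {0: "inst1"}}, [])
  # where any conflict shows up in the second element as a tuple of
  #   (node, minor, new_instance, existing_instance).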

  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty dict).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result
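
  # Illustration (hypothetical cluster): for a disk mirrored between two
  # nodes, one minor per node is requested in a single call:
  #   minors = cfg.AllocateDRBDMinor(["node1", "node2"], "inst1")
  # returning e.g. [0, 3] -- the first free minor on each node, in the same
  # order as the nodes passed in.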

  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths; on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._UnlockedReleaseDRBDMinors(instance.name)
    for nic in instance.nics:
      self._temporary_macs.discard(nic.mac)
    self._WriteConfig()

  def _EnsureUUID(self, item, ec_id):
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID()
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_up != status:
      instance.admin_up = status
      instance.serial_no += 1
      instance.mtime = time.time()
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._SetInstanceStatus(instance_name, True)

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()
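
  # Worked example (hypothetical paths): a file-based disk stored at
  #   /srv/ganeti/file-storage/old.example.com/disk0
  # is re-pointed by the rename above to
  #   /srv/ganeti/file-storage/new.example.com/disk0
  # i.e. only the per-instance directory component changes.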

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, False)
897

    
898
  def _UnlockedGetInstanceList(self):
899
    """Get the list of instances.
900

901
    This function is for internal use, when the config lock is already held.
902

903
    """
904
    return self._config_data.instances.keys()
905

    
906
  @locking.ssynchronized(_config_lock, shared=1)
907
  def GetInstanceList(self):
908
    """Get the list of instances.
909

910
    @return: array of instances, ex. ['instance2.example.com',
911
        'instance1.example.com']
912

913
    """
914
    return self._UnlockedGetInstanceList()
915

    
916
  @locking.ssynchronized(_config_lock, shared=1)
917
  def ExpandInstanceName(self, short_name):
918
    """Attempt to expand an incomplete instance name.
919

920
    """
921
    return utils.MatchNameComponent(short_name,
922
                                    self._config_data.instances.keys(),
923
                                    case_sensitive=False)
924

    
925
  def _UnlockedGetInstanceInfo(self, instance_name):
926
    """Returns information about an instance.
927

928
    This function is for internal use, when the config lock is already held.
929

930
    """
931
    if instance_name not in self._config_data.instances:
932
      return None
933

    
934
    return self._config_data.instances[instance_name]
935

    
936
  @locking.ssynchronized(_config_lock, shared=1)
937
  def GetInstanceInfo(self, instance_name):
938
    """Returns information about an instance.
939

940
    It takes the information from the configuration file. Other information of
941
    an instance are taken from the live systems.
942

943
    @param instance_name: name of the instance, e.g.
944
        I{instance1.example.com}
945

946
    @rtype: L{objects.Instance}
947
    @return: the instance object
948

949
    """
950
    return self._UnlockedGetInstanceInfo(instance_name)
951

    
952
  @locking.ssynchronized(_config_lock, shared=1)
953
  def GetAllInstancesInfo(self):
954
    """Get the configuration of all instances.
955

956
    @rtype: dict
957
    @return: dict of (instance, instance_info), where instance_info is what
958
              would GetInstanceInfo return for the node
959

960
    """
961
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
962
                    for instance in self._UnlockedGetInstanceList()])
963
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys(),
                                    case_sensitive=False)
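
  # Illustration (hypothetical cluster): with nodes node1.example.com and
  # node2.example.com configured, ExpandNodeName("node1") expands to
  # "node1.example.com", while a prefix matching several names yields no
  # unique result from utils.MatchNameComponent.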

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already
    held.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    @rtype: list

    """
    return self._config_data.nodes.keys()


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum desired and possible candidates.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    mc_now = mc_should = mc_max = 0
    for node in self._config_data.nodes.values():
      if exceptions and node.name in exceptions:
        continue
      if not (node.offline or node.drained):
        mc_max += 1
      if node.master_candidate:
        mc_now += 1
    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
    return (mc_now, mc_should, mc_max)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)
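
  # Worked example (hypothetical cluster): with 5 nodes of which one is
  # offline and one drained, candidate_pool_size=10 and 2 current
  # candidates, the stats are mc_now=2, mc_max=3 (only healthy nodes count
  # as possible) and mc_should=min(3, 10)=3, so one more candidate should
  # be promoted.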

  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exceptions):
    """Try to grow the candidate pool to the desired size.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    # note: the second element of the stats tuple is the desired pool
    # size, which is the target used below
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        if (node.master_candidate or node.offline or node.drained or
            node.name in exceptions):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1
    self._config_data.mtime = time.time()

  def _AllUUIDObjects(self):
    """Returns all objects with uuid attributes.

    """
    return (self._config_data.instances.values() +
            self._config_data.nodes.values() +
            [self._config_data.cluster])

  def _OpenConfig(self):
    """Read the config data from disk.

    """
    raw_data = utils.ReadFile(self._cfg_file)

    try:
      data = objects.ConfigData.FromDict(serializer.Load(raw_data))
    except Exception, err:
      raise errors.ConfigurationError(err)

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")

    # Upgrade configuration if needed
    data.UpgradeConfig()

    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1

    # And finally run our (custom) config upgrade sequence
    self._UpgradeConfig()

  def _UpgradeConfig(self):
    """Run upgrade steps that cannot be done purely in the objects.

    This is because some data elements need uniqueness across the
    whole configuration, etc.

    @warning: this function will call L{_WriteConfig()}, so it needs
        to either be called with the lock held or from a safe place
        (the constructor)

    """
    modified = False
    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID()
        modified = True
    if modified:
      self._WriteConfig()

  def _DistributeConfig(self, feedback_fn):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True

    bad = False

    node_list = []
    addr_list = []
    myhostname = self._my_hostname
    # we can skip checking whether _UnlockedGetNodeInfo returns None
    # since the node list comes from _UnlockedGetNodeList, and we are
    # called with the lock held, so no modifications should take place
    # in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == myhostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      node_list.append(node_info.name)
      addr_list.append(node_info.primary_ip)

    result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
                                            address_list=addr_list)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (self._cfg_file, to_node, msg))
        logging.error(msg)

        if feedback_fn:
          feedback_fn(msg)

        bad = True

    return not bad

  def _WriteConfig(self, destination=None, feedback_fn=None):
    """Write the configuration data to persistent storage.

    """
    assert feedback_fn is None or callable(feedback_fn)

    # First, clean up the _temporary_ids set: if an ID is now used by
    # other objects it should be discarded, to prevent unbounded growth
    # of that structure
    self._CleanupTemporaryIDs()

    # Warn on config errors, but don't abort the save - the
    # configuration has already been modified, and we can't revert;
    # the best we can do is to warn the user and save as is, leaving
    # recovery to the user
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Configuration data is not consistent: %s" %
                (", ".join(config_errors)))
      logging.critical(errmsg)
      if feedback_fn:
        feedback_fn(errmsg)

    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())

    utils.WriteFile(destination, data=txt)

    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig(feedback_fn)

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = rpc.RpcRunner.call_write_ssconf_files(
          self._UnlockedGetNodeList(),
          self._UnlockedGetSsconfValues())

        for nname, nresu in result.items():
          msg = nresu.fail_msg
          if msg:
            errmsg = ("Error while uploading ssconf files to"
                      " node %s: %s" % (nname, msg))
            logging.warning(errmsg)

            if feedback_fn:
              feedback_fn(errmsg)

      self._last_cluster_serial = self._config_data.cluster.serial_no
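
  # The write pipeline in summary (no extra behavior implied): verify the
  # in-memory data, bump the serial number, serialize to JSON, write the
  # local file, push it to the master candidates, and regenerate the ssconf
  # files cluster-wide whenever the cluster serial number has grown.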

  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
    fn = "\n".join
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_info]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_info]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_info if node.offline)
    on_data = fn(node.name for node in node_info if not node.offline)
    mc_data = fn(node.name for node in node_info if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)

    cluster = self._config_data.cluster
    cluster_tags = fn(cluster.GetTags())
    return {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NODE: cluster.master_node,
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      }
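
  # Format note (illustrative values): every ssconf value is a plain string,
  # with multi-item entries newline-joined, e.g. SS_NODE_PRIMARY_IPS becomes
  #   "node1.example.com 192.0.2.1\nnode2.example.com 192.0.2.2"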

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster.

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target, feedback_fn):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetClusterInfo) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which exists in
        the cluster
    @param feedback_fn: Callable feedback function

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    update_serial = False
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
      update_serial = True
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1
    target.mtime = now = time.time()

    if update_serial:
      # for node updates, we need to increase the cluster serial too
      self._config_data.cluster.serial_no += 1
      self._config_data.cluster.mtime = now

    if isinstance(target, objects.Instance):
      self._UnlockedReleaseDRBDMinors(target.name)
      for nic in target.nics:
        self._temporary_macs.discard(nic.mac)

    self._WriteConfig(feedback_fn=feedback_fn)
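
  # Typical usage (sketch, assuming an existing ConfigWriter instance cfg):
  # fetch the object, mutate it, then persist the change via Update:
  #
  #   node = cfg.GetNodeInfo("node1.example.com")
  #   node.offline = True
  #   cfg.Update(node, feedback_fn=logging.info)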

  @locking.ssynchronized(_config_lock)
  def DropECReservations(self, ec_id):
    """Drop per-execution-context reservations.

    """
    pass
1445