#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""

import os
import tempfile
import random
import logging

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


_config_lock = locking.SharedLock()


def ValidateConfig():
  """Verify that the configuration stored on disk matches the expected version.

  """
  sstore = ssconf.SimpleStore()

  if sstore.GetConfigVersion() != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (sstore.GetConfigVersion(),
                                     constants.CONFIG_VERSION))


class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    self._temporary_drbds = {}
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    self._OpenConfig()
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    return mac

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    all_macs = self._AllMACs()
    return mac in all_macs

  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    """
    self._OpenConfig()
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    Args:
      - exceptions: a list with some other names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

  def _AllMACs(self):
    """Return all MACs present in the config.

    """
    self._OpenConfig()

    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Stub verify function.
    """
    self._OpenConfig()

    result = []
    seen_macs = []
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d with duplicate MAC %s" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
    return result

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine also descends to and updates the disk's children,
    which helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device does not know node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
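      # Seen from node_name, physical_id below becomes
      # (own secondary IP, port, peer secondary IP, port, own DRBD minor).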
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor,)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor,)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine also descends to and updates the disk's children,
    which helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    self._OpenConfig()
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

  def _ComputeDRBDMap(self, instance):
    """Compute the used DRBD minor/nodes.

    Return: dictionary of node_name: dict of minor: instance_name. The
    returned dict will have all the nodes in it (possibly with an empty
    dict as the value).

    """
    def _AppendUsedPorts(instance_name, disk, used):
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) == 5:
        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id
        for node, port in ((nodeA, minorA), (nodeB, minorB)):
          assert node in used, "Instance node not found in node list"
          if port in used[node]:
            raise errors.ProgrammerError("DRBD minor already used:"
                                         " %s/%s, %s/%s" %
                                         (node, port, instance_name,
                                          used[node][port]))

          used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          _AppendUsedPorts(instance_name, child, used)

    my_dict = dict((node, {}) for node in self._config_data.nodes)
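    # Illustrative shape of the result (hypothetical names):
    #   {"node1.example.com": {0: "inst1", 1: "inst2"},
    #    "node2.example.com": {0: "inst1"},
    #    "node3.example.com": {}}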
    for (node, minor), instance in self._temporary_drbds.iteritems():
      my_dict[node][minor] = instance
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        _AppendUsedPorts(instance.name, disk, my_dict)
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    """
    self._OpenConfig()

    d_map = self._ComputeDRBDMap(instance)
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
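      # Assuming utils.FirstFree returns the first gap in the sorted list:
      # used minors [0, 1, 3] would give 2, while a gapless [0, 1, 2] gives
      # None and we fall back to keys[-1] + 1 below.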
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      result.append(minor)
      ndata[minor] = instance
      assert (nname, minor) not in self._temporary_drbds, \
             "Attempt to reuse reserved DRBD minor"
      self._temporary_drbds[(nname, minor)] = instance
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on both the error paths and on the success
    paths (after the instance has been added or updated).

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    Args: None

    Returns: rsa hostkey
    """
    self._OpenConfig()
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    Args:
      instance: the instance object
    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    self._OpenConfig()
    instance.serial_no = 1
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    if status not in ("up", "down"):
      raise errors.ProgrammerError("Invalid status '%s' passed to"
                                   " ConfigWriter._SetInstanceStatus()" %
                                   status)
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.status != status:
      instance.status = status
      instance.serial_no += 1
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance as up in the config.

    """
    self._SetInstanceStatus(instance_name, "up")

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
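        # Illustrative (hypothetical path): a logical_id[1] of
        # "/srv/ganeti/file-storage/old-name/disk0" yields a file_storage_dir
        # of "/srv/ganeti/file-storage"; the path is then rebuilt below as
        # file_storage_dir/<new name>/<iv_name>.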
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, "down")

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      array of instances, ex. ['instance2.example.com','instance1.example.com']
      this contains all the instances, including the ones in Admin_down state

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    Args:
      instance: name of the instance, ex instance1.example.com

    Returns:
      the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @returns: dict of (instance, instance_info), where instance_info is what
              GetInstanceInfo would return for that instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._OpenConfig()
    node.serial_no = 1
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already held.

    Args: node_name: the name of the node

    Returns: the node object

    """
    self._OpenConfig()

    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args: node_name: the name of the node

    Returns: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.nodes.keys()


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @returns: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for that node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def DumpConfig(self):
    """Return the entire configuration of the cluster.
    """
    self._OpenConfig()
    return self._config_data

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1

  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime, size and inode as when we read it, we skip the
    parsing of the file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return

    # Make sure the configuration has the right version
    ValidateConfig()

    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True
    bad = False
    nodelist = self._UnlockedGetNodeList()
    myhostname = self._my_hostname

    try:
      nodelist.remove(myhostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodelist, self._cfg_file)
    for node in nodelist:
      if not result[node]:
        logging.error("copy of file %s to node %s failed",
                      self._cfg_file, node)
        bad = True
    return not bad

  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    """
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      f.write(txt)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
    os.rename(name, destination)
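    # The mkstemp + write + fsync + rename sequence gives an atomic replace:
    # readers see either the complete old file or the complete new one,
    # never a partially written configuration.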
    self.write_count += 1
    # reset our cache so as not to re-read the config file
    try:
      st = os.stat(destination)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
    # and redistribute the config file
    self._DistributeConfig()

  @locking.ssynchronized(_config_lock)
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host
      mac_prefix: the MAC address prefix for instance NICs
      vg_name: the name of the LVM volume group
      def_bridge: the default network bridge for instances

    """
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip, serial_no=1)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig,
                                           serial_no=1)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    self._OpenConfig()
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._OpenConfig()
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the default bridge.

    """
    self._OpenConfig()
    return self._config_data.cluster.default_bridge

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    self._OpenConfig()
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Return information about the cluster.

    Returns:
      the cluster object

    """
    self._OpenConfig()

    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetClusterInfo) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to make sure is saved.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1

    self._WriteConfig()