Statistics
| Branch: | Tag: | Revision:

root / lib / config.py @ b989e85d

History | View | Annotate | Download (27.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Configuration management for Ganeti
23

24
This module provides the interface to the Ganeti cluster configuration.
25

26
The configuration data is stored on every node but is updated on the master
27
only. After each update, the master distributes the data to the other nodes.
28

29
Currently, the data storage format is JSON. YAML was slow and consuming too
30
much memory.
31

32
"""
33

    
34
import os
35
import tempfile
36
import random
37
import logging
38

    
39
from ganeti import errors
40
from ganeti import locking
41
from ganeti import utils
42
from ganeti import constants
43
from ganeti import rpc
44
from ganeti import objects
45
from ganeti import serializer
46
from ganeti import ssconf
47

    
48

    
49
# Module-level lock protecting all ConfigWriter operations; the
# ssynchronized decorators below acquire it shared (reads) or
# exclusive (writes).
_config_lock = locking.SharedLock()
50

    
51

    
52
def ValidateConfig():
  """Verify that the stored configuration version is the expected one.

  Raises errors.ConfigurationError when the version recorded in the
  simple store differs from constants.CONFIG_VERSION.

  """
  found_version = ssconf.SimpleStore().GetConfigVersion()

  if found_version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (found_version, constants.CONFIG_VERSION))
60

    
61

    
62
class ConfigWriter:
63
  """The interface to the cluster configuration.
64

65
  """
66
  def __init__(self, cfg_file=None, offline=False):
    """Initialize the config writer (without reading the config yet).

    Args:
      cfg_file: path of the configuration file to use; when None,
                constants.CLUSTER_CONF_FILE is used
      offline: when True, _DistributeConfig is a no-op and changes are
               only saved locally

    """
    self.write_count = 0
    self._lock = _config_lock
    # cached configuration plus the stat() details of the file it was
    # read from; _OpenConfig uses these to skip re-parsing
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    # IDs handed out by GenerateUniqueID but not yet saved in the config
    self._temporary_ids = set()
    # DRBD minors reserved by AllocateDRBDMinor, keyed by (node, minor)
    self._temporary_drbds = {}
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name
85

    
86
  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Tell whether a cluster configuration file exists on this node.

    """
    config_present = os.path.exists(constants.CLUSTER_CONF_FILE)
    return config_present
93

    
94
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC address not used by any configured instance NIC.

    The cluster MAC prefix is combined with three random bytes; up to
    64 candidates are tried before giving up.

    """
    self._OpenConfig()
    prefix = self._config_data.cluster.mac_prefix
    used_macs = self._AllMACs()
    for _attempt in range(64):
      suffix = (random.randrange(0, 256),
                random.randrange(0, 256),
                random.randrange(0, 256))
      candidate = "%s:%02x:%02x:%02x" % ((prefix,) + suffix)
      if candidate not in used_macs:
        return candidate
    raise errors.ConfigurationError("Can't generate unique MAC")
116

    
117
  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    return mac in self._AllMACs()
128

    
129
  def _ComputeAllLVs(self):
    """Collect the names of every logical volume of every instance.

    Returns: a set of LV names

    """
    self._OpenConfig()
    lv_names = set()
    for inst in self._config_data.instances.values():
      for lv_list in inst.MapLVsByNode().values():
        lv_names.update(lv_list)
    return lv_names
140

    
141
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate an unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    Args:
      - exceptions: a list with some other names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    # _ComputeAllLVs calls _OpenConfig, so _config_data is valid below
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      # BUGFIX: the counter was previously never decremented, so this
      # loop would spin forever if NewUUID kept returning known values
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able generate an unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id
174

    
175
  def _AllMACs(self):
    """Return all MACs present in the config.

    Returns: list of MAC address strings, one per configured NIC

    """
    self._OpenConfig()

    return [nic.mac
            for inst in self._config_data.instances.values()
            for nic in inst.nics]
187

    
188
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify the configuration for obvious inconsistencies.

    Checks that every instance references existing primary/secondary
    nodes and that no MAC address is used by more than one NIC.

    Returns: list of problem descriptions (strings); an empty list
    means no problems were found

    """
    self._OpenConfig()

    result = []
    seen_macs = []
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      # duplicate-MAC check across all instances seen so far
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
    return result
213

    
214
  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    # process children first so the whole device tree is updated
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    # presumably a physical_id with no logical_id means the conversion
    # already happened -- nothing left to compute for this device
    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      # the local node's endpoint comes first, followed by the peer's,
      # then the local minor
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor,)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor,)
    else:
      # non-DRBD devices need no conversion
      disk.physical_id = disk.logical_id
    return
251

    
252
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    Locked wrapper around _UnlockedSetDiskID; only drbd devices need
    the ip/port conversion, and children are updated recursively so
    that passing just the top device to a remote node is enough.

    """
    return self._UnlockedSetDiskID(disk, node_name)
264

    
265
  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Return a TCP/UDP port to the pool of available ports.

    Raises ProgrammerError if port is not an integer.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    pool = self._config_data.cluster.tcpudp_port_pool
    pool.add(port)
    self._WriteConfig()
276

    
277
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Return a copy of the pool of currently available TCP/UDP ports.

    """
    self._OpenConfig()
    pool = self._config_data.cluster.tcpudp_port_pool
    return pool.copy()
284

    
285
  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    pool = self._config_data.cluster.tcpudp_port_pool
    if pool:
      # recycled ports take precedence over extending the range
      port = pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
309

    
310
  def _ComputeDRBDMap(self, instance):
    """Compute the used DRBD minor/nodes.

    Return: dictionary of node_name: dict of minor: instance_name. The
    returned dict will have all the nodes in it (even if with an empty
    list).

    """
    # NOTE(review): the `instance` parameter is never used below -- the
    # loop variable of the same name shadows it; confirm before removing
    def _AppendUsedPorts(instance_name, disk, used):
      # record both nodes' minors of a DRBD8 device into `used`
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) == 5:
        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id
        for node, port in ((nodeA, minorA), (nodeB, minorB)):
          assert node in used, "Instance node not found in node list"
          if port in used[node]:
            raise errors.ProgrammerError("DRBD minor already used:"
                                         " %s/%s, %s/%s" %
                                         (node, port, instance_name,
                                          used[node][port]))

          used[node][port] = instance_name
      # recurse so minors of child devices are counted too
      if disk.children:
        for child in disk.children:
          _AppendUsedPorts(instance_name, child, used)

    # start with an empty dict per node so every node appears in the map
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    # include minors reserved but not yet written to the config
    for (node, minor), instance in self._temporary_drbds.iteritems():
      my_dict[node][minor] = instance
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        _AppendUsedPorts(instance.name, disk, my_dict)
    return my_dict
341

    
342
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    """
    self._OpenConfig()

    d_map = self._ComputeDRBDMap(instance)
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      # look for a hole in the sorted minor sequence first
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      result.append(minor)
      # update the working map too, so a node passed twice gets
      # distinct minors
      ndata[minor] = instance
      assert (nname, minor) not in self._temporary_drbds, \
             "Attempt to reuse reserved DRBD minor"
      # reserve the minor until ReleaseDRBDMinors is called
      self._temporary_drbds[(nname, minor)] = instance
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result
380

    
381
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Forget the temporary DRBD minor reservations of one instance.

    Call this on both the error and the success paths (after the
    instance has been added or updated), so reservations don't leak.

    @type instance: string
    @param instance: the instance whose temporary minors are dropped

    """
    # .keys() returns a snapshot list, so deleting while looping is safe
    for reservation in self._temporary_drbds.keys():
      if self._temporary_drbds[reservation] == instance:
        del self._temporary_drbds[reservation]
396

    
397
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the cluster's public RSA host key from the config.

    Returns: rsa hostkey

    """
    self._OpenConfig()
    cluster = self._config_data.cluster
    return cluster.rsahostkeypub
407

    
408
  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Record a newly-created instance in the configuration.

    Args:
      instance: an objects.Instance to store

    Raises ProgrammerError when something else is passed.

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      lvs_by_node = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, lvs_by_node)

    self._OpenConfig()
    instance.serial_no = 1
    self._config_data.instances[instance.name] = instance
    self._WriteConfig()
428

    
429
  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    Only "up" and "down" are accepted; the config is written out only
    when the status actually changes.

    """
    if status not in ("up", "down"):
      raise errors.ProgrammerError("Invalid status '%s' passed to"
                                   " ConfigWriter._SetInstanceStatus()" %
                                   status)
    self._OpenConfig()

    try:
      instance = self._config_data.instances[instance_name]
    except KeyError:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    if instance.status == status:
      # nothing to do, avoid a useless write
      return
    instance.status = status
    instance.serial_no += 1
    self._WriteConfig()
447

    
448
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Record in the configuration that the given instance is up.

    """
    self._SetInstanceStatus(instance_name, "up")
454

    
455
  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    Raises ConfigurationError when the instance is not known.

    """
    self._OpenConfig()

    instances = self._config_data.instances
    if instance_name not in instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    instances.pop(instance_name)
    self._WriteConfig()
466

    
467
  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    # re-key the instance under its new name
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        # (assumes the layout .../<instance_name>/<iv_name>, hence going
        # two directory levels up for the storage dir -- TODO confirm)
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()
494

    
495
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Record in the configuration that the given instance is down.

    """
    self._SetInstanceStatus(instance_name, "down")
501

    
502
  def _UnlockedGetInstanceList(self):
    """Get the list of instance names.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return list(self._config_data.instances)
510

    
511
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      list of instance names, e.g.
      ['instance2.example.com', 'instance1.example.com']; this includes
      all instances, also the ones in Admin_down state

    """
    return self._UnlockedGetInstanceList()
521

    
522
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()

    candidates = self._config_data.instances.keys()
    return utils.MatchNameComponent(short_name, candidates)
531

    
532
  def _UnlockedGetInstanceInfo(self, instance_name):
    """Return the configuration object of one instance, or None.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()

    return self._config_data.instances.get(instance_name, None)
544

    
545
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Return the stored configuration of an instance.

    Only data kept in the configuration file is returned; live
    information about an instance comes from the running systems.

    Args:
      instance_name: name of the instance, e.g. instance1.example.com

    Returns:
      the instance object, or None when the name is unknown

    """
    return self._UnlockedGetInstanceInfo(instance_name)
560

    
561
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @returns: dict of (instance, instance_info), where instance_info is
              what GetInstanceInfo would return for that instance

    """
    names = self._UnlockedGetInstanceList()
    return dict((name, self._UnlockedGetInstanceInfo(name))
                for name in names)
573

    
574
  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    # lazy %-args (matching the logging style used elsewhere in this
    # file) instead of eager string interpolation
    logging.info("Adding node %s to configuration", node.name)

    self._OpenConfig()
    node.serial_no = 1
    self._config_data.nodes[node.name] = node
    self._WriteConfig()
588

    
589
  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    Raises ConfigurationError when the node is not known.

    """
    # lazy %-args (matching the logging style used elsewhere in this
    # file) instead of eager string interpolation
    logging.info("Removing node %s from configuration", node_name)

    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._WriteConfig()
602

    
603
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())
612

    
613
  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already held.

    Args: node: nodename (tuple) of the node

    Returns: the node object, or None when the node is unknown

    """
    self._OpenConfig()

    return self._config_data.nodes.get(node_name, None)
629

    
630

    
631
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args: node: nodename (tuple) of the node

    Returns: the node object, or None when the node is unknown

    """
    return self._UnlockedGetNodeInfo(node_name)
641

    
642
  def _UnlockedGetNodeList(self):
    """Return the names of the configured nodes.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return list(self._config_data.nodes)
650

    
651

    
652
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the names of the configured nodes.

    """
    return self._UnlockedGetNodeList()
658

    
659
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @returns: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for that node

    """
    names = self._UnlockedGetNodeList()
    return dict((name, self._UnlockedGetNodeInfo(name))
                for name in names)
671

    
672
  @locking.ssynchronized(_config_lock, shared=1)
  def DumpConfig(self):
    """Return the entire configuration object of the cluster.

    """
    self._OpenConfig()
    config = self._config_data
    return config
678

    
679
  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    Called by _WriteConfig before each serialization.

    """
    self._config_data.serial_no += 1
684

    
685
  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime as when we read it, we skip the parsing of the
    file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    # cache check: mtime, size and inode together decide whether the
    # in-memory copy is still current
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return

    # Make sure the configuration has the right version
    ValidateConfig()

    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        # any parse/conversion problem is surfaced as a config error
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    # sanity check on a field that every valid configuration must have
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    # remember the file's stat details for the cache check above
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
724

    
725
  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    Returns: True when the upload succeeded on every other node (or
    when offline); failures are logged, not raised.

    """
    if self._offline:
      # offline mode: report success without contacting anyone
      return True
    bad = False
    nodelist = self._UnlockedGetNodeList()
    myhostname = self._my_hostname

    # no need to upload the file to ourselves
    try:
      nodelist.remove(myhostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodelist, self._cfg_file)
    for node in nodelist:
      if not result[node]:
        logging.error("copy of file %s to node %s failed",
                      self._cfg_file, node)
        bad = True
    return not bad
750

    
751
  def _WriteConfig(self, destination=None):
752
    """Write the configuration data to persistent storage.
753

754
    """
755
    if destination is None:
756
      destination = self._cfg_file
757
    self._BumpSerialNo()
758
    txt = serializer.Dump(self._config_data.ToDict())
759
    dir_name, file_name = os.path.split(destination)
760
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
761
    f = os.fdopen(fd, 'w')
762
    try:
763
      f.write(txt)
764
      os.fsync(f.fileno())
765
    finally:
766
      f.close()
767
    # we don't need to do os.close(fd) as f.close() did it
768
    os.rename(name, destination)
769
    self.write_count += 1
770
    # re-set our cache as not to re-read the config file
771
    try:
772
      st = os.stat(destination)
773
    except OSError, err:
774
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
775
    self._config_time = st.st_mtime
776
    self._config_size = st.st_size
777
    self._config_inode = st.st_ino
778
    # and redistribute the config file
779
    self._DistributeConfig()
780

    
781
  @locking.ssynchronized(_config_lock)
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host
      mac_prefix: prefix used when generating instance MAC addresses
      vg_name: the LVM volume group name for the cluster
      def_bridge: the default bridge for instance NICs

    """
    # start just below the DRBD range, so the first AllocatePort call
    # hands out FIRST_DRBD_PORT
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip, serial_no=1)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig,
                                           serial_no=1)
    self._WriteConfig()
814

    
815
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the cluster's LVM volume group name.

    """
    self._OpenConfig()
    cluster = self._config_data.cluster
    return cluster.volume_group_name
822

    
823
  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Store a new LVM volume group name in the configuration.

    """
    self._OpenConfig()
    cluster = self._config_data.cluster
    cluster.volume_group_name = vg_name
    self._WriteConfig()
831

    
832
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the cluster's default bridge.

    """
    self._OpenConfig()
    cluster = self._config_data.cluster
    return cluster.default_bridge
839

    
840
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the MAC address prefix used by the cluster.

    """
    self._OpenConfig()
    cluster = self._config_data.cluster
    return cluster.mac_prefix
847

    
848
  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Return the cluster configuration object.

    Returns:
      the cluster object

    """
    self._OpenConfig()
    return self._config_data.cluster
859

    
860
  @locking.ssynchronized(_config_lock)
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure that it's saved.

    Raises ProgrammerError when no configuration has been read yet or
    target has an unsupported type, and ConfigurationError when target
    does not match the object held in the current configuration.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    # check that `target` matches the object currently held in the
    # cached configuration, and is not a stale or foreign copy
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1

    self._WriteConfig()