#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""

import os
import tempfile
import random
import logging

from ganeti import errors
from ganeti import locking
from ganeti import logger
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


_config_lock = locking.SharedLock()
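# The public ConfigWriter methods below serialise access through this lock
# via the locking.ssynchronized decorator (shared=1 for read-only
# operations, exclusive for modifications).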


def ValidateConfig():
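  """Verify that the cluster configuration version matches the one
  expected by this code (constants.CONFIG_VERSION).

  Raises errors.ConfigurationError on a version mismatch.

  """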
  sstore = ssconf.SimpleStore()

  if sstore.GetConfigVersion() != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (sstore.GetConfigVersion(),
                                     constants.CONFIG_VERSION))


class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    self._temporary_drbds = {}
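    # _temporary_ids remembers unique IDs handed out by GenerateUniqueID;
    # _temporary_drbds maps (node_name, minor) to the owning instance name
    # for minors reserved by AllocateDRBDMinor until ReleaseDRBDMinors is
    # called for that instance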
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    self._OpenConfig()
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    return mac

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster; it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    all_macs = self._AllMACs()
    return mac in all_macs

  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    """
    self._OpenConfig()
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    Args:
      - exceptions: a list with some other names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

  def _AllMACs(self):
    """Return all MACs present in the config.

    """
    self._OpenConfig()

    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Stub verify function.
    """
    self._OpenConfig()

    result = []
    seen_macs = []
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
    return result

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
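      # seen from node_name, the resulting physical_id is laid out as
      # (local_ip, port, remote_ip, port, local_minor)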
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor,)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor,)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    self._OpenConfig()
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    # If there are TCP/UDP ports configured in the pool, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

  def _ComputeDRBDMap(self, instance):
    """Compute the used DRBD minors per node.

    Return: dictionary of node_name: dict of minor: instance_name. The
    returned dict will have all the nodes in it (even if with an empty
    minor dict).

    """
    def _AppendUsedPorts(instance_name, disk, used):
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) == 5:
        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id
        for node, port in ((nodeA, minorA), (nodeB, minorB)):
          assert node in used, "Instance node not found in node list"
          if port in used[node]:
            raise errors.ProgrammerError("DRBD minor already used:"
                                         " %s/%s, %s/%s" %
                                         (node, port, instance_name,
                                          used[node][port]))

          used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          _AppendUsedPorts(instance_name, child, used)

    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for (node, minor), instance in self._temporary_drbds.iteritems():
      my_dict[node][minor] = instance
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        _AppendUsedPorts(instance.name, disk, my_dict)
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    """
    self._OpenConfig()

    d_map = self._ComputeDRBDMap(instance)
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      result.append(minor)
      ndata[minor] = instance
      assert (nname, minor) not in self._temporary_drbds, \
             "Attempt to reuse reserved DRBD minor"
      self._temporary_drbds[(nname, minor)] = instance
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on both the error paths and on the success
    paths (after the instance has been added or updated).

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    Args: None

    Returns: rsa hostkey
    """
    self._OpenConfig()
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    Args:
      instance: the instance object
    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logger.Info("Instance '%s' DISK_LAYOUT: %s" % (instance.name, all_lvs))

    self._OpenConfig()
    self._config_data.instances[instance.name] = instance
    self._WriteConfig()

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    if status not in ("up", "down"):
      raise errors.ProgrammerError("Invalid status '%s' passed to"
                                   " ConfigWriter._SetInstanceStatus()" %
                                   status)
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.status != status:
      instance.status = status
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the status of an instance as up in the configuration.

    """
    self._SetInstanceStatus(instance_name, "up")

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, "down")

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      array of instances, ex. ['instance2.example.com','instance1.example.com']
      this contains all instances, including the ones in Admin_down state

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Return information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Return information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    Args:
      instance: name of the instance, ex instance1.example.com

    Returns:
      the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @returns: dict of (instance, instance_info), where instance_info is what
              GetInstanceInfo would return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    logging.info("Adding node %s to configuration" % node.name)

    self._OpenConfig()
    self._config_data.nodes[node.name] = node
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration" % node_name)

    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already held.

    Args: node_name: the name of the node

    Returns: the node object

    """
    self._OpenConfig()

    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args: node_name: the name of the node

    Returns: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.nodes.keys()


  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @returns: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def DumpConfig(self):
    """Return the entire configuration of the cluster.
    """
    self._OpenConfig()
    return self._config_data

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.cluster.serial_no += 1

  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime as when we read it, we skip the parsing of the
    file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return

    # Make sure the configuration has the right version
    ValidateConfig()

    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True
    bad = False
    nodelist = self._UnlockedGetNodeList()
    myhostname = self._my_hostname

    try:
      nodelist.remove(myhostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodelist, self._cfg_file)
    for node in nodelist:
      if not result[node]:
        logger.Error("copy of file %s to node %s failed" %
                     (self._cfg_file, node))
        bad = True
    return not bad

  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    """
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      f.write(txt)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
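    # the temporary file was fsync()ed above and lives in the same directory
    # as the destination, so the rename below replaces it atomically and
    # readers never observe a partially-written configuration file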
    os.rename(name, destination)
    self.write_count += 1
    # re-set our cache so that we don't re-read the config file
    try:
      st = os.stat(destination)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
    # and redistribute the config file
    self._DistributeConfig()

  @locking.ssynchronized(_config_lock)
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host

    """
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    self._OpenConfig()
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._OpenConfig()
    self._config_data.cluster.volume_group_name = vg_name
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the default bridge.

    """
    self._OpenConfig()
    return self._config_data.cluster.default_bridge

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    self._OpenConfig()
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Return information about the cluster.

    Returns:
      the cluster object

    """
    self._OpenConfig()

    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    self._WriteConfig()