
lib/config.py @ revision 74a48621


#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""
import os
import tempfile
import random
import logging

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


_config_lock = locking.SharedLock()


def ValidateConfig():
  sstore = ssconf.SimpleStore()

  if sstore.GetConfigVersion() != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (sstore.GetConfigVersion(),
                                     constants.CONFIG_VERSION))


class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    self._temporary_drbds = {}
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    self._OpenConfig()
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    return mac

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster; it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    all_macs = self._AllMACs()
    return mac in all_macs
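
  # Illustrative sketch of how the two MAC helpers above are meant to be
  # used ("cfg" is a ConfigWriter instance; the literal MAC is made up):
  #
  #   mac = cfg.GenerateMAC()              # new, cluster-unique MAC
  #   if cfg.IsMacInUse("aa:00:00:12:34:56"):
  #     ...                                # reject a user-supplied MAC
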
  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    """
    self._OpenConfig()
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    Args:
      - exceptions: a list with some other names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id is not None and unique_id not in existing:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id
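
  # Illustrative sketch for GenerateUniqueID: "exceptions" lists extra names
  # that must be treated as already taken, e.g. IDs planned but not yet
  # written to the configuration (names below are made up):
  #
  #   planned = ["some-already-reserved-name"]
  #   new_id = cfg.GenerateUniqueID(exceptions=planned)
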
  def _AllMACs(self):
    """Return all MACs present in the config.

    """
    self._OpenConfig()

    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Stub verify function.
    """
    self._OpenConfig()

    result = []
    seen_macs = []
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d with duplicate MAC %s" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
    return result

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device does not know node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor,)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor,)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    self._OpenConfig()
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
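
  # Port handling sketch (illustrative): AllocatePort prefers recycled ports
  # from tcpudp_port_pool and only then extends highest_used_port, while
  # AddTcpUdpPort puts a no-longer-needed port back into the pool:
  #
  #   port = cfg.AllocatePort()     # e.g. for a new DRBD device
  #   ...
  #   cfg.AddTcpUdpPort(port)       # recycle it once the device is gone
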
  def _ComputeDRBDMap(self, instance):
    """Compute the used DRBD minor/nodes.

    Return: dictionary of node_name: dict of minor: instance_name. The
    returned dict will have all the nodes in it (even if with an empty
    dict).

    """
    def _AppendUsedPorts(instance_name, disk, used):
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) == 5:
        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id
        for node, port in ((nodeA, minorA), (nodeB, minorB)):
          assert node in used, "Instance node not found in node list"
          if port in used[node]:
            raise errors.ProgrammerError("DRBD minor already used:"
                                         " %s/%s, %s/%s" %
                                         (node, port, instance_name,
                                          used[node][port]))

          used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          _AppendUsedPorts(instance_name, child, used)

    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for (node, minor), instance in self._temporary_drbds.iteritems():
      my_dict[node][minor] = instance
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        _AppendUsedPorts(instance.name, disk, my_dict)
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    """
    self._OpenConfig()

    d_map = self._ComputeDRBDMap(instance)
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      result.append(minor)
      ndata[minor] = instance
      assert (nname, minor) not in self._temporary_drbds, \
             "Attempt to reuse reserved DRBD minor"
      self._temporary_drbds[(nname, minor)] = instance
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on both the error paths and on the success
    paths (after the instance has been added or updated).

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]
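
  # DRBD minor lifecycle sketch (illustrative; assumes a two-node DRBD8
  # instance being created, with "pnode"/"snode" the node names):
  #
  #   minors = cfg.AllocateDRBDMinor([pnode, snode], instance_name)
  #   # ... build the disks using these minors, then AddInstance(...) ...
  #   cfg.ReleaseDRBDMinors(instance_name)   # on success *and* on error
  #
  # The allocated minors are the lowest free ones per node; gaps left by
  # removed devices are reused before new, higher minors are handed out.
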
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    Args: None

    Returns: rsa hostkey
    """
    self._OpenConfig()
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    Args:
      instance: the instance object
    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    self._OpenConfig()
    self._config_data.instances[instance.name] = instance
    self._WriteConfig()

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    if status not in ("up", "down"):
      raise errors.ProgrammerError("Invalid status '%s' passed to"
                                   " ConfigWriter._SetInstanceStatus()" %
                                   status)
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.status != status:
      instance.status = status
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status as up in the config.

    """
    self._SetInstanceStatus(instance_name, "up")

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, "down")

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      list of instance names, e.g. ['instance2.example.com',
      'instance1.example.com']; this contains all instances, including
      the ones in Admin_down state

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Return information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Return information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    Args:
      instance_name: name of the instance, e.g. instance1.example.com

    Returns:
      the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @returns: dict of (instance, instance_info), where instance_info is what
              GetInstanceInfo would return for that instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._OpenConfig()
    self._config_data.nodes[node.name] = node
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already held.

    Args: node_name: the name of the node

    Returns: the node object

    """
    self._OpenConfig()

    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args: node_name: the name of the node

    Returns: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @returns: dict of (node, node_info), where node_info is what
              GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def DumpConfig(self):
    """Return the entire configuration of the cluster.
    """
    self._OpenConfig()
    return self._config_data

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.cluster.serial_no += 1

  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime as when we read it, we skip the parsing of the
    file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return

    # Make sure the configuration has the right version
    ValidateConfig()

    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True
    bad = False
    nodelist = self._UnlockedGetNodeList()
    myhostname = self._my_hostname

    try:
      nodelist.remove(myhostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodelist, self._cfg_file)
    for node in nodelist:
      if not result[node]:
        logging.error("copy of file %s to node %s failed",
                      self._cfg_file, node)
        bad = True
    return not bad

  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    """
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      f.write(txt)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
    os.rename(name, destination)
    self.write_count += 1
    # re-set our cache so as not to re-read the config file
    try:
      st = os.stat(destination)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
    # and redistribute the config file
    self._DistributeConfig()

  @locking.ssynchronized(_config_lock)
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host

    """
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig)
    self._WriteConfig()
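
  # Cluster bootstrap sketch (illustrative values; this is normally driven by
  # the cluster initialisation code, before the configuration file exists):
  #
  #   cfg = ConfigWriter(offline=True)   # offline: do not distribute yet
  #   cfg.InitConfig("node1.example.com", "192.0.2.1", None, pub_key,
  #                  "aa:00:00", "xenvg", "xen-br0")
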
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    self._OpenConfig()
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._OpenConfig()
    self._config_data.cluster.volume_group_name = vg_name
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the default bridge.

    """
    self._OpenConfig()
    return self._config_data.cluster.default_bridge

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the MAC prefix.

    """
    self._OpenConfig()
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Return information about the cluster.

    Returns:
      the cluster object

    """
    self._OpenConfig()

    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetClusterInfo) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    self._WriteConfig()
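
  # Read-modify-write sketch for Update() (illustrative):
  #
  #   node = cfg.GetNodeInfo(cfg.ExpandNodeName("node1"))
  #   node.secondary_ip = "192.0.2.11"
  #   cfg.Update(node)   # writes the config and redistributes it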