4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Configuration management for Ganeti
24 This module provides the interface to the Ganeti cluster configuration.
26 The configuration data is stored on every node but is updated on the master
27 only. After each update, the master distributes the data to the other nodes.
29 Currently, the data storage format is JSON. YAML was slow and consuming too much memory.
39 from ganeti import errors
40 from ganeti import locking
41 from ganeti import utils
42 from ganeti import constants
43 from ganeti import rpc
44 from ganeti import objects
45 from ganeti import serializer
# Module-level shared lock serializing all ConfigWriter operations; the
# ssynchronized decorators below all synchronize on this single lock.
48 _config_lock = locking.SharedLock()
# Validate that the loaded config's version matches the version this code
# understands; raises errors.ConfigurationError on mismatch.
# NOTE(review): a continuation line (presumably "(data.version,") is missing
# from this extract between the "%" line and the closing parenthesis.
51 def _ValidateConfig(data):
52 if data.version != constants.CONFIG_VERSION:
53 raise errors.ConfigurationError("Cluster configuration version"
54 " mismatch, got %s instead of %s" %
56 constants.CONFIG_VERSION))
60 """The interface to the cluster configuration.
# Constructor: binds the module lock, defers loading of config data,
# records the offline flag, selects the config file path (default cluster
# path unless cfg_file is given -- the if/else lines are missing from this
# extract), and initializes the temporary-ID/DRBD reservation sets.
63 def __init__(self, cfg_file=None, offline=False):
65 self._lock = _config_lock
66 self._config_data = None
67 self._offline = offline
69 self._cfg_file = constants.CLUSTER_CONF_FILE
71 self._cfg_file = cfg_file
72 self._temporary_ids = set()
73 self._temporary_drbds = {}
74 # Note: in order to prevent errors when resolving our name in
75 # _DistributeConfig, we compute it here once and reuse it; it's
76 # better to raise an error before starting to modify the config
77 # file than after it was modified
78 self._my_hostname = utils.HostInfo().name
# Static predicate: the cluster counts as configured iff the cluster
# config file exists on disk.
# NOTE(review): the @staticmethod / def IsCluster() lines are missing from
# this extract.
81 # this method needs to be static, so that we can call it on the class
84 """Check if the cluster is configured.
87 return os.path.exists(constants.CLUSTER_CONF_FILE)
# Generate a random MAC under the cluster's mac_prefix that is not already
# used by any configured NIC; raises ConfigurationError if no unique MAC
# could be produced.
# NOTE(review): the retry-loop header and the success-path lines between
# the "if mac not in all_macs:" test and the raise are missing from this
# extract -- presumably a bounded retry loop returning the mac.
89 @locking.ssynchronized(_config_lock, shared=1)
90 def GenerateMAC(self):
91 """Generate a MAC for an instance.
93 This should check the current instances for duplicates.
96 prefix = self._config_data.cluster.mac_prefix
97 all_macs = self._AllMACs()
100 byte1 = random.randrange(0, 256)
101 byte2 = random.randrange(0, 256)
102 byte3 = random.randrange(0, 256)
103 mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
104 if mac not in all_macs:
108 raise errors.ConfigurationError("Can't generate unique MAC")
# Predicate: True iff the given MAC is assigned to a NIC of any instance
# in this cluster's configuration (no external collision checking).
111 @locking.ssynchronized(_config_lock, shared=1)
112 def IsMacInUse(self, mac):
113 """Predicate: check if the specified MAC is in use in the Ganeti cluster.
115 This only checks instances managed by this cluster, it does not
116 check for potential collisions elsewhere.
119 all_macs = self._AllMACs()
120 return mac in all_macs
# Generate a DRBD shared secret not already used by any configured disk;
# raises ConfigurationError if a unique one could not be produced.
# NOTE(review): the retry-loop header and the success return between the
# uniqueness test and the raise are missing from this extract.
122 @locking.ssynchronized(_config_lock, shared=1)
123 def GenerateDRBDSecret(self):
124 """Generate a DRBD secret.
126 This checks the current disks for duplicates.
129 all_secrets = self._AllDRBDSecrets()
132 secret = utils.GenerateSecret()
133 if secret not in all_secrets:
137 raise errors.ConfigurationError("Can't generate unique DRBD secret")
# Gather the names of all logical volumes of all instances, across all
# nodes, into one collection.
# NOTE(review): the initialization of `lvnames` (presumably a set) and the
# return statement are missing from this extract.
140 def _ComputeAllLVs(self):
141 """Compute the list of all LVs.
145 for instance in self._config_data.instances.values():
146 node_data = instance.MapLVsByNode()
147 for lv_list in node_data.values():
148 lvnames.update(lv_list)
# Generate a cluster-wide unique identifier (a UUID) that collides with no
# existing LV name, instance name, node name, pending temporary ID, or
# caller-supplied exception; the result is also recorded in
# self._temporary_ids so concurrent calls cannot hand out the same ID.
# NOTE(review): the initialization of `existing`, the retry loop, and the
# final return are missing from this extract; the raise's message string
# is missing a closing parenthesis in the original text.
151 @locking.ssynchronized(_config_lock, shared=1)
152 def GenerateUniqueID(self, exceptions=None):
153 """Generate an unique disk name.
155 This checks the current node, instances and disk names for
159 - exceptions: a list with some other names which should be checked
160 for uniqueness (used for example when you want to get
161 more than one id at one time without adding each one in
162 turn to the config file
164 Returns: the unique id as a string
168 existing.update(self._temporary_ids)
169 existing.update(self._ComputeAllLVs())
170 existing.update(self._config_data.instances.keys())
171 existing.update(self._config_data.nodes.keys())
172 if exceptions is not None:
173 existing.update(exceptions)
176 unique_id = utils.NewUUID()
177 if unique_id not in existing and unique_id is not None:
180 raise errors.ConfigurationError("Not able generate an unique ID"
181 " (last tried ID: %s" % unique_id)
182 self._temporary_ids.add(unique_id)
# Collect the MAC address of every NIC of every configured instance.
# NOTE(review): the def _AllMACs(self) line, the `result = []`
# initialization, and the return are missing from this extract.
186 """Return all MACs present in the config.
190 for instance in self._config_data.instances.values():
191 for nic in instance.nics:
192 result.append(nic.mac)
# Collect every DRBD secret (field 5 of a DRBD8 disk's logical_id) across
# all instance disks, recursing into child disks via the local helper.
# NOTE(review): the `result` initialization, the call to helper() inside
# the disk loop, and the return are missing from this extract.
196 def _AllDRBDSecrets(self):
197 """Return all DRBD secrets present in the config.
200 def helper(disk, result):
201 """Recursively gather secrets from this disk."""
202 if disk.dev_type == constants.DT_DRBD8:
203 result.append(disk.logical_id[5])
205 for child in disk.children:
206 helper(child, result)
209 for instance in self._config_data.instances.values():
210 for disk in instance.disks:
# Cross-check the whole configuration for internal consistency and return
# a list of human-readable problem descriptions: dangling primary or
# secondary node references, duplicate NIC MACs, duplicate TCP/UDP port
# reservations (DRBD disks, instance network ports, and the cluster free
# pool), a stale cluster.highest_used_port, a non-candidate master node,
# and an under-filled master-candidate pool.
# NOTE(review): several lines are missing from this extract, including the
# initialization of `result`/`seen_macs`/`ports`, `ports[x] = []` setup
# lines, the duplicate-port loop header, the sorted key computation for
# the highest-port check, and the final return.
215 @locking.ssynchronized(_config_lock, shared=1)
216 def VerifyConfig(self):
223 data = self._config_data
224 for instance_name in data.instances:
225 instance = data.instances[instance_name]
226 if instance.primary_node not in data.nodes:
227 result.append("instance '%s' has invalid primary node '%s'" %
228 (instance_name, instance.primary_node))
229 for snode in instance.secondary_nodes:
230 if snode not in data.nodes:
231 result.append("instance '%s' has invalid secondary node '%s'" %
232 (instance_name, snode))
233 for idx, nic in enumerate(instance.nics):
234 if nic.mac in seen_macs:
235 result.append("instance '%s' has NIC %d mac %s duplicate" %
236 (instance_name, idx, nic.mac))
238 seen_macs.append(nic.mac)
240 # gather the drbd ports for duplicate checks
241 for dsk in instance.disks:
242 if dsk.dev_type in constants.LDS_DRBD:
243 tcp_port = dsk.logical_id[2]
244 if tcp_port not in ports:
246 ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
247 # gather network port reservation
248 net_port = getattr(instance, "network_port", None)
249 if net_port is not None:
250 if net_port not in ports:
252 ports[net_port].append((instance.name, "network port"))
254 # cluster-wide pool of free ports
255 for free_port in data.cluster.tcpudp_port_pool:
256 if free_port not in ports:
257 ports[free_port] = []
258 ports[free_port].append(("cluster", "port marked as free"))
260 # compute tcp/udp duplicate ports
266 txt = ", ".join(["%s/%s" % val for val in pdata])
267 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
269 # highest used tcp port check
271 if keys[-1] > data.cluster.highest_used_port:
272 result.append("Highest used port mismatch, saved %s, computed %s" %
273 (data.cluster.highest_used_port, keys[-1]))
275 if not data.nodes[data.cluster.master_node].master_candidate:
276 result.append("Master node is not a master candidate")
278 mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
280 result.append("Not enough master candidates: actual %d, target %d" %
# Fill in disk.physical_id from disk.logical_id, recursing into children
# first. For DRBD8 disks the logical_id (pnode, snode, port, pminor,
# sminor, secret) is translated into (local_ip, port, remote_ip, port,
# my_minor, secret) ordered from the perspective of node_name; for other
# disk types the physical_id is simply the logical_id. Lock must already
# be held by the caller.
# NOTE(review): an early-return line (after the logical_id/physical_id
# check) and the node_name argument of the "not knowing node" raise are
# missing from this extract.
285 def _UnlockedSetDiskID(self, disk, node_name):
286 """Convert the unique ID to the ID needed on the target nodes.
288 This is used only for drbd, which needs ip/port configuration.
290 The routine descends down and updates its children also, because
291 this helps when the only the top device is passed to the remote
294 This function is for internal use, when the config lock is already held.
298 for child in disk.children:
299 self._UnlockedSetDiskID(child, node_name)
301 if disk.logical_id is None and disk.physical_id is not None:
303 if disk.dev_type == constants.LD_DRBD8:
304 pnode, snode, port, pminor, sminor, secret = disk.logical_id
305 if node_name not in (pnode, snode):
306 raise errors.ConfigurationError("DRBD device not knowing node %s" %
308 pnode_info = self._UnlockedGetNodeInfo(pnode)
309 snode_info = self._UnlockedGetNodeInfo(snode)
310 if pnode_info is None or snode_info is None:
311 raise errors.ConfigurationError("Can't find primary or secondary node"
312 " for %s" % str(disk))
313 p_data = (pnode_info.secondary_ip, port)
314 s_data = (snode_info.secondary_ip, port)
315 if pnode == node_name:
316 disk.physical_id = p_data + s_data + (pminor, secret)
317 else: # it must be secondary, we tested above
318 disk.physical_id = s_data + p_data + (sminor, secret)
320 disk.physical_id = disk.logical_id
# Public, lock-taking wrapper around _UnlockedSetDiskID.
323 @locking.ssynchronized(_config_lock)
324 def SetDiskID(self, disk, node_name):
325 """Convert the unique ID to the ID needed on the target nodes.
327 This is used only for drbd, which needs ip/port configuration.
329 The routine descends down and updates its children also, because
330 this helps when the only the top device is passed to the remote
334 return self._UnlockedSetDiskID(disk, node_name)
# Return a (previously allocated) TCP/UDP port to the cluster's free-port
# pool and persist the change. Only int ports are accepted.
# NOTE(review): the _WriteConfig() call that presumably follows the pool
# update is missing from this extract.
336 @locking.ssynchronized(_config_lock)
337 def AddTcpUdpPort(self, port):
338 """Adds a new port to the available port pool.
341 if not isinstance(port, int):
342 raise errors.ProgrammerError("Invalid type passed for port")
344 self._config_data.cluster.tcpudp_port_pool.add(port)
# Return a copy of the free TCP/UDP port pool (copy, so callers cannot
# mutate the config's set).
347 @locking.ssynchronized(_config_lock, shared=1)
348 def GetPortList(self):
349 """Returns a copy of the current port list.
352 return self._config_data.cluster.tcpudp_port_pool.copy()
# Allocate a TCP/UDP port: pop one from the free pool if non-empty,
# otherwise take highest_used_port + 1 (raising ConfigurationError if
# that would exceed the DRBD port range) and record the new high-water
# mark. The allocated port is presumably returned and the config written
# -- those trailing lines are missing from this extract.
354 @locking.ssynchronized(_config_lock)
355 def AllocatePort(self):
358 The port will be taken from the available port pool or from the
359 default port range (and in this case we increase
363 # If there are TCP/IP ports configured, we use them first.
364 if self._config_data.cluster.tcpudp_port_pool:
365 port = self._config_data.cluster.tcpudp_port_pool.pop()
367 port = self._config_data.cluster.highest_used_port + 1
368 if port >= constants.LAST_DRBD_PORT:
369 raise errors.ConfigurationError("The highest used port is greater"
370 " than %s. Aborting." %
371 constants.LAST_DRBD_PORT)
372 self._config_data.cluster.highest_used_port = port
# Build {node_name: {minor: instance_name}} covering every DRBD minor in
# use: both the temporary reservations in self._temporary_drbds and the
# minors recorded in each instance's DRBD8 disks (recursively including
# child disks). Raises ProgrammerError on a duplicate minor.
# NOTE(review): the return of my_dict and parts of the ProgrammerError
# message are missing from this extract; despite the helper's name,
# the values handled here are DRBD *minors*, not ports.
377 def _ComputeDRBDMap(self, instance):
378 """Compute the used DRBD minor/nodes.
380 Return: dictionary of node_name: dict of minor: instance_name. The
381 returned dict will have all the nodes in it (even if with an empty
385 def _AppendUsedPorts(instance_name, disk, used):
386 if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
387 nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
388 for node, port in ((nodeA, minorA), (nodeB, minorB)):
389 assert node in used, "Instance node not found in node list"
390 if port in used[node]:
391 raise errors.ProgrammerError("DRBD minor already used:"
393 (node, port, instance_name,
396 used[node][port] = instance_name
398 for child in disk.children:
399 _AppendUsedPorts(instance_name, child, used)
401 my_dict = dict((node, {}) for node in self._config_data.nodes)
402 for (node, minor), instance in self._temporary_drbds.iteritems():
403 my_dict[node][minor] = instance
404 for instance in self._config_data.instances.itervalues():
405 for disk in instance.disks:
406 _AppendUsedPorts(instance.name, disk, my_dict)
# Allocate one free DRBD minor per requested node (a node may appear more
# than once to get several minors), reserving each in
# self._temporary_drbds so concurrent allocations cannot collide; returns
# the minors in the same order as `nodes`.
# NOTE(review): the per-node loop body is only partially present in this
# extract -- the result-list setup, the empty-map fast path's bookkeeping,
# the utils.FirstFree fallback to "next minor", and the final return are
# missing.
409 @locking.ssynchronized(_config_lock)
410 def AllocateDRBDMinor(self, nodes, instance):
411 """Allocate a drbd minor.
413 The free minor will be automatically computed from the existing
414 devices. A node can be given multiple times in order to allocate
415 multiple minors. The result is the list of minors, in the same
416 order as the passed nodes.
419 d_map = self._ComputeDRBDMap(instance)
424 # no minors used, we can start at 0
427 self._temporary_drbds[(nname, 0)] = instance
431 ffree = utils.FirstFree(keys)
433 # return the next minor
434 # TODO: implement high-limit check
439 ndata[minor] = instance
440 assert (nname, minor) not in self._temporary_drbds, \
441 "Attempt to reuse reserved DRBD minor"
442 self._temporary_drbds[(nname, minor)] = instance
443 logging.debug("Request to allocate drbd minors, input: %s, returning %s",
# Drop every temporary DRBD-minor reservation belonging to `instance`.
# Iterates over .items() (a snapshot in py2) so deleting during the loop
# is safe. Called on both success and error paths of instance add/update.
# NOTE(review): the "if name == instance:" test line is missing from this
# extract between the loop header and the del.
447 @locking.ssynchronized(_config_lock)
448 def ReleaseDRBDMinors(self, instance):
449 """Release temporary drbd minors allocated for a given instance.
451 This should be called on both the error paths and on the success
452 paths (after the instance has been added or updated).
454 @type instance: string
455 @param instance: the instance for which temporary minors should be
459 for key, name in self._temporary_drbds.items():
461 del self._temporary_drbds[key]
# Read-locked accessor: the configuration data's version number.
463 @locking.ssynchronized(_config_lock, shared=1)
464 def GetConfigVersion(self):
465 """Get the configuration version.
467 @return: Config version
470 return self._config_data.version
# Read-locked accessor: the cluster name.
472 @locking.ssynchronized(_config_lock, shared=1)
473 def GetClusterName(self):
476 @return: Cluster name
479 return self._config_data.cluster.cluster_name
# Read-locked accessor: hostname of the cluster's master node.
481 @locking.ssynchronized(_config_lock, shared=1)
482 def GetMasterNode(self):
483 """Get the hostname of the master node for this cluster.
485 @return: Master hostname
488 return self._config_data.cluster.master_node
# Read-locked accessor: IP address of the cluster's master node.
490 @locking.ssynchronized(_config_lock, shared=1)
491 def GetMasterIP(self):
492 """Get the IP of the master node for this cluster.
497 return self._config_data.cluster.master_ip
# Read-locked accessor: network device carrying the master IP.
499 @locking.ssynchronized(_config_lock, shared=1)
500 def GetMasterNetdev(self):
501 """Get the master network device for this cluster.
504 return self._config_data.cluster.master_netdev
# Read-locked accessor: base directory for file-backed storage.
506 @locking.ssynchronized(_config_lock, shared=1)
507 def GetFileStorageDir(self):
508 """Get the file storage dir for this cluster.
511 return self._config_data.cluster.file_storage_dir
# Read-locked accessor: the cluster's default hypervisor.
513 @locking.ssynchronized(_config_lock, shared=1)
514 def GetHypervisorType(self):
515 """Get the hypervisor type for this cluster.
518 return self._config_data.cluster.default_hypervisor
# Read-locked accessor: the cluster's public RSA host key.
520 @locking.ssynchronized(_config_lock, shared=1)
521 def GetHostKey(self):
522 """Return the rsa hostkey from the config.
528 return self._config_data.cluster.rsahostkeypub
# Register a newly created objects.Instance in the config (serial number
# starts at 1) and persist.
# NOTE(review): the _WriteConfig() call that presumably follows the dict
# insert is missing from this extract.
530 @locking.ssynchronized(_config_lock)
531 def AddInstance(self, instance):
532 """Add an instance to the config.
534 This should be used after creating a new instance.
537 instance: the instance object
539 if not isinstance(instance, objects.Instance):
540 raise errors.ProgrammerError("Invalid type passed to AddInstance")
542 if instance.disk_template != constants.DT_DISKLESS:
543 all_lvs = instance.MapLVsByNode()
544 logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
546 instance.serial_no = 1
547 self._config_data.instances[instance.name] = instance
# Internal: set an instance's status to "up" or "down" (only those two
# values are legal), bumping its serial number only when the status
# actually changes; unknown instances raise ConfigurationError.
# NOTE(review): the argument lines of both error messages and the
# _WriteConfig() persistence call are missing from this extract.
550 def _SetInstanceStatus(self, instance_name, status):
551 """Set the instance's status to a given value.
554 if status not in ("up", "down"):
555 raise errors.ProgrammerError("Invalid status '%s' passed to"
556 " ConfigWriter._SetInstanceStatus()" %
559 if instance_name not in self._config_data.instances:
560 raise errors.ConfigurationError("Unknown instance '%s'" %
562 instance = self._config_data.instances[instance_name]
563 if instance.status != status:
564 instance.status = status
565 instance.serial_no += 1
# Lock-taking wrapper: mark an instance "up" via _SetInstanceStatus.
568 @locking.ssynchronized(_config_lock)
569 def MarkInstanceUp(self, instance_name):
570 """Mark the instance status to up in the config.
573 self._SetInstanceStatus(instance_name, "up")
# Delete an instance from the configuration (ConfigurationError if it is
# unknown).
# NOTE(review): the _WriteConfig() persistence call is missing from this
# extract.
575 @locking.ssynchronized(_config_lock)
576 def RemoveInstance(self, instance_name):
577 """Remove the instance from the configuration.
580 if instance_name not in self._config_data.instances:
581 raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
582 del self._config_data.instances[instance_name]
# Atomically rename an instance inside the config: remove it under the
# old key, rewrite file-based disks' logical/physical id paths to use the
# new name, and re-insert it under inst.name.
# NOTE(review): the line assigning inst.name = new_name, the tail of the
# path-rebuilding tuple, and the _WriteConfig() call are missing from
# this extract.
585 @locking.ssynchronized(_config_lock)
586 def RenameInstance(self, old_name, new_name):
587 """Rename an instance.
589 This needs to be done in ConfigWriter and not by RemoveInstance
590 combined with AddInstance as only we can guarantee an atomic
594 if old_name not in self._config_data.instances:
595 raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
596 inst = self._config_data.instances[old_name]
597 del self._config_data.instances[old_name]
600 for disk in inst.disks:
601 if disk.dev_type == constants.LD_FILE:
602 # rename the file paths in logical and physical id
603 file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
604 disk.physical_id = disk.logical_id = (disk.logical_id[0],
605 os.path.join(file_storage_dir,
609 self._config_data.instances[inst.name] = inst
# Lock-taking wrapper: mark an instance "down" via _SetInstanceStatus.
612 @locking.ssynchronized(_config_lock)
613 def MarkInstanceDown(self, instance_name):
614 """Mark the status of an instance to down in the configuration.
617 self._SetInstanceStatus(instance_name, "down")
# Internal (lock already held): list of all instance names.
619 def _UnlockedGetInstanceList(self):
620 """Get the list of instances.
622 This function is for internal use, when the config lock is already held.
625 return self._config_data.instances.keys()
# Read-locked wrapper around _UnlockedGetInstanceList; includes instances
# in every admin state.
627 @locking.ssynchronized(_config_lock, shared=1)
628 def GetInstanceList(self):
629 """Get the list of instances.
632 array of instances, ex. ['instance2.example.com','instance1.example.com']
633 these contains all the instances, also the ones in Admin_down state
636 return self._UnlockedGetInstanceList()
# Expand a partial instance name against the known instance names
# (delegates matching semantics to utils.MatchNameComponent).
638 @locking.ssynchronized(_config_lock, shared=1)
639 def ExpandInstanceName(self, short_name):
640 """Attempt to expand an incomplete instance name.
643 return utils.MatchNameComponent(short_name,
644 self._config_data.instances.keys())
# Internal (lock already held): the objects.Instance for a name.
# NOTE(review): the early "return None" for unknown names is missing from
# this extract (line 653/654).
646 def _UnlockedGetInstanceInfo(self, instance_name):
647 """Returns informations about an instance.
649 This function is for internal use, when the config lock is already held.
652 if instance_name not in self._config_data.instances:
655 return self._config_data.instances[instance_name]
# Read-locked wrapper around _UnlockedGetInstanceInfo.
657 @locking.ssynchronized(_config_lock, shared=1)
658 def GetInstanceInfo(self, instance_name):
659 """Returns informations about an instance.
661 It takes the information from the configuration file. Other informations of
662 an instance are taken from the live systems.
665 instance: name of the instance, ex instance1.example.com
671 return self._UnlockedGetInstanceInfo(instance_name)
# Read-locked: dict mapping every instance name to its Instance object.
# NOTE(review): the "return my_dict" line is missing from this extract.
673 @locking.ssynchronized(_config_lock, shared=1)
674 def GetAllInstancesInfo(self):
675 """Get the configuration of all instances.
678 @returns: dict of (instance, instance_info), where instance_info is what
679 would GetInstanceInfo return for the node
682 my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
683 for instance in self._UnlockedGetInstanceList()])
# Add an objects.Node to the config, bumping the cluster serial number.
# NOTE(review): node.serial_no initialization and the _WriteConfig() call
# are missing from this extract; the logging call uses "%" interpolation
# instead of lazy logging args.
686 @locking.ssynchronized(_config_lock)
687 def AddNode(self, node):
688 """Add a node to the configuration.
691 node: an object.Node instance
694 logging.info("Adding node %s to configuration" % node.name)
697 self._config_data.nodes[node.name] = node
698 self._config_data.cluster.serial_no += 1
# Remove a node from the config (ConfigurationError if unknown), bumping
# the cluster serial number.
# NOTE(review): the _WriteConfig() call is missing from this extract; the
# logging call uses "%" interpolation instead of lazy logging args.
701 @locking.ssynchronized(_config_lock)
702 def RemoveNode(self, node_name):
703 """Remove a node from the configuration.
706 logging.info("Removing node %s from configuration" % node_name)
708 if node_name not in self._config_data.nodes:
709 raise errors.ConfigurationError("Unknown node '%s'" % node_name)
711 del self._config_data.nodes[node_name]
712 self._config_data.cluster.serial_no += 1
# Expand a partial node name against the known node names (docstring says
# "instance" but the code clearly matches node names).
715 @locking.ssynchronized(_config_lock, shared=1)
716 def ExpandNodeName(self, short_name):
717 """Attempt to expand an incomplete instance name.
720 return utils.MatchNameComponent(short_name,
721 self._config_data.nodes.keys())
# Internal (lock already held): the objects.Node for a name.
# NOTE(review): the early "return None" for unknown names is missing from
# this extract (lines 734-735).
723 def _UnlockedGetNodeInfo(self, node_name):
724 """Get the configuration of a node, as stored in the config.
726 This function is for internal use, when the config lock is already held.
728 Args: node: nodename (tuple) of the node
730 Returns: the node object
733 if node_name not in self._config_data.nodes:
736 return self._config_data.nodes[node_name]
# Read-locked wrapper around _UnlockedGetNodeInfo.
739 @locking.ssynchronized(_config_lock, shared=1)
740 def GetNodeInfo(self, node_name):
741 """Get the configuration of a node, as stored in the config.
743 Args: node: nodename (tuple) of the node
745 Returns: the node object
748 return self._UnlockedGetNodeInfo(node_name)
# Internal (lock already held): list of all node names.
750 def _UnlockedGetNodeList(self):
751 """Return the list of nodes which are in the configuration.
753 This function is for internal use, when the config lock is already held.
756 return self._config_data.nodes.keys()
# Read-locked wrapper around _UnlockedGetNodeList.
759 @locking.ssynchronized(_config_lock, shared=1)
760 def GetNodeList(self):
761 """Return the list of nodes which are in the configuration.
764 return self._UnlockedGetNodeList()
# Read-locked: dict mapping every node name to its Node object.
# NOTE(review): the "return my_dict" line is missing from this extract.
766 @locking.ssynchronized(_config_lock, shared=1)
767 def GetAllNodesInfo(self):
768 """Get the configuration of all nodes.
771 @return: dict of (node, node_info), where node_info is what
772 would GetNodeInfo return for the node
775 my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
776 for node in self._UnlockedGetNodeList()])
# Internal (lock already held): count current master candidates and cap
# the target by candidate_pool_size; returns (mc_now, mc_max).
# NOTE(review): the initialization of mc_now/mc_max, the offline-node
# exclusion, and the counter increments are missing from this extract.
779 def _UnlockedGetMasterCandidateStats(self):
780 """Get the number of current and maximum desired and possible candidates.
783 @return: tuple of (current, desired and possible)
787 for node in self._config_data.nodes.itervalues():
790 if node.master_candidate:
792 mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
793 return (mc_now, mc_max)
# Read-locked wrapper around _UnlockedGetMasterCandidateStats.
795 @locking.ssynchronized(_config_lock, shared=1)
796 def GetMasterCandidateStats(self):
797 """Get the number of current and maximum possible candidates.
799 This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
802 @return: tuple of (current, max)
805 return self._UnlockedGetMasterCandidateStats()
# Promote random eligible nodes (not already candidates, not offline) to
# master candidate until the pool reaches its target size; returns the
# list of promoted node names, bumping the cluster serial and persisting
# when anything changed. Shuffling avoids always promoting the same nodes.
# NOTE(review): the mc_now < mc_max guard, the `continue` for ineligible
# nodes, the mc_now increment, the loop-exit check, the _WriteConfig()
# call, and the final return are missing from this extract.
807 @locking.ssynchronized(_config_lock)
808 def MaintainCandidatePool(self):
809 """Try to grow the candidate pool to the desired size.
812 @return: list with the adjusted node names
815 mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
818 node_list = self._config_data.nodes.keys()
819 random.shuffle(node_list)
820 for name in node_list:
823 node = self._config_data.nodes[name]
824 if node.master_candidate or node.offline:
826 mod_list.append(node.name)
827 node.master_candidate = True
831 # this should not happen
832 logging.warning("Warning: MaintainCandidatePool didn't manage to"
833 " fill the candidate pool (%d/%d)", mc_now, mc_max)
835 self._config_data.cluster.serial_no += 1
# Increment the top-level configuration serial number (called before each
# write so distributed copies can be ordered).
840 def _BumpSerialNo(self):
841 """Bump up the serial number of the config.
844 self._config_data.serial_no += 1
# Load and deserialize the config file, validate its version, require the
# cluster.rsahostkeypub field, and reset _last_cluster_serial to -1 so the
# next _WriteConfig redistributes ssconf files.
# NOTE(review): the try: opener around serializer.Load and the f.close()
# (presumably in a finally:) are missing from this extract; py2-style
# "except Exception, err" syntax.
846 def _OpenConfig(self):
847 """Read the config data from disk.
849 In case we already have configuration data and the config file has
850 the same mtime as when we read it, we skip the parsing of the
851 file, since de-serialisation could be slow.
854 f = open(self._cfg_file, 'r')
857 data = objects.ConfigData.FromDict(serializer.Load(f.read()))
858 except Exception, err:
859 raise errors.ConfigurationError(err)
863 # Make sure the configuration has the right version
864 _ValidateConfig(data)
866 if (not hasattr(data, 'cluster') or
867 not hasattr(data.cluster, 'rsahostkeypub')):
868 raise errors.ConfigurationError("Incomplete configuration"
869 " (missing cluster.rsahostkeypub)")
870 self._config_data = data
871 # init the last serial as -1 so that the next write will cause
873 self._last_cluster_serial = -1
# Push the config file to every master-candidate node except ourselves via
# RPC, logging (not raising) per-node copy failures.
# NOTE(review): the offline early-return, node_list/addr_list init, the
# `continue` statements for self and non-candidates, the per-node result
# check inside the final loop, and the return of the aggregate status are
# missing from this extract.
875 def _DistributeConfig(self):
876 """Distribute the configuration to the other nodes.
878 Currently, this only copies the configuration file. In the future,
879 it could be used to encapsulate the 2/3-phase update mechanism.
888 myhostname = self._my_hostname
889 # we can skip checking whether _UnlockedGetNodeInfo returns None
890 # since the node list comes from _UnlocketGetNodeList, and we are
891 # called with the lock held, so no modifications should take place
893 for node_name in self._UnlockedGetNodeList():
894 if node_name == myhostname:
896 node_info = self._UnlockedGetNodeInfo(node_name)
897 if not node_info.master_candidate:
899 node_list.append(node_info.name)
900 addr_list.append(node_info.primary_ip)
902 result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
903 address_list=addr_list)
904 for node in node_list:
906 logging.error("copy of file %s to node %s failed",
907 self._cfg_file, node)
# Persist the config: serialize to a mkstemp temp file in the destination
# directory, atomically os.rename over the destination, bump write_count,
# redistribute to master candidates, and (unless offline) push ssconf
# values to all nodes when the cluster serial has advanced.
# NOTE(review): the _BumpSerialNo() call, the f.write/f.flush/f.close
# sequence, and the os.chmod of the temp file are missing from this
# extract; the temp file is only safe against crashes because the rename
# on the same filesystem is atomic.
911 def _WriteConfig(self, destination=None):
912 """Write the configuration data to persistent storage.
915 if destination is None:
916 destination = self._cfg_file
918 txt = serializer.Dump(self._config_data.ToDict())
919 dir_name, file_name = os.path.split(destination)
920 fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
921 f = os.fdopen(fd, 'w')
927 # we don't need to do os.close(fd) as f.close() did it
928 os.rename(name, destination)
929 self.write_count += 1
931 # and redistribute the config file to master candidates
932 self._DistributeConfig()
934 # Write ssconf files on all nodes (including locally)
935 if self._last_cluster_serial < self._config_data.cluster.serial_no:
936 if not self._offline:
937 rpc.RpcRunner.call_write_ssconf_files(self._UnlockedGetNodeList(),
938 self._UnlockedGetSsconfValues())
939 self._last_cluster_serial = self._config_data.cluster.serial_no
941 def _UnlockedGetSsconfValues(self):
942 """Return the values needed by ssconf.
945 @return: a dictionary with keys the ssconf names and values their
950 node_names = utils.NiceSort(self._UnlockedGetNodeList())
951 node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
953 off_data = fn(node.name for node in node_info if node.offline)
954 mc_data = fn(node.name for node in node_info if node.master_candidate)
955 node_data = fn(node_names)
957 cluster = self._config_data.cluster
959 constants.SS_CLUSTER_NAME: cluster.cluster_name,
960 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
961 constants.SS_MASTER_CANDIDATES: mc_data,
962 constants.SS_MASTER_IP: cluster.master_ip,
963 constants.SS_MASTER_NETDEV: cluster.master_netdev,
964 constants.SS_MASTER_NODE: cluster.master_node,
965 constants.SS_NODE_LIST: node_data,
966 constants.SS_OFFLINE_NODES: off_data,
# Create the initial cluster configuration: one node (the master), no
# instances, and write it out.
# NOTE(review): the nodes-dict literal's opening, the remaining
# ConfigData keyword arguments (instances/nodes/serial_no), and the
# _WriteConfig() call are missing from this extract.
969 @locking.ssynchronized(_config_lock)
970 def InitConfig(self, version, cluster_config, master_node_config):
971 """Create the initial cluster configuration.
973 It will contain the current node, which will also be the master
974 node, and no instances.
977 @param version: Configuration version
978 @type cluster_config: objects.Cluster
979 @param cluster_config: Cluster configuration
980 @type master_node_config: objects.Node
981 @param master_node_config: Master node configuration
985 master_node_config.name: master_node_config,
988 self._config_data = objects.ConfigData(version=version,
989 cluster=cluster_config,
# Read-locked accessor: the cluster's LVM volume group name.
# NOTE(review): the "def GetVGName(self):" line itself is missing from
# this extract (original line 996).
995 @locking.ssynchronized(_config_lock, shared=1)
997 """Return the volume group name.
1000 return self._config_data.cluster.volume_group_name
# Set the cluster's volume group name, bumping the cluster serial.
# NOTE(review): the _WriteConfig() persistence call is missing from this
# extract.
1002 @locking.ssynchronized(_config_lock)
1003 def SetVGName(self, vg_name):
1004 """Set the volume group name.
1007 self._config_data.cluster.volume_group_name = vg_name
1008 self._config_data.cluster.serial_no += 1
# Read-locked accessor: the cluster's default network bridge.
1011 @locking.ssynchronized(_config_lock, shared=1)
1012 def GetDefBridge(self):
1013 """Return the default bridge.
1016 return self._config_data.cluster.default_bridge
# Read-locked accessor: the cluster's MAC address prefix.
1018 @locking.ssynchronized(_config_lock, shared=1)
1019 def GetMACPrefix(self):
1020 """Return the mac prefix.
1023 return self._config_data.cluster.mac_prefix
# Read-locked accessor: the whole objects.Cluster (note: returns the live
# object, not a copy -- mutations must go through Update()).
1025 @locking.ssynchronized(_config_lock, shared=1)
1026 def GetClusterInfo(self):
1027 """Returns informations about the cluster
1033 return self._config_data.cluster
# Persist modifications made to a config object previously handed out by
# GetInstanceInfo/GetNodeInfo/GetClusterInfo: verify by identity that the
# object still belongs to the current configuration (all three isinstance
# branches compare/contain the live object), bump its serial number --
# plus the cluster serial for node updates -- and write everything out.
# NOTE(review): the "if not test:" guard before the ConfigurationError,
# the "update_serial = True"-conditional check around the cluster-serial
# bump, and the _WriteConfig() call are missing from this extract.
1035 @locking.ssynchronized(_config_lock)
1036 def Update(self, target):
1037 """Notify function to be called after updates.
1039 This function must be called when an object (as returned by
1040 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
1041 caller wants the modifications saved to the backing store. Note
1042 that all modified objects will be saved, but the target argument
1043 is the one the caller wants to ensure that it's saved.
1046 if self._config_data is None:
1047 raise errors.ProgrammerError("Configuration file not read,"
1049 update_serial = False
1050 if isinstance(target, objects.Cluster):
1051 test = target == self._config_data.cluster
1052 elif isinstance(target, objects.Node):
1053 test = target in self._config_data.nodes.values()
1054 update_serial = True
1055 elif isinstance(target, objects.Instance):
1056 test = target in self._config_data.instances.values()
1058 raise errors.ProgrammerError("Invalid object type (%s) passed to"
1059 " ConfigWriter.Update" % type(target))
1061 raise errors.ConfigurationError("Configuration updated since object"
1062 " has been read or unknown object")
1063 target.serial_no += 1
1066 # for node updates, we need to increase the cluster serial too
1067 self._config_data.cluster.serial_no += 1