4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""
import os
import random
import logging
import tempfile

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
# Module-level lock guarding all ConfigWriter operations; methods below
# acquire it shared (readers) or exclusive (writers) via the
# locking.ssynchronized decorator.
_config_lock = locking.SharedLock()
def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @param data: the configuration object (must have a C{version} attribute)
  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    # restore the missing first format argument: the message has two %s
    # placeholders, so both the found and the expected version are needed
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))
68 """The interface to the cluster configuration.
71 def __init__(self, cfg_file=None, offline=False):
73 self._lock = _config_lock
74 self._config_data = None
75 self._offline = offline
77 self._cfg_file = constants.CLUSTER_CONF_FILE
79 self._cfg_file = cfg_file
80 self._temporary_ids = set()
81 self._temporary_drbds = {}
82 # Note: in order to prevent errors when resolving our name in
83 # _DistributeConfig, we compute it here once and reuse it; it's
84 # better to raise an error before starting to modify the config
85 # file than after it was modified
86 self._my_hostname = utils.HostInfo().name
87 self._last_cluster_serial = -1
90 # this method needs to be static, so that we can call it on the class
93 """Check if the cluster is configured.
96 return os.path.exists(constants.CLUSTER_CONF_FILE)
98 @locking.ssynchronized(_config_lock, shared=1)
99 def GenerateMAC(self):
100 """Generate a MAC for an instance.
102 This should check the current instances for duplicates.
105 prefix = self._config_data.cluster.mac_prefix
106 all_macs = self._AllMACs()
109 byte1 = random.randrange(0, 256)
110 byte2 = random.randrange(0, 256)
111 byte3 = random.randrange(0, 256)
112 mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
113 if mac not in all_macs:
117 raise errors.ConfigurationError("Can't generate unique MAC")
120 @locking.ssynchronized(_config_lock, shared=1)
121 def IsMacInUse(self, mac):
122 """Predicate: check if the specified MAC is in use in the Ganeti cluster.
124 This only checks instances managed by this cluster, it does not
125 check for potential collisions elsewhere.
128 all_macs = self._AllMACs()
129 return mac in all_macs
131 @locking.ssynchronized(_config_lock, shared=1)
132 def GenerateDRBDSecret(self):
133 """Generate a DRBD secret.
135 This checks the current disks for duplicates.
138 all_secrets = self._AllDRBDSecrets()
141 secret = utils.GenerateSecret()
142 if secret not in all_secrets:
146 raise errors.ConfigurationError("Can't generate unique DRBD secret")
149 def _ComputeAllLVs(self):
150 """Compute the list of all LVs.
154 for instance in self._config_data.instances.values():
155 node_data = instance.MapLVsByNode()
156 for lv_list in node_data.values():
157 lvnames.update(lv_list)
160 @locking.ssynchronized(_config_lock, shared=1)
161 def GenerateUniqueID(self, exceptions=None):
162 """Generate an unique disk name.
164 This checks the current node, instances and disk names for
167 @param exceptions: a list with some other names which should be checked
168 for uniqueness (used for example when you want to get
169 more than one id at one time without adding each one in
170 turn to the config file)
173 @return: the unique id
177 existing.update(self._temporary_ids)
178 existing.update(self._ComputeAllLVs())
179 existing.update(self._config_data.instances.keys())
180 existing.update(self._config_data.nodes.keys())
181 if exceptions is not None:
182 existing.update(exceptions)
185 unique_id = utils.NewUUID()
186 if unique_id not in existing and unique_id is not None:
189 raise errors.ConfigurationError("Not able generate an unique ID"
190 " (last tried ID: %s" % unique_id)
191 self._temporary_ids.add(unique_id)
195 """Return all MACs present in the config.
198 @return: the list of all MACs
202 for instance in self._config_data.instances.values():
203 for nic in instance.nics:
204 result.append(nic.mac)
208 def _AllDRBDSecrets(self):
209 """Return all DRBD secrets present in the config.
212 @return: the list of all DRBD secrets
215 def helper(disk, result):
216 """Recursively gather secrets from this disk."""
217 if disk.dev_type == constants.DT_DRBD8:
218 result.append(disk.logical_id[5])
220 for child in disk.children:
221 helper(child, result)
224 for instance in self._config_data.instances.values():
225 for disk in instance.disks:
230 @locking.ssynchronized(_config_lock, shared=1)
231 def VerifyConfig(self):
238 data = self._config_data
239 for instance_name in data.instances:
240 instance = data.instances[instance_name]
241 if instance.primary_node not in data.nodes:
242 result.append("instance '%s' has invalid primary node '%s'" %
243 (instance_name, instance.primary_node))
244 for snode in instance.secondary_nodes:
245 if snode not in data.nodes:
246 result.append("instance '%s' has invalid secondary node '%s'" %
247 (instance_name, snode))
248 for idx, nic in enumerate(instance.nics):
249 if nic.mac in seen_macs:
250 result.append("instance '%s' has NIC %d mac %s duplicate" %
251 (instance_name, idx, nic.mac))
253 seen_macs.append(nic.mac)
255 # gather the drbd ports for duplicate checks
256 for dsk in instance.disks:
257 if dsk.dev_type in constants.LDS_DRBD:
258 tcp_port = dsk.logical_id[2]
259 if tcp_port not in ports:
261 ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
262 # gather network port reservation
263 net_port = getattr(instance, "network_port", None)
264 if net_port is not None:
265 if net_port not in ports:
267 ports[net_port].append((instance.name, "network port"))
269 # cluster-wide pool of free ports
270 for free_port in data.cluster.tcpudp_port_pool:
271 if free_port not in ports:
272 ports[free_port] = []
273 ports[free_port].append(("cluster", "port marked as free"))
275 # compute tcp/udp duplicate ports
281 txt = ", ".join(["%s/%s" % val for val in pdata])
282 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
284 # highest used tcp port check
286 if keys[-1] > data.cluster.highest_used_port:
287 result.append("Highest used port mismatch, saved %s, computed %s" %
288 (data.cluster.highest_used_port, keys[-1]))
290 if not data.nodes[data.cluster.master_node].master_candidate:
291 result.append("Master node is not a master candidate")
293 mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
295 result.append("Not enough master candidates: actual %d, target %d" %
300 def _UnlockedSetDiskID(self, disk, node_name):
301 """Convert the unique ID to the ID needed on the target nodes.
303 This is used only for drbd, which needs ip/port configuration.
305 The routine descends down and updates its children also, because
306 this helps when the only the top device is passed to the remote
309 This function is for internal use, when the config lock is already held.
313 for child in disk.children:
314 self._UnlockedSetDiskID(child, node_name)
316 if disk.logical_id is None and disk.physical_id is not None:
318 if disk.dev_type == constants.LD_DRBD8:
319 pnode, snode, port, pminor, sminor, secret = disk.logical_id
320 if node_name not in (pnode, snode):
321 raise errors.ConfigurationError("DRBD device not knowing node %s" %
323 pnode_info = self._UnlockedGetNodeInfo(pnode)
324 snode_info = self._UnlockedGetNodeInfo(snode)
325 if pnode_info is None or snode_info is None:
326 raise errors.ConfigurationError("Can't find primary or secondary node"
327 " for %s" % str(disk))
328 p_data = (pnode_info.secondary_ip, port)
329 s_data = (snode_info.secondary_ip, port)
330 if pnode == node_name:
331 disk.physical_id = p_data + s_data + (pminor, secret)
332 else: # it must be secondary, we tested above
333 disk.physical_id = s_data + p_data + (sminor, secret)
335 disk.physical_id = disk.logical_id
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    This is just a locked wrapper over L{_UnlockedSetDiskID}.

    """
    return self._UnlockedSetDiskID(disk, node_name)
351 @locking.ssynchronized(_config_lock)
352 def AddTcpUdpPort(self, port):
353 """Adds a new port to the available port pool.
356 if not isinstance(port, int):
357 raise errors.ProgrammerError("Invalid type passed for port")
359 self._config_data.cluster.tcpudp_port_pool.add(port)
  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    @rtype: set
    @return: a copy of the tcp/udp port pool (safe for the caller to mutate)

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()
369 @locking.ssynchronized(_config_lock)
370 def AllocatePort(self):
373 The port will be taken from the available port pool or from the
374 default port range (and in this case we increase
378 # If there are TCP/IP ports configured, we use them first.
379 if self._config_data.cluster.tcpudp_port_pool:
380 port = self._config_data.cluster.tcpudp_port_pool.pop()
382 port = self._config_data.cluster.highest_used_port + 1
383 if port >= constants.LAST_DRBD_PORT:
384 raise errors.ConfigurationError("The highest used port is greater"
385 " than %s. Aborting." %
386 constants.LAST_DRBD_PORT)
387 self._config_data.cluster.highest_used_port = port
392 def _ComputeDRBDMap(self, instance):
393 """Compute the used DRBD minor/nodes.
395 @return: dictionary of node_name: dict of minor: instance_name;
396 the returned dict will have all the nodes in it (even if with
400 def _AppendUsedPorts(instance_name, disk, used):
401 if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
402 nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
403 for node, port in ((nodeA, minorA), (nodeB, minorB)):
404 assert node in used, "Instance node not found in node list"
405 if port in used[node]:
406 raise errors.ProgrammerError("DRBD minor already used:"
408 (node, port, instance_name,
411 used[node][port] = instance_name
413 for child in disk.children:
414 _AppendUsedPorts(instance_name, child, used)
416 my_dict = dict((node, {}) for node in self._config_data.nodes)
417 for (node, minor), instance in self._temporary_drbds.iteritems():
418 my_dict[node][minor] = instance
419 for instance in self._config_data.instances.itervalues():
420 for disk in instance.disks:
421 _AppendUsedPorts(instance.name, disk, my_dict)
424 @locking.ssynchronized(_config_lock)
425 def AllocateDRBDMinor(self, nodes, instance):
426 """Allocate a drbd minor.
428 The free minor will be automatically computed from the existing
429 devices. A node can be given multiple times in order to allocate
430 multiple minors. The result is the list of minors, in the same
431 order as the passed nodes.
434 d_map = self._ComputeDRBDMap(instance)
439 # no minors used, we can start at 0
442 self._temporary_drbds[(nname, 0)] = instance
446 ffree = utils.FirstFree(keys)
448 # return the next minor
449 # TODO: implement high-limit check
454 ndata[minor] = instance
455 assert (nname, minor) not in self._temporary_drbds, \
456 "Attempt to reuse reserved DRBD minor"
457 self._temporary_drbds[(nname, minor)] = instance
458 logging.debug("Request to allocate drbd minors, input: %s, returning %s",
462 @locking.ssynchronized(_config_lock)
463 def ReleaseDRBDMinors(self, instance):
464 """Release temporary drbd minors allocated for a given instance.
466 This should be called on both the error paths and on the success
467 paths (after the instance has been added or updated).
469 @type instance: string
470 @param instance: the instance for which temporary minors should be
474 for key, name in self._temporary_drbds.items():
476 del self._temporary_drbds[key]
  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version
  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get the cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev
  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    # note: this returns the *default* hypervisor of the cluster
    return self._config_data.cluster.default_hypervisor
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub
545 @locking.ssynchronized(_config_lock)
546 def AddInstance(self, instance):
547 """Add an instance to the config.
549 This should be used after creating a new instance.
551 @type instance: L{objects.Instance}
552 @param instance: the instance object
555 if not isinstance(instance, objects.Instance):
556 raise errors.ProgrammerError("Invalid type passed to AddInstance")
558 if instance.disk_template != constants.DT_DISKLESS:
559 all_lvs = instance.MapLVsByNode()
560 logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
562 instance.serial_no = 1
563 self._config_data.instances[instance.name] = instance
566 def _SetInstanceStatus(self, instance_name, status):
567 """Set the instance's status to a given value.
570 if status not in ("up", "down"):
571 raise errors.ProgrammerError("Invalid status '%s' passed to"
572 " ConfigWriter._SetInstanceStatus()" %
575 if instance_name not in self._config_data.instances:
576 raise errors.ConfigurationError("Unknown instance '%s'" %
578 instance = self._config_data.instances[instance_name]
579 if instance.status != status:
580 instance.status = status
581 instance.serial_no += 1
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._SetInstanceStatus(instance_name, "up")
591 @locking.ssynchronized(_config_lock)
592 def RemoveInstance(self, instance_name):
593 """Remove the instance from the configuration.
596 if instance_name not in self._config_data.instances:
597 raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
598 del self._config_data.instances[instance_name]
601 @locking.ssynchronized(_config_lock)
602 def RenameInstance(self, old_name, new_name):
603 """Rename an instance.
605 This needs to be done in ConfigWriter and not by RemoveInstance
606 combined with AddInstance as only we can guarantee an atomic
610 if old_name not in self._config_data.instances:
611 raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
612 inst = self._config_data.instances[old_name]
613 del self._config_data.instances[old_name]
616 for disk in inst.disks:
617 if disk.dev_type == constants.LD_FILE:
618 # rename the file paths in logical and physical id
619 file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
620 disk.physical_id = disk.logical_id = (disk.logical_id[0],
621 os.path.join(file_storage_dir,
625 self._config_data.instances[inst.name] = inst
  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, "down")
  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())
661 def _UnlockedGetInstanceInfo(self, instance_name):
662 """Returns informations about an instance.
664 This function is for internal use, when the config lock is already held.
667 if instance_name not in self._config_data.instances:
670 return self._config_data.instances[instance_name]
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns informations about an instance.

    It takes the information from the configuration file. Other informations of
    an instance are taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)
688 @locking.ssynchronized(_config_lock, shared=1)
689 def GetAllInstancesInfo(self):
690 """Get the configuration of all instances.
693 @returns: dict of (instance, instance_info), where instance_info is what
694 would GetInstanceInfo return for the node
697 my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
698 for instance in self._UnlockedGetInstanceList()])
701 @locking.ssynchronized(_config_lock)
702 def AddNode(self, node):
703 """Add a node to the configuration.
705 @type node: L{objects.Node}
706 @param node: a Node instance
709 logging.info("Adding node %s to configuration" % node.name)
712 self._config_data.nodes[node.name] = node
713 self._config_data.cluster.serial_no += 1
716 @locking.ssynchronized(_config_lock)
717 def RemoveNode(self, node_name):
718 """Remove a node from the configuration.
721 logging.info("Removing node %s from configuration" % node_name)
723 if node_name not in self._config_data.nodes:
724 raise errors.ConfigurationError("Unknown node '%s'" % node_name)
726 del self._config_data.nodes[node_name]
727 self._config_data.cluster.serial_no += 1
  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())
738 def _UnlockedGetNodeInfo(self, node_name):
739 """Get the configuration of a node, as stored in the config.
741 This function is for internal use, when the config lock is already
744 @param node_name: the node name, e.g. I{node1.example.com}
746 @rtype: L{objects.Node}
747 @return: the node object
750 if node_name not in self._config_data.nodes:
753 return self._config_data.nodes[node_name]
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)
  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    """
    return self._config_data.nodes.keys()
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()
789 @locking.ssynchronized(_config_lock, shared=1)
790 def GetAllNodesInfo(self):
791 """Get the configuration of all nodes.
794 @return: dict of (node, node_info), where node_info is what
795 would GetNodeInfo return for the node
798 my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
799 for node in self._UnlockedGetNodeList()])
802 def _UnlockedGetMasterCandidateStats(self):
803 """Get the number of current and maximum desired and possible candidates.
806 @return: tuple of (current, desired and possible)
810 for node in self._config_data.nodes.itervalues():
813 if node.master_candidate:
815 mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
816 return (mc_now, mc_max)
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @rtype: tuple
    @return: tuple of (current, max)

    """
    return self._UnlockedGetMasterCandidateStats()
830 @locking.ssynchronized(_config_lock)
831 def MaintainCandidatePool(self):
832 """Try to grow the candidate pool to the desired size.
835 @return: list with the adjusted nodes (L{objects.Node} instances)
838 mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
841 node_list = self._config_data.nodes.keys()
842 random.shuffle(node_list)
843 for name in node_list:
846 node = self._config_data.nodes[name]
847 if node.master_candidate or node.offline:
849 mod_list.append(node)
850 node.master_candidate = True
854 # this should not happen
855 logging.warning("Warning: MaintainCandidatePool didn't manage to"
856 " fill the candidate pool (%d/%d)", mc_now, mc_max)
858 self._config_data.cluster.serial_no += 1
  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1
869 def _OpenConfig(self):
870 """Read the config data from disk.
873 f = open(self._cfg_file, 'r')
876 data = objects.ConfigData.FromDict(serializer.Load(f.read()))
877 except Exception, err:
878 raise errors.ConfigurationError(err)
882 # Make sure the configuration has the right version
883 _ValidateConfig(data)
885 if (not hasattr(data, 'cluster') or
886 not hasattr(data.cluster, 'rsahostkeypub')):
887 raise errors.ConfigurationError("Incomplete configuration"
888 " (missing cluster.rsahostkeypub)")
889 self._config_data = data
890 # reset the last serial as -1 so that the next write will cause
892 self._last_cluster_serial = -1
894 def _DistributeConfig(self):
895 """Distribute the configuration to the other nodes.
897 Currently, this only copies the configuration file. In the future,
898 it could be used to encapsulate the 2/3-phase update mechanism.
907 myhostname = self._my_hostname
908 # we can skip checking whether _UnlockedGetNodeInfo returns None
909 # since the node list comes from _UnlocketGetNodeList, and we are
910 # called with the lock held, so no modifications should take place
912 for node_name in self._UnlockedGetNodeList():
913 if node_name == myhostname:
915 node_info = self._UnlockedGetNodeInfo(node_name)
916 if not node_info.master_candidate:
918 node_list.append(node_info.name)
919 addr_list.append(node_info.primary_ip)
921 result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
922 address_list=addr_list)
923 for node in node_list:
925 logging.error("copy of file %s to node %s failed",
926 self._cfg_file, node)
930 def _WriteConfig(self, destination=None):
931 """Write the configuration data to persistent storage.
934 if destination is None:
935 destination = self._cfg_file
937 txt = serializer.Dump(self._config_data.ToDict())
938 dir_name, file_name = os.path.split(destination)
939 fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
940 f = os.fdopen(fd, 'w')
946 # we don't need to do os.close(fd) as f.close() did it
947 os.rename(name, destination)
948 self.write_count += 1
950 # and redistribute the config file to master candidates
951 self._DistributeConfig()
953 # Write ssconf files on all nodes (including locally)
954 if self._last_cluster_serial < self._config_data.cluster.serial_no:
955 if not self._offline:
956 rpc.RpcRunner.call_write_ssconf_files(self._UnlockedGetNodeList(),
957 self._UnlockedGetSsconfValues())
958 self._last_cluster_serial = self._config_data.cluster.serial_no
960 def _UnlockedGetSsconfValues(self):
961 """Return the values needed by ssconf.
964 @return: a dictionary with keys the ssconf names and values their
969 node_names = utils.NiceSort(self._UnlockedGetNodeList())
970 node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
972 off_data = fn(node.name for node in node_info if node.offline)
973 mc_data = fn(node.name for node in node_info if node.master_candidate)
974 node_data = fn(node_names)
976 cluster = self._config_data.cluster
978 constants.SS_CLUSTER_NAME: cluster.cluster_name,
979 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
980 constants.SS_MASTER_CANDIDATES: mc_data,
981 constants.SS_MASTER_IP: cluster.master_ip,
982 constants.SS_MASTER_NETDEV: cluster.master_netdev,
983 constants.SS_MASTER_NODE: cluster.master_node,
984 constants.SS_NODE_LIST: node_data,
985 constants.SS_OFFLINE_NODES: off_data,
988 @locking.ssynchronized(_config_lock)
989 def InitConfig(self, version, cluster_config, master_node_config):
990 """Create the initial cluster configuration.
992 It will contain the current node, which will also be the master
993 node, and no instances.
996 @param version: Configuration version
997 @type cluster_config: objects.Cluster
998 @param cluster_config: Cluster configuration
999 @type master_node_config: objects.Node
1000 @param master_node_config: Master node configuration
1004 master_node_config.name: master_node_config,
1007 self._config_data = objects.ConfigData(version=version,
1008 cluster=cluster_config,
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    return self._config_data.cluster.volume_group_name
1021 @locking.ssynchronized(_config_lock)
1022 def SetVGName(self, vg_name):
1023 """Set the volume group name.
1026 self._config_data.cluster.volume_group_name = vg_name
1027 self._config_data.cluster.serial_no += 1
  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the default bridge.

    """
    return self._config_data.cluster.default_bridge
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    return self._config_data.cluster.mac_prefix
  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns informations about the cluster

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    return self._config_data.cluster
1054 @locking.ssynchronized(_config_lock)
1055 def Update(self, target):
1056 """Notify function to be called after updates.
1058 This function must be called when an object (as returned by
1059 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
1060 caller wants the modifications saved to the backing store. Note
1061 that all modified objects will be saved, but the target argument
1062 is the one the caller wants to ensure that it's saved.
1064 @param target: an instance of either L{objects.Cluster},
1065 L{objects.Node} or L{objects.Instance} which is existing in
1069 if self._config_data is None:
1070 raise errors.ProgrammerError("Configuration file not read,"
1072 update_serial = False
1073 if isinstance(target, objects.Cluster):
1074 test = target == self._config_data.cluster
1075 elif isinstance(target, objects.Node):
1076 test = target in self._config_data.nodes.values()
1077 update_serial = True
1078 elif isinstance(target, objects.Instance):
1079 test = target in self._config_data.instances.values()
1081 raise errors.ProgrammerError("Invalid object type (%s) passed to"
1082 " ConfigWriter.Update" % type(target))
1084 raise errors.ConfigurationError("Configuration updated since object"
1085 " has been read or unknown object")
1086 target.serial_no += 1
1089 # for node updates, we need to increase the cluster serial too
1090 self._config_data.cluster.serial_no += 1