4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Configuration management for Ganeti
24 This module provides the interface to the Ganeti cluster configuration.
26 The configuration data is stored on every node but is updated on the master
27 only. After each update, the master distributes the data to the other nodes.
29 Currently, the data storage format is JSON. YAML was slow and consuming too
39 from ganeti import errors
40 from ganeti import locking
41 from ganeti import utils
42 from ganeti import constants
43 from ganeti import rpc
44 from ganeti import objects
45 from ganeti import serializer
48 _config_lock = locking.SharedLock()
def _ValidateConfig(data):
  """Verify that a configuration objects matches our required version.

  @param data: the parsed configuration object
  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))
60 """The interface to the cluster configuration.
63 def __init__(self, cfg_file=None, offline=False):
65 self._lock = _config_lock
66 self._config_data = None
67 self._offline = offline
69 self._cfg_file = constants.CLUSTER_CONF_FILE
71 self._cfg_file = cfg_file
72 self._temporary_ids = set()
73 self._temporary_drbds = {}
74 # Note: in order to prevent errors when resolving our name in
75 # _DistributeConfig, we compute it here once and reuse it; it's
76 # better to raise an error before starting to modify the config
77 # file than after it was modified
78 self._my_hostname = utils.HostInfo().name
81 # this method needs to be static, so that we can call it on the class
84 """Check if the cluster is configured.
87 return os.path.exists(constants.CLUSTER_CONF_FILE)
89 @locking.ssynchronized(_config_lock, shared=1)
90 def GenerateMAC(self):
91 """Generate a MAC for an instance.
93 This should check the current instances for duplicates.
96 prefix = self._config_data.cluster.mac_prefix
97 all_macs = self._AllMACs()
100 byte1 = random.randrange(0, 256)
101 byte2 = random.randrange(0, 256)
102 byte3 = random.randrange(0, 256)
103 mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
104 if mac not in all_macs:
108 raise errors.ConfigurationError("Can't generate unique MAC")
111 @locking.ssynchronized(_config_lock, shared=1)
112 def IsMacInUse(self, mac):
113 """Predicate: check if the specified MAC is in use in the Ganeti cluster.
115 This only checks instances managed by this cluster, it does not
116 check for potential collisions elsewhere.
119 all_macs = self._AllMACs()
120 return mac in all_macs
122 @locking.ssynchronized(_config_lock, shared=1)
123 def GenerateDRBDSecret(self):
124 """Generate a DRBD secret.
126 This checks the current disks for duplicates.
129 all_secrets = self._AllDRBDSecrets()
132 secret = utils.GenerateSecret()
133 if secret not in all_secrets:
137 raise errors.ConfigurationError("Can't generate unique DRBD secret")
140 def _ComputeAllLVs(self):
141 """Compute the list of all LVs.
145 for instance in self._config_data.instances.values():
146 node_data = instance.MapLVsByNode()
147 for lv_list in node_data.values():
148 lvnames.update(lv_list)
151 @locking.ssynchronized(_config_lock, shared=1)
152 def GenerateUniqueID(self, exceptions=None):
153 """Generate an unique disk name.
155 This checks the current node, instances and disk names for
159 - exceptions: a list with some other names which should be checked
160 for uniqueness (used for example when you want to get
161 more than one id at one time without adding each one in
162 turn to the config file
164 Returns: the unique id as a string
168 existing.update(self._temporary_ids)
169 existing.update(self._ComputeAllLVs())
170 existing.update(self._config_data.instances.keys())
171 existing.update(self._config_data.nodes.keys())
172 if exceptions is not None:
173 existing.update(exceptions)
176 unique_id = utils.NewUUID()
177 if unique_id not in existing and unique_id is not None:
180 raise errors.ConfigurationError("Not able generate an unique ID"
181 " (last tried ID: %s" % unique_id)
182 self._temporary_ids.add(unique_id)
186 """Return all MACs present in the config.
190 for instance in self._config_data.instances.values():
191 for nic in instance.nics:
192 result.append(nic.mac)
196 def _AllDRBDSecrets(self):
197 """Return all DRBD secrets present in the config.
200 def helper(disk, result):
201 """Recursively gather secrets from this disk."""
202 if disk.dev_type == constants.DT_DRBD8:
203 result.append(disk.logical_id[5])
205 for child in disk.children:
206 helper(child, result)
209 for instance in self._config_data.instances.values():
210 for disk in instance.disks:
215 @locking.ssynchronized(_config_lock, shared=1)
216 def VerifyConfig(self):
217 """Stub verify function.
222 data = self._config_data
223 for instance_name in data.instances:
224 instance = data.instances[instance_name]
225 if instance.primary_node not in data.nodes:
226 result.append("instance '%s' has invalid primary node '%s'" %
227 (instance_name, instance.primary_node))
228 for snode in instance.secondary_nodes:
229 if snode not in data.nodes:
230 result.append("instance '%s' has invalid secondary node '%s'" %
231 (instance_name, snode))
232 for idx, nic in enumerate(instance.nics):
233 if nic.mac in seen_macs:
234 result.append("instance '%s' has NIC %d mac %s duplicate" %
235 (instance_name, idx, nic.mac))
237 seen_macs.append(nic.mac)
239 # gather the drbd ports for duplicate checks
240 for dsk in instance.disks:
241 if dsk.dev_type in constants.LDS_DRBD:
242 tcp_port = dsk.logical_id[2]
243 if tcp_port not in ports:
245 ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
246 # gather network port reservation
247 net_port = getattr(instance, "network_port", None)
248 if net_port is not None:
249 if net_port not in ports:
251 ports[net_port].append((instance.name, "network port"))
253 # cluster-wide pool of free ports
254 for free_port in self._config_data.cluster.tcpudp_port_pool:
255 if free_port not in ports:
256 ports[free_port] = []
257 ports[free_port].append(("cluster", "port marked as free"))
259 # compute tcp/udp duplicate ports
265 txt = ", ".join(["%s/%s" % val for val in pdata])
266 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
268 # highest used tcp port check
270 if keys[-1] > self._config_data.cluster.highest_used_port:
271 result.append("Highest used port mismatch, saved %s, computed %s" %
272 (self._config_data.cluster.highest_used_port,
277 def _UnlockedSetDiskID(self, disk, node_name):
278 """Convert the unique ID to the ID needed on the target nodes.
280 This is used only for drbd, which needs ip/port configuration.
282 The routine descends down and updates its children also, because
283 this helps when the only the top device is passed to the remote
286 This function is for internal use, when the config lock is already held.
290 for child in disk.children:
291 self._UnlockedSetDiskID(child, node_name)
293 if disk.logical_id is None and disk.physical_id is not None:
295 if disk.dev_type == constants.LD_DRBD8:
296 pnode, snode, port, pminor, sminor, secret = disk.logical_id
297 if node_name not in (pnode, snode):
298 raise errors.ConfigurationError("DRBD device not knowing node %s" %
300 pnode_info = self._UnlockedGetNodeInfo(pnode)
301 snode_info = self._UnlockedGetNodeInfo(snode)
302 if pnode_info is None or snode_info is None:
303 raise errors.ConfigurationError("Can't find primary or secondary node"
304 " for %s" % str(disk))
305 p_data = (pnode_info.secondary_ip, port)
306 s_data = (snode_info.secondary_ip, port)
307 if pnode == node_name:
308 disk.physical_id = p_data + s_data + (pminor, secret)
309 else: # it must be secondary, we tested above
310 disk.physical_id = s_data + p_data + (sminor, secret)
312 disk.physical_id = disk.logical_id
315 @locking.ssynchronized(_config_lock)
316 def SetDiskID(self, disk, node_name):
317 """Convert the unique ID to the ID needed on the target nodes.
319 This is used only for drbd, which needs ip/port configuration.
321 The routine descends down and updates its children also, because
322 this helps when the only the top device is passed to the remote
326 return self._UnlockedSetDiskID(disk, node_name)
328 @locking.ssynchronized(_config_lock)
329 def AddTcpUdpPort(self, port):
330 """Adds a new port to the available port pool.
333 if not isinstance(port, int):
334 raise errors.ProgrammerError("Invalid type passed for port")
336 self._config_data.cluster.tcpudp_port_pool.add(port)
339 @locking.ssynchronized(_config_lock, shared=1)
340 def GetPortList(self):
341 """Returns a copy of the current port list.
344 return self._config_data.cluster.tcpudp_port_pool.copy()
346 @locking.ssynchronized(_config_lock)
347 def AllocatePort(self):
350 The port will be taken from the available port pool or from the
351 default port range (and in this case we increase
355 # If there are TCP/IP ports configured, we use them first.
356 if self._config_data.cluster.tcpudp_port_pool:
357 port = self._config_data.cluster.tcpudp_port_pool.pop()
359 port = self._config_data.cluster.highest_used_port + 1
360 if port >= constants.LAST_DRBD_PORT:
361 raise errors.ConfigurationError("The highest used port is greater"
362 " than %s. Aborting." %
363 constants.LAST_DRBD_PORT)
364 self._config_data.cluster.highest_used_port = port
369 def _ComputeDRBDMap(self, instance):
370 """Compute the used DRBD minor/nodes.
372 Return: dictionary of node_name: dict of minor: instance_name. The
373 returned dict will have all the nodes in it (even if with an empty
377 def _AppendUsedPorts(instance_name, disk, used):
378 if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
379 nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
380 for node, port in ((nodeA, minorA), (nodeB, minorB)):
381 assert node in used, "Instance node not found in node list"
382 if port in used[node]:
383 raise errors.ProgrammerError("DRBD minor already used:"
385 (node, port, instance_name,
388 used[node][port] = instance_name
390 for child in disk.children:
391 _AppendUsedPorts(instance_name, child, used)
393 my_dict = dict((node, {}) for node in self._config_data.nodes)
394 for (node, minor), instance in self._temporary_drbds.iteritems():
395 my_dict[node][minor] = instance
396 for instance in self._config_data.instances.itervalues():
397 for disk in instance.disks:
398 _AppendUsedPorts(instance.name, disk, my_dict)
401 @locking.ssynchronized(_config_lock)
402 def AllocateDRBDMinor(self, nodes, instance):
403 """Allocate a drbd minor.
405 The free minor will be automatically computed from the existing
406 devices. A node can be given multiple times in order to allocate
407 multiple minors. The result is the list of minors, in the same
408 order as the passed nodes.
411 d_map = self._ComputeDRBDMap(instance)
416 # no minors used, we can start at 0
419 self._temporary_drbds[(nname, 0)] = instance
423 ffree = utils.FirstFree(keys)
425 # return the next minor
426 # TODO: implement high-limit check
431 ndata[minor] = instance
432 assert (nname, minor) not in self._temporary_drbds, \
433 "Attempt to reuse reserved DRBD minor"
434 self._temporary_drbds[(nname, minor)] = instance
435 logging.debug("Request to allocate drbd minors, input: %s, returning %s",
439 @locking.ssynchronized(_config_lock)
440 def ReleaseDRBDMinors(self, instance):
441 """Release temporary drbd minors allocated for a given instance.
443 This should be called on both the error paths and on the success
444 paths (after the instance has been added or updated).
446 @type instance: string
447 @param instance: the instance for which temporary minors should be
451 for key, name in self._temporary_drbds.items():
453 del self._temporary_drbds[key]
455 @locking.ssynchronized(_config_lock, shared=1)
456 def GetConfigVersion(self):
457 """Get the configuration version.
459 @return: Config version
462 return self._config_data.version
464 @locking.ssynchronized(_config_lock, shared=1)
465 def GetClusterName(self):
468 @return: Cluster name
471 return self._config_data.cluster.cluster_name
473 @locking.ssynchronized(_config_lock, shared=1)
474 def GetMasterNode(self):
475 """Get the hostname of the master node for this cluster.
477 @return: Master hostname
480 return self._config_data.cluster.master_node
482 @locking.ssynchronized(_config_lock, shared=1)
483 def GetMasterIP(self):
484 """Get the IP of the master node for this cluster.
489 return self._config_data.cluster.master_ip
491 @locking.ssynchronized(_config_lock, shared=1)
492 def GetMasterNetdev(self):
493 """Get the master network device for this cluster.
496 return self._config_data.cluster.master_netdev
498 @locking.ssynchronized(_config_lock, shared=1)
499 def GetFileStorageDir(self):
500 """Get the file storage dir for this cluster.
503 return self._config_data.cluster.file_storage_dir
505 @locking.ssynchronized(_config_lock, shared=1)
506 def GetHypervisorType(self):
507 """Get the hypervisor type for this cluster.
510 return self._config_data.cluster.default_hypervisor
512 @locking.ssynchronized(_config_lock, shared=1)
513 def GetHostKey(self):
514 """Return the rsa hostkey from the config.
520 return self._config_data.cluster.rsahostkeypub
522 @locking.ssynchronized(_config_lock)
523 def AddInstance(self, instance):
524 """Add an instance to the config.
526 This should be used after creating a new instance.
529 instance: the instance object
531 if not isinstance(instance, objects.Instance):
532 raise errors.ProgrammerError("Invalid type passed to AddInstance")
534 if instance.disk_template != constants.DT_DISKLESS:
535 all_lvs = instance.MapLVsByNode()
536 logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
538 instance.serial_no = 1
539 self._config_data.instances[instance.name] = instance
542 def _SetInstanceStatus(self, instance_name, status):
543 """Set the instance's status to a given value.
546 if status not in ("up", "down"):
547 raise errors.ProgrammerError("Invalid status '%s' passed to"
548 " ConfigWriter._SetInstanceStatus()" %
551 if instance_name not in self._config_data.instances:
552 raise errors.ConfigurationError("Unknown instance '%s'" %
554 instance = self._config_data.instances[instance_name]
555 if instance.status != status:
556 instance.status = status
557 instance.serial_no += 1
560 @locking.ssynchronized(_config_lock)
561 def MarkInstanceUp(self, instance_name):
562 """Mark the instance status to up in the config.
565 self._SetInstanceStatus(instance_name, "up")
567 @locking.ssynchronized(_config_lock)
568 def RemoveInstance(self, instance_name):
569 """Remove the instance from the configuration.
572 if instance_name not in self._config_data.instances:
573 raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
574 del self._config_data.instances[instance_name]
577 @locking.ssynchronized(_config_lock)
578 def RenameInstance(self, old_name, new_name):
579 """Rename an instance.
581 This needs to be done in ConfigWriter and not by RemoveInstance
582 combined with AddInstance as only we can guarantee an atomic
586 if old_name not in self._config_data.instances:
587 raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
588 inst = self._config_data.instances[old_name]
589 del self._config_data.instances[old_name]
592 for disk in inst.disks:
593 if disk.dev_type == constants.LD_FILE:
594 # rename the file paths in logical and physical id
595 file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
596 disk.physical_id = disk.logical_id = (disk.logical_id[0],
597 os.path.join(file_storage_dir,
601 self._config_data.instances[inst.name] = inst
604 @locking.ssynchronized(_config_lock)
605 def MarkInstanceDown(self, instance_name):
606 """Mark the status of an instance to down in the configuration.
609 self._SetInstanceStatus(instance_name, "down")
611 def _UnlockedGetInstanceList(self):
612 """Get the list of instances.
614 This function is for internal use, when the config lock is already held.
617 return self._config_data.instances.keys()
619 @locking.ssynchronized(_config_lock, shared=1)
620 def GetInstanceList(self):
621 """Get the list of instances.
624 array of instances, ex. ['instance2.example.com','instance1.example.com']
625 these contains all the instances, also the ones in Admin_down state
628 return self._UnlockedGetInstanceList()
630 @locking.ssynchronized(_config_lock, shared=1)
631 def ExpandInstanceName(self, short_name):
632 """Attempt to expand an incomplete instance name.
635 return utils.MatchNameComponent(short_name,
636 self._config_data.instances.keys())
638 def _UnlockedGetInstanceInfo(self, instance_name):
639 """Returns informations about an instance.
641 This function is for internal use, when the config lock is already held.
644 if instance_name not in self._config_data.instances:
647 return self._config_data.instances[instance_name]
649 @locking.ssynchronized(_config_lock, shared=1)
650 def GetInstanceInfo(self, instance_name):
651 """Returns informations about an instance.
653 It takes the information from the configuration file. Other informations of
654 an instance are taken from the live systems.
657 instance: name of the instance, ex instance1.example.com
663 return self._UnlockedGetInstanceInfo(instance_name)
665 @locking.ssynchronized(_config_lock, shared=1)
666 def GetAllInstancesInfo(self):
667 """Get the configuration of all instances.
670 @returns: dict of (instance, instance_info), where instance_info is what
671 would GetInstanceInfo return for the node
674 my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
675 for instance in self._UnlockedGetInstanceList()])
678 @locking.ssynchronized(_config_lock)
679 def AddNode(self, node):
680 """Add a node to the configuration.
683 node: an object.Node instance
686 logging.info("Adding node %s to configuration" % node.name)
689 self._config_data.nodes[node.name] = node
690 self._config_data.cluster.serial_no += 1
693 @locking.ssynchronized(_config_lock)
694 def RemoveNode(self, node_name):
695 """Remove a node from the configuration.
698 logging.info("Removing node %s from configuration" % node_name)
700 if node_name not in self._config_data.nodes:
701 raise errors.ConfigurationError("Unknown node '%s'" % node_name)
703 del self._config_data.nodes[node_name]
704 self._config_data.cluster.serial_no += 1
707 @locking.ssynchronized(_config_lock, shared=1)
708 def ExpandNodeName(self, short_name):
709 """Attempt to expand an incomplete instance name.
712 return utils.MatchNameComponent(short_name,
713 self._config_data.nodes.keys())
715 def _UnlockedGetNodeInfo(self, node_name):
716 """Get the configuration of a node, as stored in the config.
718 This function is for internal use, when the config lock is already held.
720 Args: node: nodename (tuple) of the node
722 Returns: the node object
725 if node_name not in self._config_data.nodes:
728 return self._config_data.nodes[node_name]
731 @locking.ssynchronized(_config_lock, shared=1)
732 def GetNodeInfo(self, node_name):
733 """Get the configuration of a node, as stored in the config.
735 Args: node: nodename (tuple) of the node
737 Returns: the node object
740 return self._UnlockedGetNodeInfo(node_name)
742 def _UnlockedGetNodeList(self):
743 """Return the list of nodes which are in the configuration.
745 This function is for internal use, when the config lock is already held.
748 return self._config_data.nodes.keys()
751 @locking.ssynchronized(_config_lock, shared=1)
752 def GetNodeList(self):
753 """Return the list of nodes which are in the configuration.
756 return self._UnlockedGetNodeList()
758 @locking.ssynchronized(_config_lock, shared=1)
759 def GetAllNodesInfo(self):
760 """Get the configuration of all nodes.
763 @returns: dict of (node, node_info), where node_info is what
764 would GetNodeInfo return for the node
767 my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
768 for node in self._UnlockedGetNodeList()])
771 def _BumpSerialNo(self):
772 """Bump up the serial number of the config.
775 self._config_data.serial_no += 1
777 def _OpenConfig(self):
778 """Read the config data from disk.
780 In case we already have configuration data and the config file has
781 the same mtime as when we read it, we skip the parsing of the
782 file, since de-serialisation could be slow.
785 f = open(self._cfg_file, 'r')
788 data = objects.ConfigData.FromDict(serializer.Load(f.read()))
789 except Exception, err:
790 raise errors.ConfigurationError(err)
794 # Make sure the configuration has the right version
795 _ValidateConfig(data)
797 if (not hasattr(data, 'cluster') or
798 not hasattr(data.cluster, 'rsahostkeypub')):
799 raise errors.ConfigurationError("Incomplete configuration"
800 " (missing cluster.rsahostkeypub)")
801 self._config_data = data
802 # init the last serial as -1 so that the next write will cause
804 self._last_cluster_serial = -1
806 def _DistributeConfig(self):
807 """Distribute the configuration to the other nodes.
809 Currently, this only copies the configuration file. In the future,
810 it could be used to encapsulate the 2/3-phase update mechanism.
819 myhostname = self._my_hostname
820 # we can skip checking whether _UnlockedGetNodeInfo returns None
821 # since the node list comes from _UnlocketGetNodeList, and we are
822 # called with the lock held, so no modifications should take place
824 for node_name in self._UnlockedGetNodeList():
825 if node_name == myhostname:
827 node_info = self._UnlockedGetNodeInfo(node_name)
828 if not node_info.master_candidate:
830 node_list.append(node_info.name)
831 addr_list.append(node_info.primary_ip)
833 result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
834 address_list=addr_list)
835 for node in node_list:
837 logging.error("copy of file %s to node %s failed",
838 self._cfg_file, node)
842 def _WriteConfig(self, destination=None):
843 """Write the configuration data to persistent storage.
846 if destination is None:
847 destination = self._cfg_file
849 txt = serializer.Dump(self._config_data.ToDict())
850 dir_name, file_name = os.path.split(destination)
851 fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
852 f = os.fdopen(fd, 'w')
858 # we don't need to do os.close(fd) as f.close() did it
859 os.rename(name, destination)
860 self.write_count += 1
862 # and redistribute the config file to master candidates
863 self._DistributeConfig()
865 # Write ssconf files on all nodes (including locally)
866 if self._last_cluster_serial < self._config_data.cluster.serial_no:
867 if not self._offline:
868 rpc.RpcRunner.call_write_ssconf_files(self._UnlockedGetNodeList(),
869 self._UnlockedGetSsconfValues())
870 self._last_cluster_serial = self._config_data.cluster.serial_no
872 def _UnlockedGetSsconfValues(self):
873 """Return the values needed by ssconf.
876 @return: a dictionary with keys the ssconf names and values their
880 node_list = utils.NiceSort(self._UnlockedGetNodeList())
881 mc_list = [self._UnlockedGetNodeInfo(name) for name in node_list]
882 mc_list = [node.name for node in mc_list if node.master_candidate]
883 node_list = "\n".join(node_list)
884 mc_list = "\n".join(mc_list)
886 cluster = self._config_data.cluster
888 constants.SS_CLUSTER_NAME: cluster.cluster_name,
889 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
890 constants.SS_MASTER_CANDIDATES: mc_list,
891 constants.SS_MASTER_IP: cluster.master_ip,
892 constants.SS_MASTER_NETDEV: cluster.master_netdev,
893 constants.SS_MASTER_NODE: cluster.master_node,
894 constants.SS_NODE_LIST: node_list,
897 @locking.ssynchronized(_config_lock)
898 def InitConfig(self, version, cluster_config, master_node_config):
899 """Create the initial cluster configuration.
901 It will contain the current node, which will also be the master
902 node, and no instances.
905 @param version: Configuration version
906 @type cluster_config: objects.Cluster
907 @param cluster_config: Cluster configuration
908 @type master_node_config: objects.Node
909 @param master_node_config: Master node configuration
913 master_node_config.name: master_node_config,
916 self._config_data = objects.ConfigData(version=version,
917 cluster=cluster_config,
923 @locking.ssynchronized(_config_lock, shared=1)
925 """Return the volume group name.
928 return self._config_data.cluster.volume_group_name
930 @locking.ssynchronized(_config_lock)
931 def SetVGName(self, vg_name):
932 """Set the volume group name.
935 self._config_data.cluster.volume_group_name = vg_name
936 self._config_data.cluster.serial_no += 1
939 @locking.ssynchronized(_config_lock, shared=1)
940 def GetDefBridge(self):
941 """Return the default bridge.
944 return self._config_data.cluster.default_bridge
946 @locking.ssynchronized(_config_lock, shared=1)
947 def GetMACPrefix(self):
948 """Return the mac prefix.
951 return self._config_data.cluster.mac_prefix
953 @locking.ssynchronized(_config_lock, shared=1)
954 def GetClusterInfo(self):
955 """Returns informations about the cluster
961 return self._config_data.cluster
963 @locking.ssynchronized(_config_lock)
964 def Update(self, target):
965 """Notify function to be called after updates.
967 This function must be called when an object (as returned by
968 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
969 caller wants the modifications saved to the backing store. Note
970 that all modified objects will be saved, but the target argument
971 is the one the caller wants to ensure that it's saved.
974 if self._config_data is None:
975 raise errors.ProgrammerError("Configuration file not read,"
977 update_serial = False
978 if isinstance(target, objects.Cluster):
979 test = target == self._config_data.cluster
980 elif isinstance(target, objects.Node):
981 test = target in self._config_data.nodes.values()
983 elif isinstance(target, objects.Instance):
984 test = target in self._config_data.instances.values()
986 raise errors.ProgrammerError("Invalid object type (%s) passed to"
987 " ConfigWriter.Update" % type(target))
989 raise errors.ConfigurationError("Configuration updated since object"
990 " has been read or unknown object")
991 target.serial_no += 1
994 # for node updates, we need to increase the cluster serial too
995 self._config_data.cluster.serial_no += 1