4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""
34 # pylint: disable-msg=R0904
35 # R0904: Too many public methods
import os
import random
import logging
import time

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime
54 _config_lock = locking.SharedLock("ConfigWriter")
56 # job id used for resource management at config upgrade time
57 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
def _ValidateConfig(data):
  """Verifies that a configuration objects looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigVersionMismatch: if the version differs from what
      this code expects

  """
  expected = constants.CONFIG_VERSION
  found = data.version
  if found != expected:
    raise errors.ConfigVersionMismatch(expected, found)
class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    # maps execution-context id -> set of resources it holds
    self._ec_reserved = {}

  def Reserved(self, resource):
    """Tell whether a resource is reserved by any execution context."""
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    """Reserve a resource for the given execution context.

    @raise errors.ReservationError: if the resource is already reserved

    """
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    """Drop all reservations held by one execution context."""
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    """Return the set of all currently reserved resources."""
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    @param existing: resources already in use, to be avoided
    @param generate_one_fn: callable producing one candidate resource
    @param ec_id: execution context id that will hold the reservation
    @return: the newly generated (and reserved) resource

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      # BUG FIX: the retry counter was never decremented, so a generator
      # that keeps producing collisions looped forever instead of raising
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource
"""The interface to the cluster configuration.

@ivar _temporary_lvs: reservation manager for temporary LVs
@ivar _all_rms: a list of all temporary reservation managers

"""
def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
             accept_foreign=False):
  """Initialize the configuration writer.

  @param cfg_file: config file path; if None, the cluster-wide default
      (constants.CLUSTER_CONF_FILE) is used
  @param offline: whether we operate without distributing changes
  @param _getents: entity-resolution callable, overridable for tests
  @param accept_foreign: whether to accept a configuration belonging to
      another cluster

  """
  self.write_count = 0
  self._lock = _config_lock
  self._config_data = None
  self._offline = offline
  # FIX: the two alternative assignments to _cfg_file had lost their
  # if/else — fall back to the cluster default only when no explicit
  # file was given
  if cfg_file is None:
    self._cfg_file = constants.CLUSTER_CONF_FILE
  else:
    self._cfg_file = cfg_file
  self._getents = _getents
  self._temporary_ids = TemporaryReservationManager()
  self._temporary_drbds = {}
  self._temporary_macs = TemporaryReservationManager()
  self._temporary_secrets = TemporaryReservationManager()
  self._temporary_lvs = TemporaryReservationManager()
  self._all_rms = [self._temporary_ids, self._temporary_macs,
                   self._temporary_secrets, self._temporary_lvs]
  # Note: in order to prevent errors when resolving our name in
  # _DistributeConfig, we compute it here once and reuse it; it's
  # better to raise an error before starting to modify the config
  # file than after it was modified
  self._my_hostname = netutils.Hostname.GetSysName()
  self._last_cluster_serial = -1
  self._OpenConfig(accept_foreign)
# this method needs to be static, so that we can call it on the class
@staticmethod
def IsCluster():
  """Check if the cluster is configured.

  @rtype: boolean
  @return: True if the configuration file exists on disk

  """
  return os.path.exists(constants.CLUSTER_CONF_FILE)
def _GenerateOneMAC(self):
  """Generate one mac address

  @rtype: string
  @return: a random MAC built from the cluster's configured prefix

  """
  prefix = self._config_data.cluster.mac_prefix
  byte1 = random.randrange(0, 256)
  byte2 = random.randrange(0, 256)
  byte3 = random.randrange(0, 256)
  mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
  # FIX: the generated address was computed but never returned
  return mac
@locking.ssynchronized(_config_lock, shared=1)
def GetNdParams(self, node):
  """Get the node params populated with cluster defaults.

  @type node: L{objects.Node}
  @param node: The node we want to know the params for
  @return: A dict with the filled in node params

  """
  # look up the node's group first so group-level overrides apply too
  nodegroup = self._UnlockedGetNodeGroup(node.group)
  return self._config_data.cluster.FillND(node, nodegroup)
@locking.ssynchronized(_config_lock, shared=1)
def GenerateMAC(self, ec_id):
  """Generate a MAC for an instance.

  This should check the current instances for duplicates.

  """
  existing = self._AllMACs()
  # FIX: MAC reservations must live in the MAC reservation manager;
  # using _temporary_ids meant MACs reserved via ReserveMAC (which uses
  # _temporary_macs) could still be handed out here
  return self._temporary_macs.Generate(existing, self._GenerateOneMAC, ec_id)
@locking.ssynchronized(_config_lock, shared=1)
def ReserveMAC(self, mac, ec_id):
  """Reserve a MAC for an instance.

  This only checks instances managed by this cluster, it does not
  check for potential collisions elsewhere.

  """
  all_macs = self._AllMACs()
  if mac in all_macs:
    raise errors.ReservationError("mac already in use")
  else:
    # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
    # the arguments were swapped
    self._temporary_macs.Reserve(ec_id, mac)
@locking.ssynchronized(_config_lock, shared=1)
def ReserveLV(self, lv_name, ec_id):
  """Reserve an VG/LV pair for an instance.

  @type lv_name: string
  @param lv_name: the logical volume name to reserve

  """
  all_lvs = self._AllLVs()
  if lv_name in all_lvs:
    raise errors.ReservationError("LV already in use")
  else:
    # FIX: TemporaryReservationManager.Reserve takes (ec_id, resource);
    # the arguments were swapped
    self._temporary_lvs.Reserve(ec_id, lv_name)
@locking.ssynchronized(_config_lock, shared=1)
def GenerateDRBDSecret(self, ec_id):
  """Generate a DRBD secret.

  This checks the current disks for duplicates.

  """
  # FIX: the trailing ec_id argument had been lost, so the reservation
  # could not be attributed to the requesting execution context
  return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
                                          utils.GenerateSecret,
                                          ec_id)
def _AllLVs(self):
  """Compute the list of all LVs.

  @rtype: set
  @return: the set of all logical volume names used by instances

  """
  lvnames = set()
  for instance in self._config_data.instances.values():
    node_data = instance.MapLVsByNode()
    for lv_list in node_data.values():
      lvnames.update(lv_list)
  return lvnames
def _AllIDs(self, include_temporary):
  """Compute the list of all UUIDs and names we have.

  @type include_temporary: boolean
  @param include_temporary: whether to include the _temporary_ids set
  @rtype: set
  @return: a set of IDs

  """
  existing = set()
  if include_temporary:
    existing.update(self._temporary_ids.GetReserved())
  existing.update(self._AllLVs())
  existing.update(self._config_data.instances.keys())
  existing.update(self._config_data.nodes.keys())
  existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
  return existing
def _GenerateUniqueID(self, ec_id):
  """Generate an unique UUID.

  This checks the current node, instances and disk names for
  duplicates.

  @rtype: string
  @return: the unique id

  """
  # temporary ids are excluded here; Generate() below consults them itself
  existing = self._AllIDs(include_temporary=False)
  return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
@locking.ssynchronized(_config_lock, shared=1)
def GenerateUniqueID(self, ec_id):
  """Generate an unique ID.

  This is just a wrapper over the unlocked version.

  @type ec_id: string
  @param ec_id: unique id for the job to reserve the id to

  """
  return self._GenerateUniqueID(ec_id)
def _AllMACs(self):
  """Return all MACs present in the config.

  @rtype: list
  @return: the list of all MACs

  """
  result = []
  for instance in self._config_data.instances.values():
    for nic in instance.nics:
      result.append(nic.mac)
  return result
def _AllDRBDSecrets(self):
  """Return all DRBD secrets present in the config.

  @rtype: list
  @return: the list of all DRBD secrets

  """
  def helper(disk, result):
    """Recursively gather secrets from this disk."""
    if disk.dev_type == constants.DT_DRBD8:
      # the shared secret is the sixth element of a DRBD logical_id
      result.append(disk.logical_id[5])
    if disk.children:
      for child in disk.children:
        helper(child, result)

  result = []
  for instance in self._config_data.instances.values():
    for disk in instance.disks:
      helper(disk, result)
  return result
def _CheckDiskIDs(self, disk, l_ids, p_ids):
  """Compute duplicate disk IDs

  @type disk: L{objects.Disk}
  @param disk: the disk at which to start searching
  @type l_ids: list
  @param l_ids: list of current logical ids
  @type p_ids: list
  @param p_ids: list of current physical ids
  @rtype: list
  @return: a list of error messages

  """
  result = []
  if disk.logical_id is not None:
    if disk.logical_id in l_ids:
      result.append("duplicate logical id %s" % str(disk.logical_id))
    else:
      l_ids.append(disk.logical_id)
  if disk.physical_id is not None:
    if disk.physical_id in p_ids:
      result.append("duplicate physical id %s" % str(disk.physical_id))
    else:
      p_ids.append(disk.physical_id)

  if disk.children:
    for child in disk.children:
      result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
  return result
def _UnlockedVerifyConfig(self):
  """Verify the configuration for consistency.

  @rtype: list
  @return: a list of error messages; a non-empty list signifies
      configuration errors

  """
  # accumulators for the whole pass over the configuration
  result = []
  seen_macs = []
  ports = {}
  data = self._config_data
  seen_lids = []
  seen_pids = []

  # global cluster checks
  if not data.cluster.enabled_hypervisors:
    result.append("enabled hypervisors list doesn't have any entries")
  invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    result.append("enabled hypervisors contains invalid entries: %s" %
                  invalid_hvs)
  missing_hvp = (set(data.cluster.enabled_hypervisors) -
                 set(data.cluster.hvparams.keys()))
  if missing_hvp:
    result.append("hypervisor parameters missing for the enabled"
                  " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))

  if data.cluster.master_node not in data.nodes:
    result.append("cluster has invalid primary node '%s'" %
                  data.cluster.master_node)

  # per-instance checks
  for instance_name in data.instances:
    instance = data.instances[instance_name]
    if instance.name != instance_name:
      result.append("instance '%s' is indexed by wrong name '%s'" %
                    (instance.name, instance_name))
    if instance.primary_node not in data.nodes:
      result.append("instance '%s' has invalid primary node '%s'" %
                    (instance_name, instance.primary_node))
    for snode in instance.secondary_nodes:
      if snode not in data.nodes:
        result.append("instance '%s' has invalid secondary node '%s'" %
                      (instance_name, snode))
    for idx, nic in enumerate(instance.nics):
      if nic.mac in seen_macs:
        result.append("instance '%s' has NIC %d mac %s duplicate" %
                      (instance_name, idx, nic.mac))
      else:
        seen_macs.append(nic.mac)

    # gather the drbd ports for duplicate checks
    for dsk in instance.disks:
      if dsk.dev_type in constants.LDS_DRBD:
        tcp_port = dsk.logical_id[2]
        if tcp_port not in ports:
          ports[tcp_port] = []
        ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
    # gather network port reservation
    net_port = getattr(instance, "network_port", None)
    if net_port is not None:
      if net_port not in ports:
        ports[net_port] = []
      ports[net_port].append((instance.name, "network port"))

    # instance disk verify
    for idx, disk in enumerate(instance.disks):
      result.extend(["instance '%s' disk %d error: %s" %
                     (instance.name, idx, msg) for msg in disk.Verify()])
      result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

  # cluster-wide pool of free ports
  for free_port in data.cluster.tcpudp_port_pool:
    if free_port not in ports:
      ports[free_port] = []
    ports[free_port].append(("cluster", "port marked as free"))

  # compute tcp/udp duplicate ports
  keys = ports.keys()
  keys.sort()
  for pnum in keys:
    pdata = ports[pnum]
    if len(pdata) > 1:
      txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
      result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

  # highest used tcp port check
  if keys:
    if keys[-1] > data.cluster.highest_used_port:
      result.append("Highest used port mismatch, saved %s, computed %s" %
                    (data.cluster.highest_used_port, keys[-1]))

  if not data.nodes[data.cluster.master_node].master_candidate:
    result.append("Master node is not a master candidate")

  # master candidate checks
  mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
  if mc_now < mc_max:
    result.append("Not enough master candidates: actual %d, target %d" %
                  (mc_now, mc_max))

  # node checks
  for node_name, node in data.nodes.items():
    if node.name != node_name:
      result.append("Node '%s' is indexed by wrong name '%s'" %
                    (node.name, node_name))
    if [node.master_candidate, node.drained, node.offline].count(True) > 1:
      result.append("Node %s state is invalid: master_candidate=%s,"
                    " drain=%s, offline=%s" %
                    (node.name, node.master_candidate, node.drained,
                     node.offline))

  # nodegroups checks
  nodegroups_names = set()
  for nodegroup_uuid in data.nodegroups:
    nodegroup = data.nodegroups[nodegroup_uuid]
    if nodegroup.uuid != nodegroup_uuid:
      result.append("nodegroup '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                    % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
    if utils.UUID_RE.match(nodegroup.name.lower()):
      result.append("nodegroup '%s' (uuid: '%s') has uuid-like name" %
                    (nodegroup.name, nodegroup.uuid))
    if nodegroup.name in nodegroups_names:
      result.append("duplicate nodegroup name '%s'" % nodegroup.name)
    else:
      nodegroups_names.add(nodegroup.name)

  # drbd minors check
  _, duplicates = self._UnlockedComputeDRBDMap()
  for node, minor, instance_a, instance_b in duplicates:
    result.append("DRBD minor %d on node %s is assigned twice to instances"
                  " %s and %s" % (minor, node, instance_a, instance_b))

  # IP checks
  default_nicparams = data.cluster.nicparams[constants.PP_DEFAULT]
  ips = {}

  def _AddIpAddress(ip, name):
    # group all owners of each address so duplicates can be reported
    ips.setdefault(ip, []).append(name)

  _AddIpAddress(data.cluster.master_ip, "cluster_ip")

  for node in data.nodes.values():
    _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
    if node.secondary_ip != node.primary_ip:
      _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

  for instance in data.instances.values():
    for idx, nic in enumerate(instance.nics):
      if nic.ip is None:
        continue

      nicparams = objects.FillDict(default_nicparams, nic.nicparams)
      nic_mode = nicparams[constants.NIC_MODE]
      nic_link = nicparams[constants.NIC_LINK]

      if nic_mode == constants.NIC_MODE_BRIDGED:
        link = "bridge:%s" % nic_link
      elif nic_mode == constants.NIC_MODE_ROUTED:
        link = "route:%s" % nic_link
      else:
        raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

      _AddIpAddress("%s/%s" % (link, nic.ip),
                    "instance:%s/nic:%d" % (instance.name, idx))

  for ip, owners in ips.items():
    if len(owners) > 1:
      result.append("IP address %s is used by multiple owners: %s" %
                    (ip, utils.CommaJoin(owners)))

  return result
@locking.ssynchronized(_config_lock, shared=1)
def VerifyConfig(self):
  """Verify function.

  This is just a wrapper over L{_UnlockedVerifyConfig}.

  @rtype: list
  @return: a list of error messages; a non-empty list signifies
      configuration errors

  """
  return self._UnlockedVerifyConfig()
def _UnlockedSetDiskID(self, disk, node_name):
  """Convert the unique ID to the ID needed on the target nodes.

  This is used only for drbd, which needs ip/port configuration.

  The routine descends down and updates its children also, because
  this helps when the only the top device is passed to the remote
  node.

  This function is for internal use, when the config lock is already held.

  """
  if disk.children:
    for child in disk.children:
      self._UnlockedSetDiskID(child, node_name)

  if disk.logical_id is None and disk.physical_id is not None:
    # physical id already set, nothing to convert
    return
  if disk.dev_type == constants.LD_DRBD8:
    pnode, snode, port, pminor, sminor, secret = disk.logical_id
    if node_name not in (pnode, snode):
      raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                      node_name)
    pnode_info = self._UnlockedGetNodeInfo(pnode)
    snode_info = self._UnlockedGetNodeInfo(snode)
    if pnode_info is None or snode_info is None:
      raise errors.ConfigurationError("Can't find primary or secondary node"
                                      " for %s" % str(disk))
    p_data = (pnode_info.secondary_ip, port)
    s_data = (snode_info.secondary_ip, port)
    if pnode == node_name:
      disk.physical_id = p_data + s_data + (pminor, secret)
    else: # it must be secondary, we tested above
      disk.physical_id = s_data + p_data + (sminor, secret)
  else:
    # non-DRBD devices use the logical id directly
    disk.physical_id = disk.logical_id
  return
@locking.ssynchronized(_config_lock)
def SetDiskID(self, disk, node_name):
  """Convert the unique ID to the ID needed on the target nodes.

  This is used only for drbd, which needs ip/port configuration.

  The routine descends down and updates its children also, because
  this helps when the only the top device is passed to the remote
  node.

  """
  return self._UnlockedSetDiskID(disk, node_name)
@locking.ssynchronized(_config_lock)
def AddTcpUdpPort(self, port):
  """Adds a new port to the available port pool.

  @type port: int
  @param port: the port to return to the pool

  """
  if not isinstance(port, int):
    raise errors.ProgrammerError("Invalid type passed for port")

  self._config_data.cluster.tcpudp_port_pool.add(port)
  # FIX: persist the change, otherwise the freed port is lost on restart
  self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
def GetPortList(self):
  """Returns a copy of the current port list.

  """
  # copy so callers cannot mutate the config's own pool
  return self._config_data.cluster.tcpudp_port_pool.copy()
@locking.ssynchronized(_config_lock)
def AllocatePort(self):
  """Allocate a port.

  The port will be taken from the available port pool or from the
  default port range (and in this case we increase
  highest_used_port).

  """
  # If there are TCP/IP ports configured, we use them first.
  if self._config_data.cluster.tcpudp_port_pool:
    port = self._config_data.cluster.tcpudp_port_pool.pop()
  else:
    port = self._config_data.cluster.highest_used_port + 1
    if port >= constants.LAST_DRBD_PORT:
      raise errors.ConfigurationError("The highest used port is greater"
                                      " than %s. Aborting." %
                                      constants.LAST_DRBD_PORT)
    self._config_data.cluster.highest_used_port = port

  self._WriteConfig()
  return port
def _UnlockedComputeDRBDMap(self):
  """Compute the used DRBD minor/nodes.

  @rtype: (dict, list)
  @return: dictionary of node_name: dict of minor: instance_name;
      the returned dict will have all the nodes in it (even if with
      an empty list), and a list of duplicates; if the duplicates
      list is not empty, the configuration is corrupted and its caller
      should raise an exception

  """
  def _AppendUsedPorts(instance_name, disk, used):
    """Collect DRBD minors used by one disk (and its children)."""
    duplicates = []
    if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
      node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
      for node, port in ((node_a, minor_a), (node_b, minor_b)):
        assert node in used, ("Node '%s' of instance '%s' not found"
                              " in node list" % (node, instance_name))
        if port in used[node]:
          duplicates.append((node, port, instance_name, used[node][port]))
        else:
          used[node][port] = instance_name
    if disk.children:
      for child in disk.children:
        duplicates.extend(_AppendUsedPorts(instance_name, child, used))
    return duplicates

  duplicates = []
  my_dict = dict((node, {}) for node in self._config_data.nodes)
  for instance in self._config_data.instances.itervalues():
    for disk in instance.disks:
      duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
  # also merge in the not-yet-committed temporary reservations
  for (node, minor), instance in self._temporary_drbds.iteritems():
    if minor in my_dict[node] and my_dict[node][minor] != instance:
      duplicates.append((node, minor, instance, my_dict[node][minor]))
    else:
      my_dict[node][minor] = instance
  return my_dict, duplicates
@locking.ssynchronized(_config_lock)
def ComputeDRBDMap(self):
  """Compute the used DRBD minor/nodes.

  This is just a wrapper over L{_UnlockedComputeDRBDMap}.

  @return: dictionary of node_name: dict of minor: instance_name;
      the returned dict will have all the nodes in it (even if with
      an empty list)

  """
  d_map, duplicates = self._UnlockedComputeDRBDMap()
  if duplicates:
    raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                    str(duplicates))
  return d_map
@locking.ssynchronized(_config_lock)
def AllocateDRBDMinor(self, nodes, instance):
  """Allocate a drbd minor.

  The free minor will be automatically computed from the existing
  devices. A node can be given multiple times in order to allocate
  multiple minors. The result is the list of minors, in the same
  order as the passed nodes.

  @type instance: string
  @param instance: the instance for which we allocate minors

  """
  assert isinstance(instance, basestring), \
         "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

  d_map, duplicates = self._UnlockedComputeDRBDMap()
  if duplicates:
    raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                    str(duplicates))
  result = []
  for nname in nodes:
    ndata = d_map[nname]
    if not ndata:
      # no minors used, we can start at 0
      result.append(0)
      ndata[0] = instance
      self._temporary_drbds[(nname, 0)] = instance
      continue
    keys = ndata.keys()
    keys.sort()
    ffree = utils.FirstFree(keys)
    if ffree is None:
      # return the next minor
      # TODO: implement high-limit check
      minor = keys[-1] + 1
    else:
      minor = ffree
    # double-check minor against current instances
    assert minor not in d_map[nname], \
           ("Attempt to reuse allocated DRBD minor %d on node %s,"
            " already allocated to instance %s" %
            (minor, nname, d_map[nname][minor]))
    ndata[minor] = instance
    # double-check minor against reservation
    r_key = (nname, minor)
    assert r_key not in self._temporary_drbds, \
           ("Attempt to reuse reserved DRBD minor %d on node %s,"
            " reserved for instance %s" %
            (minor, nname, self._temporary_drbds[r_key]))
    self._temporary_drbds[r_key] = instance
    result.append(minor)
  logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                nodes, result)
  return result
def _UnlockedReleaseDRBDMinors(self, instance):
  """Release temporary drbd minors allocated for a given instance.

  @type instance: string
  @param instance: the instance for which temporary minors should be
                   released

  """
  assert isinstance(instance, basestring), \
         "Invalid argument passed to ReleaseDRBDMinors"
  # iterate over a copy since we delete entries while walking the dict
  for key, name in self._temporary_drbds.items():
    # FIX: the filter on the owning instance had been lost, which would
    # have dropped every reservation instead of only this instance's
    if name == instance:
      del self._temporary_drbds[key]
@locking.ssynchronized(_config_lock)
def ReleaseDRBDMinors(self, instance):
  """Release temporary drbd minors allocated for a given instance.

  This should be called on the error paths, on the success paths
  it's automatically called by the ConfigWriter add and update
  functions.

  This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

  @type instance: string
  @param instance: the instance for which temporary minors should be
                   released

  """
  self._UnlockedReleaseDRBDMinors(instance)
@locking.ssynchronized(_config_lock, shared=1)
def GetConfigVersion(self):
  """Get the configuration version.

  @return: Config version

  """
  return self._config_data.version

@locking.ssynchronized(_config_lock, shared=1)
def GetClusterName(self):
  """Get the cluster name.

  @return: Cluster name

  """
  return self._config_data.cluster.cluster_name

@locking.ssynchronized(_config_lock, shared=1)
def GetMasterNode(self):
  """Get the hostname of the master node for this cluster.

  @return: Master hostname

  """
  return self._config_data.cluster.master_node

@locking.ssynchronized(_config_lock, shared=1)
def GetMasterIP(self):
  """Get the IP of the master node for this cluster.

  @return: Master IP

  """
  return self._config_data.cluster.master_ip

@locking.ssynchronized(_config_lock, shared=1)
def GetMasterNetdev(self):
  """Get the master network device for this cluster.

  """
  return self._config_data.cluster.master_netdev

@locking.ssynchronized(_config_lock, shared=1)
def GetFileStorageDir(self):
  """Get the file storage dir for this cluster.

  """
  return self._config_data.cluster.file_storage_dir

@locking.ssynchronized(_config_lock, shared=1)
def GetHypervisorType(self):
  """Get the hypervisor type for this cluster.

  """
  # the first enabled hypervisor is the cluster default
  return self._config_data.cluster.enabled_hypervisors[0]

@locking.ssynchronized(_config_lock, shared=1)
def GetHostKey(self):
  """Return the rsa hostkey from the config.

  @rtype: string
  @return: the rsa hostkey

  """
  return self._config_data.cluster.rsahostkeypub

@locking.ssynchronized(_config_lock, shared=1)
def GetDefaultIAllocator(self):
  """Get the default instance allocator for this cluster.

  """
  return self._config_data.cluster.default_iallocator

@locking.ssynchronized(_config_lock, shared=1)
def GetPrimaryIPFamily(self):
  """Get cluster primary ip family.

  @return: primary ip family

  """
  return self._config_data.cluster.primary_ip_family
@locking.ssynchronized(_config_lock, shared=1)
def LookupNodeGroup(self, target):
  """Lookup a node group's UUID.

  @type target: string or None
  @param target: group name or UUID or None to look for the default
  @rtype: string
  @return: nodegroup UUID
  @raises errors.OpPrereqError: when the target group cannot be found

  """
  if target is None:
    # without a target, the lookup is unambiguous only with one group
    if len(self._config_data.nodegroups) != 1:
      raise errors.OpPrereqError("More than one nodegroup exists. Target"
                                 " group must be specified explicitely.")
    else:
      return self._config_data.nodegroups.keys()[0]
  if target in self._config_data.nodegroups:
    return target
  # fall back to a lookup by name
  for nodegroup in self._config_data.nodegroups.values():
    if nodegroup.name == target:
      return nodegroup.uuid
  raise errors.OpPrereqError("Nodegroup '%s' not found" % target)
def _UnlockedGetNodeGroup(self, uuid):
  """Lookup a node group.

  @type uuid: string
  @param uuid: group UUID
  @rtype: L{objects.NodeGroup} or None
  @return: nodegroup object, or None if not found

  """
  if uuid not in self._config_data.nodegroups:
    # FIX: the explicit not-found return had been lost
    return None

  return self._config_data.nodegroups[uuid]
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroup(self, uuid):
  """Lookup a node group.

  @type uuid: string
  @param uuid: group UUID
  @rtype: L{objects.NodeGroup} or None
  @return: nodegroup object, or None if not found

  """
  return self._UnlockedGetNodeGroup(uuid)
@locking.ssynchronized(_config_lock, shared=1)
def GetAllNodeGroupsInfo(self):
  """Get the configuration of all node groups.

  """
  # shallow copy, so callers cannot alter the config's own mapping
  return dict(self._config_data.nodegroups)

@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupList(self):
  """Get a list of node groups.

  """
  return self._config_data.nodegroups.keys()
@locking.ssynchronized(_config_lock)
def AddInstance(self, instance, ec_id):
  """Add an instance to the config.

  This should be used after creating a new instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object

  """
  if not isinstance(instance, objects.Instance):
    raise errors.ProgrammerError("Invalid type passed to AddInstance")

  if instance.disk_template != constants.DT_DISKLESS:
    all_lvs = instance.MapLVsByNode()
    logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

  # refuse MACs already used by other instances
  all_macs = self._AllMACs()
  for nic in instance.nics:
    if nic.mac in all_macs:
      raise errors.ConfigurationError("Cannot add instance %s:"
                                      " MAC address '%s' already in use." %
                                      (instance.name, nic.mac))

  self._EnsureUUID(instance, ec_id)

  instance.serial_no = 1
  instance.ctime = instance.mtime = time.time()
  self._config_data.instances[instance.name] = instance
  self._config_data.cluster.serial_no += 1
  self._UnlockedReleaseDRBDMinors(instance.name)
  # FIX: the new instance must be written out, otherwise it is lost
  self._WriteConfig()
def _EnsureUUID(self, item, ec_id):
  """Ensures a given object has a valid UUID.

  @param item: the instance or node to be checked
  @param ec_id: the execution context id for the uuid reservation

  """
  if not item.uuid:
    item.uuid = self._GenerateUniqueID(ec_id)
  elif item.uuid in self._AllIDs(include_temporary=True):
    raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                    " in use" % (item.name, item.uuid))
def _SetInstanceStatus(self, instance_name, status):
  """Set the instance's status to a given value.

  """
  assert isinstance(status, bool), \
         "Invalid status '%s' passed to SetInstanceStatus" % (status,)

  if instance_name not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" %
                                    instance_name)
  instance = self._config_data.instances[instance_name]
  if instance.admin_up != status:
    # only touch serial/mtime (and disk) when something actually changed
    instance.admin_up = status
    instance.serial_no += 1
    instance.mtime = time.time()
    self._WriteConfig()
@locking.ssynchronized(_config_lock)
def MarkInstanceUp(self, instance_name):
  """Mark the instance status to up in the config.

  """
  self._SetInstanceStatus(instance_name, True)
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, instance_name):
  """Remove the instance from the configuration.

  """
  if instance_name not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
  del self._config_data.instances[instance_name]
  self._config_data.cluster.serial_no += 1
  # FIX: persist the removal
  self._WriteConfig()
@locking.ssynchronized(_config_lock)
def RenameInstance(self, old_name, new_name):
  """Rename an instance.

  This needs to be done in ConfigWriter and not by RemoveInstance
  combined with AddInstance as only we can guarantee an atomic
  rename.

  """
  if old_name not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
  inst = self._config_data.instances[old_name]
  del self._config_data.instances[old_name]
  inst.name = new_name

  for disk in inst.disks:
    if disk.dev_type == constants.LD_FILE:
      # rename the file paths in logical and physical id
      file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
      disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                            utils.PathJoin(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

  # Force update of ssconf files
  self._config_data.cluster.serial_no += 1

  self._config_data.instances[inst.name] = inst
  self._WriteConfig()
@locking.ssynchronized(_config_lock)
def MarkInstanceDown(self, instance_name):
  """Mark the status of an instance to down in the configuration.

  """
  self._SetInstanceStatus(instance_name, False)
def _UnlockedGetInstanceList(self):
  """Get the list of instances.

  This function is for internal use, when the config lock is already held.

  """
  return self._config_data.instances.keys()

@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceList(self):
  """Get the list of instances.

  @return: array of instances, ex. ['instance2.example.com',
      'instance1.example.com']

  """
  return self._UnlockedGetInstanceList()
@locking.ssynchronized(_config_lock, shared=1)
def ExpandInstanceName(self, short_name):
  """Attempt to expand an incomplete instance name.

  """
  return utils.MatchNameComponent(short_name,
                                  self._config_data.instances.keys(),
                                  case_sensitive=False)
def _UnlockedGetInstanceInfo(self, instance_name):
  """Returns information about an instance.

  This function is for internal use, when the config lock is already held.

  """
  if instance_name not in self._config_data.instances:
    # FIX: the explicit not-found return had been lost
    return None

  return self._config_data.instances[instance_name]
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfo(self, instance_name):
  """Returns information about an instance.

  It takes the information from the configuration file. Other information of
  an instance are taken from the live systems.

  @param instance_name: name of the instance, e.g.
      I{instance1.example.com}

  @rtype: L{objects.Instance}
  @return: the instance object

  """
  return self._UnlockedGetInstanceInfo(instance_name)
@locking.ssynchronized(_config_lock, shared=1)
def GetAllInstancesInfo(self):
  """Get the configuration of all instances.

  @rtype: dict
  @return: dict of (instance, instance_info), where instance_info is what
      would GetInstanceInfo return for the node

  """
  my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                  for instance in self._UnlockedGetInstanceList()])
  # FIX: the result was built but never returned
  return my_dict
@locking.ssynchronized(_config_lock)
def AddNode(self, node, ec_id):
  """Add a node to the configuration.

  @type node: L{objects.Node}
  @param node: a Node instance

  """
  logging.info("Adding node %s to configuration", node.name)

  self._EnsureUUID(node, ec_id)

  node.serial_no = 1
  node.ctime = node.mtime = time.time()
  self._UnlockedAddNodeToGroup(node.name, node.group)
  self._config_data.nodes[node.name] = node
  self._config_data.cluster.serial_no += 1
  # FIX: persist the new node
  self._WriteConfig()
1135 @locking.ssynchronized(_config_lock)
1136 def RemoveNode(self, node_name):
1137 """Remove a node from the configuration.
1140 logging.info("Removing node %s from configuration", node_name)
1142 if node_name not in self._config_data.nodes:
1143 raise errors.ConfigurationError("Unknown node '%s'" % node_name)
1145 self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
1146 del self._config_data.nodes[node_name]
1147 self._config_data.cluster.serial_no += 1
1150 @locking.ssynchronized(_config_lock, shared=1)
1151 def ExpandNodeName(self, short_name):
1152 """Attempt to expand an incomplete instance name.
1155 return utils.MatchNameComponent(short_name,
1156 self._config_data.nodes.keys(),
1157 case_sensitive=False)
1159 def _UnlockedGetNodeInfo(self, node_name):
1160 """Get the configuration of a node, as stored in the config.
1162 This function is for internal use, when the config lock is already
1165 @param node_name: the node name, e.g. I{node1.example.com}
1167 @rtype: L{objects.Node}
1168 @return: the node object
1171 if node_name not in self._config_data.nodes:
1174 return self._config_data.nodes[node_name]
1176 @locking.ssynchronized(_config_lock, shared=1)
1177 def GetNodeInfo(self, node_name):
1178 """Get the configuration of a node, as stored in the config.
1180 This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
1182 @param node_name: the node name, e.g. I{node1.example.com}
1184 @rtype: L{objects.Node}
1185 @return: the node object
1188 return self._UnlockedGetNodeInfo(node_name)
1190 @locking.ssynchronized(_config_lock, shared=1)
1191 def GetNodeInstances(self, node_name):
1192 """Get the instances of a node, as stored in the config.
1194 @param node_name: the node name, e.g. I{node1.example.com}
1196 @rtype: (list, list)
1197 @return: a tuple with two lists: the primary and the secondary instances
1202 for inst in self._config_data.instances.values():
1203 if inst.primary_node == node_name:
1204 pri.append(inst.name)
1205 if node_name in inst.secondary_nodes:
1206 sec.append(inst.name)
1209 def _UnlockedGetNodeList(self):
1210 """Return the list of nodes which are in the configuration.
1212 This function is for internal use, when the config lock is already
1218 return self._config_data.nodes.keys()
1220 @locking.ssynchronized(_config_lock, shared=1)
1221 def GetNodeList(self):
1222 """Return the list of nodes which are in the configuration.
1225 return self._UnlockedGetNodeList()
1227 def _UnlockedGetOnlineNodeList(self):
1228 """Return the list of nodes which are online.
1231 all_nodes = [self._UnlockedGetNodeInfo(node)
1232 for node in self._UnlockedGetNodeList()]
1233 return [node.name for node in all_nodes if not node.offline]
1235 @locking.ssynchronized(_config_lock, shared=1)
1236 def GetOnlineNodeList(self):
1237 """Return the list of nodes which are online.
1240 return self._UnlockedGetOnlineNodeList()
1242 @locking.ssynchronized(_config_lock, shared=1)
1243 def GetNonVmCapableNodeList(self):
1244 """Return the list of nodes which are not vm capable.
1247 all_nodes = [self._UnlockedGetNodeInfo(node)
1248 for node in self._UnlockedGetNodeList()]
1249 return [node.name for node in all_nodes if not node.vm_capable]
1251 @locking.ssynchronized(_config_lock, shared=1)
1252 def GetAllNodesInfo(self):
1253 """Get the configuration of all nodes.
1256 @return: dict of (node, node_info), where node_info is what
1257 would GetNodeInfo return for the node
1260 my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
1261 for node in self._UnlockedGetNodeList()])
1264 def _UnlockedGetMasterCandidateStats(self, exceptions=None):
1265 """Get the number of current and maximum desired and possible candidates.
1267 @type exceptions: list
1268 @param exceptions: if passed, list of nodes that should be ignored
1270 @return: tuple of (current, desired and possible, possible)
1273 mc_now = mc_should = mc_max = 0
1274 for node in self._config_data.nodes.values():
1275 if exceptions and node.name in exceptions:
1277 if not (node.offline or node.drained) and node.master_capable:
1279 if node.master_candidate:
1281 mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
1282 return (mc_now, mc_should, mc_max)
1284 @locking.ssynchronized(_config_lock, shared=1)
1285 def GetMasterCandidateStats(self, exceptions=None):
1286 """Get the number of current and maximum possible candidates.
1288 This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
1290 @type exceptions: list
1291 @param exceptions: if passed, list of nodes that should be ignored
1293 @return: tuple of (current, max)
1296 return self._UnlockedGetMasterCandidateStats(exceptions)
1298 @locking.ssynchronized(_config_lock)
1299 def MaintainCandidatePool(self, exceptions):
1300 """Try to grow the candidate pool to the desired size.
1302 @type exceptions: list
1303 @param exceptions: if passed, list of nodes that should be ignored
1305 @return: list with the adjusted nodes (L{objects.Node} instances)
1308 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
1311 node_list = self._config_data.nodes.keys()
1312 random.shuffle(node_list)
1313 for name in node_list:
1314 if mc_now >= mc_max:
1316 node = self._config_data.nodes[name]
1317 if (node.master_candidate or node.offline or node.drained or
1318 node.name in exceptions or not node.master_capable):
1320 mod_list.append(node)
1321 node.master_candidate = True
1324 if mc_now != mc_max:
1325 # this should not happen
1326 logging.warning("Warning: MaintainCandidatePool didn't manage to"
1327 " fill the candidate pool (%d/%d)", mc_now, mc_max)
1329 self._config_data.cluster.serial_no += 1
1334 def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
1335 """Add a given node to the specified group.
1338 if nodegroup_uuid not in self._config_data.nodegroups:
1339 # This can happen if a node group gets deleted between its lookup and
1340 # when we're adding the first node to it, since we don't keep a lock in
1341 # the meantime. It's ok though, as we'll fail cleanly if the node group
1342 # is not found anymore.
1343 raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
1344 if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
1345 self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)
1347 def _UnlockedRemoveNodeFromGroup(self, node):
1348 """Remove a given node from its group.
1351 nodegroup = node.group
1352 if nodegroup not in self._config_data.nodegroups:
1353 logging.warning("Warning: node '%s' has unknown node group '%s'"
1354 " (while being removed from it)", node.name, nodegroup)
1355 nodegroup_obj = self._config_data.nodegroups[nodegroup]
1356 if node.name not in nodegroup_obj.members:
1357 logging.warning("Warning: node '%s' not a member of its node group '%s'"
1358 " (while being removed from it)", node.name, nodegroup)
1360 nodegroup_obj.members.remove(node.name)
1362 def _BumpSerialNo(self):
1363 """Bump up the serial number of the config.
1366 self._config_data.serial_no += 1
1367 self._config_data.mtime = time.time()
1369 def _AllUUIDObjects(self):
1370 """Returns all objects with uuid attributes.
1373 return (self._config_data.instances.values() +
1374 self._config_data.nodes.values() +
1375 self._config_data.nodegroups.values() +
1376 [self._config_data.cluster])
1378 def _OpenConfig(self, accept_foreign):
1379 """Read the config data from disk.
1382 raw_data = utils.ReadFile(self._cfg_file)
1385 data = objects.ConfigData.FromDict(serializer.Load(raw_data))
1386 except Exception, err:
1387 raise errors.ConfigurationError(err)
1389 # Make sure the configuration has the right version
1390 _ValidateConfig(data)
1392 if (not hasattr(data, 'cluster') or
1393 not hasattr(data.cluster, 'rsahostkeypub')):
1394 raise errors.ConfigurationError("Incomplete configuration"
1395 " (missing cluster.rsahostkeypub)")
1397 if data.cluster.master_node != self._my_hostname and not accept_foreign:
1398 msg = ("The configuration denotes node %s as master, while my"
1399 " hostname is %s; opening a foreign configuration is only"
1400 " possible in accept_foreign mode" %
1401 (data.cluster.master_node, self._my_hostname))
1402 raise errors.ConfigurationError(msg)
1404 # Upgrade configuration if needed
1405 data.UpgradeConfig()
1407 self._config_data = data
1408 # reset the last serial as -1 so that the next write will cause
1410 self._last_cluster_serial = -1
1412 # And finally run our (custom) config upgrade sequence
1413 self._UpgradeConfig()
1415 self._cfg_id = utils.GetFileID(path=self._cfg_file)
1417 def _UpgradeConfig(self):
1418 """Run upgrade steps that cannot be done purely in the objects.
1420 This is because some data elements need uniqueness across the
1421 whole configuration, etc.
1423 @warning: this function will call L{_WriteConfig()}, but also
1424 L{DropECReservations} so it needs to be called only from a
1425 "safe" place (the constructor). If one wanted to call it with
1426 the lock held, a DropECReservationUnlocked would need to be
1427 created first, to avoid causing deadlock.
1431 for item in self._AllUUIDObjects():
1432 if item.uuid is None:
1433 item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
1435 if not self._config_data.nodegroups:
1436 default_nodegroup_uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
1437 default_nodegroup = objects.NodeGroup(
1438 uuid=default_nodegroup_uuid,
1442 self._config_data.nodegroups[default_nodegroup_uuid] = default_nodegroup
1444 for node in self._config_data.nodes.values():
1446 node.group = self.LookupNodeGroup(None)
1448 # This is technically *not* an upgrade, but needs to be done both when
1449 # nodegroups are being added, and upon normally loading the config,
1450 # because the members list of a node group is discarded upon
1451 # serializing/deserializing the object.
1452 self._UnlockedAddNodeToGroup(node.name, node.group)
1455 # This is ok even if it acquires the internal lock, as _UpgradeConfig is
1456 # only called at config init time, without the lock held
1457 self.DropECReservations(_UPGRADE_CONFIG_JID)
1459 def _DistributeConfig(self, feedback_fn):
1460 """Distribute the configuration to the other nodes.
1462 Currently, this only copies the configuration file. In the future,
1463 it could be used to encapsulate the 2/3-phase update mechanism.
1473 myhostname = self._my_hostname
1474 # we can skip checking whether _UnlockedGetNodeInfo returns None
1475 # since the node list comes from _UnlocketGetNodeList, and we are
1476 # called with the lock held, so no modifications should take place
1478 for node_name in self._UnlockedGetNodeList():
1479 if node_name == myhostname:
1481 node_info = self._UnlockedGetNodeInfo(node_name)
1482 if not node_info.master_candidate:
1484 node_list.append(node_info.name)
1485 addr_list.append(node_info.primary_ip)
1487 result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
1488 address_list=addr_list)
1489 for to_node, to_result in result.items():
1490 msg = to_result.fail_msg
1492 msg = ("Copy of file %s to node %s failed: %s" %
1493 (self._cfg_file, to_node, msg))
1503 def _WriteConfig(self, destination=None, feedback_fn=None):
1504 """Write the configuration data to persistent storage.
1507 assert feedback_fn is None or callable(feedback_fn)
1509 # Warn on config errors, but don't abort the save - the
1510 # configuration has already been modified, and we can't revert;
1511 # the best we can do is to warn the user and save as is, leaving
1512 # recovery to the user
1513 config_errors = self._UnlockedVerifyConfig()
1515 errmsg = ("Configuration data is not consistent: %s" %
1516 (utils.CommaJoin(config_errors)))
1517 logging.critical(errmsg)
1521 if destination is None:
1522 destination = self._cfg_file
1523 self._BumpSerialNo()
1524 txt = serializer.Dump(self._config_data.ToDict())
1526 getents = self._getents()
1528 fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
1529 close=False, gid=getents.confd_gid, mode=0640)
1530 except errors.LockError:
1531 raise errors.ConfigurationError("The configuration file has been"
1532 " modified since the last write, cannot"
1535 self._cfg_id = utils.GetFileID(fd=fd)
1539 self.write_count += 1
1541 # and redistribute the config file to master candidates
1542 self._DistributeConfig(feedback_fn)
1544 # Write ssconf files on all nodes (including locally)
1545 if self._last_cluster_serial < self._config_data.cluster.serial_no:
1546 if not self._offline:
1547 result = rpc.RpcRunner.call_write_ssconf_files(
1548 self._UnlockedGetOnlineNodeList(),
1549 self._UnlockedGetSsconfValues())
1551 for nname, nresu in result.items():
1552 msg = nresu.fail_msg
1554 errmsg = ("Error while uploading ssconf files to"
1555 " node %s: %s" % (nname, msg))
1556 logging.warning(errmsg)
1561 self._last_cluster_serial = self._config_data.cluster.serial_no
1563 def _UnlockedGetSsconfValues(self):
1564 """Return the values needed by ssconf.
1567 @return: a dictionary with keys the ssconf names and values their
1572 instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
1573 node_names = utils.NiceSort(self._UnlockedGetNodeList())
1574 node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
1575 node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
1576 for ninfo in node_info]
1577 node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
1578 for ninfo in node_info]
1580 instance_data = fn(instance_names)
1581 off_data = fn(node.name for node in node_info if node.offline)
1582 on_data = fn(node.name for node in node_info if not node.offline)
1583 mc_data = fn(node.name for node in node_info if node.master_candidate)
1584 mc_ips_data = fn(node.primary_ip for node in node_info
1585 if node.master_candidate)
1586 node_data = fn(node_names)
1587 node_pri_ips_data = fn(node_pri_ips)
1588 node_snd_ips_data = fn(node_snd_ips)
1590 cluster = self._config_data.cluster
1591 cluster_tags = fn(cluster.GetTags())
1593 hypervisor_list = fn(cluster.enabled_hypervisors)
1595 uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
1597 nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
1598 self._config_data.nodegroups.values()]
1599 nodegroups_data = fn(utils.NiceSort(nodegroups))
1602 constants.SS_CLUSTER_NAME: cluster.cluster_name,
1603 constants.SS_CLUSTER_TAGS: cluster_tags,
1604 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
1605 constants.SS_MASTER_CANDIDATES: mc_data,
1606 constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
1607 constants.SS_MASTER_IP: cluster.master_ip,
1608 constants.SS_MASTER_NETDEV: cluster.master_netdev,
1609 constants.SS_MASTER_NODE: cluster.master_node,
1610 constants.SS_NODE_LIST: node_data,
1611 constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
1612 constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
1613 constants.SS_OFFLINE_NODES: off_data,
1614 constants.SS_ONLINE_NODES: on_data,
1615 constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
1616 constants.SS_INSTANCE_LIST: instance_data,
1617 constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
1618 constants.SS_HYPERVISOR_LIST: hypervisor_list,
1619 constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
1620 constants.SS_UID_POOL: uid_pool,
1621 constants.SS_NODEGROUPS: nodegroups_data,
1624 @locking.ssynchronized(_config_lock, shared=1)
1625 def GetSsconfValues(self):
1626 """Wrapper using lock around _UnlockedGetSsconf().
1629 return self._UnlockedGetSsconfValues()
1631 @locking.ssynchronized(_config_lock, shared=1)
1632 def GetVGName(self):
1633 """Return the volume group name.
1636 return self._config_data.cluster.volume_group_name
1638 @locking.ssynchronized(_config_lock)
1639 def SetVGName(self, vg_name):
1640 """Set the volume group name.
1643 self._config_data.cluster.volume_group_name = vg_name
1644 self._config_data.cluster.serial_no += 1
1647 @locking.ssynchronized(_config_lock, shared=1)
1648 def GetDRBDHelper(self):
1649 """Return DRBD usermode helper.
1652 return self._config_data.cluster.drbd_usermode_helper
1654 @locking.ssynchronized(_config_lock)
1655 def SetDRBDHelper(self, drbd_helper):
1656 """Set DRBD usermode helper.
1659 self._config_data.cluster.drbd_usermode_helper = drbd_helper
1660 self._config_data.cluster.serial_no += 1
1663 @locking.ssynchronized(_config_lock, shared=1)
1664 def GetMACPrefix(self):
1665 """Return the mac prefix.
1668 return self._config_data.cluster.mac_prefix
1670 @locking.ssynchronized(_config_lock, shared=1)
1671 def GetClusterInfo(self):
1672 """Returns information about the cluster
1674 @rtype: L{objects.Cluster}
1675 @return: the cluster object
1678 return self._config_data.cluster
1680 @locking.ssynchronized(_config_lock, shared=1)
1681 def HasAnyDiskOfType(self, dev_type):
1682 """Check if in there is at disk of the given type in the configuration.
1685 return self._config_data.HasAnyDiskOfType(dev_type)
1687 @locking.ssynchronized(_config_lock)
1688 def Update(self, target, feedback_fn):
1689 """Notify function to be called after updates.
1691 This function must be called when an object (as returned by
1692 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
1693 caller wants the modifications saved to the backing store. Note
1694 that all modified objects will be saved, but the target argument
1695 is the one the caller wants to ensure that it's saved.
1697 @param target: an instance of either L{objects.Cluster},
1698 L{objects.Node} or L{objects.Instance} which is existing in
1700 @param feedback_fn: Callable feedback function
1703 if self._config_data is None:
1704 raise errors.ProgrammerError("Configuration file not read,"
1706 update_serial = False
1707 if isinstance(target, objects.Cluster):
1708 test = target == self._config_data.cluster
1709 elif isinstance(target, objects.Node):
1710 test = target in self._config_data.nodes.values()
1711 update_serial = True
1712 elif isinstance(target, objects.Instance):
1713 test = target in self._config_data.instances.values()
1715 raise errors.ProgrammerError("Invalid object type (%s) passed to"
1716 " ConfigWriter.Update" % type(target))
1718 raise errors.ConfigurationError("Configuration updated since object"
1719 " has been read or unknown object")
1720 target.serial_no += 1
1721 target.mtime = now = time.time()
1724 # for node updates, we need to increase the cluster serial too
1725 self._config_data.cluster.serial_no += 1
1726 self._config_data.cluster.mtime = now
1728 if isinstance(target, objects.Instance):
1729 self._UnlockedReleaseDRBDMinors(target.name)
1731 self._WriteConfig(feedback_fn=feedback_fn)
1733 @locking.ssynchronized(_config_lock)
1734 def DropECReservations(self, ec_id):
1735 """Drop per-execution-context reservations
1738 for rm in self._all_rms:
1739 rm.DropECReservations(ec_id)