# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Configuration management for Ganeti
24 This module provides the interface to the Ganeti cluster configuration.
26 The configuration data is stored on every node but is updated on the master
27 only. After each update, the master distributes the data to the other nodes.
29 Currently, the data storage format is JSON. YAML was slow and consuming too

import os
import random
import logging
import time

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer


_config_lock = locking.SharedLock()

def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))
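
# A minimal sketch of how _ValidateConfig fits into config loading (it
# mirrors what _OpenConfig below actually does):
#
#   data = objects.ConfigData.FromDict(serializer.Load(raw_data))
#   _ValidateConfig(data)  # raises ConfigurationError on version mismatch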
68 """The interface to the cluster configuration.
71 def __init__(self, cfg_file=None, offline=False):
73 self._lock = _config_lock
74 self._config_data = None
75 self._offline = offline
77 self._cfg_file = constants.CLUSTER_CONF_FILE
79 self._cfg_file = cfg_file
80 self._temporary_ids = set()
81 self._temporary_drbds = {}
82 self._temporary_macs = set()
83 # Note: in order to prevent errors when resolving our name in
84 # _DistributeConfig, we compute it here once and reuse it; it's
85 # better to raise an error before starting to modify the config
86 # file than after it was modified
87 self._my_hostname = utils.HostInfo().name
88 self._last_cluster_serial = -1

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs and mac not in self._temporary_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    self._temporary_macs.add(mac)
    return mac
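
  # Usage sketch (hypothetical values; assumes a ConfigWriter instance `cfg`
  # whose cluster has mac_prefix "aa:00:00"):
  #
  #   mac = cfg.GenerateMAC()  # e.g. "aa:00:00:3f:9a:10"
  #   cfg.IsMacInUse(mac)      # True: the reservation already counts as used
  #
  # The MAC stays reserved in _temporary_macs until AddInstance/Update
  # discards the reservation.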

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    return mac in all_macs or mac in self._temporary_macs

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    all_secrets = self._AllDRBDSecrets()
    retries = 64
    while retries > 0:
      secret = utils.GenerateSecret()
      if secret not in all_secrets:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique DRBD secret")
    return secret
152 """Compute the list of all LVs.
156 for instance in self._config_data.instances.values():
157 node_data = instance.MapLVsByNode()
158 for lv_list in node_data.values():
159 lvnames.update(lv_list)

  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids)
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
    return existing

  def _GenerateUniqueID(self, exceptions=None):
    """Generate a unique UUID.

    This checks the current node, instances and disk names for
    duplicates.

    @param exceptions: a list with some other names which should be
        checked for uniqueness (used for example when you want to get
        more than one id at one time without adding each one in turn
        to the config file)

    @rtype: string
    @return: the unique id

    """
    existing = self._AllIDs(include_temporary=True)
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    @param exceptions: list of names which should be checked for uniqueness

    """
    return self._GenerateUniqueID(exceptions=exceptions)
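
  # Reservation semantics, sketched (assumes a ConfigWriter instance `cfg`;
  # the exception name is hypothetical):
  #
  #   uuid1 = cfg.GenerateUniqueID()
  #   uuid2 = cfg.GenerateUniqueID(exceptions=["some-reserved-name"])
  #
  # Both IDs live in _temporary_ids until _WriteConfig() runs
  # _CleanupTemporaryIDs(), which drops the ones that made it into real
  # objects.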

  def _CleanupTemporaryIDs(self):
    """Cleans up the _temporary_ids structure.

    """
    existing = self._AllIDs(include_temporary=False)
    self._temporary_ids = self._temporary_ids - existing
226 """Return all MACs present in the config.
229 @return: the list of all MACs
233 for instance in self._config_data.instances.values():
234 for nic in instance.nics:
235 result.append(nic.mac)

  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def helper(disk, result):
      """Recursively gather secrets from this disk."""
      if disk.dev_type == constants.DT_DRBD8:
        result.append(disk.logical_id[5])
      if disk.children:
        for child in disk.children:
          helper(child, result)

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        helper(disk, result)
    return result

  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs.

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

  def _UnlockedVerifyConfig(self):
    """Verify the configuration for consistency.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = ", ".join(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node in data.nodes.values():
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # drbd minors check
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    ips = {data.cluster.master_ip: ["cluster_ip"]}

    def _helper(ip, name):
      if ip in ips:
        ips[ip].append(name)
      else:
        ips[ip] = [name]

    for node in data.nodes.values():
      _helper(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _helper(node.secondary_ip, "node:%s/secondary" % node.name)

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, ", ".join(owners)))
    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify the configuration for consistency.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()
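
  # Typical caller pattern, sketched (cluster verification does roughly
  # this):
  #
  #   errs = cfg.VerifyConfig()
  #   if errs:
  #     for msg in errs:
  #       logging.error("config inconsistency: %s", msg)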

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device does not know node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port
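
  # Port lifecycle, sketched (hypothetical port number): AllocatePort() pops
  # from the free pool or extends highest_used_port, and AddTcpUdpPort()
  # returns a port to the pool, e.g. when a DRBD disk is removed:
  #
  #   port = cfg.AllocatePort()  # e.g. 11006
  #   ...
  #   cfg.AddTcpUdpPort(port)    # give it back on the removal path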

  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates

  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list)

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map
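
  # Shape of the returned map, with hypothetical names and minors:
  #
  #   {"node1.example.com": {0: "inst1.example.com",
  #                          1: "inst2.example.com"},
  #    "node2.example.com": {0: "inst1.example.com"}}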

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result
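
  # Allocation sketch for one DRBD disk on a primary/secondary pair
  # (hypothetical names); the minors stay reserved in _temporary_drbds until
  # AddInstance/Update, or until ReleaseDRBDMinors on the error path:
  #
  #   minors = cfg.AllocateDRBDMinor(["node1.example.com",
  #                                   "node2.example.com"], "inst1")
  #   # e.g. minors == [0, 0] on a previously empty map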

  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get the cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the default hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._UnlockedReleaseDRBDMinors(instance.name)
    for nic in instance.nics:
      self._temporary_macs.discard(nic.mac)
    self._WriteConfig()
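
  # End-to-end sketch of the reservation flow (hypothetical objects; the
  # Instance fields are elided):
  #
  #   mac = cfg.GenerateMAC()
  #   inst = objects.Instance(name="inst1.example.com", nics=[...],
  #                           disks=[...], ...)
  #   cfg.AddInstance(inst)  # persists the config and drops the MAC and
  #                          # DRBD minor reservations for this instance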

  def _EnsureUUID(self, item):
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID()
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_up != status:
      instance.admin_up = status
      instance.serial_no += 1
      instance.mtime = time.time()
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance as up in the configuration.

    """
    self._SetInstanceStatus(instance_name, True)

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, False)

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys(),
                                    case_sensitive=False)

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file; other
    information about an instance is taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
        GetInstanceInfo would return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict
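
  # Returned shape, sketched: {"inst1.example.com": <objects.Instance>, ...},
  # i.e. the same objects GetInstanceInfo would return, keyed by name.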

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys(),
                                    case_sensitive=False)

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already
    held.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    @rtype: list

    """
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
        GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum desired and possible candidates.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    mc_now = mc_should = mc_max = 0
    for node in self._config_data.nodes.values():
      if exceptions and node.name in exceptions:
        continue
      if not (node.offline or node.drained):
        mc_max += 1
      if node.master_candidate:
        mc_now += 1
    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
    return (mc_now, mc_should, mc_max)
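
  # Worked example (hypothetical cluster): 5 nodes, 1 offline, 1 drained,
  # 2 current candidates, candidate_pool_size = 10. Then mc_now = 2,
  # mc_max = 3 and mc_should = min(3, 10) = 3, so _UnlockedVerifyConfig
  # reports "Not enough master candidates: actual 2, target 3".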

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored

    @rtype: tuple
    @return: tuple of (current, max)

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)

  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exceptions):
    """Try to grow the candidate pool to the desired size.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored

    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        if (node.master_candidate or node.offline or node.drained or
            node.name in exceptions):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1
    self._config_data.mtime = time.time()

  def _AllUUIDObjects(self):
    """Returns all objects with uuid attributes.

    """
    return (self._config_data.instances.values() +
            self._config_data.nodes.values() +
            [self._config_data.cluster])

  def _OpenConfig(self):
    """Read the config data from disk.

    """
    raw_data = utils.ReadFile(self._cfg_file)

    try:
      data = objects.ConfigData.FromDict(serializer.Load(raw_data))
    except Exception, err:
      raise errors.ConfigurationError(err)

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")

    # Upgrade configuration if needed
    data.UpgradeConfig()

    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1

    # And finally run our (custom) config upgrade sequence
    self._UpgradeConfig()

  def _UpgradeConfig(self):
    """Run upgrade steps that cannot be done purely in the objects.

    This is because some data elements need uniqueness across the
    whole configuration, etc.

    @warning: this function will call L{_WriteConfig()}, so it needs
        to either be called with the lock held or from a safe place
        (the constructor)

    """
    modified = False
    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID()
        modified = True
    if modified:
      self._WriteConfig()

  def _DistributeConfig(self, feedback_fn):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True

    bad = False

    node_list = []
    addr_list = []
    myhostname = self._my_hostname
    # we can skip checking whether _UnlockedGetNodeInfo returns None
    # since the node list comes from _UnlockedGetNodeList, and we are
    # called with the lock held, so no modifications should take place
    # in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == myhostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      node_list.append(node_info.name)
      addr_list.append(node_info.primary_ip)

    result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
                                            address_list=addr_list)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (self._cfg_file, to_node, msg))
        logging.error(msg)

        if feedback_fn:
          feedback_fn(msg)

        bad = True

    return not bad

  def _WriteConfig(self, destination=None, feedback_fn=None):
    """Write the configuration data to persistent storage.

    """
    assert feedback_fn is None or callable(feedback_fn)

    # First, cleanup the _temporary_ids set, if an ID is now in the
    # other objects it should be discarded to prevent unbounded growth
    # of that structure
    self._CleanupTemporaryIDs()

    # Warn on config errors, but don't abort the save - the
    # configuration has already been modified, and we can't revert;
    # the best we can do is to warn the user and save as is, leaving
    # recovery to the user
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Configuration data is not consistent: %s" %
                (", ".join(config_errors)))
      logging.critical(errmsg)
      if feedback_fn:
        feedback_fn(errmsg)

    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())

    utils.WriteFile(destination, data=txt)

    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig(feedback_fn)

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = rpc.RpcRunner.call_write_ssconf_files(
          self._UnlockedGetNodeList(),
          self._UnlockedGetSsconfValues())

        for nname, nresu in result.items():
          msg = nresu.fail_msg
          if msg:
            errmsg = ("Error while uploading ssconf files to"
                      " node %s: %s" % (nname, msg))
            logging.warning(errmsg)

            if feedback_fn:
              feedback_fn(errmsg)

      self._last_cluster_serial = self._config_data.cluster.serial_no

  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
    fn = "\n".join
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_info]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_info]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_info if node.offline)
    on_data = fn(node.name for node in node_info if not node.offline)
    mc_data = fn(node.name for node in node_info if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)

    cluster = self._config_data.cluster
    cluster_tags = fn(cluster.GetTags())
    return {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NODE: cluster.master_node,
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      }
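
  # Value shapes, sketched: every ssconf value is a plain string; list-like
  # values are newline-joined, e.g. (hypothetical names):
  #
  #   {constants.SS_NODE_LIST: "node1.example.com\nnode2.example.com",
  #    constants.SS_MASTER_NODE: "node1.example.com",
  #    ...}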

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the MAC prefix.

    """
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster.

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target, feedback_fn):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which is existing in
        the cluster
    @param feedback_fn: Callable feedback function

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    update_serial = False
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
      update_serial = True
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1
    target.mtime = now = time.time()

    if update_serial:
      # for node updates, we need to increase the cluster serial too
      self._config_data.cluster.serial_no += 1
      self._config_data.cluster.mtime = now

    if isinstance(target, objects.Instance):
      self._UnlockedReleaseDRBDMinors(target.name)
      for nic in target.nics:
        self._temporary_macs.discard(nic.mac)

    self._WriteConfig(feedback_fn=feedback_fn)
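
  # Canonical update pattern, sketched (callers do roughly this while
  # holding the relevant locks):
  #
  #   instance = cfg.GetInstanceInfo("inst1.example.com")
  #   instance.admin_up = False          # mutate the returned object
  #   cfg.Update(instance, feedback_fn)  # bump serials, write, distribute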