X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/cd195419a63977001dcd4fb9dcc0b96bbed13da9..ca83454f36da2226fe84b32f2cce81610f938568:/lib/ssconf.py

diff --git a/lib/ssconf.py b/lib/ssconf.py
index bf273df..fc79422 100644
--- a/lib/ssconf.py
+++ b/lib/ssconf.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -27,212 +27,70 @@ configuration data, which is mostly static and available to all nodes.
 """
 
 import sys
-import re
-import os
+import errno
+import logging
 
+from ganeti import compat
 from ganeti import errors
 from ganeti import constants
 from ganeti import utils
-from ganeti import serializer
-from ganeti import objects
+from ganeti import netutils
+from ganeti import pathutils
 
 
 SSCONF_LOCK_TIMEOUT = 10
 
-RE_VALID_SSCONF_NAME = re.compile(r'^[-_a-z0-9]+$')
-
-
-class SimpleConfigReader(object):
-  """Simple class to read configuration file.
+#: Valid ssconf keys
+_VALID_KEYS = compat.UniqueFrozenset([
+  constants.SS_CLUSTER_NAME,
+  constants.SS_CLUSTER_TAGS,
+  constants.SS_FILE_STORAGE_DIR,
+  constants.SS_SHARED_FILE_STORAGE_DIR,
+  constants.SS_MASTER_CANDIDATES,
+  constants.SS_MASTER_CANDIDATES_IPS,
+  constants.SS_MASTER_IP,
+  constants.SS_MASTER_NETDEV,
+  constants.SS_MASTER_NETMASK,
+  constants.SS_MASTER_NODE,
+  constants.SS_NODE_LIST,
+  constants.SS_NODE_PRIMARY_IPS,
+  constants.SS_NODE_SECONDARY_IPS,
+  constants.SS_OFFLINE_NODES,
+  constants.SS_ONLINE_NODES,
+  constants.SS_PRIMARY_IP_FAMILY,
+  constants.SS_INSTANCE_LIST,
+  constants.SS_RELEASE_VERSION,
+  constants.SS_HYPERVISOR_LIST,
+  constants.SS_MAINTAIN_NODE_HEALTH,
+  constants.SS_UID_POOL,
+  constants.SS_NODEGROUPS,
+  constants.SS_NETWORKS,
+  ])
+
+#: Maximum size for ssconf files
+_MAX_SIZE = 128 * 1024
+
+
+def ReadSsconfFile(filename):
+  """Reads an ssconf file and verifies its size.
+
+  @type filename: string
+  @param filename: Path to file
+  @rtype: string
+  @return: File contents without newlines at the end
+  @raise RuntimeError: When the file size exceeds L{_MAX_SIZE}
 
   """
-  def __init__(self, file_name=constants.CLUSTER_CONF_FILE):
-    """Initializes this class.
-
-    @type file_name: string
-    @param file_name: Configuration file path
-
-    """
-    self._file_name = file_name
-    self._last_inode = None
-    self._last_mtime = None
-    self._last_size = None
-
-    self._config_data = None
-    self._inst_ips_by_link = None
-    self._ip_to_inst_by_link = None
-    self._mc_primary_ips = None
-    self._nodes_primary_ips = None
-
-    # we need a forced reload at class init time, to initialize _last_*
-    self._Load(force=True)
-
-  def _Load(self, force=False):
-    """Loads (or reloads) the config file.
- - @type force: boolean - @param force: whether to force the reload without checking the mtime - @rtype: boolean - @return: boolean value that says whether we reloaded the configuration or - not (because we decided it was already up-to-date) - - """ - try: - cfg_stat = os.stat(self._file_name) - except EnvironmentError, err: - raise errors.ConfigurationError("Cannot stat config file %s: %s" % - (self._file_name, err)) - inode = cfg_stat.st_ino - mtime = cfg_stat.st_mtime - size = cfg_stat.st_size - - if (force or inode != self._last_inode or - mtime > self._last_mtime or - size != self._last_size): - self._last_inode = inode - self._last_mtime = mtime - self._last_size = size - else: - # Don't reload - return False + statcb = utils.FileStatHelper() - try: - self._config_data = serializer.Load(utils.ReadFile(self._file_name)) - except EnvironmentError, err: - raise errors.ConfigurationError("Cannot read config file %s: %s" % - (self._file_name, err)) - except ValueError, err: - raise errors.ConfigurationError("Cannot load config file %s: %s" % - (self._file_name, err)) - - self._ip_to_inst_by_link = {} - self._instances_ips = [] - self._inst_ips_by_link = {} - c_nparams = self._config_data['cluster']['nicparams'][constants.PP_DEFAULT] - for iname in self._config_data['instances']: - instance = self._config_data['instances'][iname] - for nic in instance['nics']: - if 'ip' in nic and nic['ip']: - params = objects.FillDict(c_nparams, nic['nicparams']) - if not params['link'] in self._inst_ips_by_link: - self._inst_ips_by_link[params['link']] = [] - self._ip_to_inst_by_link[params['link']] = {} - self._ip_to_inst_by_link[params['link']][nic['ip']] = iname - self._inst_ips_by_link[params['link']].append(nic['ip']) - - self._nodes_primary_ips = [] - self._mc_primary_ips = [] - for node_name in self._config_data["nodes"]: - node = self._config_data["nodes"][node_name] - self._nodes_primary_ips.append(node["primary_ip"]) - if node["master_candidate"]: - self._mc_primary_ips.append(node["primary_ip"]) - - return True - - # Clients can request a reload of the config file, so we export our internal - # _Load function as Reload. 
- Reload = _Load - - def GetClusterName(self): - return self._config_data["cluster"]["cluster_name"] - - def GetHostKey(self): - return self._config_data["cluster"]["rsahostkeypub"] - - def GetMasterNode(self): - return self._config_data["cluster"]["master_node"] + data = utils.ReadFile(filename, size=_MAX_SIZE, preread=statcb) - def GetMasterIP(self): - return self._config_data["cluster"]["master_ip"] - - def GetMasterNetdev(self): - return self._config_data["cluster"]["master_netdev"] - - def GetFileStorageDir(self): - return self._config_data["cluster"]["file_storage_dir"] - - def GetNodeList(self): - return self._config_data["nodes"].keys() + if statcb.st.st_size > _MAX_SIZE: + msg = ("File '%s' has a size of %s bytes (up to %s allowed)" % + (filename, statcb.st.st_size, _MAX_SIZE)) + raise RuntimeError(msg) - def GetConfigSerialNo(self): - return self._config_data["serial_no"] - - def GetClusterSerialNo(self): - return self._config_data["cluster"]["serial_no"] - - def GetDefaultNicParams(self): - return self._config_data["cluster"]["nicparams"][constants.PP_DEFAULT] - - def GetDefaultNicLink(self): - return self.GetDefaultNicParams()[constants.NIC_LINK] - - def GetNodeStatusFlags(self, node): - """Get a node's status flags - - @type node: string - @param node: node name - @rtype: (bool, bool, bool) - @return: (master_candidate, drained, offline) (or None if no such node) - - """ - if node not in self._config_data["nodes"]: - return None - - master_candidate = self._config_data["nodes"][node]["master_candidate"] - drained = self._config_data["nodes"][node]["drained"] - offline = self._config_data["nodes"][node]["offline"] - return master_candidate, drained, offline - - def GetInstanceByLinkIp(self, ip, link): - if not link: - link = self.GetDefaultNicLink() - if not link in self._ip_to_inst_by_link: - return None - if not ip in self._ip_to_inst_by_link[link]: - return None - return self._ip_to_inst_by_link[link][ip] - - def GetNodePrimaryIp(self, node): - """Get a node's primary ip - - @type node: string - @param node: node name - @rtype: string, or None - @return: node's primary ip, or None if no such node - - """ - if node not in self._config_data["nodes"]: - return None - return self._config_data["nodes"][node]["primary_ip"] - - def GetInstancePrimaryNode(self, instance): - """Get an instance's primary node - - @type instance: string - @param instance: instance name - @rtype: string, or None - @return: primary node, or None if no such instance - - """ - if instance not in self._config_data["instances"]: - return None - return self._config_data["instances"][instance]["primary_node"] - - def GetNodesPrimaryIps(self): - return self._nodes_primary_ips - - def GetMasterCandidatesPrimaryIps(self): - return self._mc_primary_ips - - def GetInstancesIps(self, link): - if not link: - link = self.GetDefaultNicLink() - - if link in self._inst_ips_by_link: - return self._inst_ips_by_link[link] - else: - return [] + return data.rstrip("\n") class SimpleStore(object): @@ -246,44 +104,26 @@ class SimpleStore(object): - keys are restricted to predefined values """ - _SS_FILEPREFIX = "ssconf_" - _VALID_KEYS = ( - constants.SS_CLUSTER_NAME, - constants.SS_CLUSTER_TAGS, - constants.SS_FILE_STORAGE_DIR, - constants.SS_MASTER_CANDIDATES, - constants.SS_MASTER_CANDIDATES_IPS, - constants.SS_MASTER_IP, - constants.SS_MASTER_NETDEV, - constants.SS_MASTER_NODE, - constants.SS_NODE_LIST, - constants.SS_NODE_PRIMARY_IPS, - constants.SS_NODE_SECONDARY_IPS, - constants.SS_OFFLINE_NODES, - 
constants.SS_ONLINE_NODES, - constants.SS_INSTANCE_LIST, - constants.SS_RELEASE_VERSION, - ) - _MAX_SIZE = 131072 - - def __init__(self, cfg_location=None): + def __init__(self, cfg_location=None, _lockfile=pathutils.SSCONF_LOCK_FILE): if cfg_location is None: - self._cfg_dir = constants.DATA_DIR + self._cfg_dir = pathutils.DATA_DIR else: self._cfg_dir = cfg_location + self._lockfile = _lockfile + def KeyToFilename(self, key): """Convert a given key into filename. """ - if key not in self._VALID_KEYS: + if key not in _VALID_KEYS: raise errors.ProgrammerError("Invalid key requested from SSConf: '%s'" % str(key)) - filename = self._cfg_dir + '/' + self._SS_FILEPREFIX + key + filename = self._cfg_dir + "/" + constants.SSCONF_FILEPREFIX + key return filename - def _ReadFile(self, key): + def _ReadFile(self, key, default=None): """Generic routine to read keys. This will read the file which holds the value requested. Errors @@ -292,21 +132,43 @@ class SimpleStore(object): """ filename = self.KeyToFilename(key) try: - data = utils.ReadFile(filename, size=self._MAX_SIZE) + return ReadSsconfFile(filename) except EnvironmentError, err: - raise errors.ConfigurationError("Can't read from the ssconf file:" - " '%s'" % str(err)) - data = data.rstrip('\n') - return data + if err.errno == errno.ENOENT and default is not None: + return default + raise errors.ConfigurationError("Can't read ssconf file %s: %s" % + (filename, str(err))) + + def ReadAll(self): + """Reads all keys and returns their values. - def WriteFiles(self, values): + @rtype: dict + @return: Dictionary, ssconf key as key, value as value + + """ + result = [] + + for key in _VALID_KEYS: + try: + value = self._ReadFile(key) + except errors.ConfigurationError: + # Ignore non-existing files + pass + else: + result.append((key, value)) + + return dict(result) + + def WriteFiles(self, values, dry_run=False): """Writes ssconf files used by external scripts. @type values: dict @param values: Dictionary of (name, value) + @type dry_run boolean + @param dry_run: Whether to perform a dry run """ - ssconf_lock = utils.FileLock(constants.SSCONF_LOCK_FILE) + ssconf_lock = utils.FileLock.Open(self._lockfile) # Get lock while writing files ssconf_lock.Exclusive(blocking=True, timeout=SSCONF_LOCK_TIMEOUT) @@ -314,7 +176,15 @@ class SimpleStore(object): for name, value in values.iteritems(): if value and not value.endswith("\n"): value += "\n" - utils.WriteFile(self.KeyToFilename(name), data=value, mode=0444) + + if len(value) > _MAX_SIZE: + msg = ("Value '%s' has a length of %s bytes, but only up to %s are" + " allowed" % (name, len(value), _MAX_SIZE)) + raise errors.ConfigurationError(msg) + + utils.WriteFile(self.KeyToFilename(name), data=value, + mode=constants.SS_FILE_PERMS, + dry_run=dry_run) finally: ssconf_lock.Unlock() @@ -324,7 +194,7 @@ class SimpleStore(object): This is used for computing node replication data. """ - return [self.KeyToFilename(key) for key in self._VALID_KEYS] + return [self.KeyToFilename(key) for key in _VALID_KEYS] def GetClusterName(self): """Get the cluster name. @@ -338,6 +208,12 @@ class SimpleStore(object): """ return self._ReadFile(constants.SS_FILE_STORAGE_DIR) + def GetSharedFileStorageDir(self): + """Get the shared file storage dir. + + """ + return self._ReadFile(constants.SS_SHARED_FILE_STORAGE_DIR) + def GetMasterCandidates(self): """Return the list of master candidates. 
@@ -366,6 +242,17 @@ class SimpleStore(object): """ return self._ReadFile(constants.SS_MASTER_NETDEV) + def GetMasterNetmask(self): + """Get the master netmask. + + """ + try: + return self._ReadFile(constants.SS_MASTER_NETMASK) + except errors.ConfigurationError: + family = self.GetPrimaryIPFamily() + ipcls = netutils.IPAddress.GetClassFromIpFamily(family) + return ipcls.iplen + def GetMasterNode(self): """Get the hostname of the master node for this cluster. @@ -396,6 +283,22 @@ class SimpleStore(object): nl = data.splitlines(False) return nl + def GetNodegroupList(self): + """Return the list of nodegroups. + + """ + data = self._ReadFile(constants.SS_NODEGROUPS) + nl = data.splitlines(False) + return nl + + def GetNetworkList(self): + """Return the list of networks. + + """ + data = self._ReadFile(constants.SS_NETWORKS) + nl = data.splitlines(False) + return nl + def GetClusterTags(self): """Return the cluster tags. @@ -404,6 +307,56 @@ class SimpleStore(object): nl = data.splitlines(False) return nl + def GetHypervisorList(self): + """Return the list of enabled hypervisors. + + """ + data = self._ReadFile(constants.SS_HYPERVISOR_LIST) + nl = data.splitlines(False) + return nl + + def GetMaintainNodeHealth(self): + """Return the value of the maintain_node_health option. + + """ + data = self._ReadFile(constants.SS_MAINTAIN_NODE_HEALTH) + # we rely on the bool serialization here + return data == "True" + + def GetUidPool(self): + """Return the user-id pool definition string. + + The separator character is a newline. + + The return value can be parsed using uidpool.ParseUidPool():: + + ss = ssconf.SimpleStore() + uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\\n") + + """ + data = self._ReadFile(constants.SS_UID_POOL) + return data + + def GetPrimaryIPFamily(self): + """Return the cluster-wide primary address family. + + """ + try: + return int(self._ReadFile(constants.SS_PRIMARY_IP_FAMILY, + default=netutils.IP4Address.family)) + except (ValueError, TypeError), err: + raise errors.ConfigurationError("Error while trying to parse primary IP" + " family: %s" % err) + + +def WriteSsconfFiles(values, dry_run=False): + """Update all ssconf files. + + Wrapper around L{SimpleStore.WriteFiles}. + + """ + SimpleStore().WriteFiles(values, dry_run=dry_run) + def GetMasterAndMyself(ss=None): """Get the master node and my own hostname. @@ -422,7 +375,7 @@ def GetMasterAndMyself(ss=None): """ if ss is None: ss = SimpleStore() - return ss.GetMasterNode(), utils.HostInfo().name + return ss.GetMasterNode(), netutils.Hostname.GetSysName() def CheckMaster(debug, ss=None): @@ -447,27 +400,33 @@ def CheckMaster(debug, ss=None): sys.exit(constants.EXIT_NOTMASTER) -def CheckMasterCandidate(debug, ss=None): - """Checks the node setup. +def VerifyClusterName(name, _cfg_location=None): + """Verifies cluster name against a local cluster name. - If this is a master candidate, the function will return. Otherwise it will - exit with an exit code based on the node status. 
+ @type name: string + @param name: Cluster name """ + sstore = SimpleStore(cfg_location=_cfg_location) + try: - if ss is None: - ss = SimpleStore() - myself = utils.HostInfo().name - candidates = ss.GetMasterCandidates() + local_name = sstore.GetClusterName() except errors.ConfigurationError, err: - print "Cluster configuration incomplete: '%s'" % str(err) - sys.exit(constants.EXIT_NODESETUP_ERROR) - except errors.ResolverError, err: - sys.stderr.write("Cannot resolve my own name (%s)\n" % err.args[0]) - sys.exit(constants.EXIT_NODESETUP_ERROR) + logging.debug("Can't get local cluster name: %s", err) + else: + if name != local_name: + raise errors.GenericError("Current cluster name is '%s'" % local_name) - if myself not in candidates: - if debug: - sys.stderr.write("Not master candidate, exiting.\n") - sys.exit(constants.EXIT_NOTCANDIDATE) +def VerifyKeys(keys): + """Raises an exception if unknown ssconf keys are given. + + @type keys: sequence + @param keys: Key names to verify + @raise errors.GenericError: When invalid keys were found + + """ + invalid = frozenset(keys) - _VALID_KEYS + if invalid: + raise errors.GenericError("Invalid ssconf keys: %s" % + utils.CommaJoin(sorted(invalid)))
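
For illustration, a minimal sketch (not part of the patch) of how the new
module-level ReadSsconfFile() helper is meant to be used; the path below is
only an example for a default installation:

  from ganeti import ssconf

  # Reads at most _MAX_SIZE (128 KiB), strips trailing newlines and raises
  # RuntimeError instead of silently truncating an oversized file.
  release = ssconf.ReadSsconfFile("/var/lib/ganeti/ssconf_release_version")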
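
Likewise, a hedged sketch of the reworked SimpleStore read path, assuming a
node with the usual ssconf_* files under pathutils.DATA_DIR:

  from ganeti import ssconf

  ss = ssconf.SimpleStore()

  # _ReadFile() now accepts a default, so a missing ssconf_primary_ip_family
  # (e.g. on an upgraded cluster) falls back to IPv4.
  family = ss.GetPrimaryIPFamily()

  # A missing ssconf_master_netmask falls back to the default prefix length
  # of the cluster's address family (32 for IPv4, 128 for IPv6).
  netmask = ss.GetMasterNetmask()

  # ReadAll() returns a dict with every ssconf key that exists on disk;
  # non-existing files are simply skipped.
  values = ss.ReadAll()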
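
The write path gains a module-level wrapper and key validation; a short
sketch with made-up values:

  from ganeti import constants
  from ganeti import ssconf

  values = {
    constants.SS_CLUSTER_NAME: "cluster.example.com",
    constants.SS_MASTER_NODE: "node1.example.com",
    }

  # VerifyKeys() raises errors.GenericError listing any unknown key names.
  ssconf.VerifyKeys(values.keys())

  # WriteSsconfFiles() wraps SimpleStore.WriteFiles(): it takes the ssconf
  # lock, appends missing trailing newlines, rejects values larger than
  # _MAX_SIZE and writes the files with SS_FILE_PERMS.  With dry_run=True
  # the target files are left untouched.
  ssconf.WriteSsconfFiles(values, dry_run=True)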
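
Finally, a sketch of VerifyClusterName(), which replaces the old
CheckMasterCandidate() helper; the cluster name is hypothetical:

  import logging

  from ganeti import errors
  from ganeti import ssconf

  try:
    # Compares the given name with the locally stored ssconf_cluster_name;
    # an unreadable local value is only logged at debug level, a mismatch
    # raises errors.GenericError naming the current cluster.
    ssconf.VerifyClusterName("cluster.example.com")
  except errors.GenericError, err:
    logging.error("Request refused: %s", err)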