import sys
import errno
+import logging
+from ganeti import compat
from ganeti import errors
from ganeti import constants
from ganeti import utils
# Maximum time in seconds to wait for the ssconf lock when writing files
# (see SimpleStore.WriteFiles)
SSCONF_LOCK_TIMEOUT = 10

#: Valid ssconf keys; any other key is rejected by KeyToFilename/VerifyKeys
_VALID_KEYS = compat.UniqueFrozenset([
  constants.SS_CLUSTER_NAME,
  constants.SS_CLUSTER_TAGS,
  constants.SS_FILE_STORAGE_DIR,
  constants.SS_SHARED_FILE_STORAGE_DIR,
  constants.SS_MASTER_CANDIDATES,
  constants.SS_MASTER_CANDIDATES_IPS,
  constants.SS_MASTER_IP,
  constants.SS_MASTER_NETDEV,
  constants.SS_MASTER_NETMASK,
  constants.SS_MASTER_NODE,
  constants.SS_NODE_LIST,
  constants.SS_NODE_PRIMARY_IPS,
  constants.SS_NODE_SECONDARY_IPS,
  constants.SS_OFFLINE_NODES,
  constants.SS_ONLINE_NODES,
  constants.SS_PRIMARY_IP_FAMILY,
  constants.SS_INSTANCE_LIST,
  constants.SS_RELEASE_VERSION,
  constants.SS_HYPERVISOR_LIST,
  constants.SS_MAINTAIN_NODE_HEALTH,
  constants.SS_UID_POOL,
  constants.SS_NODEGROUPS,
  constants.SS_NETWORKS,
  ])

#: Maximum size for ssconf files (128 KiB); enforced both when reading
#: (ReadSsconfFile) and when writing (SimpleStore.WriteFiles)
_MAX_SIZE = 128 * 1024
+
+
def ReadSsconfFile(filename):
  """Reads an ssconf file and checks it against the size limit.

  @type filename: string
  @param filename: Path to file
  @rtype: string
  @return: File contents with trailing newlines stripped
  @raise RuntimeError: When the file size exceeds L{_MAX_SIZE}

  """
  st_helper = utils.FileStatHelper()

  # Reading is capped at _MAX_SIZE; the stat result tells us whether the
  # file on disk was actually larger than what we read
  contents = utils.ReadFile(filename, size=_MAX_SIZE, preread=st_helper)

  actual_size = st_helper.st.st_size
  if actual_size > _MAX_SIZE:
    raise RuntimeError("File '%s' has a size of %s bytes (up to %s allowed)" %
                       (filename, actual_size, _MAX_SIZE))

  return contents.rstrip("\n")
+
class SimpleStore(object):
"""Interface to static cluster data.
- keys are restricted to predefined values
"""
- _VALID_KEYS = (
- constants.SS_CLUSTER_NAME,
- constants.SS_CLUSTER_TAGS,
- constants.SS_FILE_STORAGE_DIR,
- constants.SS_SHARED_FILE_STORAGE_DIR,
- constants.SS_MASTER_CANDIDATES,
- constants.SS_MASTER_CANDIDATES_IPS,
- constants.SS_MASTER_IP,
- constants.SS_MASTER_NETDEV,
- constants.SS_MASTER_NETMASK,
- constants.SS_MASTER_NODE,
- constants.SS_NODE_LIST,
- constants.SS_NODE_PRIMARY_IPS,
- constants.SS_NODE_SECONDARY_IPS,
- constants.SS_OFFLINE_NODES,
- constants.SS_ONLINE_NODES,
- constants.SS_PRIMARY_IP_FAMILY,
- constants.SS_INSTANCE_LIST,
- constants.SS_RELEASE_VERSION,
- constants.SS_HYPERVISOR_LIST,
- constants.SS_MAINTAIN_NODE_HEALTH,
- constants.SS_UID_POOL,
- constants.SS_NODEGROUPS,
- constants.SS_NETWORKS,
- )
- _MAX_SIZE = 131072
-
- def __init__(self, cfg_location=None):
+ def __init__(self, cfg_location=None, _lockfile=pathutils.SSCONF_LOCK_FILE):
if cfg_location is None:
self._cfg_dir = pathutils.DATA_DIR
else:
self._cfg_dir = cfg_location
+ self._lockfile = _lockfile
+
def KeyToFilename(self, key):
"""Convert a given key into filename.
"""
- if key not in self._VALID_KEYS:
+ if key not in _VALID_KEYS:
raise errors.ProgrammerError("Invalid key requested from SSConf: '%s'"
% str(key))
"""
filename = self.KeyToFilename(key)
try:
- data = utils.ReadFile(filename, size=self._MAX_SIZE)
+ return ReadSsconfFile(filename)
except EnvironmentError, err:
if err.errno == errno.ENOENT and default is not None:
return default
raise errors.ConfigurationError("Can't read ssconf file %s: %s" %
(filename, str(err)))
- return data.rstrip("\n")
+ def ReadAll(self):
+ """Reads all keys and returns their values.
- def WriteFiles(self, values):
+ @rtype: dict
+ @return: Dictionary, ssconf key as key, value as value
+
+ """
+ result = []
+
+ for key in _VALID_KEYS:
+ try:
+ value = self._ReadFile(key)
+ except errors.ConfigurationError:
+ # Ignore non-existing files
+ pass
+ else:
+ result.append((key, value))
+
+ return dict(result)
+
+ def WriteFiles(self, values, dry_run=False):
"""Writes ssconf files used by external scripts.
@type values: dict
@param values: Dictionary of (name, value)
+ @type dry_run boolean
+ @param dry_run: Whether to perform a dry run
"""
- ssconf_lock = utils.FileLock.Open(pathutils.SSCONF_LOCK_FILE)
+ ssconf_lock = utils.FileLock.Open(self._lockfile)
# Get lock while writing files
ssconf_lock.Exclusive(blocking=True, timeout=SSCONF_LOCK_TIMEOUT)
for name, value in values.iteritems():
if value and not value.endswith("\n"):
value += "\n"
- if len(value) > self._MAX_SIZE:
- raise errors.ConfigurationError("ssconf file %s above maximum size" %
- name)
+
+ if len(value) > _MAX_SIZE:
+ msg = ("Value '%s' has a length of %s bytes, but only up to %s are"
+ " allowed" % (name, len(value), _MAX_SIZE))
+ raise errors.ConfigurationError(msg)
+
utils.WriteFile(self.KeyToFilename(name), data=value,
- mode=constants.SS_FILE_PERMS)
+ mode=constants.SS_FILE_PERMS,
+ dry_run=dry_run)
finally:
ssconf_lock.Unlock()
This is used for computing node replication data.
"""
- return [self.KeyToFilename(key) for key in self._VALID_KEYS]
+ return [self.KeyToFilename(key) for key in _VALID_KEYS]
def GetClusterName(self):
"""Get the cluster name.
" family: %s" % err)
def WriteSsconfFiles(values, dry_run=False):
  """Update all ssconf files.

  Wrapper around L{SimpleStore.WriteFiles}.

  @type values: dict
  @param values: Dictionary of (name, value)
  @type dry_run: boolean
  @param dry_run: Whether to perform a dry run

  """
  store = SimpleStore()
  store.WriteFiles(values, dry_run=dry_run)
def GetMasterAndMyself(ss=None):
if debug:
sys.stderr.write("Not master, exiting.\n")
sys.exit(constants.EXIT_NOTMASTER)
+
+
+def VerifyClusterName(name, _cfg_location=None):
+ """Verifies cluster name against a local cluster name.
+
+ @type name: string
+ @param name: Cluster name
+
+ """
+ sstore = SimpleStore(cfg_location=_cfg_location)
+
+ try:
+ local_name = sstore.GetClusterName()
+ except errors.ConfigurationError, err:
+ logging.debug("Can't get local cluster name: %s", err)
+ else:
+ if name != local_name:
+ raise errors.GenericError("Current cluster name is '%s'" % local_name)
+
+
def VerifyKeys(keys):
  """Raises an exception if unknown ssconf keys are given.

  @type keys: sequence
  @param keys: Key names to verify
  @raise errors.GenericError: When invalid keys were found

  """
  unknown = frozenset(keys).difference(_VALID_KEYS)

  if unknown:
    raise errors.GenericError("Invalid ssconf keys: %s" %
                              utils.CommaJoin(sorted(unknown)))