#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime
+from ganeti import pathutils
_config_lock = locking.SharedLock("ConfigWriter")
return utils.MatchNameComponent(short_name, names, case_sensitive=False)
+def _CheckInstanceDiskIvNames(disks):
+ """Checks if instance's disks' C{iv_name} attributes are in order.
+
+ @type disks: list of L{objects.Disk}
+ @param disks: List of disks
+ @rtype: list of tuples; (int, string, string)
+ @return: List of wrongly named disks, each tuple contains disk index,
+ expected and actual name
+
+ """
+ result = []
+
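+  # each disk's iv_name is expected to match its position ("disk/<index>")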
+ for (idx, disk) in enumerate(disks):
+ exp_iv_name = "disk/%s" % idx
+ if disk.iv_name != exp_iv_name:
+ result.append((idx, exp_iv_name, disk.iv_name))
+
+ return result
+
+
class ConfigWriter:
"""The interface to the cluster configuration.
self._config_data = None
self._offline = offline
if cfg_file is None:
- self._cfg_file = constants.CLUSTER_CONF_FILE
+ self._cfg_file = pathutils.CLUSTER_CONF_FILE
else:
self._cfg_file = cfg_file
self._getents = _getents
self._my_hostname = netutils.Hostname.GetSysName()
self._last_cluster_serial = -1
self._cfg_id = None
+ self._context = None
self._OpenConfig(accept_foreign)
+ def _GetRpc(self, address_list):
+ """Returns RPC runner for configuration.
+
+ """
+ return rpc.ConfigRunner(self._context, address_list)
+
+ def SetContext(self, context):
+ """Sets Ganeti context.
+
+ """
+ self._context = context
+
# this method needs to be static, so that we can call it on the class
@staticmethod
def IsCluster():
"""Check if the cluster is configured.
"""
- return os.path.exists(constants.CLUSTER_CONF_FILE)
+ return os.path.exists(pathutils.CLUSTER_CONF_FILE)
def _GenerateOneMAC(self):
"""Generate one mac address
return self._config_data.cluster.FillND(node, nodegroup)
@locking.ssynchronized(_config_lock, shared=1)
+ def GetInstanceDiskParams(self, instance):
+ """Get the disk params populated with inherit chain.
+
+ @type instance: L{objects.Instance}
+ @param instance: The instance we want to know the params for
+ @return: A dict with the filled in disk params
+
+ """
+ node = self._UnlockedGetNodeInfo(instance.primary_node)
+ nodegroup = self._UnlockedGetNodeGroup(node.group)
+ return self._UnlockedGetGroupDiskParams(nodegroup)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetGroupDiskParams(self, group):
+ """Get the disk params populated with inherit chain.
+
+ @type group: L{objects.NodeGroup}
+ @param group: The group we want to know the params for
+ @return: A dict with the filled in disk params
+
+ """
+ return self._UnlockedGetGroupDiskParams(group)
+
+ def _UnlockedGetGroupDiskParams(self, group):
+ """Get the disk params populated with inherit chain down to node-group.
+
+ @type group: L{objects.NodeGroup}
+ @param group: The group we want to know the params for
+ @return: A dict with the filled in disk params
+
+ """
+ return self._config_data.cluster.SimpleFillDP(group.diskparams)
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GenerateMAC(self, ec_id):
"""Generate a MAC for an instance.
except errors.ConfigurationError, err:
result.append("%s has invalid nicparams: %s" % (owner, err))
+ def _helper_ipolicy(owner, params, check_std):
+ try:
+ objects.InstancePolicy.CheckParameterSyntax(params, check_std)
+ except errors.ConfigurationError, err:
+ result.append("%s has invalid instance policy: %s" % (owner, err))
+
+ def _helper_ispecs(owner, params):
+ for key, value in params.items():
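+        # keys listed in IPOLICY_ISPECS are validated via _helper against the
+        # ispec parameter types; remaining keys get a plain type check below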
+ if key in constants.IPOLICY_ISPECS:
+ fullkey = "ipolicy/" + key
+ _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
+ else:
+ # FIXME: assuming list type
+ if key in constants.IPOLICY_PARAMETERS:
+ exp_type = float
+ else:
+ exp_type = list
+ if not isinstance(value, exp_type):
+ result.append("%s has invalid instance policy: for %s,"
+ " expecting %s, got %s" %
+ (owner, key, exp_type.__name__, type(value)))
+
# check cluster parameters
_helper("cluster", "beparams", cluster.SimpleFillBE({}),
constants.BES_PARAMETER_TYPES)
_helper_nic("cluster", cluster.SimpleFillNIC({}))
_helper("cluster", "ndparams", cluster.SimpleFillND({}),
constants.NDS_PARAMETER_TYPES)
+ _helper_ipolicy("cluster", cluster.SimpleFillIPolicy({}), True)
+ _helper_ispecs("cluster", cluster.SimpleFillIPolicy({}))
# per-instance checks
for instance_name in data.instances:
cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
# gather the drbd ports for duplicate checks
- for dsk in instance.disks:
+ for (idx, dsk) in enumerate(instance.disks):
if dsk.dev_type in constants.LDS_DRBD:
tcp_port = dsk.logical_id[2]
if tcp_port not in ports:
ports[tcp_port] = []
- ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
+ ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
# gather network port reservation
net_port = getattr(instance, "network_port", None)
if net_port is not None:
(instance.name, idx, msg) for msg in disk.Verify()])
result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+ wrong_names = _CheckInstanceDiskIvNames(instance.disks)
+ if wrong_names:
+ tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
+ (idx, exp_name, actual_name))
+ for (idx, exp_name, actual_name) in wrong_names)
+
+ result.append("Instance '%s' has wrongly named disks: %s" %
+ (instance.name, tmp))
+
# cluster-wide pool of free ports
for free_port in cluster.tcpudp_port_pool:
if free_port not in ports:
result.append("duplicate node group name '%s'" % nodegroup.name)
else:
nodegroups_names.add(nodegroup.name)
+ group_name = "group %s" % nodegroup.name
+ _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
+ False)
+ _helper_ispecs(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy))
if nodegroup.ndparams:
- _helper("group %s" % nodegroup.name, "ndparams",
+ _helper(group_name, "ndparams",
cluster.SimpleFillND(nodegroup.ndparams),
constants.NDS_PARAMETER_TYPES)
def AddTcpUdpPort(self, port):
"""Adds a new port to the available port pool.
+ @warning: this method does not "flush" the configuration (via
+ L{_WriteConfig}); callers should do that themselves once the
+ configuration is stable
+
"""
if not isinstance(port, int):
raise errors.ProgrammerError("Invalid type passed for port")
self._config_data.cluster.tcpudp_port_pool.add(port)
- self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
def GetPortList(self):
return self._config_data.cluster.master_netdev
@locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNetmask(self):
+ """Get the netmask of the master node for this cluster.
+
+ """
+ return self._config_data.cluster.master_netmask
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetUseExternalMipScript(self):
+ """Get flag representing whether to use the external master IP setup script.
+
+ """
+ return self._config_data.cluster.use_external_mip_script
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetFileStorageDir(self):
"""Get the file storage dir for this cluster.
"""
return self._config_data.cluster.primary_ip_family
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNetworkParameters(self):
+ """Get network parameters of the master node.
+
+    @rtype: L{objects.MasterNetworkParameters}
+ @return: network parameters of the master node
+
+ """
+ cluster = self._config_data.cluster
+ result = objects.MasterNetworkParameters(
+ name=cluster.master_node, ip=cluster.master_ip,
+ netmask=cluster.master_netmask, netdev=cluster.master_netdev,
+ ip_family=cluster.primary_ip_family)
+
+ return result
+
@locking.ssynchronized(_config_lock)
def AddNodeGroup(self, group, ec_id, check_uuid=True):
"""Add a node group to the configuration.
if target is None:
if len(self._config_data.nodegroups) != 1:
raise errors.OpPrereqError("More than one node group exists. Target"
- " group must be specified explicitely.")
+ " group must be specified explicitly.")
else:
return self._config_data.nodegroups.keys()[0]
if target in self._config_data.nodegroups:
for member_name in
self._UnlockedGetNodeGroup(ngfn(node_name)).members)
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMultiNodeGroupInfo(self, group_uuids):
+ """Get the configuration of multiple node groups.
+
+    @type group_uuids: list of strings
+    @param group_uuids: List of node group UUIDs
+ @rtype: list
+ @return: List of tuples of (group_uuid, group_info)
+
+ """
+ return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
+
@locking.ssynchronized(_config_lock)
def AddInstance(self, instance, ec_id):
"""Add an instance to the config.
"""Set the instance's status to a given value.
"""
- assert isinstance(status, bool), \
+ assert status in constants.ADMINST_ALL, \
"Invalid status '%s' passed to SetInstanceStatus" % (status,)
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" %
instance_name)
instance = self._config_data.instances[instance_name]
- if instance.admin_up != status:
- instance.admin_up = status
+ if instance.admin_state != status:
+ instance.admin_state = status
instance.serial_no += 1
instance.mtime = time.time()
self._WriteConfig()
"""Mark the instance status to up in the config.
"""
- self._SetInstanceStatus(instance_name, True)
+ self._SetInstanceStatus(instance_name, constants.ADMINST_UP)
+
+ @locking.ssynchronized(_config_lock)
+ def MarkInstanceOffline(self, instance_name):
+ """Mark the instance status to down in the config.
+
+ """
+ self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE)
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, instance_name):
"""
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
+
+ # If a network port has been allocated to the instance,
+ # return it to the pool of free ports.
+ inst = self._config_data.instances[instance_name]
+ network_port = getattr(inst, "network_port", None)
+ if network_port is not None:
+ self._config_data.cluster.tcpudp_port_pool.add(network_port)
+
del self._config_data.instances[instance_name]
self._config_data.cluster.serial_no += 1
self._WriteConfig()
"""
if old_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
- inst = self._config_data.instances[old_name]
- del self._config_data.instances[old_name]
+
+    # Operate on a copy to avoid losing the instance object on failure
+ inst = self._config_data.instances[old_name].Copy()
inst.name = new_name
- for disk in inst.disks:
+ for (idx, disk) in enumerate(inst.disks):
if disk.dev_type == constants.LD_FILE:
# rename the file paths in logical and physical id
file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
- disk_fname = "disk%s" % disk.iv_name.split("/")[1]
- disk.physical_id = disk.logical_id = (disk.logical_id[0],
- utils.PathJoin(file_storage_dir,
- inst.name,
- disk_fname))
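+        # keep the first component of logical_id and rebuild the path using
+        # the new instance name and the disk's index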
+ disk.logical_id = (disk.logical_id[0],
+ utils.PathJoin(file_storage_dir, inst.name,
+ "disk%s" % idx))
+ disk.physical_id = disk.logical_id
+
+ # Actually replace instance object
+ del self._config_data.instances[old_name]
+ self._config_data.instances[inst.name] = inst
# Force update of ssconf files
self._config_data.cluster.serial_no += 1
- self._config_data.instances[inst.name] = inst
self._WriteConfig()
@locking.ssynchronized(_config_lock)
"""Mark the status of an instance to down in the configuration.
"""
- self._SetInstanceStatus(instance_name, False)
+ self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN)
def _UnlockedGetInstanceList(self):
"""Get the list of instances.
for instance in self._UnlockedGetInstanceList()])
return my_dict
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetInstancesInfoByFilter(self, filter_fn):
+ """Get instance configuration with a filter.
+
+ @type filter_fn: callable
+ @param filter_fn: Filter function receiving instance object as parameter,
+ returning boolean. Important: this function is called while the
+      configuration lock is held. It must not do any complex work or call
+ functions potentially leading to a deadlock. Ideally it doesn't call any
+ other functions and just compares instance attributes.
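+
+    Example (illustrative only; assumes a C{ConfigWriter} instance C{cfg})::
+
+      up = cfg.GetInstancesInfoByFilter(
+        lambda inst: inst.admin_state == constants.ADMINST_UP)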
+
+ """
+ return dict((name, inst)
+ for (name, inst) in self._config_data.instances.items()
+ if filter_fn(inst))
+
@locking.ssynchronized(_config_lock)
def AddNode(self, node, ec_id):
"""Add a node to the configuration.
would GetNodeInfo return for the node
"""
- my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
- for node in self._UnlockedGetNodeList()])
- return my_dict
+ return self._UnlockedGetAllNodesInfo()
+
+ def _UnlockedGetAllNodesInfo(self):
+ """Gets configuration of all nodes.
+
+ @note: See L{GetAllNodesInfo}
+
+ """
+ return dict([(node, self._UnlockedGetNodeInfo(node))
+ for node in self._UnlockedGetNodeList()])
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupsFromNodes(self, nodes):
"""Changes the group of a number of nodes.
@type mods: list of tuples; (node name, new group UUID)
- @param modes: Node membership modifications
+ @param mods: Node membership modifications
"""
groups = self._config_data.nodegroups
# Update timestamps and serials (only once per node/group object)
now = time.time()
- for obj in frozenset(itertools.chain(*resmod)): # pylint: disable-msg=W0142
+ for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
obj.serial_no += 1
obj.mtime = now
# Make sure the configuration has the right version
_ValidateConfig(data)
- if (not hasattr(data, 'cluster') or
- not hasattr(data.cluster, 'rsahostkeypub')):
+ if (not hasattr(data, "cluster") or
+ not hasattr(data.cluster, "rsahostkeypub")):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
node_list.append(node_info.name)
addr_list.append(node_info.primary_ip)
- result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
- address_list=addr_list)
+ # TODO: Use dedicated resolver talking to config writer for name resolution
+ result = \
+ self._GetRpc(addr_list).call_upload_file(node_list, self._cfg_file)
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
# Write ssconf files on all nodes (including locally)
if self._last_cluster_serial < self._config_data.cluster.serial_no:
if not self._offline:
- result = rpc.RpcRunner.call_write_ssconf_files(
+ result = self._GetRpc(None).call_write_ssconf_files(
self._UnlockedGetOnlineNodeList(),
self._UnlockedGetSsconfValues())
constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
constants.SS_MASTER_IP: cluster.master_ip,
constants.SS_MASTER_NETDEV: cluster.master_netdev,
+ constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
constants.SS_MASTER_NODE: cluster.master_node,
constants.SS_NODE_LIST: node_data,
constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,