"""
-import simplejson
import ConfigParser
import re
+import copy
from cStringIO import StringIO
from ganeti import errors
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
- "OS", "Node", "Cluster"]
+ "OS", "Node", "Cluster", "FillDict"]
+def FillDict(defaults_dict, custom_dict):
+  """Basic function to apply settings on top of a default dict.
-# Check whether the simplejson module supports indentation
-_JSON_INDENT = 2
-try:
-  simplejson.dumps(1, indent=_JSON_INDENT)
-except TypeError:
-  _JSON_INDENT = None
+  @type defaults_dict: dict
+  @param defaults_dict: dictionary holding the default values
+  @type custom_dict: dict
+  @param custom_dict: dictionary holding customized values
+  @rtype: dict
+  @return: dict with the 'full' values (defaults overridden by custom)
+
+  """
+  # deep-copy so that later mutation of the result can never leak back
+  # into the (possibly shared) defaults dictionary
+  ret_dict = copy.deepcopy(defaults_dict)
+  ret_dict.update(custom_dict)
+  return ret_dict
+
+
+def UpgradeGroupedParams(target, defaults):
+  """Update all groups for the target parameter.
+
+  @type target: dict of dicts
+  @param target: {group: {parameter: value}}
+  @type defaults: dict
+  @param defaults: default parameter values
+  @rtype: dict of dicts
+  @return: the updated target (a newly-built dict when target was None)
+
+  """
+  if target is None:
+    # NOTE(review): the defaults dict itself is stored here (no copy), so a
+    # later in-place change to the default group would also alter the
+    # caller's defaults — confirm this is intended
+    target = {constants.PP_DEFAULT: defaults}
+  else:
+    for group in target:
+      target[group] = FillDict(defaults, target[group])
+  return target
class ConfigObject(object):
as None instead of raising an error
Classes derived from this must always declare __slots__ (we use many
- config objects and the memory reduction is useful.
+ config objects and the memory reduction is useful)
"""
__slots__ = []
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
+ self.UpgradeConfig()
def __getattr__(self, name):
if name not in self.__slots__:
if name in self.__slots__:
setattr(self, name, state[name])
- def Dump(self, fobj):
- """Dump to a file object.
-
- """
- data = self.ToDict()
- if _JSON_INDENT is None:
- simplejson.dump(data, fobj)
- else:
- simplejson.dump(data, fobj, indent=_JSON_INDENT)
-
- @classmethod
- def Load(cls, fobj):
- """Load data from the given stream.
-
- """
- return cls.FromDict(simplejson.load(fobj))
-
- def Dumps(self):
- """Dump and return the string representation."""
- buf = StringIO()
- self.Dump(buf)
- return buf.getvalue()
-
- @classmethod
- def Loads(cls, data):
- """Load data from a string."""
- return cls.Load(StringIO(data))
-
def ToDict(self):
"""Convert to a dict holding only standard python types.
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
+  def UpgradeConfig(self):
+    """Fill defaults for missing configuration values.
+
+    Hook invoked once at object init time (see __init__); this base
+    implementation is a deliberate no-op, and subclasses override it to
+    upgrade/complete their own slots.
+
+    """
+    pass
+
class TaggableObject(ConfigObject):
"""An generic class supporting tags.
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
- if not re.match("^[ \w.+*/:-]+$", tag):
+ if not re.match("^[\w.+*/:-]+$", tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
class ConfigData(ConfigObject):
"""Top-level config object."""
- __slots__ = ["cluster", "nodes", "instances"]
+ __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]
def ToDict(self):
"""Custom function for top-level config data.
"""Config object representing a network card."""
__slots__ = ["mac", "ip", "bridge"]
+  @classmethod
+  def CheckParameterSyntax(cls, nicparams):
+    """Check the given parameters for validity.
+
+    @type nicparams: dict
+    @param nicparams: dictionary with parameter names/value
+    @raise errors.ConfigurationError: when a parameter is not valid
+
+    """
+    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
+      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
+      raise errors.ConfigurationError(err)
+
+    # use value equality (==), not identity (is): a mode string deserialized
+    # from the config file is not guaranteed to be the same object as the
+    # constant, so an 'is' comparison could silently skip this check
+    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+        not nicparams[constants.NIC_LINK]):
+      err = "Missing bridged nic link"
+      raise errors.ConfigurationError(err)
+
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size"]
+ "children", "iv_name", "size", "mode"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
- return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
- constants.LD_LV)
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
- return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
- constants.LD_LV)
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
devices needs to (or can) be assembled.
"""
- if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_MD_R1:
+ if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
- tree in the most compact way. For example, a md/drbd/lvm stack
- will be returned as (primary_node, md) and (secondary_node, drbd)
- which represents all the top-level devices on the nodes. This
- means that on the primary node we need to activate the the md (and
- recursively all its children) and on the secondary node we need to
- activate the drbd device (and its children, the two lvm volumes).
+ tree in the most compact way. For example, a drbd/lvm stack
+ will be returned as (primary_node, drbd) and (secondary_node, drbd)
+ which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node)
# be different)
return result
+  def RecordGrow(self, amount):
+    """Update the size of this disk after growth.
+
+    This method recurses over the disk's children and updates their
+    size correspondingly. The method needs to be kept in sync with the
+    actual algorithms from bdev.
+
+    @param amount: size to add to the disk (presumably in MiB, matching
+        self.size — TODO confirm against bdev)
+
+    """
+    if self.dev_type == constants.LD_LV:
+      self.size += amount
+    elif self.dev_type == constants.LD_DRBD8:
+      # only the data child (children[0]) grows; the metadata device
+      # keeps its size
+      if self.children:
+        self.children[0].RecordGrow(amount)
+      self.size += amount
+    else:
+      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
+                                   " disk type %s" % self.dev_type)
+
+  def SetPhysicalID(self, target_node, nodes_ip):
+    """Convert the logical ID to the physical ID.
+
+    This is used only for drbd, which needs ip/port configuration.
+
+    The routine descends down and updates its children also, because
+    this helps when only the top device is passed to the remote
+    node.
+
+    Arguments:
+      - target_node: the node we wish to configure for
+      - nodes_ip: a mapping of node name to ip
+
+    The target_node must exist in nodes_ip, and must be one of the
+    nodes in the logical ID for each of the DRBD devices encountered
+    in the disk tree.
+
+    """
+    # convert children first, so the parent's conversion sees updated state
+    if self.children:
+      for child in self.children:
+        child.SetPhysicalID(target_node, nodes_ip)
+
+    # already converted and nothing left to convert from: keep existing id
+    if self.logical_id is None and self.physical_id is not None:
+      return
+    if self.dev_type in constants.LDS_DRBD:
+      pnode, snode, port, pminor, sminor, secret = self.logical_id
+      if target_node not in (pnode, snode):
+        raise errors.ConfigurationError("DRBD device not knowing node %s" %
+                                        target_node)
+      pnode_ip = nodes_ip.get(pnode, None)
+      snode_ip = nodes_ip.get(snode, None)
+      if pnode_ip is None or snode_ip is None:
+        raise errors.ConfigurationError("Can't find primary or secondary node"
+                                        " for %s" % str(self))
+      p_data = (pnode_ip, port)
+      s_data = (snode_ip, port)
+      # physical id layout: (local_ip, port, remote_ip, port, local_minor,
+      # secret) — local endpoint first, from target_node's point of view
+      if pnode == target_node:
+        self.physical_id = p_data + s_data + (pminor, secret)
+      else: # it must be secondary, we tested above
+        self.physical_id = s_data + p_data + (sminor, secret)
+    else:
+      # non-drbd devices need no conversion
+      self.physical_id = self.logical_id
+    return
+
def ToDict(self):
"""Disk-specific conversion to standard python types.
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
obj.physical_id = tuple(obj.physical_id)
+ if obj.dev_type in constants.LDS_DRBD:
+ # we need a tuple of length six here
+ if len(obj.logical_id) < 6:
+ obj.logical_id += (None,) * (6 - len(obj.logical_id))
return obj
def __str__(self):
if self.dev_type == constants.LD_LV:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
- if self.dev_type == constants.LD_DRBD7:
- val = "<DRBD7("
- else:
- val = "<DRBD8("
+ node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
+ val = "<DRBD8("
if self.physical_id is None:
phy = "unconfigured"
else:
(self.physical_id[0], self.physical_id[1],
self.physical_id[2], self.physical_id[3]))
- val += ("hosts=%s-%s, port=%s, %s, " %
- (self.logical_id[0], self.logical_id[1], self.logical_id[2],
- phy))
+ val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
+ (node_a, minor_a, node_b, minor_b, port, phy))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
- elif self.dev_type == constants.LD_MD_R1:
- val = "<MD_R1(uuid=%s, children=%s" % (self.physical_id, self.children)
else:
val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
(self.dev_type, self.logical_id, self.physical_id, self.children))
val += ", not visible"
else:
val += ", visible as /dev/%s" % self.iv_name
- val += ", size=%dm)>" % self.size
+ if isinstance(self.size, int):
+ val += ", size=%dm)>" % self.size
+ else:
+ val += ", size='%s')>" % (self.size,)
return val
+  def Verify(self):
+    """Checks that this disk is correctly configured.
+
+    @rtype: list
+    @return: a list of error messages (empty when the disk is valid)
+
+    """
+    # the accumulator is deliberately not named 'errors': that would shadow
+    # the ganeti errors module imported at the top of this file
+    result = []
+    if self.mode not in constants.DISK_ACCESS_SET:
+      result.append("Disk access mode '%s' is invalid" % (self.mode, ))
+    return result
+
class Instance(TaggableObject):
"""Config object representing an instance."""
"name",
"primary_node",
"os",
- "status",
- "memory",
- "vcpus",
+ "hypervisor",
+ "hvparams",
+ "beparams",
+ "admin_up",
"nics",
"disks",
"disk_template",
"network_port",
- "kernel_path",
- "initrd_path",
- "hvm_boot_order",
+ "serial_no",
]
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
+ This is a simple wrapper over _ComputeAllNodes.
+
+ """
+ all_nodes = set(self._ComputeAllNodes())
+ all_nodes.discard(self.primary_node)
+ return tuple(all_nodes)
+
+ secondary_nodes = property(_ComputeSecondaryNodes, None, None,
+ "List of secondary nodes")
+
+ def _ComputeAllNodes(self):
+ """Compute the list of all nodes.
+
Since the data is already there (in the drbd disks), keeping it as
a separate normal attribute is redundant and if not properly
synchronised can cause problems. Thus it's better to compute it
dynamically.
"""
- def _Helper(primary, sec_nodes, device):
- """Recursively computes secondary nodes given a top device."""
+ def _Helper(nodes, device):
+ """Recursively computes nodes given a top device."""
if device.dev_type in constants.LDS_DRBD:
- nodea, nodeb, dummy = device.logical_id
- if nodea == primary:
- candidate = nodeb
- else:
- candidate = nodea
- if candidate not in sec_nodes:
- sec_nodes.append(candidate)
+ nodea, nodeb = device.logical_id[:2]
+ nodes.add(nodea)
+ nodes.add(nodeb)
if device.children:
for child in device.children:
- _Helper(primary, sec_nodes, child)
+ _Helper(nodes, child)
- secondary_nodes = []
+ all_nodes = set()
+ all_nodes.add(self.primary_node)
for device in self.disks:
- _Helper(self.primary_node, secondary_nodes, device)
- return tuple(secondary_nodes)
+ _Helper(all_nodes, device)
+ return tuple(all_nodes)
- secondary_nodes = property(_ComputeSecondaryNodes, None, None,
- "List of secondary nodes")
+ all_nodes = property(_ComputeAllNodes, None, None,
+ "List of all nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
- This function figures out what logical volumes should belong on which
- nodes, recursing through a device tree.
+ This function figures out what logical volumes should belong on
+ which nodes, recursing through a device tree.
- Args:
- lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.
+ @param lvmap: optional dictionary to receive the
+ 'node' : ['lv', ...] data.
- Returns:
- None if lvmap arg is given.
- Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }
+ @return: None if lvmap arg is given, otherwise, a dictionary
+ of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
"""
if node == None:
lvmap[node].append(dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
- if dev.logical_id[0] not in lvmap:
- lvmap[dev.logical_id[0]] = []
-
- if dev.logical_id[1] not in lvmap:
- lvmap[dev.logical_id[1]] = []
-
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
return ret
-  def FindDisk(self, name):
-    """Find a disk given having a specified name.
+  def FindDisk(self, idx):
+    """Find a disk given having a specified index.
-    This will return the disk which has the given iv_name.
+    This is just a wrapper that does validation of the index.
-    """
-    for disk in self.disks:
-      if disk.iv_name == name:
-        return disk
+    @type idx: int
+    @param idx: the disk index
+    @rtype: L{Disk}
+    @return: the corresponding disk
+    @raise errors.OpPrereqError: when the given index is not valid
-    return None
+    """
+    try:
+      idx = int(idx)
+      return self.disks[idx]
+    except ValueError, err:
+      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
+    except IndexError:
+      # the highest valid index is len-1; message also needs its closing
+      # parenthesis and the correct spelling of "instance"
+      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
+                                 " 0 to %d)" % (idx, len(self.disks) - 1))
def ToDict(self):
"""Instance-specific conversion to standard python types.
"name",
"path",
"status",
- "api_version",
+ "api_versions",
"create_script",
"export_script",
"import_script",
"name",
"primary_ip",
"secondary_ip",
+ "serial_no",
+ "master_candidate",
+ "offline",
+ "drained",
]
class Cluster(TaggableObject):
"""Config object representing the cluster."""
__slots__ = TaggableObject.__slots__ + [
- "config_version",
"serial_no",
"rsahostkeypub",
"highest_used_port",
"mac_prefix",
"volume_group_name",
"default_bridge",
+ "default_hypervisor",
+ "master_node",
+ "master_ip",
+ "master_netdev",
+ "cluster_name",
+ "file_storage_dir",
+ "enabled_hypervisors",
+ "hvparams",
+ "beparams",
+ "nicparams",
+ "candidate_pool_size",
+ "modify_etc_hosts",
]
+  def UpgradeConfig(self):
+    """Fill defaults for missing configuration values.
+
+    Completes hvparams, beparams and nicparams with the cluster-wide
+    defaults, migrates the legacy default_bridge setting into nicparams,
+    and defaults modify_etc_hosts to True.
+
+    """
+    if self.hvparams is None:
+      # NOTE(review): this stores the defaults dict by reference (no copy);
+      # in-place edits would then change constants.HVC_DEFAULTS — confirm
+      # this is intended
+      self.hvparams = constants.HVC_DEFAULTS
+    else:
+      for hypervisor in self.hvparams:
+        self.hvparams[hypervisor] = FillDict(
+          constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
+
+    self.beparams = UpgradeGroupedParams(self.beparams,
+                                         constants.BEC_DEFAULTS)
+    # an empty/missing nicparams means the config predates nic parameters;
+    # in that case carry the old default_bridge over as the default nic link
+    migrate_default_bridge = not self.nicparams
+    self.nicparams = UpgradeGroupedParams(self.nicparams,
+                                          constants.NICC_DEFAULTS)
+    if migrate_default_bridge:
+      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
+        self.default_bridge
+
+    if self.modify_etc_hosts is None:
+      self.modify_etc_hosts = True
+
def ToDict(self):
"""Custom function for cluster.
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
+  def FillHV(self, instance):
+    """Fill an instance's hvparams dict.
+
+    @type instance: object
+    @param instance: the instance parameter to fill; only its hypervisor
+        and hvparams attributes are read
+    @rtype: dict
+    @return: a copy of the instance's hvparams with missing keys filled
+        from the cluster defaults for its hypervisor (instance values
+        only, when the hypervisor has no cluster-level defaults)
+
+    """
+    return FillDict(self.hvparams.get(instance.hypervisor, {}),
+                    instance.hvparams)
+
+  def FillBE(self, instance):
+    """Fill an instance's beparams dict.
+
+    @type instance: object
+    @param instance: the instance parameter to fill; only its beparams
+        attribute is read
+    @rtype: dict
+    @return: a copy of the instance's beparams with missing keys filled
+        from the cluster's default parameter group
+
+    """
+    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
+                    instance.beparams)
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.