"""
+# pylint: disable-msg=E0203,W0201
+
+# E0203: Access to member %r before its definition, since we use
+# objects.py which doesn't explicitly initialise its members
+
+# W0201: Attribute '%s' defined outside __init__
-import simplejson
import ConfigParser
import re
+import copy
from cStringIO import StringIO
from ganeti import errors
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
- "OS", "Node", "Cluster"]
+ "OS", "Node", "Cluster", "FillDict"]
+
+_TIMESTAMPS = ["ctime", "mtime"]
+_UUID = ["uuid"]
+
+
+def FillDict(defaults_dict, custom_dict, skip_keys=None):
+ """Basic function to apply settings on top a default dict.
+
+ @type defaults_dict: dict
+ @param defaults_dict: dictionary holding the default values
+ @type custom_dict: dict
+ @param custom_dict: dictionary holding customized value
+ @type skip_keys: list
+ @param skip_keys: which keys not to fill
+ @rtype: dict
+ @return: dict with the 'full' values
+
+ """
+ ret_dict = copy.deepcopy(defaults_dict)
+ ret_dict.update(custom_dict)
+ if skip_keys:
+ for k in skip_keys:
+ try:
+ del ret_dict[k]
+ except KeyError:
+ pass
+ return ret_dict
+
+
+def UpgradeGroupedParams(target, defaults):
+ """Update all groups for the target parameter.
+ @type target: dict of dicts
+ @param target: {group: {parameter: value}}
+ @type defaults: dict
+ @param defaults: default parameter values
-# Check whether the simplejson module supports indentation
-_JSON_INDENT = 2
-try:
- simplejson.dumps(1, indent=_JSON_INDENT)
-except TypeError:
- _JSON_INDENT = None
+ """
+ if target is None:
+ target = {constants.PP_DEFAULT: defaults}
+ else:
+ for group in target:
+ target[group] = FillDict(defaults, target[group])
+ return target
class ConfigObject(object):
as None instead of raising an error
Classes derived from this must always declare __slots__ (we use many
- config objects and the memory reduction is useful.
+ config objects and the memory reduction is useful)
"""
__slots__ = []
setattr(self, k, v)
def __getattr__(self, name):
- if name not in self.__slots__:
+ if name not in self._all_slots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
- def __setitem__(self, key, value):
- if key not in self.__slots__:
- raise KeyError(key)
- setattr(self, key, value)
-
- def __getstate__(self):
- state = {}
- for name in self.__slots__:
- if hasattr(self, name):
- state[name] = getattr(self, name)
- return state
-
def __setstate__(self, state):
+ slots = self._all_slots()
for name in state:
- if name in self.__slots__:
+ if name in slots:
setattr(self, name, state[name])
- def Dump(self, fobj):
- """Dump to a file object.
-
- """
- data = self.ToDict()
- if _JSON_INDENT is None:
- simplejson.dump(data, fobj)
- else:
- simplejson.dump(data, fobj, indent=_JSON_INDENT)
-
@classmethod
- def Load(cls, fobj):
- """Load data from the given stream.
+ def _all_slots(cls):
+ """Compute the list of all declared slots for a class.
"""
- return cls.FromDict(simplejson.load(fobj))
-
- def Dumps(self):
- """Dump and return the string representation."""
- buf = StringIO()
- self.Dump(buf)
- return buf.getvalue()
-
- @classmethod
- def Loads(cls, data):
- """Load data from a string."""
- return cls.Load(StringIO(data))
+ slots = []
+ for parent in cls.__mro__:
+ slots.extend(getattr(parent, "__slots__", []))
+ return slots
def ToDict(self):
"""Convert to a dict holding only standard python types.
make sure all objects returned are only standard python types.
"""
- return dict([(k, getattr(self, k, None)) for k in self.__slots__])
+ result = {}
+ for name in self._all_slots():
+ value = getattr(self, name, None)
+ if value is not None:
+ result[name] = value
+ return result
+
+ __getstate__ = ToDict
@classmethod
def FromDict(cls, val):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
val_str = dict([(str(k), v) for k, v in val.iteritems()])
- obj = cls(**val_str)
+ obj = cls(**val_str) # pylint: disable-msg=W0142
return obj
@staticmethod
" _ContainerFromDicts" % c_type)
return ret
+ def Copy(self):
+ """Makes a deep copy of the current object and its children.
+
+ """
+ dict_form = self.ToDict()
+ clone_obj = self.__class__.FromDict(dict_form)
+ return clone_obj
+
def __repr__(self):
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ This method will be called at configuration load time, and its
+ implementation will be object dependent.
+
+ """
+ pass
+
class TaggableObject(ConfigObject):
"""An generic class supporting tags.
"""
- __slots__ = ConfigObject.__slots__ + ["tags"]
+ __slots__ = ["tags"]
+ VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
- @staticmethod
- def ValidateTag(tag):
+ @classmethod
+ def ValidateTag(cls, tag):
"""Check if a tag is valid.
If the tag is invalid, an errors.TagError will be raised. The
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
- if not re.match("^[ \w.+*/:-]+$", tag):
+ if not cls.VALID_TAG_RE.match(tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
class ConfigData(ConfigObject):
"""Top-level config object."""
- __slots__ = ["cluster", "nodes", "instances"]
+ __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
+ _TIMESTAMPS)
def ToDict(self):
"""Custom function for top-level config data.
obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
return obj
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ self.cluster.UpgradeConfig()
+ for node in self.nodes.values():
+ node.UpgradeConfig()
+ for instance in self.instances.values():
+ instance.UpgradeConfig()
+
class NIC(ConfigObject):
"""Config object representing a network card."""
- __slots__ = ["mac", "ip", "bridge"]
+ __slots__ = ["mac", "ip", "bridge", "nicparams"]
+
+ @classmethod
+ def CheckParameterSyntax(cls, nicparams):
+ """Check the given parameters for validity.
+
+ @type nicparams: dict
+ @param nicparams: dictionary with parameter names/value
+ @raise errors.ConfigurationError: when a parameter is not valid
+
+ """
+ if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
+ err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
+ raise errors.ConfigurationError(err)
+
+ if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+ not nicparams[constants.NIC_LINK]):
+ err = "Missing bridged nic link"
+ raise errors.ConfigurationError(err)
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ if self.nicparams is None:
+ self.nicparams = {}
+ if self.bridge is not None:
+ self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
+ self.nicparams[constants.NIC_LINK] = self.bridge
+    # bridge is no longer used in 2.1. The slot is left there to support
+ # upgrading, but will be removed in 2.2
+ if self.bridge is not None:
+ self.bridge = None
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size"]
+ "children", "iv_name", "size", "mode"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
- return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
- constants.LD_LV)
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
- return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
- constants.LD_LV)
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
return self.dev_type in (constants.LD_LV,)
+ def StaticDevPath(self):
+ """Return the device path if this device type has a static one.
+
+ Some devices (LVM for example) live always at the same /dev/ path,
+ irrespective of their status. For such devices, we return this
+ path, for others we return None.
+
+ @warning: The path returned is not a normalized pathname; callers
+ should check that it is a valid path.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
+ return None
+
+ def ChildrenNeeded(self):
+ """Compute the needed number of children for activation.
+
+ This method will return either -1 (all children) or a positive
+ number denoting the minimum number of children needed for
+ activation (only mirrored devices will usually return >=0).
+
+ Currently, only DRBD8 supports diskless activation (therefore we
+ return 0), for all other we keep the previous semantics and return
+ -1.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ return 0
+ return -1
+
def GetNodes(self, node):
"""This function returns the nodes this device lives on.
devices needs to (or can) be assembled.
"""
- if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_MD_R1:
+ if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
- tree in the most compact way. For example, a md/drbd/lvm stack
- will be returned as (primary_node, md) and (secondary_node, drbd)
- which represents all the top-level devices on the nodes. This
- means that on the primary node we need to activate the the md (and
- recursively all its children) and on the secondary node we need to
- activate the drbd device (and its children, the two lvm volumes).
+ tree in the most compact way. For example, a drbd/lvm stack
+ will be returned as (primary_node, drbd) and (secondary_node, drbd)
+ which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node)
# be different)
return result
+ def RecordGrow(self, amount):
+ """Update the size of this disk after growth.
+
+    This method recurses over the disk's children and updates their
+    size correspondingly. The method needs to be kept in sync with the
+ actual algorithms from bdev.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ self.size += amount
+ elif self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].RecordGrow(amount)
+ self.size += amount
+ else:
+ raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
+ " disk type %s" % self.dev_type)
+
+ def UnsetSize(self):
+ """Sets recursively the size to zero for the disk and its children.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.UnsetSize()
+ self.size = 0
+
+ def SetPhysicalID(self, target_node, nodes_ip):
+ """Convert the logical ID to the physical ID.
+
+ This is used only for drbd, which needs ip/port configuration.
+
+ The routine descends down and updates its children also, because
+    this helps when only the top device is passed to the remote
+ node.
+
+ Arguments:
+ - target_node: the node we wish to configure for
+ - nodes_ip: a mapping of node name to ip
+
+    The target_node must exist in nodes_ip, and must be one of the
+ nodes in the logical ID for each of the DRBD devices encountered
+ in the disk tree.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.SetPhysicalID(target_node, nodes_ip)
+
+ if self.logical_id is None and self.physical_id is not None:
+ return
+ if self.dev_type in constants.LDS_DRBD:
+ pnode, snode, port, pminor, sminor, secret = self.logical_id
+ if target_node not in (pnode, snode):
+ raise errors.ConfigurationError("DRBD device not knowing node %s" %
+ target_node)
+ pnode_ip = nodes_ip.get(pnode, None)
+ snode_ip = nodes_ip.get(snode, None)
+ if pnode_ip is None or snode_ip is None:
+ raise errors.ConfigurationError("Can't find primary or secondary node"
+ " for %s" % str(self))
+ p_data = (pnode_ip, port)
+ s_data = (snode_ip, port)
+ if pnode == target_node:
+ self.physical_id = p_data + s_data + (pminor, secret)
+ else: # it must be secondary, we tested above
+ self.physical_id = s_data + p_data + (sminor, secret)
+ else:
+ self.physical_id = self.logical_id
+ return
+
def ToDict(self):
"""Disk-specific conversion to standard python types.
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
obj.physical_id = tuple(obj.physical_id)
+ if obj.dev_type in constants.LDS_DRBD:
+ # we need a tuple of length six here
+ if len(obj.logical_id) < 6:
+ obj.logical_id += (None,) * (6 - len(obj.logical_id))
return obj
def __str__(self):
if self.dev_type == constants.LD_LV:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
- if self.dev_type == constants.LD_DRBD7:
- val = "<DRBD7("
- else:
- val = "<DRBD8("
+ node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
+ val = "<DRBD8("
if self.physical_id is None:
phy = "unconfigured"
else:
(self.physical_id[0], self.physical_id[1],
self.physical_id[2], self.physical_id[3]))
- val += ("hosts=%s-%s, port=%s, %s, " %
- (self.logical_id[0], self.logical_id[1], self.logical_id[2],
- phy))
+ val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
+ (node_a, minor_a, node_b, minor_b, port, phy))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
- elif self.dev_type == constants.LD_MD_R1:
- val = "<MD_R1(uuid=%s, children=%s" % (self.physical_id, self.children)
else:
val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
(self.dev_type, self.logical_id, self.physical_id, self.children))
val += ", not visible"
else:
val += ", visible as /dev/%s" % self.iv_name
- val += ", size=%dm)>" % self.size
+ if isinstance(self.size, int):
+ val += ", size=%dm)>" % self.size
+ else:
+ val += ", size='%s')>" % (self.size,)
return val
+ def Verify(self):
+ """Checks that this disk is correctly configured.
+
+ """
+ all_errors = []
+ if self.mode not in constants.DISK_ACCESS_SET:
+ all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
+ return all_errors
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.UpgradeConfig()
+ # add here config upgrade for this disk
+
class Instance(TaggableObject):
"""Config object representing an instance."""
- __slots__ = TaggableObject.__slots__ + [
+ __slots__ = [
"name",
"primary_node",
"os",
- "status",
- "memory",
- "vcpus",
+ "hypervisor",
+ "hvparams",
+ "beparams",
+ "admin_up",
"nics",
"disks",
"disk_template",
- ]
+ "network_port",
+ "serial_no",
+ ] + _TIMESTAMPS + _UUID
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
+ This is a simple wrapper over _ComputeAllNodes.
+
+ """
+ all_nodes = set(self._ComputeAllNodes())
+ all_nodes.discard(self.primary_node)
+ return tuple(all_nodes)
+
+ secondary_nodes = property(_ComputeSecondaryNodes, None, None,
+ "List of secondary nodes")
+
+ def _ComputeAllNodes(self):
+ """Compute the list of all nodes.
+
Since the data is already there (in the drbd disks), keeping it as
a separate normal attribute is redundant and if not properly
synchronised can cause problems. Thus it's better to compute it
dynamically.
"""
- def _Helper(primary, sec_nodes, device):
- """Recursively computes secondary nodes given a top device."""
+ def _Helper(nodes, device):
+ """Recursively computes nodes given a top device."""
if device.dev_type in constants.LDS_DRBD:
- nodea, nodeb, dummy = device.logical_id
- if nodea == primary:
- candidate = nodeb
- else:
- candidate = nodea
- if candidate not in sec_nodes:
- sec_nodes.append(candidate)
+ nodea, nodeb = device.logical_id[:2]
+ nodes.add(nodea)
+ nodes.add(nodeb)
if device.children:
for child in device.children:
- _Helper(primary, sec_nodes, child)
+ _Helper(nodes, child)
- secondary_nodes = []
+ all_nodes = set()
+ all_nodes.add(self.primary_node)
for device in self.disks:
- _Helper(self.primary_node, secondary_nodes, device)
- return tuple(secondary_nodes)
+ _Helper(all_nodes, device)
+ return tuple(all_nodes)
- secondary_nodes = property(_ComputeSecondaryNodes, None, None,
- "List of secondary nodes")
+ all_nodes = property(_ComputeAllNodes, None, None,
+ "List of all nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
- This function figures out what logical volumes should belong on which
- nodes, recursing through a device tree.
+ This function figures out what logical volumes should belong on
+ which nodes, recursing through a device tree.
- Args:
- lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.
+ @param lvmap: optional dictionary to receive the
+ 'node' : ['lv', ...] data.
- Returns:
- None if lvmap arg is given.
- Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }
+ @return: None if lvmap arg is given, otherwise, a dictionary
+ of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
"""
if node == None:
lvmap[node].append(dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
- if dev.logical_id[0] not in lvmap:
- lvmap[dev.logical_id[0]] = []
-
- if dev.logical_id[1] not in lvmap:
- lvmap[dev.logical_id[1]] = []
-
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
return ret
- def FindDisk(self, name):
- """Find a disk given having a specified name.
+ def FindDisk(self, idx):
+ """Find a disk given having a specified index.
- This will return the disk which has the given iv_name.
+ This is just a wrapper that does validation of the index.
- """
- for disk in self.disks:
- if disk.iv_name == name:
- return disk
+ @type idx: int
+ @param idx: the disk index
+ @rtype: L{Disk}
+ @return: the corresponding disk
+ @raise errors.OpPrereqError: when the given index is not valid
- return None
+ """
+ try:
+ idx = int(idx)
+ return self.disks[idx]
+ except (TypeError, ValueError), err:
+ raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
+ errors.ECODE_INVAL)
+ except IndexError:
+ raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
+ " 0 to %d" % (idx, len(self.disks)),
+ errors.ECODE_INVAL)
def ToDict(self):
"""Instance-specific conversion to standard python types.
obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
return obj
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ for nic in self.nics:
+ nic.UpgradeConfig()
+ for disk in self.disks:
+ disk.UpgradeConfig()
+ if self.hvparams:
+ for key in constants.HVC_GLOBALS:
+ try:
+ del self.hvparams[key]
+ except KeyError:
+ pass
+
class OS(ConfigObject):
"""Config object representing an operating system."""
__slots__ = [
"name",
"path",
- "status",
- "api_version",
+ "api_versions",
"create_script",
"export_script",
"import_script",
"rename_script",
+ "supported_variants",
]
- @classmethod
- def FromInvalidOS(cls, err):
- """Create an OS from an InvalidOS error.
-
- This routine knows how to convert an InvalidOS error to an OS
- object representing the broken OS with a meaningful error message.
-
- """
- if not isinstance(err, errors.InvalidOS):
- raise errors.ProgrammerError("Trying to initialize an OS from an"
- " invalid object of type %s" % type(err))
-
- return cls(name=err.args[0], path=err.args[1], status=err.args[2])
-
- def __nonzero__(self):
- return self.status == constants.OS_VALID_STATUS
-
- __bool__ = __nonzero__
class Node(TaggableObject):
"""Config object representing a node."""
- __slots__ = TaggableObject.__slots__ + [
+ __slots__ = [
"name",
"primary_ip",
"secondary_ip",
- ]
+ "serial_no",
+ "master_candidate",
+ "offline",
+ "drained",
+ ] + _TIMESTAMPS + _UUID
class Cluster(TaggableObject):
"""Config object representing the cluster."""
- __slots__ = TaggableObject.__slots__ + [
- "config_version",
+ __slots__ = [
"serial_no",
"rsahostkeypub",
"highest_used_port",
"mac_prefix",
"volume_group_name",
"default_bridge",
- ]
+ "default_hypervisor",
+ "master_node",
+ "master_ip",
+ "master_netdev",
+ "cluster_name",
+ "file_storage_dir",
+ "enabled_hypervisors",
+ "hvparams",
+ "os_hvp",
+ "beparams",
+ "nicparams",
+ "candidate_pool_size",
+ "modify_etc_hosts",
+ "modify_ssh_setup",
+ ] + _TIMESTAMPS + _UUID
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ # pylint: disable-msg=E0203
+ # because these are "defined" via slots, not manually
+ if self.hvparams is None:
+ self.hvparams = constants.HVC_DEFAULTS
+ else:
+ for hypervisor in self.hvparams:
+ self.hvparams[hypervisor] = FillDict(
+ constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
+
+ # TODO: Figure out if it's better to put this into OS than Cluster
+ if self.os_hvp is None:
+ self.os_hvp = {}
+
+ self.beparams = UpgradeGroupedParams(self.beparams,
+ constants.BEC_DEFAULTS)
+ migrate_default_bridge = not self.nicparams
+ self.nicparams = UpgradeGroupedParams(self.nicparams,
+ constants.NICC_DEFAULTS)
+ if migrate_default_bridge:
+ self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
+ self.default_bridge
+
+ if self.modify_etc_hosts is None:
+ self.modify_etc_hosts = True
+
+ if self.modify_ssh_setup is None:
+ self.modify_ssh_setup = True
+
+    # default_bridge is no longer used in 2.1. The slot is left there to
+ # support auto-upgrading, but will be removed in 2.2
+ if self.default_bridge is not None:
+ self.default_bridge = None
+
+ # default_hypervisor is just the first enabled one in 2.1
+ if self.default_hypervisor is not None:
+ self.enabled_hypervisors = ([self.default_hypervisor] +
+ [hvname for hvname in self.enabled_hypervisors
+ if hvname != self.default_hypervisor])
+ self.default_hypervisor = None
def ToDict(self):
"""Custom function for cluster.
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
+ def FillHV(self, instance, skip_globals=False):
+ """Fill an instance's hvparams dict.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance parameter to fill
+ @type skip_globals: boolean
+ @param skip_globals: if True, the global hypervisor parameters will
+ not be filled
+ @rtype: dict
+ @return: a copy of the instance's hvparams with missing keys filled from
+ the cluster defaults
+
+ """
+ if skip_globals:
+ skip_keys = constants.HVC_GLOBALS
+ else:
+ skip_keys = []
+
+ # We fill the list from least to most important override
+ fill_stack = [
+ self.hvparams.get(instance.hypervisor, {}),
+ self.os_hvp.get(instance.os, {}).get(instance.hypervisor, {}),
+ instance.hvparams,
+ ]
+
+ ret_dict = {}
+ for o_dict in fill_stack:
+ ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
+
+ return ret_dict
+
+ def FillBE(self, instance):
+ """Fill an instance's beparams dict.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance parameter to fill
+ @rtype: dict
+ @return: a copy of the instance's beparams with missing keys filled from
+ the cluster defaults
+
+ """
+ return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
+ instance.beparams)
+
+
+class BlockDevStatus(ConfigObject):
+ """Config object representing the status of a block device."""
+ __slots__ = [
+ "dev_path",
+ "major",
+ "minor",
+ "sync_percent",
+ "estimated_time",
+ "is_degraded",
+ "ldisk_status",
+ ]
+
+
+class ConfdRequest(ConfigObject):
+ """Object holding a confd request.
+
+ @ivar protocol: confd protocol version
+ @ivar type: confd query type
+ @ivar query: query request
+ @ivar rsalt: requested reply salt
+
+ """
+ __slots__ = [
+ "protocol",
+ "type",
+ "query",
+ "rsalt",
+ ]
+
+
+class ConfdReply(ConfigObject):
+ """Object holding a confd reply.
+
+ @ivar protocol: confd protocol version
+ @ivar status: reply status code (ok, error)
+ @ivar answer: confd query reply
+ @ivar serial: configuration serial number
+
+ """
+ __slots__ = [
+ "protocol",
+ "status",
+ "answer",
+ "serial",
+ ]
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.