#
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""
+# pylint: disable=E0203,W0201,R0902
+
+# E0203: Access to member %r before its definition, since we use
+# objects.py which doesn't explicitly initialise its members
+
+# W0201: Attribute '%s' defined outside __init__
+
+# R0902: Allow instances of these objects to have more than 20 attributes
import ConfigParser
import re
import copy
+import time
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
+from ganeti import netutils
+from ganeti import utils
+
+from socket import AF_INET
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
- "OS", "Node", "Cluster"]
+ "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
+
+_TIMESTAMPS = ["ctime", "mtime"]
+_UUID = ["uuid"]
+
+# constants used to create InstancePolicy dictionary
+TISPECS_GROUP_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+ }
+
+TISPECS_CLUSTER_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+ constants.ISPECS_STD: constants.VTYPE_INT,
+ }
+
+
+def FillDict(defaults_dict, custom_dict, skip_keys=None):
+  """Basic function to apply settings on top of a default dict.
+
+ @type defaults_dict: dict
+ @param defaults_dict: dictionary holding the default values
+ @type custom_dict: dict
+ @param custom_dict: dictionary holding customized value
+ @type skip_keys: list
+ @param skip_keys: which keys not to fill
+ @rtype: dict
+ @return: dict with the 'full' values
+
+ """
+ ret_dict = copy.deepcopy(defaults_dict)
+ ret_dict.update(custom_dict)
+ if skip_keys:
+ for k in skip_keys:
+ try:
+ del ret_dict[k]
+ except KeyError:
+ pass
+ return ret_dict
+
+
+def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
+ """Fills an instance policy with defaults.
+
+ """
+ assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
+ ret_dict = {}
+ for key in constants.IPOLICY_ISPECS:
+ ret_dict[key] = FillDict(default_ipolicy[key],
+ custom_ipolicy.get(key, {}),
+ skip_keys=skip_keys)
+ # list items
+ for key in [constants.IPOLICY_DTS]:
+ ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
+ # other items which we know we can directly copy (immutables)
+ for key in constants.IPOLICY_PARAMETERS:
+ ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
+
+ return ret_dict
+
+
+def UpgradeGroupedParams(target, defaults):
+ """Update all groups for the target parameter.
+
+ @type target: dict of dicts
+ @param target: {group: {parameter: value}}
+ @type defaults: dict
+ @param defaults: default parameter values
+
+ """
+ if target is None:
+ target = {constants.PP_DEFAULT: defaults}
+ else:
+ for group in target:
+ target[group] = FillDict(defaults, target[group])
+ return target
+
+
+def UpgradeBeParams(target):
+ """Update the be parameters dict to the new format.
+
+ @type target: dict
+ @param target: "be" parameters dict
+
+ """
+ if constants.BE_MEMORY in target:
+ memory = target[constants.BE_MEMORY]
+ target[constants.BE_MAXMEM] = memory
+ target[constants.BE_MINMEM] = memory
+ del target[constants.BE_MEMORY]
+
+
+def UpgradeDiskParams(diskparams):
+ """Upgrade the disk parameters.
+
+ @type diskparams: dict
+ @param diskparams: disk parameters to upgrade
+ @rtype: dict
+  @return: the upgraded disk parameters dict
+
+ """
+ result = dict()
+ if diskparams is None:
+ result = constants.DISK_DT_DEFAULTS.copy()
+ else:
+ # Update the disk parameter values for each disk template.
+ # The code iterates over constants.DISK_TEMPLATES because new templates
+ # might have been added.
+ for template in constants.DISK_TEMPLATES:
+ if template not in diskparams:
+ result[template] = constants.DISK_DT_DEFAULTS[template].copy()
+ else:
+ result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
+ diskparams[template])
+
+ return result
+
+
+def MakeEmptyIPolicy():
+ """Create empty IPolicy dictionary.
+
+ """
+ return dict([
+ (constants.ISPECS_MIN, {}),
+ (constants.ISPECS_MAX, {}),
+ (constants.ISPECS_STD, {}),
+ ])
+
+
+def CreateIPolicyFromOpts(ispecs_mem_size=None,
+ ispecs_cpu_count=None,
+ ispecs_disk_count=None,
+ ispecs_disk_size=None,
+ ispecs_nic_count=None,
+ ipolicy_disk_templates=None,
+ ipolicy_vcpu_ratio=None,
+ group_ipolicy=False,
+ allowed_values=None,
+ fill_all=False):
+ """Creation of instance policy based on command line options.
+
+ @param fill_all: whether for cluster policies we should ensure that
+ all values are filled
+
+
+ """
+ # prepare ipolicy dict
+ ipolicy_transposed = {
+ constants.ISPEC_MEM_SIZE: ispecs_mem_size,
+ constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
+ constants.ISPEC_DISK_COUNT: ispecs_disk_count,
+ constants.ISPEC_DISK_SIZE: ispecs_disk_size,
+ constants.ISPEC_NIC_COUNT: ispecs_nic_count,
+ }
+
+ # first, check that the values given are correct
+ if group_ipolicy:
+ forced_type = TISPECS_GROUP_TYPES
+ else:
+ forced_type = TISPECS_CLUSTER_TYPES
+
+ for specs in ipolicy_transposed.values():
+ utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
+
+ # then transpose
+ ipolicy_out = MakeEmptyIPolicy()
+ for name, specs in ipolicy_transposed.iteritems():
+ assert name in constants.ISPECS_PARAMETERS
+ for key, val in specs.items(): # {min: .. ,max: .., std: ..}
+ ipolicy_out[key][name] = val
+
+ # no filldict for non-dicts
+ if not group_ipolicy and fill_all:
+ if ipolicy_disk_templates is None:
+ ipolicy_disk_templates = constants.DISK_TEMPLATES
+ if ipolicy_vcpu_ratio is None:
+ ipolicy_vcpu_ratio = \
+ constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
+ if ipolicy_disk_templates is not None:
+ ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
+ if ipolicy_vcpu_ratio is not None:
+ ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
+
+ assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
+
+ return ipolicy_out
class ConfigObject(object):
setattr(self, k, v)
def __getattr__(self, name):
- if name not in self.__slots__:
+ if name not in self._all_slots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
- def __setitem__(self, key, value):
- if key not in self.__slots__:
- raise KeyError(key)
- setattr(self, key, value)
-
- def __getstate__(self):
- state = {}
- for name in self.__slots__:
- if hasattr(self, name):
- state[name] = getattr(self, name)
- return state
-
def __setstate__(self, state):
+ slots = self._all_slots()
for name in state:
- if name in self.__slots__:
+ if name in slots:
setattr(self, name, state[name])
+ @classmethod
+ def _all_slots(cls):
+ """Compute the list of all declared slots for a class.
+
+ """
+ slots = []
+ for parent in cls.__mro__:
+ slots.extend(getattr(parent, "__slots__", []))
+ return slots
+
def ToDict(self):
"""Convert to a dict holding only standard python types.
make sure all objects returned are only standard python types.
"""
- return dict([(k, getattr(self, k, None)) for k in self.__slots__])
+ result = {}
+ for name in self._all_slots():
+ value = getattr(self, name, None)
+ if value is not None:
+ result[name] = value
+ return result
+
+ __getstate__ = ToDict
@classmethod
def FromDict(cls, val):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
val_str = dict([(str(k), v) for k, v in val.iteritems()])
- obj = cls(**val_str)
+ obj = cls(**val_str) # pylint: disable=W0142
return obj
@staticmethod
if not isinstance(c_type, type):
raise TypeError("Container type %s passed to _ContainerFromDicts is"
" not a type" % type(c_type))
+ if source is None:
+ source = c_type()
if c_type is dict:
ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
elif c_type in (list, tuple, set, frozenset):
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ This method will be called at configuration load time, and its
+ implementation will be object dependent.
+
+ """
+ pass
+
class TaggableObject(ConfigObject):
"""An generic class supporting tags.
"""
- __slots__ = ConfigObject.__slots__ + ["tags"]
+ __slots__ = ["tags"]
+ VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
- @staticmethod
- def ValidateTag(tag):
+ @classmethod
+ def ValidateTag(cls, tag):
"""Check if a tag is valid.
If the tag is invalid, an errors.TagError will be raised. The
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
- if not re.match("^[\w.+*/:-]+$", tag):
+ if not cls.VALID_TAG_RE.match(tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
return obj
+class MasterNetworkParameters(ConfigObject):
+ """Network configuration parameters for the master
+
+ @ivar name: master name
+ @ivar ip: master IP
+ @ivar netmask: master netmask
+ @ivar netdev: master network device
+ @ivar ip_family: master IP family
+
+ """
+ __slots__ = [
+ "name",
+ "ip",
+ "netmask",
+ "netdev",
+ "ip_family"
+ ]
+
+
class ConfigData(ConfigObject):
"""Top-level config object."""
- __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]
+ __slots__ = [
+ "version",
+ "cluster",
+ "nodes",
+ "nodegroups",
+ "instances",
+ "serial_no",
+ ] + _TIMESTAMPS
def ToDict(self):
"""Custom function for top-level config data.
"""
mydict = super(ConfigData, self).ToDict()
mydict["cluster"] = mydict["cluster"].ToDict()
- for key in "nodes", "instances":
+ for key in "nodes", "instances", "nodegroups":
mydict[key] = self._ContainerToDicts(mydict[key])
return mydict
obj.cluster = Cluster.FromDict(obj.cluster)
obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
+ obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
return obj
+ def HasAnyDiskOfType(self, dev_type):
+    """Check if there is at least one disk of the given type in the config.
+
+ @type dev_type: L{constants.LDS_BLOCK}
+ @param dev_type: the type to look for
+ @rtype: boolean
+ @return: boolean indicating if a disk of the given type was found or not
+
+ """
+ for instance in self.instances.values():
+ for disk in instance.disks:
+ if disk.IsBasedOnDiskType(dev_type):
+ return True
+ return False
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ self.cluster.UpgradeConfig()
+ for node in self.nodes.values():
+ node.UpgradeConfig()
+ for instance in self.instances.values():
+ instance.UpgradeConfig()
+ if self.nodegroups is None:
+ self.nodegroups = {}
+ for nodegroup in self.nodegroups.values():
+ nodegroup.UpgradeConfig()
+ if self.cluster.drbd_usermode_helper is None:
+      # To decide if we set a helper let's check if at least one instance has
+ # a DRBD disk. This does not cover all the possible scenarios but it
+ # gives a good approximation.
+ if self.HasAnyDiskOfType(constants.LD_DRBD8):
+ self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
+
class NIC(ConfigObject):
"""Config object representing a network card."""
- __slots__ = ["mac", "ip", "bridge"]
+ __slots__ = ["mac", "ip", "nicparams"]
+
+ @classmethod
+ def CheckParameterSyntax(cls, nicparams):
+ """Check the given parameters for validity.
+
+ @type nicparams: dict
+ @param nicparams: dictionary with parameter names/value
+ @raise errors.ConfigurationError: when a parameter is not valid
+
+ """
+ if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
+ nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
+ err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
+ raise errors.ConfigurationError(err)
+
+ if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+ not nicparams[constants.NIC_LINK]):
+ err = "Missing bridged nic link"
+ raise errors.ConfigurationError(err)
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size", "mode"]
+ "children", "iv_name", "size", "mode", "params"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
irrespective of their status. For such devices, we return this
path, for others we return None.
+ @warning: The path returned is not a normalized pathname; callers
+ should check that it is a valid path.
+
"""
if self.dev_type == constants.LD_LV:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
+ elif self.dev_type == constants.LD_BLOCKDEV:
+ return self.logical_id[1]
+ elif self.dev_type == constants.LD_RBD:
+ return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
def ChildrenNeeded(self):
return 0
return -1
+ def IsBasedOnDiskType(self, dev_type):
+ """Check if the disk or its children are based on the given type.
+
+ @type dev_type: L{constants.LDS_BLOCK}
+ @param dev_type: the type to look for
+ @rtype: boolean
+ @return: boolean indicating if a device of the given type was found or not
+
+ """
+ if self.children:
+ for child in self.children:
+ if child.IsBasedOnDiskType(dev_type):
+ return True
+ return self.dev_type == dev_type
+
def GetNodes(self, node):
"""This function returns the nodes this device lives on.
devices needs to (or can) be assembled.
"""
- if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
+ if self.dev_type in [constants.LD_LV, constants.LD_FILE,
+ constants.LD_BLOCKDEV, constants.LD_RBD]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
# be different)
return result
+ def ComputeGrowth(self, amount):
+ """Compute the per-VG growth requirements.
+
+ This only works for VG-based disks.
+
+ @type amount: integer
+ @param amount: the desired increase in (user-visible) disk space
+ @rtype: dict
+ @return: a dictionary of volume-groups and the required size
+
+ """
+ if self.dev_type == constants.LD_LV:
+ return {self.logical_id[0]: amount}
+ elif self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ return self.children[0].ComputeGrowth(amount)
+ else:
+ return {}
+ else:
+ # Other disk types do not require VG space
+ return {}
+
def RecordGrow(self, amount):
"""Update the size of this disk after growth.
actual algorithms from bdev.
"""
- if self.dev_type == constants.LD_LV:
+ if self.dev_type in (constants.LD_LV, constants.LD_FILE,
+ constants.LD_RBD):
self.size += amount
elif self.dev_type == constants.LD_DRBD8:
if self.children:
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
+ def Update(self, size=None, mode=None):
+ """Apply changes to size and mode.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].Update(size=size, mode=mode)
+ else:
+ assert not self.children
+
+ if size is not None:
+ self.size = size
+ if mode is not None:
+ self.mode = mode
+
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
"""
if self.dev_type == constants.LD_LV:
- val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
+ val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
return all_errors
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.UpgradeConfig()
+
+ if not self.params:
+ self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
+ else:
+ self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
+ self.params)
+ # add here config upgrade for this disk
+
+
+class InstancePolicy(ConfigObject):
+ """Config object representing instance policy limits dictionary.
+
+
+ Note that this object is not actually used in the config, it's just
+ used as a placeholder for a few functions.
+
+ """
+ @classmethod
+ def CheckParameterSyntax(cls, ipolicy):
+    """Check the instance policy for validity.
+
+ """
+ for param in constants.ISPECS_PARAMETERS:
+ InstancePolicy.CheckISpecSyntax(ipolicy, param)
+ if constants.IPOLICY_DTS in ipolicy:
+ InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
+ for key in constants.IPOLICY_PARAMETERS:
+ if key in ipolicy:
+ InstancePolicy.CheckParameter(key, ipolicy[key])
+ wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
+ if wrong_keys:
+ raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
+ utils.CommaJoin(wrong_keys))
+
+ @classmethod
+ def CheckISpecSyntax(cls, ipolicy, name):
+ """Check the instance policy for validity on a given key.
+
+ We check if the instance policy makes sense for a given key, that is
+ if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
+
+ @type ipolicy: dict
+ @param ipolicy: dictionary with min, max, std specs
+ @type name: string
+ @param name: what are the limits for
+    @raise errors.ConfigurationError: when specs for given name are not valid
+
+ """
+ min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
+ std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
+ max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
+ err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
+ (name,
+ ipolicy[constants.ISPECS_MIN].get(name, "-"),
+ ipolicy[constants.ISPECS_MAX].get(name, "-"),
+ ipolicy[constants.ISPECS_STD].get(name, "-")))
+ if min_v > std_v or std_v > max_v:
+ raise errors.ConfigurationError(err)
+
+ @classmethod
+ def CheckDiskTemplates(cls, disk_templates):
+ """Checks the disk templates for validity.
+
+ """
+ wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
+ if wrong:
+ raise errors.ConfigurationError("Invalid disk template(s) %s" %
+ utils.CommaJoin(wrong))
+
+ @classmethod
+ def CheckParameter(cls, key, value):
+ """Checks a parameter.
+
+ Currently we expect all parameters to be float values.
+
+ """
+ try:
+ float(value)
+ except (TypeError, ValueError), err:
+ raise errors.ConfigurationError("Invalid value for key" " '%s':"
+ " '%s', error: %s" % (key, value, err))
+
class Instance(TaggableObject):
"""Config object representing an instance."""
- __slots__ = TaggableObject.__slots__ + [
+ __slots__ = [
"name",
"primary_node",
"os",
"hypervisor",
"hvparams",
"beparams",
- "admin_up",
+ "osparams",
+ "admin_state",
"nics",
"disks",
"disk_template",
"network_port",
"serial_no",
- ]
+ ] + _TIMESTAMPS + _UUID
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
@param lvmap: optional dictionary to receive the
'node' : ['lv', ...] data.
- @return: None if lvmap arg is given, otherwise, a dictionary
- of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
+ @return: None if lvmap arg is given, otherwise, a dictionary of
+ the form { 'nodename' : ['volume1', 'volume2', ...], ... };
+ volumeN is of the form "vg_name/lv_name", compatible with
+ GetVolumeList()
"""
if node == None:
node = self.primary_node
if lvmap is None:
- lvmap = { node : [] }
+ lvmap = {
+ node: [],
+ }
ret = lvmap
else:
if not node in lvmap:
for dev in devs:
if dev.dev_type == constants.LD_LV:
- lvmap[node].append(dev.logical_id[1])
+ lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
if dev.children:
try:
idx = int(idx)
return self.disks[idx]
- except ValueError, err:
- raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
+ except (TypeError, ValueError), err:
+ raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
+ errors.ECODE_INVAL)
except IndexError:
raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
- " 0 to %d" % (idx, len(self.disks)))
+ " 0 to %d" % (idx, len(self.disks) - 1),
+ errors.ECODE_INVAL)
def ToDict(self):
"""Instance-specific conversion to standard python types.
"""Custom function for instances.
"""
+ if "admin_state" not in val:
+ if val.get("admin_up", False):
+ val["admin_state"] = constants.ADMINST_UP
+ else:
+ val["admin_state"] = constants.ADMINST_DOWN
+ if "admin_up" in val:
+ del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
return obj
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ for nic in self.nics:
+ nic.UpgradeConfig()
+ for disk in self.disks:
+ disk.UpgradeConfig()
+ if self.hvparams:
+ for key in constants.HVC_GLOBALS:
+ try:
+ del self.hvparams[key]
+ except KeyError:
+ pass
+ if self.osparams is None:
+ self.osparams = {}
+ UpgradeBeParams(self.beparams)
+
class OS(ConfigObject):
- """Config object representing an operating system."""
+ """Config object representing an operating system.
+
+ @type supported_parameters: list
+ @ivar supported_parameters: a list of tuples, name and description,
+ containing the supported parameters by this OS
+
+ @type VARIANT_DELIM: string
+ @cvar VARIANT_DELIM: the variant delimiter
+
+ """
__slots__ = [
"name",
"path",
- "status",
"api_versions",
"create_script",
"export_script",
"import_script",
"rename_script",
+ "verify_script",
+ "supported_variants",
+ "supported_parameters",
]
+ VARIANT_DELIM = "+"
+
+ @classmethod
+ def SplitNameVariant(cls, name):
+ """Splits the name into the proper name and variant.
+
+ @param name: the OS (unprocessed) name
+ @rtype: list
+ @return: a list of two elements; if the original name didn't
+ contain a variant, it's returned as an empty string
+
+ """
+ nv = name.split(cls.VARIANT_DELIM, 1)
+ if len(nv) == 1:
+ nv.append("")
+ return nv
+
@classmethod
- def FromInvalidOS(cls, err):
- """Create an OS from an InvalidOS error.
+ def GetName(cls, name):
+ """Returns the proper name of the os (without the variant).
- This routine knows how to convert an InvalidOS error to an OS
- object representing the broken OS with a meaningful error message.
+ @param name: the OS (unprocessed) name
"""
- if not isinstance(err, errors.InvalidOS):
- raise errors.ProgrammerError("Trying to initialize an OS from an"
- " invalid object of type %s" % type(err))
+ return cls.SplitNameVariant(name)[0]
- return cls(name=err.args[0], path=err.args[1], status=err.args[2])
+ @classmethod
+ def GetVariant(cls, name):
+    """Returns the variant of the os (without the base name).
- def __nonzero__(self):
- return self.status == constants.OS_VALID_STATUS
+ @param name: the OS (unprocessed) name
- __bool__ = __nonzero__
+ """
+ return cls.SplitNameVariant(name)[1]
+
+
+class NodeHvState(ConfigObject):
+  """Hypervisor state on a node.
+
+ @ivar mem_total: Total amount of memory
+ @ivar mem_node: Memory used by, or reserved for, the node itself (not always
+ available)
+ @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
+ rounding
+ @ivar mem_inst: Memory used by instances living on node
+ @ivar cpu_total: Total node CPU core count
+ @ivar cpu_node: Number of CPU cores reserved for the node itself
+
+ """
+ __slots__ = [
+ "mem_total",
+ "mem_node",
+ "mem_hv",
+ "mem_inst",
+ "cpu_total",
+ "cpu_node",
+ ] + _TIMESTAMPS
+
+
+class NodeDiskState(ConfigObject):
+ """Disk state on a node.
+
+ """
+ __slots__ = [
+ "total",
+ "reserved",
+ "overhead",
+ ] + _TIMESTAMPS
class Node(TaggableObject):
- """Config object representing a node."""
- __slots__ = TaggableObject.__slots__ + [
+ """Config object representing a node.
+
+ @ivar hv_state: Hypervisor state (e.g. number of CPUs)
+  @ivar hv_state_static: Hypervisor state overridden by user
+ @ivar disk_state: Disk state (e.g. free space)
+  @ivar disk_state_static: Disk state overridden by user
+
+ """
+ __slots__ = [
"name",
"primary_ip",
"secondary_ip",
"master_candidate",
"offline",
"drained",
- ]
+ "group",
+ "master_capable",
+ "vm_capable",
+ "ndparams",
+ "powered",
+ "hv_state",
+ "hv_state_static",
+ "disk_state",
+ "disk_state_static",
+ ] + _TIMESTAMPS + _UUID
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ # pylint: disable=E0203
+ # because these are "defined" via slots, not manually
+ if self.master_capable is None:
+ self.master_capable = True
+
+ if self.vm_capable is None:
+ self.vm_capable = True
+
+ if self.ndparams is None:
+ self.ndparams = {}
+
+ if self.powered is None:
+ self.powered = True
+
+ def ToDict(self):
+ """Custom function for serializing.
+
+ """
+ data = super(Node, self).ToDict()
+
+ hv_state = data.get("hv_state", None)
+ if hv_state is not None:
+ data["hv_state"] = self._ContainerToDicts(hv_state)
+
+ disk_state = data.get("disk_state", None)
+ if disk_state is not None:
+ data["disk_state"] = \
+ dict((key, self._ContainerToDicts(value))
+ for (key, value) in disk_state.items())
+
+ return data
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for deserializing.
+
+ """
+ obj = super(Node, cls).FromDict(val)
+
+ if obj.hv_state is not None:
+ obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
+
+ if obj.disk_state is not None:
+ obj.disk_state = \
+ dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
+ for (key, value) in obj.disk_state.items())
+
+ return obj
+
+
+class NodeGroup(TaggableObject):
+ """Config object representing a node group."""
+ __slots__ = [
+ "name",
+ "members",
+ "ndparams",
+ "diskparams",
+ "ipolicy",
+ "serial_no",
+ "hv_state_static",
+ "disk_state_static",
+ "alloc_policy",
+ ] + _TIMESTAMPS + _UUID
+
+ def ToDict(self):
+ """Custom function for nodegroup.
+
+ This discards the members object, which gets recalculated and is only kept
+ in memory.
+
+ """
+ mydict = super(NodeGroup, self).ToDict()
+ del mydict["members"]
+ return mydict
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for nodegroup.
+
+ The members slot is initialized to an empty list, upon deserialization.
+
+ """
+ obj = super(NodeGroup, cls).FromDict(val)
+ obj.members = []
+ return obj
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ if self.ndparams is None:
+ self.ndparams = {}
+
+ if self.serial_no is None:
+ self.serial_no = 1
+
+ if self.alloc_policy is None:
+ self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
+
+ # We only update mtime, and not ctime, since we would not be able
+ # to provide a correct value for creation time.
+ if self.mtime is None:
+ self.mtime = time.time()
+
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.ipolicy is None:
+ self.ipolicy = MakeEmptyIPolicy()
+
+ def FillND(self, node):
+ """Return filled out ndparams for L{objects.Node}
+
+ @type node: L{objects.Node}
+ @param node: A Node object to fill
+    @return: a copy of the node's ndparams with defaults filled
+
+ """
+ return self.SimpleFillND(node.ndparams)
+
+ def SimpleFillND(self, ndparams):
+ """Fill a given ndparams dict with defaults.
+
+ @type ndparams: dict
+ @param ndparams: the dict to fill
+ @rtype: dict
+ @return: a copy of the passed in ndparams with missing keys filled
+ from the node group defaults
+
+ """
+ return FillDict(self.ndparams, ndparams)
class Cluster(TaggableObject):
"""Config object representing the cluster."""
- __slots__ = TaggableObject.__slots__ + [
+ __slots__ = [
"serial_no",
"rsahostkeypub",
"highest_used_port",
"tcpudp_port_pool",
"mac_prefix",
"volume_group_name",
+ "reserved_lvs",
+ "drbd_usermode_helper",
"default_bridge",
"default_hypervisor",
"master_node",
"master_ip",
"master_netdev",
+ "master_netmask",
+ "use_external_mip_script",
"cluster_name",
"file_storage_dir",
+ "shared_file_storage_dir",
"enabled_hypervisors",
"hvparams",
+ "ipolicy",
+ "os_hvp",
"beparams",
+ "osparams",
+ "nicparams",
+ "ndparams",
+ "diskparams",
"candidate_pool_size",
- ]
+ "modify_etc_hosts",
+ "modify_ssh_setup",
+ "maintain_node_health",
+ "uid_pool",
+ "default_iallocator",
+ "hidden_os",
+ "blacklisted_os",
+ "primary_ip_family",
+ "prealloc_wipe_disks",
+ "hv_state_static",
+ "disk_state_static",
+ ] + _TIMESTAMPS + _UUID
+
+ def UpgradeConfig(self):
+ """Fill defaults for missing configuration values.
+
+ """
+ # pylint: disable=E0203
+ # because these are "defined" via slots, not manually
+ if self.hvparams is None:
+ self.hvparams = constants.HVC_DEFAULTS
+ else:
+ for hypervisor in self.hvparams:
+ self.hvparams[hypervisor] = FillDict(
+ constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
+
+ if self.os_hvp is None:
+ self.os_hvp = {}
+
+ # osparams added before 2.2
+ if self.osparams is None:
+ self.osparams = {}
+
+ if self.ndparams is None:
+ self.ndparams = constants.NDC_DEFAULTS
+
+ self.beparams = UpgradeGroupedParams(self.beparams,
+ constants.BEC_DEFAULTS)
+ for beparams_group in self.beparams:
+ UpgradeBeParams(self.beparams[beparams_group])
+
+ migrate_default_bridge = not self.nicparams
+ self.nicparams = UpgradeGroupedParams(self.nicparams,
+ constants.NICC_DEFAULTS)
+ if migrate_default_bridge:
+ self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
+ self.default_bridge
+
+ if self.modify_etc_hosts is None:
+ self.modify_etc_hosts = True
+
+ if self.modify_ssh_setup is None:
+ self.modify_ssh_setup = True
+
+ # default_bridge is no longer used in 2.1. The slot is left there to
+ # support auto-upgrading. It can be removed once we decide to deprecate
+ # upgrading straight from 2.0.
+ if self.default_bridge is not None:
+ self.default_bridge = None
+
+ # default_hypervisor is just the first enabled one in 2.1. This slot and
+ # code can be removed once upgrading straight from 2.0 is deprecated.
+ if self.default_hypervisor is not None:
+ self.enabled_hypervisors = ([self.default_hypervisor] +
+ [hvname for hvname in self.enabled_hypervisors
+ if hvname != self.default_hypervisor])
+ self.default_hypervisor = None
+
+ # maintain_node_health added after 2.1.1
+ if self.maintain_node_health is None:
+ self.maintain_node_health = False
+
+ if self.uid_pool is None:
+ self.uid_pool = []
+
+ if self.default_iallocator is None:
+ self.default_iallocator = ""
+
+ # reserved_lvs added before 2.2
+ if self.reserved_lvs is None:
+ self.reserved_lvs = []
+
+ # hidden and blacklisted operating systems added before 2.2.1
+ if self.hidden_os is None:
+ self.hidden_os = []
+
+ if self.blacklisted_os is None:
+ self.blacklisted_os = []
+
+ # primary_ip_family added before 2.3
+ if self.primary_ip_family is None:
+ self.primary_ip_family = AF_INET
+
+ if self.master_netmask is None:
+ ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
+ self.master_netmask = ipcls.iplen
+
+ if self.prealloc_wipe_disks is None:
+ self.prealloc_wipe_disks = False
+
+ # shared_file_storage_dir added before 2.5
+ if self.shared_file_storage_dir is None:
+ self.shared_file_storage_dir = ""
+
+ if self.use_external_mip_script is None:
+ self.use_external_mip_script = False
+
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+
+ # instance policy added before 2.6
+ if self.ipolicy is None:
+ self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
+ else:
+ # we can either make sure to upgrade the ipolicy always, or only
+ # do it in some corner cases (e.g. missing keys); note that this
+ # will break any removal of keys from the ipolicy dict
+ self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
+
+ @property
+ def primary_hypervisor(self):
+ """The first hypervisor is the primary.
+
+ Useful, for example, for L{Node}'s hv/disk state.
+
+ """
+ return self.enabled_hypervisors[0]
def ToDict(self):
"""Custom function for cluster.
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
- @staticmethod
- def FillDict(defaults_dict, custom_dict):
- """Basic function to apply settings on top a default dict.
+ def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
+ """Get the default hypervisor parameters for the cluster.
- @type defaults_dict: dict
- @param defaults_dict: dictionary holding the default values
- @type custom_dict: dict
- @param custom_dict: dictionary holding customized value
- @rtype: dict
- @return: dict with the 'full' values
+ @param hypervisor: the hypervisor name
+ @param os_name: if specified, we'll also update the defaults for this OS
+ @param skip_keys: if passed, list of keys not to use
+ @return: the defaults dict
"""
- ret_dict = copy.deepcopy(defaults_dict)
- ret_dict.update(custom_dict)
+ if skip_keys is None:
+ skip_keys = []
+
+ fill_stack = [self.hvparams.get(hypervisor, {})]
+ if os_name is not None:
+ os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
+ fill_stack.append(os_hvp)
+
+ ret_dict = {}
+ for o_dict in fill_stack:
+ ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
+
return ret_dict
- def FillHV(self, instance):
- """Fill an instance's hvparams dict.
+ def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
+ """Fill a given hvparams dict with cluster defaults.
+
+ @type hv_name: string
+ @param hv_name: the hypervisor to use
+ @type os_name: string
+ @param os_name: the OS to use for overriding the hypervisor defaults
+ @type skip_globals: boolean
+ @param skip_globals: if True, the global hypervisor parameters will
+ not be filled
+ @rtype: dict
+ @return: a copy of the given hvparams with missing keys filled from
+ the cluster defaults
+
+ """
+ if skip_globals:
+ skip_keys = constants.HVC_GLOBALS
+ else:
+ skip_keys = []
+
+ def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
+ return FillDict(def_dict, hvparams, skip_keys=skip_keys)
+
+ def FillHV(self, instance, skip_globals=False):
+ """Fill an instance's hvparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
+ @type skip_globals: boolean
+ @param skip_globals: if True, the global hypervisor parameters will
+ not be filled
@rtype: dict
@return: a copy of the instance's hvparams with missing keys filled from
the cluster defaults
"""
- return self.FillDict(self.hvparams.get(instance.hypervisor, {}),
- instance.hvparams)
+ return self.SimpleFillHV(instance.hypervisor, instance.os,
+ instance.hvparams, skip_globals)
+
+ def SimpleFillBE(self, beparams):
+ """Fill a given beparams dict with cluster defaults.
+
+ @type beparams: dict
+ @param beparams: the dict to fill
+ @rtype: dict
+ @return: a copy of the passed in beparams with missing keys filled
+ from the cluster defaults
+
+ """
+ return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
def FillBE(self, instance):
- """Fill an instance's beparams dict.
+ """Fill an instance's beparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
the cluster defaults
"""
- return self.FillDict(self.beparams.get(constants.BEGR_DEFAULT, {}),
- instance.beparams)
+ return self.SimpleFillBE(instance.beparams)
+
+ def SimpleFillNIC(self, nicparams):
+ """Fill a given nicparams dict with cluster defaults.
+
+ @type nicparams: dict
+ @param nicparams: the dict to fill
+ @rtype: dict
+ @return: a copy of the passed in nicparams with missing keys filled
+ from the cluster defaults
+
+ """
+ return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
+
+ def SimpleFillOS(self, os_name, os_params):
+ """Fill an instance's osparams dict with cluster defaults.
+
+ @type os_name: string
+ @param os_name: the OS name to use
+ @type os_params: dict
+ @param os_params: the dict to fill with default values
+ @rtype: dict
+    @return: a copy of the given osparams with missing keys filled from
+ the cluster defaults
+
+ """
+ name_only = os_name.split("+", 1)[0]
+ # base OS
+ result = self.osparams.get(name_only, {})
+ # OS with variant
+ result = FillDict(result, self.osparams.get(os_name, {}))
+ # specified params
+ return FillDict(result, os_params)
+
+ @staticmethod
+ def SimpleFillHvState(hv_state):
+ """Fill an hv_state sub dict with cluster defaults.
+
+ """
+ return FillDict(constants.HVST_DEFAULTS, hv_state)
+
+ @staticmethod
+ def SimpleFillDiskState(disk_state):
+ """Fill an disk_state sub dict with cluster defaults.
+
+ """
+ return FillDict(constants.DS_DEFAULTS, disk_state)
+
+ def FillND(self, node, nodegroup):
+ """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
+
+ @type node: L{objects.Node}
+ @param node: A Node object to fill
+ @type nodegroup: L{objects.NodeGroup}
+    @param nodegroup: A NodeGroup object to fill
+    @return: a copy of the node's ndparams with defaults filled
+
+ """
+ return self.SimpleFillND(nodegroup.FillND(node))
+
+ def SimpleFillND(self, ndparams):
+ """Fill a given ndparams dict with defaults.
+
+ @type ndparams: dict
+ @param ndparams: the dict to fill
+ @rtype: dict
+ @return: a copy of the passed in ndparams with missing keys filled
+ from the cluster defaults
+
+ """
+ return FillDict(self.ndparams, ndparams)
+
+ def SimpleFillIPolicy(self, ipolicy):
+ """ Fill instance policy dict with defaults.
+
+ @type ipolicy: dict
+ @param ipolicy: the dict to fill
+ @rtype: dict
+ @return: a copy of passed ipolicy with missing keys filled from
+ the cluster defaults
+
+ """
+ return FillIPolicy(self.ipolicy, ipolicy)
+
+
+class BlockDevStatus(ConfigObject):
+ """Config object representing the status of a block device."""
+ __slots__ = [
+ "dev_path",
+ "major",
+ "minor",
+ "sync_percent",
+ "estimated_time",
+ "is_degraded",
+ "ldisk_status",
+ ]
+
+
+class ImportExportStatus(ConfigObject):
+ """Config object representing the status of an import or export."""
+ __slots__ = [
+ "recent_output",
+ "listen_port",
+ "connected",
+ "progress_mbytes",
+ "progress_throughput",
+ "progress_eta",
+ "progress_percent",
+ "exit_status",
+ "error_message",
+ ] + _TIMESTAMPS
+
+
+class ImportExportOptions(ConfigObject):
+ """Options for import/export daemon
+
+ @ivar key_name: X509 key name (None for cluster certificate)
+ @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
+ @ivar compress: Compression method (one of L{constants.IEC_ALL})
+ @ivar magic: Used to ensure the connection goes to the right disk
+ @ivar ipv6: Whether to use IPv6
+ @ivar connect_timeout: Number of seconds for establishing connection
+
+ """
+ __slots__ = [
+ "key_name",
+ "ca_pem",
+ "compress",
+ "magic",
+ "ipv6",
+ "connect_timeout",
+ ]
+
+
+class ConfdRequest(ConfigObject):
+ """Object holding a confd request.
+
+ @ivar protocol: confd protocol version
+ @ivar type: confd query type
+ @ivar query: query request
+ @ivar rsalt: requested reply salt
+
+ """
+ __slots__ = [
+ "protocol",
+ "type",
+ "query",
+ "rsalt",
+ ]
+
+
+class ConfdReply(ConfigObject):
+ """Object holding a confd reply.
+
+ @ivar protocol: confd protocol version
+ @ivar status: reply status code (ok, error)
+ @ivar answer: confd query reply
+ @ivar serial: configuration serial number
+
+ """
+ __slots__ = [
+ "protocol",
+ "status",
+ "answer",
+ "serial",
+ ]
+
+
+class QueryFieldDefinition(ConfigObject):
+ """Object holding a query field definition.
+
+ @ivar name: Field name
+ @ivar title: Human-readable title
+ @ivar kind: Field type
+ @ivar doc: Human-readable description
+
+ """
+ __slots__ = [
+ "name",
+ "title",
+ "kind",
+ "doc",
+ ]
+
+
+class _QueryResponseBase(ConfigObject):
+ __slots__ = [
+ "fields",
+ ]
+
+ def ToDict(self):
+ """Custom function for serializing.
+
+ """
+ mydict = super(_QueryResponseBase, self).ToDict()
+ mydict["fields"] = self._ContainerToDicts(mydict["fields"])
+ return mydict
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for de-serializing.
+
+ """
+ obj = super(_QueryResponseBase, cls).FromDict(val)
+ obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
+ return obj
+
+
+class QueryRequest(ConfigObject):
+ """Object holding a query request.
+
+ """
+ __slots__ = [
+ "what",
+ "fields",
+ "qfilter",
+ ]
+
+
+class QueryResponse(_QueryResponseBase):
+ """Object holding the response to a query.
+
+ @ivar fields: List of L{QueryFieldDefinition} objects
+ @ivar data: Requested data
+
+ """
+ __slots__ = [
+ "data",
+ ]
+
+
+class QueryFieldsRequest(ConfigObject):
+ """Object holding a request for querying available fields.
+
+ """
+ __slots__ = [
+ "what",
+ "fields",
+ ]
+
+
+class QueryFieldsResponse(_QueryResponseBase):
+ """Object holding the response to a query for fields.
+
+ @ivar fields: List of L{QueryFieldDefinition} objects
+
+ """
+ __slots__ = [
+ ]
+
+
+class MigrationStatus(ConfigObject):
+ """Object holding the status of a migration.
+
+ """
+ __slots__ = [
+ "status",
+ "transferred_ram",
+ "total_ram",
+ ]
+
+
+class InstanceConsole(ConfigObject):
+ """Object describing how to access the console of an instance.
+
+ """
+ __slots__ = [
+ "instance",
+ "kind",
+ "message",
+ "host",
+ "port",
+ "user",
+ "command",
+ "display",
+ ]
+
+ def Validate(self):
+ """Validates contents of this object.
+
+ """
+ assert self.kind in constants.CONS_ALL, "Unknown console type"
+ assert self.instance, "Missing instance name"
+ assert self.message or self.kind in [constants.CONS_SSH,
+ constants.CONS_SPICE,
+ constants.CONS_VNC]
+ assert self.host or self.kind == constants.CONS_MESSAGE
+ assert self.port or self.kind in [constants.CONS_MESSAGE,
+ constants.CONS_SSH]
+ assert self.user or self.kind in [constants.CONS_MESSAGE,
+ constants.CONS_SPICE,
+ constants.CONS_VNC]
+ assert self.command or self.kind in [constants.CONS_MESSAGE,
+ constants.CONS_SPICE,
+ constants.CONS_VNC]
+ assert self.display or self.kind in [constants.CONS_MESSAGE,
+ constants.CONS_SPICE,
+ constants.CONS_SSH]
+ return True
class SerializableConfigParser(ConfigParser.SafeConfigParser):
self.write(buf)
return buf.getvalue()
- @staticmethod
- def Loads(data):
+ @classmethod
+ def Loads(cls, data):
"""Load data from a string."""
buf = StringIO(data)
- cfp = SerializableConfigParser()
+ cfp = cls()
cfp.readfp(buf)
return cfp