#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# pylint: disable=E0203,W0201,R0902
# E0203: Access to member %r before its definition, since we use
-# objects.py which doesn't explicitely initialise its members
+# objects.py which doesn't explicitly initialise its members
# W0201: Attribute '%s' defined outside __init__
import ConfigParser
import re
import copy
+import logging
import time
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import netutils
+from ganeti import outils
from ganeti import utils
from socket import AF_INET
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
- "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
+ "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
-# constants used to create InstancePolicy dictionary
-TISPECS_GROUP_TYPES = {
- constants.MIN_ISPECS: constants.VTYPE_INT,
- constants.MAX_ISPECS: constants.VTYPE_INT,
-}
-
-TISPECS_CLUSTER_TYPES = {
- constants.MIN_ISPECS: constants.VTYPE_INT,
- constants.MAX_ISPECS: constants.VTYPE_INT,
- constants.STD_ISPECS: constants.VTYPE_INT,
- }
-
def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top a default dict.
return ret_dict
-def FillDictOfDicts(defaults_dict, custom_dict, skip_keys=None):
- """Run FillDict for each key in dictionary.
+def FillIPolicy(default_ipolicy, custom_ipolicy):
+ """Fills an instance policy with defaults.
"""
- ret_dict = {}
- for key in defaults_dict.keys():
- ret_dict[key] = FillDict(defaults_dict[key],
- custom_dict.get(key, {}),
- skip_keys=skip_keys)
+ assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
+ ret_dict = copy.deepcopy(custom_ipolicy)
+ for key in default_ipolicy:
+ if key not in ret_dict:
+ ret_dict[key] = copy.deepcopy(default_ipolicy[key])
+ elif key == constants.ISPECS_STD:
+ ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
return ret_dict
+def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
+ """Fills the disk parameter defaults.
+
+ @see: L{FillDict} for parameters and return value
+
+ """
+ assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
+
+ return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
+ skip_keys=skip_keys))
+ for dt in constants.DISK_TEMPLATES)
+
+
def UpgradeGroupedParams(target, defaults):
"""Update all groups for the target parameter.
@type diskparams: dict
@param diskparams: disk parameters to upgrade
@rtype: dict
- @return: the upgraded disk parameters dit
+ @return: the upgraded disk parameters dict
"""
- result = dict()
- if diskparams is None:
- result = constants.DISK_DT_DEFAULTS.copy()
+ if not diskparams:
+ result = {}
else:
- # Update the disk parameter values for each disk template.
- # The code iterates over constants.DISK_TEMPLATES because new templates
- # might have been added.
- for template in constants.DISK_TEMPLATES:
- if template not in diskparams:
- result[template] = constants.DISK_DT_DEFAULTS[template].copy()
- else:
- result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
- diskparams[template])
+ result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
return result
-def MakeEmptyIPolicy():
- """Create empty IPolicy dictionary.
-
- """
- return dict([
- (constants.MIN_ISPECS, dict()),
- (constants.MAX_ISPECS, dict()),
- (constants.STD_ISPECS, dict()),
- ])
-
-
-def CreateIPolicyFromOpts(ispecs_mem_size=None,
- ispecs_cpu_count=None,
- ispecs_disk_count=None,
- ispecs_disk_size=None,
- ispecs_nic_count=None,
- group_ipolicy=False,
- allowed_values=None):
- """Creation of instane policy based on command line options.
+def UpgradeNDParams(ndparams):
+ """Upgrade ndparams structure.
+ @type ndparams: dict
+  @param ndparams: node parameters to upgrade
+ @rtype: dict
+ @return: the upgraded node parameters dict
"""
- # prepare ipolicy dict
- ipolicy_transposed = {
- constants.MEM_SIZE_SPEC: ispecs_mem_size,
- constants.CPU_COUNT_SPEC: ispecs_cpu_count,
- constants.DISK_COUNT_SPEC: ispecs_disk_count,
- constants.DISK_SIZE_SPEC: ispecs_disk_size,
- constants.NIC_COUNT_SPEC: ispecs_nic_count,
- }
+ if ndparams is None:
+ ndparams = {}
- # first, check that the values given are correct
- if group_ipolicy:
- forced_type = TISPECS_GROUP_TYPES
- else:
- forced_type = TISPECS_CLUSTER_TYPES
+ if (constants.ND_OOB_PROGRAM in ndparams and
+ ndparams[constants.ND_OOB_PROGRAM] is None):
+ # will be reset by the line below
+ del ndparams[constants.ND_OOB_PROGRAM]
+ return FillDict(constants.NDC_DEFAULTS, ndparams)
- for specs in ipolicy_transposed.values():
- utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
- # then transpose
- ipolicy_out = MakeEmptyIPolicy()
- for name, specs in ipolicy_transposed.iteritems():
- assert name in constants.ISPECS_PARAMETERS
- for key, val in specs.items(): # {min: .. ,max: .., std: ..}
- ipolicy_out[key][name] = val
+def MakeEmptyIPolicy():
+ """Create empty IPolicy dictionary.
- return ipolicy_out
+ """
+ return {}
-class ConfigObject(object):
+class ConfigObject(outils.ValidatedSlots):
"""A generic config object.
It has the following properties:
"""
__slots__ = []
- def __init__(self, **kwargs):
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
def __getattr__(self, name):
- if name not in self._all_slots():
+ if name not in self.GetAllSlots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
def __setstate__(self, state):
- slots = self._all_slots()
+ slots = self.GetAllSlots()
for name in state:
if name in slots:
setattr(self, name, state[name])
- @classmethod
- def _all_slots(cls):
- """Compute the list of all declared slots for a class.
+ def Validate(self):
+ """Validates the slots.
"""
- slots = []
- for parent in cls.__mro__:
- slots.extend(getattr(parent, "__slots__", []))
- return slots
def ToDict(self):
"""Convert to a dict holding only standard python types.
"""
result = {}
- for name in self._all_slots():
+ for name in self.GetAllSlots():
value = getattr(self, name, None)
if value is not None:
result[name] = value
obj = cls(**val_str) # pylint: disable=W0142
return obj
- @staticmethod
- def _ContainerToDicts(container):
- """Convert the elements of a container to standard python types.
-
- This method converts a container with elements derived from
- ConfigData to standard python types. If the container is a dict,
- we don't touch the keys, only the values.
-
- """
- if isinstance(container, dict):
- ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
- elif isinstance(container, (list, tuple, set, frozenset)):
- ret = [elem.ToDict() for elem in container]
- else:
- raise TypeError("Invalid type %s passed to _ContainerToDicts" %
- type(container))
- return ret
-
- @staticmethod
- def _ContainerFromDicts(source, c_type, e_type):
- """Convert a container from standard python types.
-
- This method converts a container with standard python types to
- ConfigData objects. If the container is a dict, we don't touch the
- keys, only the values.
-
- """
- if not isinstance(c_type, type):
- raise TypeError("Container type %s passed to _ContainerFromDicts is"
- " not a type" % type(c_type))
- if source is None:
- source = c_type()
- if c_type is dict:
- ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
- elif c_type in (list, tuple, set, frozenset):
- ret = c_type([e_type.FromDict(elem) for elem in source])
- else:
- raise TypeError("Invalid container type %s passed to"
- " _ContainerFromDicts" % c_type)
- return ret
-
def Copy(self):
"""Makes a deep copy of the current object and its children.
"ip",
"netmask",
"netdev",
- "ip_family"
+ "ip_family",
]
"nodes",
"nodegroups",
"instances",
+ "networks",
"serial_no",
] + _TIMESTAMPS
"""
mydict = super(ConfigData, self).ToDict()
mydict["cluster"] = mydict["cluster"].ToDict()
- for key in "nodes", "instances", "nodegroups":
- mydict[key] = self._ContainerToDicts(mydict[key])
+ for key in "nodes", "instances", "nodegroups", "networks":
+ mydict[key] = outils.ContainerToDicts(mydict[key])
return mydict
"""
obj = super(ConfigData, cls).FromDict(val)
obj.cluster = Cluster.FromDict(obj.cluster)
- obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
- obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
- obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
+ obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
+ obj.instances = \
+ outils.ContainerFromDicts(obj.instances, dict, Instance)
+ obj.nodegroups = \
+ outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
+ obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
return obj
def HasAnyDiskOfType(self, dev_type):
# gives a good approximation.
if self.HasAnyDiskOfType(constants.LD_DRBD8):
self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
+ if self.networks is None:
+ self.networks = {}
+ for network in self.networks.values():
+ network.UpgradeConfig()
+ self._UpgradeEnabledDiskTemplates()
+
+ def _UpgradeEnabledDiskTemplates(self):
+ """Upgrade the cluster's enabled disk templates by inspecting the currently
+ enabled and/or used disk templates.
+
+ """
+ # enabled_disk_templates in the cluster config were introduced in 2.8.
+ # Remove this code once upgrading from earlier versions is deprecated.
+ if not self.cluster.enabled_disk_templates:
+ template_set = \
+ set([inst.disk_template for inst in self.instances.values()])
+ # Add drbd and plain, if lvm is enabled (by specifying a volume group)
+ if self.cluster.volume_group_name:
+ template_set.add(constants.DT_DRBD8)
+ template_set.add(constants.DT_PLAIN)
+ # FIXME: Adapt this when dis/enabling at configure time is removed.
+ # Enable 'file' and 'sharedfile', if they are enabled, even though they
+ # might currently not be used.
+ if constants.ENABLE_FILE_STORAGE:
+ template_set.add(constants.DT_FILE)
+ if constants.ENABLE_SHARED_FILE_STORAGE:
+ template_set.add(constants.DT_SHARED_FILE)
+ # Set enabled_disk_templates to the inferred disk templates. Order them
+ # according to a preference list that is based on Ganeti's history of
+ # supported disk templates.
+ self.cluster.enabled_disk_templates = []
+ for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
+ if preferred_template in template_set:
+ self.cluster.enabled_disk_templates.append(preferred_template)
+ template_set.remove(preferred_template)
+ self.cluster.enabled_disk_templates.extend(list(template_set))
class NIC(ConfigObject):
"""Config object representing a network card."""
- __slots__ = ["mac", "ip", "nicparams"]
+ __slots__ = ["name", "mac", "ip", "network",
+ "nicparams", "netinfo", "pci"] + _UUID
@classmethod
def CheckParameterSyntax(cls, nicparams):
@raise errors.ConfigurationError: when a parameter is not valid
"""
- if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
- nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
- err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
- raise errors.ConfigurationError(err)
+ mode = nicparams[constants.NIC_MODE]
+ if (mode not in constants.NIC_VALID_MODES and
+ mode != constants.VALUE_AUTO):
+ raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
- if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+ if (mode == constants.NIC_MODE_BRIDGED and
not nicparams[constants.NIC_LINK]):
- err = "Missing bridged nic link"
- raise errors.ConfigurationError(err)
+ raise errors.ConfigurationError("Missing bridged NIC link")
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for NICs.
+
+ Remove deprecated idx. Add dummy UUID if not found.
+ Needed for old runtime files.
+
+ """
+ if "idx" in val:
+ del val["idx"]
+ obj = super(NIC, cls).FromDict(val)
+ return obj
class Disk(ConfigObject):
"""Config object representing a block device."""
- __slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size", "mode", "params"]
+ __slots__ = ["name", "dev_type", "logical_id", "physical_id",
+ "children", "iv_name", "size", "mode", "params", "pci"] + _UUID
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
elif self.dev_type == constants.LD_BLOCKDEV:
return self.logical_id[1]
+ elif self.dev_type == constants.LD_RBD:
+ return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
def ChildrenNeeded(self):
"""
if self.dev_type in [constants.LD_LV, constants.LD_FILE,
- constants.LD_BLOCKDEV]:
+ constants.LD_BLOCKDEV, constants.LD_RBD,
+ constants.LD_EXT]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
actual algorithms from bdev.
"""
- if self.dev_type in (constants.LD_LV, constants.LD_FILE):
+ if self.dev_type in (constants.LD_LV, constants.LD_FILE,
+ constants.LD_RBD, constants.LD_EXT):
self.size += amount
elif self.dev_type == constants.LD_DRBD8:
if self.children:
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
+ def Update(self, size=None, mode=None):
+ """Apply changes to size and mode.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].Update(size=size, mode=mode)
+ else:
+ assert not self.children
+
+ if size is not None:
+ self.size = size
+ if mode is not None:
+ self.mode = mode
+
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
for attr in ("children",):
alist = bo.get(attr, None)
if alist:
- bo[attr] = self._ContainerToDicts(alist)
+ bo[attr] = outils.ContainerToDicts(alist)
return bo
@classmethod
"""Custom function for Disks
"""
+ if "idx" in val:
+ del val["idx"]
obj = super(Disk, cls).FromDict(val)
if obj.children:
- obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
+ obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
if obj.logical_id and isinstance(obj.logical_id, list):
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
for child in self.children:
child.UpgradeConfig()
- if not self.params:
- self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
- else:
- self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
- self.params)
+ # FIXME: Make this configurable in Ganeti 2.7
+ # Params should be an empty dict that gets filled any time needed
+ # In case of ext template we allow arbitrary params that should not
+    # be overridden during a config reload/upgrade.
+ if not self.params or not isinstance(self.params, dict):
+ self.params = {}
+
# add here config upgrade for this disk
+ # If the file driver is empty, fill it up with the default value
+ if self.dev_type == constants.LD_FILE and self.physical_id[0] is None:
+ self.physical_id[0] = constants.FD_DEFAULT
+
+ @staticmethod
+ def ComputeLDParams(disk_template, disk_params):
+ """Computes Logical Disk parameters from Disk Template parameters.
+
+ @type disk_template: string
+ @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
+ @type disk_params: dict
+ @param disk_params: disk template parameters;
+      dict(template_name -> parameters)
+ @rtype: list(dict)
+ @return: a list of dicts, one for each node of the disk hierarchy. Each dict
+ contains the LD parameters of the node. The tree is flattened in-order.
+
+ """
+ if disk_template not in constants.DISK_TEMPLATES:
+ raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
+
+ assert disk_template in disk_params
+
+ result = list()
+ dt_params = disk_params[disk_template]
+ if disk_template == constants.DT_DRBD8:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
+ constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
+ constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
+ constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
+ constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
+ constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
+ constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
+ constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
+ constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
+ constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
+ constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
+ constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
+ constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
+ }))
+
+ # data LV
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
+ }))
+
+ # metadata LV
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
+ }))
+
+ elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
+ result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
+
+ elif disk_template == constants.DT_PLAIN:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
+ constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
+ }))
+
+ elif disk_template == constants.DT_BLOCK:
+ result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
+
+ elif disk_template == constants.DT_RBD:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
+ constants.LDP_POOL: dt_params[constants.RBD_POOL],
+ }))
+
+ elif disk_template == constants.DT_EXT:
+ result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
+
+ return result
+
class InstancePolicy(ConfigObject):
- """Config object representing instance policy limits dictionary."""
- __slots__ = ["min", "max", "std"]
+ """Config object representing instance policy limits dictionary.
+ Note that this object is not actually used in the config, it's just
+ used as a placeholder for a few functions.
+
+ """
@classmethod
- def CheckParameterSyntax(cls, ipolicy):
+ def CheckParameterSyntax(cls, ipolicy, check_std):
""" Check the instance policy for validity.
- """
- for param in constants.ISPECS_PARAMETERS:
- InstancePolicy.CheckISpecSyntax(ipolicy, param)
+ @type ipolicy: dict
+ @param ipolicy: dictionary with min/max/std specs and policies
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @raise errors.ConfigurationError: when the policy is not legal
+
+ """
+ InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
+ if constants.IPOLICY_DTS in ipolicy:
+ InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
+ for key in constants.IPOLICY_PARAMETERS:
+ if key in ipolicy:
+ InstancePolicy.CheckParameter(key, ipolicy[key])
+ wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
+ if wrong_keys:
+ raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
+ utils.CommaJoin(wrong_keys))
@classmethod
- def CheckISpecSyntax(cls, ipolicy, name):
- """Check the instance policy for validity on a given key.
+ def _CheckIncompleteSpec(cls, spec, keyname):
+ missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
+ if missing_params:
+ msg = ("Missing instance specs parameters for %s: %s" %
+ (keyname, utils.CommaJoin(missing_params)))
+ raise errors.ConfigurationError(msg)
- We check if the instance policy makes sense for a given key, that is
- if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
+ @classmethod
+ def CheckISpecSyntax(cls, ipolicy, check_std):
+ """Check the instance policy specs for validity.
@type ipolicy: dict
- @param ipolicy: dictionary with min, max, std specs
+ @param ipolicy: dictionary with min/max/std specs
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @raise errors.ConfigurationError: when specs are not valid
+
+ """
+ if constants.ISPECS_MINMAX not in ipolicy:
+ # Nothing to check
+ return
+
+ if check_std and constants.ISPECS_STD not in ipolicy:
+ msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
+ raise errors.ConfigurationError(msg)
+ stdspec = ipolicy.get(constants.ISPECS_STD)
+ if check_std:
+ InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
+
+ if not ipolicy[constants.ISPECS_MINMAX]:
+ raise errors.ConfigurationError("Empty minmax specifications")
+ std_is_good = False
+ for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
+ missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
+ if missing:
+ msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
+ raise errors.ConfigurationError(msg)
+ for (key, spec) in minmaxspecs.items():
+ InstancePolicy._CheckIncompleteSpec(spec, key)
+
+ spec_std_ok = True
+ for param in constants.ISPECS_PARAMETERS:
+ par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
+ param, check_std)
+ spec_std_ok = spec_std_ok and par_std_ok
+ std_is_good = std_is_good or spec_std_ok
+ if not std_is_good:
+ raise errors.ConfigurationError("Invalid std specifications")
+
+ @classmethod
+ def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
+ """Check the instance policy specs for validity on a given key.
+
+ We check if the instance specs makes sense for a given key, that is
+    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
+
+ @type minmaxspecs: dict
+ @param minmaxspecs: dictionary with min and max instance spec
+ @type stdspec: dict
+ @param stdspec: dictionary with standard instance spec
@type name: string
@param name: what are the limits for
- @raise errors.ConfigureError: when specs for given name are not valid
-
- """
- min_v = ipolicy[constants.MIN_ISPECS].get(name, 0)
- std_v = ipolicy[constants.STD_ISPECS].get(name, min_v)
- max_v = ipolicy[constants.MAX_ISPECS].get(name, std_v)
- err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
- (name,
- ipolicy[constants.MIN_ISPECS].get(name, "-"),
- ipolicy[constants.MAX_ISPECS].get(name, "-"),
- ipolicy[constants.STD_ISPECS].get(name, "-")))
- if min_v > std_v or std_v > max_v:
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @rtype: bool
+ @return: C{True} when specs are valid, C{False} when standard spec for the
+ given name is not valid
+ @raise errors.ConfigurationError: when min/max specs for the given name
+ are not valid
+
+ """
+ minspec = minmaxspecs[constants.ISPECS_MIN]
+ maxspec = minmaxspecs[constants.ISPECS_MAX]
+ min_v = minspec[name]
+ max_v = maxspec[name]
+
+ if min_v > max_v:
+ err = ("Invalid specification of min/max values for %s: %s/%s" %
+ (name, min_v, max_v))
raise errors.ConfigurationError(err)
+ elif check_std:
+ std_v = stdspec.get(name, min_v)
+ return std_v >= min_v and std_v <= max_v
+ else:
+ return True
+
+ @classmethod
+ def CheckDiskTemplates(cls, disk_templates):
+ """Checks the disk templates for validity.
+
+ """
+ if not disk_templates:
+ raise errors.ConfigurationError("Instance policy must contain" +
+ " at least one disk template")
+ wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
+ if wrong:
+ raise errors.ConfigurationError("Invalid disk template(s) %s" %
+ utils.CommaJoin(wrong))
+
+ @classmethod
+ def CheckParameter(cls, key, value):
+ """Checks a parameter.
+
+ Currently we expect all parameters to be float values.
+
+ """
+ try:
+ float(value)
+ except (TypeError, ValueError), err:
+ raise errors.ConfigurationError("Invalid value for key" " '%s':"
+ " '%s', error: %s" % (key, value, err))
class Instance(TaggableObject):
"nics",
"disks",
"disk_template",
+ "disks_active",
"network_port",
"serial_no",
] + _TIMESTAMPS + _UUID
return tuple(all_nodes)
secondary_nodes = property(_ComputeSecondaryNodes, None, None,
- "List of secondary nodes")
+ "List of names of secondary nodes")
def _ComputeAllNodes(self):
"""Compute the list of all nodes.
return tuple(all_nodes)
all_nodes = property(_ComputeAllNodes, None, None,
- "List of all nodes of the instance")
+ "List of names of all the nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
GetVolumeList()
"""
- if node == None:
+ if node is None:
node = self.primary_node
if lvmap is None:
for attr in "nics", "disks":
alist = bo.get(attr, None)
if alist:
- nlist = self._ContainerToDicts(alist)
+ nlist = outils.ContainerToDicts(alist)
else:
nlist = []
bo[attr] = nlist
if "admin_up" in val:
del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
- obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
- obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
+ obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
+ obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
return obj
def UpgradeConfig(self):
if self.osparams is None:
self.osparams = {}
UpgradeBeParams(self.beparams)
+ if self.disks_active is None:
+ self.disks_active = self.admin_state == constants.ADMINST_UP
class OS(ConfigObject):
return cls.SplitNameVariant(name)[1]
+class ExtStorage(ConfigObject):
+ """Config object representing an External Storage Provider.
+
+ """
+ __slots__ = [
+ "name",
+ "path",
+ "create_script",
+ "remove_script",
+ "grow_script",
+ "attach_script",
+ "detach_script",
+ "setinfo_script",
+ "verify_script",
+ "supported_parameters",
+ ]
+
+
class NodeHvState(ConfigObject):
"""Hypvervisor state on a node.
if self.ndparams is None:
self.ndparams = {}
+ # And remove any global parameter
+ for key in constants.NDC_GLOBALS:
+ if key in self.ndparams:
+ logging.warning("Ignoring %s node parameter for node %s",
+ key, self.name)
+ del self.ndparams[key]
if self.powered is None:
self.powered = True
hv_state = data.get("hv_state", None)
if hv_state is not None:
- data["hv_state"] = self._ContainerToDicts(hv_state)
+ data["hv_state"] = outils.ContainerToDicts(hv_state)
disk_state = data.get("disk_state", None)
if disk_state is not None:
data["disk_state"] = \
- dict((key, self._ContainerToDicts(value))
+ dict((key, outils.ContainerToDicts(value))
for (key, value) in disk_state.items())
return data
obj = super(Node, cls).FromDict(val)
if obj.hv_state is not None:
- obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
+ obj.hv_state = \
+ outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
if obj.disk_state is not None:
obj.disk_state = \
- dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
+ dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
for (key, value) in obj.disk_state.items())
return obj
"members",
"ndparams",
"diskparams",
+ "ipolicy",
"serial_no",
"hv_state_static",
"disk_state_static",
"alloc_policy",
+ "networks",
] + _TIMESTAMPS + _UUID
def ToDict(self):
if self.alloc_policy is None:
self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
- # We only update mtime, and not ctime, since we would not be able to provide
- # a correct value for creation time.
+ # We only update mtime, and not ctime, since we would not be able
+ # to provide a correct value for creation time.
if self.mtime is None:
self.mtime = time.time()
- self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.diskparams is None:
+ self.diskparams = {}
+ if self.ipolicy is None:
+ self.ipolicy = MakeEmptyIPolicy()
+
+ if self.networks is None:
+ self.networks = {}
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
__slots__ = [
"serial_no",
"rsahostkeypub",
+ "dsahostkeypub",
"highest_used_port",
"tcpudp_port_pool",
"mac_prefix",
"prealloc_wipe_disks",
"hv_state_static",
"disk_state_static",
+ "enabled_disk_templates",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
if self.osparams is None:
self.osparams = {}
- if self.ndparams is None:
- self.ndparams = constants.NDC_DEFAULTS
+ self.ndparams = UpgradeNDParams(self.ndparams)
self.beparams = UpgradeGroupedParams(self.beparams,
constants.BEC_DEFAULTS)
# code can be removed once upgrading straight from 2.0 is deprecated.
if self.default_hypervisor is not None:
self.enabled_hypervisors = ([self.default_hypervisor] +
- [hvname for hvname in self.enabled_hypervisors
- if hvname != self.default_hypervisor])
+ [hvname for hvname in self.enabled_hypervisors
+ if hvname != self.default_hypervisor])
self.default_hypervisor = None
# maintain_node_health added after 2.1.1
if self.use_external_mip_script is None:
self.use_external_mip_script = False
- self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.diskparams:
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+ else:
+ self.diskparams = constants.DISK_DT_DEFAULTS.copy()
# instance policy added before 2.6
if self.ipolicy is None:
- self.ipolicy = MakeEmptyIPolicy()
+ self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
+ else:
+ # we can either make sure to upgrade the ipolicy always, or only
+ # do it in some corner cases (e.g. missing keys); note that this
+ # will break any removal of keys from the ipolicy dict
+ wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
+ if wrongkeys:
+ # These keys would be silently removed by FillIPolicy()
+ msg = ("Cluster instance policy contains spurious keys: %s" %
+ utils.CommaJoin(wrongkeys))
+ raise errors.ConfigurationError(msg)
+ self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
@property
def primary_hypervisor(self):
"""
mydict = super(Cluster, self).ToDict()
- mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
+
+ if self.tcpudp_port_pool is None:
+ tcpudp_port_pool = []
+ else:
+ tcpudp_port_pool = list(self.tcpudp_port_pool)
+
+ mydict["tcpudp_port_pool"] = tcpudp_port_pool
+
return mydict
@classmethod
"""
obj = super(Cluster, cls).FromDict(val)
- if not isinstance(obj.tcpudp_port_pool, set):
+
+ if obj.tcpudp_port_pool is None:
+ obj.tcpudp_port_pool = set()
+ elif not isinstance(obj.tcpudp_port_pool, set):
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
+
return obj
+ def SimpleFillDP(self, diskparams):
+ """Fill a given diskparams dict with cluster defaults.
+
+ @param diskparams: The diskparams
+ @return: The defaults dict
+
+ """
+ return FillDiskParams(self.diskparams, diskparams)
+
def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
"""Get the default hypervisor parameters for the cluster.
the cluster defaults
"""
- return FillDictOfDicts(self.ipolicy, ipolicy)
+ return FillIPolicy(self.ipolicy, ipolicy)
class BlockDevStatus(ConfigObject):
"""
mydict = super(_QueryResponseBase, self).ToDict()
- mydict["fields"] = self._ContainerToDicts(mydict["fields"])
+ mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
return mydict
@classmethod
"""
obj = super(_QueryResponseBase, cls).FromDict(val)
- obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
+ obj.fields = \
+ outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
return obj
-class QueryRequest(ConfigObject):
- """Object holding a query request.
-
- """
- __slots__ = [
- "what",
- "fields",
- "qfilter",
- ]
-
-
class QueryResponse(_QueryResponseBase):
"""Object holding the response to a query.
@ivar fields: List of L{QueryFieldDefinition} objects
"""
- __slots__ = [
- ]
+ __slots__ = []
class MigrationStatus(ConfigObject):
return True
+class Network(TaggableObject):
+ """Object representing a network definition for ganeti.
+
+ """
+ __slots__ = [
+ "name",
+ "serial_no",
+ "mac_prefix",
+ "network",
+ "network6",
+ "gateway",
+ "gateway6",
+ "reservations",
+ "ext_reservations",
+ ] + _TIMESTAMPS + _UUID
+
+ def HooksDict(self, prefix=""):
+ """Export a dictionary used by hooks with a network's information.
+
+ @type prefix: String
+ @param prefix: Prefix to prepend to the dict entries
+
+ """
+ result = {
+ "%sNETWORK_NAME" % prefix: self.name,
+ "%sNETWORK_UUID" % prefix: self.uuid,
+ "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
+ }
+ if self.network:
+ result["%sNETWORK_SUBNET" % prefix] = self.network
+ if self.gateway:
+ result["%sNETWORK_GATEWAY" % prefix] = self.gateway
+ if self.network6:
+ result["%sNETWORK_SUBNET6" % prefix] = self.network6
+ if self.gateway6:
+ result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
+ if self.mac_prefix:
+ result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
+
+ return result
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for networks.
+
+ Remove deprecated network_type and family.
+
+ """
+ if "network_type" in val:
+ del val["network_type"]
+ if "family" in val:
+ del val["family"]
+ obj = super(Network, cls).FromDict(val)
+ return obj
+
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.
cfp = cls()
cfp.readfp(buf)
return cfp
+
+
+class LvmPvInfo(ConfigObject):
+ """Information about an LVM physical volume (PV).
+
+ @type name: string
+ @ivar name: name of the PV
+ @type vg_name: string
+ @ivar vg_name: name of the volume group containing the PV
+ @type size: float
+ @ivar size: size of the PV in MiB
+ @type free: float
+ @ivar free: free space in the PV, in MiB
+ @type attributes: string
+ @ivar attributes: PV attributes
+ @type lv_list: list of strings
+ @ivar lv_list: names of the LVs hosted on the PV
+ """
+ __slots__ = [
+ "name",
+ "vg_name",
+ "size",
+ "free",
+ "attributes",
+ "lv_list"
+ ]
+
+ def IsEmpty(self):
+ """Is this PV empty?
+
+ """
+ return self.size <= (self.free + 1)
+
+ def IsAllocatable(self):
+ """Is this PV allocatable?
+
+ """
+ return ("a" in self.attributes)