#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# pylint: disable=E0203,W0201,R0902
# E0203: Access to member %r before its definition, since we use
-# objects.py which doesn't explicitely initialise its members
+# objects.py which doesn't explicitly initialise its members
# W0201: Attribute '%s' defined outside __init__
import ConfigParser
import re
import copy
+import logging
import time
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import netutils
+from ganeti import outils
from ganeti import utils
from socket import AF_INET
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
- "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
+ "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
-# constants used to create InstancePolicy dictionary
-TISPECS_GROUP_TYPES = {
- constants.ISPECS_MIN: constants.VTYPE_INT,
- constants.ISPECS_MAX: constants.VTYPE_INT,
- }
-
-TISPECS_CLUSTER_TYPES = {
- constants.ISPECS_MIN: constants.VTYPE_INT,
- constants.ISPECS_MAX: constants.VTYPE_INT,
- constants.ISPECS_STD: constants.VTYPE_INT,
- }
-
def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top of a default dict.
return ret_dict
-def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
+def FillIPolicy(default_ipolicy, custom_ipolicy):
"""Fills an instance policy with defaults.
"""
assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
- ret_dict = {}
- for key in constants.IPOLICY_ISPECS:
- ret_dict[key] = FillDict(default_ipolicy[key],
- custom_ipolicy.get(key, {}),
- skip_keys=skip_keys)
- # list items
- for key in [constants.IPOLICY_DTS]:
- ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
- # other items which we know we can directly copy (immutables)
- for key in constants.IPOLICY_PARAMETERS:
- ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
-
+ ret_dict = copy.deepcopy(custom_ipolicy)
+ for key in default_ipolicy:
+ if key not in ret_dict:
+ ret_dict[key] = copy.deepcopy(default_ipolicy[key])
+ elif key == constants.ISPECS_STD:
+ ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
return ret_dict
+def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
+ """Fills the disk parameter defaults.
+
+ @see: L{FillDict} for parameters and return value
+
+ """
+ assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
+
+ return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
+ skip_keys=skip_keys))
+ for dt in constants.DISK_TEMPLATES)
+
+
def UpgradeGroupedParams(target, defaults):
"""Update all groups for the target parameter.
@return: the upgraded disk parameters dict
"""
- if diskparams is None:
- result = constants.DISK_DT_DEFAULTS.copy()
+ if not diskparams:
+ result = {}
else:
- # Update the disk parameter values for each disk template.
- # The code iterates over constants.DISK_TEMPLATES because new templates
- # might have been added.
- result = dict((dt, FillDict(constants.DISK_DT_DEFAULTS[dt],
- diskparams.get(dt, {})))
- for dt in constants.DISK_TEMPLATES)
+ result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
return result
if ndparams is None:
ndparams = {}
+ if (constants.ND_OOB_PROGRAM in ndparams and
+ ndparams[constants.ND_OOB_PROGRAM] is None):
+ # will be reset by the line below
+ del ndparams[constants.ND_OOB_PROGRAM]
return FillDict(constants.NDC_DEFAULTS, ndparams)
"""Create empty IPolicy dictionary.
"""
- return dict([
- (constants.ISPECS_MIN, {}),
- (constants.ISPECS_MAX, {}),
- (constants.ISPECS_STD, {}),
- ])
-
-
-def CreateIPolicyFromOpts(ispecs_mem_size=None,
- ispecs_cpu_count=None,
- ispecs_disk_count=None,
- ispecs_disk_size=None,
- ispecs_nic_count=None,
- ipolicy_disk_templates=None,
- ipolicy_vcpu_ratio=None,
- group_ipolicy=False,
- allowed_values=None,
- fill_all=False):
- """Creation of instance policy based on command line options.
-
- @param fill_all: whether for cluster policies we should ensure that
- all values are filled
-
-
- """
- # prepare ipolicy dict
- ipolicy_transposed = {
- constants.ISPEC_MEM_SIZE: ispecs_mem_size,
- constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
- constants.ISPEC_DISK_COUNT: ispecs_disk_count,
- constants.ISPEC_DISK_SIZE: ispecs_disk_size,
- constants.ISPEC_NIC_COUNT: ispecs_nic_count,
- }
-
- # first, check that the values given are correct
- if group_ipolicy:
- forced_type = TISPECS_GROUP_TYPES
- else:
- forced_type = TISPECS_CLUSTER_TYPES
-
- for specs in ipolicy_transposed.values():
- utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
-
- # then transpose
- ipolicy_out = MakeEmptyIPolicy()
- for name, specs in ipolicy_transposed.iteritems():
- assert name in constants.ISPECS_PARAMETERS
- for key, val in specs.items(): # {min: .. ,max: .., std: ..}
- ipolicy_out[key][name] = val
-
- # no filldict for non-dicts
- if not group_ipolicy and fill_all:
- if ipolicy_disk_templates is None:
- ipolicy_disk_templates = constants.DISK_TEMPLATES
- if ipolicy_vcpu_ratio is None:
- ipolicy_vcpu_ratio = \
- constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
- if ipolicy_disk_templates is not None:
- ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
- if ipolicy_vcpu_ratio is not None:
- ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
-
- assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
+ return {}
- return ipolicy_out
-
-class ConfigObject(object):
+class ConfigObject(outils.ValidatedSlots):
"""A generic config object.
It has the following properties:
"""
__slots__ = []
- def __init__(self, **kwargs):
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
def __getattr__(self, name):
- if name not in self._all_slots():
+ if name not in self.GetAllSlots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
def __setstate__(self, state):
- slots = self._all_slots()
+ slots = self.GetAllSlots()
for name in state:
if name in slots:
setattr(self, name, state[name])
- @classmethod
- def _all_slots(cls):
- """Compute the list of all declared slots for a class.
+ def Validate(self):
+ """Validates the slots.
"""
- slots = []
- for parent in cls.__mro__:
- slots.extend(getattr(parent, "__slots__", []))
- return slots
-
- #: Public getter for the defined slots
- GetAllSlots = _all_slots
def ToDict(self):
"""Convert to a dict holding only standard python types.
"""
result = {}
- for name in self._all_slots():
+ for name in self.GetAllSlots():
value = getattr(self, name, None)
if value is not None:
result[name] = value
obj = cls(**val_str) # pylint: disable=W0142
return obj
- @staticmethod
- def _ContainerToDicts(container):
- """Convert the elements of a container to standard python types.
-
- This method converts a container with elements derived from
- ConfigData to standard python types. If the container is a dict,
- we don't touch the keys, only the values.
-
- """
- if isinstance(container, dict):
- ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
- elif isinstance(container, (list, tuple, set, frozenset)):
- ret = [elem.ToDict() for elem in container]
- else:
- raise TypeError("Invalid type %s passed to _ContainerToDicts" %
- type(container))
- return ret
-
- @staticmethod
- def _ContainerFromDicts(source, c_type, e_type):
- """Convert a container from standard python types.
-
- This method converts a container with standard python types to
- ConfigData objects. If the container is a dict, we don't touch the
- keys, only the values.
-
- """
- if not isinstance(c_type, type):
- raise TypeError("Container type %s passed to _ContainerFromDicts is"
- " not a type" % type(c_type))
- if source is None:
- source = c_type()
- if c_type is dict:
- ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
- elif c_type in (list, tuple, set, frozenset):
- ret = c_type([e_type.FromDict(elem) for elem in source])
- else:
- raise TypeError("Invalid container type %s passed to"
- " _ContainerFromDicts" % c_type)
- return ret
-
def Copy(self):
"""Makes a deep copy of the current object and its children.
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
+ def __eq__(self, other):
+ """Implement __eq__ for ConfigObjects."""
+ return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
+
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
__slots__ = ["tags"]
- VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
+ VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
@classmethod
def ValidateTag(cls, tag):
class MasterNetworkParameters(ConfigObject):
"""Network configuration parameters for the master
- @ivar name: master name
+ @ivar uuid: master node's UUID
@ivar ip: master IP
@ivar netmask: master netmask
@ivar netdev: master network device
"""
__slots__ = [
- "name",
+ "uuid",
"ip",
"netmask",
"netdev",
- "ip_family"
+ "ip_family",
]
"nodes",
"nodegroups",
"instances",
+ "networks",
"serial_no",
] + _TIMESTAMPS
"""
mydict = super(ConfigData, self).ToDict()
mydict["cluster"] = mydict["cluster"].ToDict()
- for key in "nodes", "instances", "nodegroups":
- mydict[key] = self._ContainerToDicts(mydict[key])
+ for key in "nodes", "instances", "nodegroups", "networks":
+ mydict[key] = outils.ContainerToDicts(mydict[key])
return mydict
"""
obj = super(ConfigData, cls).FromDict(val)
obj.cluster = Cluster.FromDict(obj.cluster)
- obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
- obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
- obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
+ obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
+ obj.instances = \
+ outils.ContainerFromDicts(obj.instances, dict, Instance)
+ obj.nodegroups = \
+ outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
+ obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
return obj
def HasAnyDiskOfType(self, dev_type):
"""Check if there is a disk of the given type in the configuration.
- @type dev_type: L{constants.LDS_BLOCK}
+ @type dev_type: L{constants.DTS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a disk of the given type was found or not
node.UpgradeConfig()
for instance in self.instances.values():
instance.UpgradeConfig()
+ self._UpgradeEnabledDiskTemplates()
if self.nodegroups is None:
self.nodegroups = {}
for nodegroup in self.nodegroups.values():
nodegroup.UpgradeConfig()
+ InstancePolicy.UpgradeDiskTemplates(
+ nodegroup.ipolicy, self.cluster.enabled_disk_templates)
if self.cluster.drbd_usermode_helper is None:
- # To decide if we set an helper let's check if at least one instance has
- # a DRBD disk. This does not cover all the possible scenarios but it
- # gives a good approximation.
- if self.HasAnyDiskOfType(constants.LD_DRBD8):
+ if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
+ if self.networks is None:
+ self.networks = {}
+ for network in self.networks.values():
+ network.UpgradeConfig()
+
+ def _UpgradeEnabledDiskTemplates(self):
+ """Upgrade the cluster's enabled disk templates by inspecting the currently
+ enabled and/or used disk templates.
+
+ """
+ if not self.cluster.enabled_disk_templates:
+ template_set = \
+ set([inst.disk_template for inst in self.instances.values()])
+ # Add drbd and plain, if lvm is enabled (by specifying a volume group)
+ if self.cluster.volume_group_name:
+ template_set.add(constants.DT_DRBD8)
+ template_set.add(constants.DT_PLAIN)
+ # Set enabled_disk_templates to the inferred disk templates. Order them
+ # according to a preference list that is based on Ganeti's history of
+ # supported disk templates.
+ self.cluster.enabled_disk_templates = []
+ for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
+ if preferred_template in template_set:
+ self.cluster.enabled_disk_templates.append(preferred_template)
+ template_set.remove(preferred_template)
+ self.cluster.enabled_disk_templates.extend(list(template_set))
+ InstancePolicy.UpgradeDiskTemplates(
+ self.cluster.ipolicy, self.cluster.enabled_disk_templates)
class NIC(ConfigObject):
"""Config object representing a network card."""
- __slots__ = ["mac", "ip", "nicparams"]
+ __slots__ = ["name", "mac", "ip", "network",
+ "nicparams", "netinfo", "pci"] + _UUID
@classmethod
def CheckParameterSyntax(cls, nicparams):
@raise errors.ConfigurationError: when a parameter is not valid
"""
- if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
- nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
- err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
- raise errors.ConfigurationError(err)
+ mode = nicparams[constants.NIC_MODE]
+ if (mode not in constants.NIC_VALID_MODES and
+ mode != constants.VALUE_AUTO):
+ raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
- if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+ if (mode == constants.NIC_MODE_BRIDGED and
not nicparams[constants.NIC_LINK]):
- err = "Missing bridged nic link"
- raise errors.ConfigurationError(err)
+ raise errors.ConfigurationError("Missing bridged NIC link")
class Disk(ConfigObject):
"""Config object representing a block device."""
- __slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size", "mode", "params"]
+ __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
+ "size", "mode", "params", "spindles", "pci"] + _UUID +
+ # dynamic_params is special. It depends on the node this instance
+ # is sent to, and should not be persisted.
+ ["dynamic_params"])
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
- return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
+ return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
- return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
+ return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
- return self.dev_type in (constants.LD_LV,)
+ return self.dev_type in (constants.DT_PLAIN,)
def StaticDevPath(self):
"""Return the device path if this device type has a static one.
should check that it is a valid path.
"""
- if self.dev_type == constants.LD_LV:
+ if self.dev_type == constants.DT_PLAIN:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
- elif self.dev_type == constants.LD_BLOCKDEV:
+ elif self.dev_type == constants.DT_BLOCK:
return self.logical_id[1]
- elif self.dev_type == constants.LD_RBD:
+ elif self.dev_type == constants.DT_RBD:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
-1.
"""
- if self.dev_type == constants.LD_DRBD8:
+ if self.dev_type == constants.DT_DRBD8:
return 0
return -1
def IsBasedOnDiskType(self, dev_type):
"""Check if the disk or its children are based on the given type.
- @type dev_type: L{constants.LDS_BLOCK}
+ @type dev_type: L{constants.DTS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a device of the given type was found or not
return True
return self.dev_type == dev_type
- def GetNodes(self, node):
+ def GetNodes(self, node_uuid):
"""This function returns the nodes this device lives on.
Given the node on which the parent of the device lives on (or, in
devices needs to (or can) be assembled.
"""
- if self.dev_type in [constants.LD_LV, constants.LD_FILE,
- constants.LD_BLOCKDEV, constants.LD_RBD]:
- result = [node]
- elif self.dev_type in constants.LDS_DRBD:
+ if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
+ constants.DT_BLOCK, constants.DT_RBD,
+ constants.DT_EXT, constants.DT_SHARED_FILE]:
+ result = [node_uuid]
+ elif self.dev_type in constants.DTS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
- if node not in result:
+ if node_uuid not in result:
raise errors.ConfigurationError("DRBD device passed unknown node")
else:
raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
return result
- def ComputeNodeTree(self, parent_node):
+ def ComputeNodeTree(self, parent_node_uuid):
"""Compute the node/disk tree for this disk and its children.
This method, given the node on which the parent disk lives, will
- return the list of all (node, disk) pairs which describe the disk
+ return the list of all (node UUID, disk) pairs which describe the disk
tree in the most compact way. For example, a drbd/lvm stack
will be returned as (primary_node, drbd) and (secondary_node, drbd)
which represents all the top-level devices on the nodes.
"""
- my_nodes = self.GetNodes(parent_node)
+ my_nodes = self.GetNodes(parent_node_uuid)
result = [(node, self) for node in my_nodes]
if not self.children:
# leaf device
@return: a dictionary of volume-groups and the required size
"""
- if self.dev_type == constants.LD_LV:
+ if self.dev_type == constants.DT_PLAIN:
return {self.logical_id[0]: amount}
- elif self.dev_type == constants.LD_DRBD8:
+ elif self.dev_type == constants.DT_DRBD8:
if self.children:
return self.children[0].ComputeGrowth(amount)
else:
actual algorithms from bdev.
"""
- if self.dev_type in (constants.LD_LV, constants.LD_FILE,
- constants.LD_RBD):
+ if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
+ constants.DT_RBD, constants.DT_EXT,
+ constants.DT_SHARED_FILE):
self.size += amount
- elif self.dev_type == constants.LD_DRBD8:
+ elif self.dev_type == constants.DT_DRBD8:
if self.children:
self.children[0].RecordGrow(amount)
self.size += amount
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
- def Update(self, size=None, mode=None):
- """Apply changes to size and mode.
+ def Update(self, size=None, mode=None, spindles=None):
+ """Apply changes to size, spindles and mode.
"""
- if self.dev_type == constants.LD_DRBD8:
+ if self.dev_type == constants.DT_DRBD8:
if self.children:
self.children[0].Update(size=size, mode=mode)
else:
self.size = size
if mode is not None:
self.mode = mode
+ if spindles is not None:
+ self.spindles = spindles
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
child.UnsetSize()
self.size = 0
- def SetPhysicalID(self, target_node, nodes_ip):
- """Convert the logical ID to the physical ID.
+ def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
+ """Updates the dynamic disk params for the given node.
- This is used only for drbd, which needs ip/port configuration.
-
- The routine descends down and updates its children also, because
- this helps when the only the top device is passed to the remote
- node.
+ This is mainly used for drbd, which needs ip/port configuration.
Arguments:
- - target_node: the node we wish to configure for
+ - target_node_uuid: the node UUID we wish to configure for
- nodes_ip: a mapping of node name to ip
- The target_node must exist in in nodes_ip, and must be one of the
- nodes in the logical ID for each of the DRBD devices encountered
- in the disk tree.
+ The target_node must exist in nodes_ip, and should be one of the
+ nodes in the logical ID if this device is a DRBD device.
"""
if self.children:
for child in self.children:
- child.SetPhysicalID(target_node, nodes_ip)
-
- if self.logical_id is None and self.physical_id is not None:
- return
- if self.dev_type in constants.LDS_DRBD:
- pnode, snode, port, pminor, sminor, secret = self.logical_id
- if target_node not in (pnode, snode):
- raise errors.ConfigurationError("DRBD device not knowing node %s" %
- target_node)
- pnode_ip = nodes_ip.get(pnode, None)
- snode_ip = nodes_ip.get(snode, None)
+ child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
+
+ dyn_disk_params = {}
+ if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
+ pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
+ if target_node_uuid not in (pnode_uuid, snode_uuid):
+ # disk object is being sent to neither the primary nor the secondary
+ # node. reset the dynamic parameters, the target node is not
+ # supposed to use them.
+ self.dynamic_params = dyn_disk_params
+ return
+
+ pnode_ip = nodes_ip.get(pnode_uuid, None)
+ snode_ip = nodes_ip.get(snode_uuid, None)
if pnode_ip is None or snode_ip is None:
raise errors.ConfigurationError("Can't find primary or secondary node"
" for %s" % str(self))
- p_data = (pnode_ip, port)
- s_data = (snode_ip, port)
- if pnode == target_node:
- self.physical_id = p_data + s_data + (pminor, secret)
+ if pnode_uuid == target_node_uuid:
+ dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
+ dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
+ dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
+ dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
else: # it must be secondary, we tested above
- self.physical_id = s_data + p_data + (sminor, secret)
- else:
- self.physical_id = self.logical_id
- return
+ dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
+ dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
+ dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
+ dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
- def ToDict(self):
+ self.dynamic_params = dyn_disk_params
+
+ # pylint: disable=W0221
+ def ToDict(self, include_dynamic_params=False):
"""Disk-specific conversion to standard python types.
This replaces the children lists of objects with lists of
"""
bo = super(Disk, self).ToDict()
+ if not include_dynamic_params and "dynamic_params" in bo:
+ del bo["dynamic_params"]
for attr in ("children",):
alist = bo.get(attr, None)
if alist:
- bo[attr] = self._ContainerToDicts(alist)
+ bo[attr] = outils.ContainerToDicts(alist)
return bo
@classmethod
"""
obj = super(Disk, cls).FromDict(val)
if obj.children:
- obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
+ obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
if obj.logical_id and isinstance(obj.logical_id, list):
obj.logical_id = tuple(obj.logical_id)
- if obj.physical_id and isinstance(obj.physical_id, list):
- obj.physical_id = tuple(obj.physical_id)
- if obj.dev_type in constants.LDS_DRBD:
+ if obj.dev_type in constants.DTS_DRBD:
# we need a tuple of length six here
if len(obj.logical_id) < 6:
obj.logical_id += (None,) * (6 - len(obj.logical_id))
"""Custom str() formatter for disks.
"""
- if self.dev_type == constants.LD_LV:
+ if self.dev_type == constants.DT_PLAIN:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
- elif self.dev_type in constants.LDS_DRBD:
+ elif self.dev_type in constants.DTS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
- if self.physical_id is None:
- phy = "unconfigured"
- else:
- phy = ("configured as %s:%s %s:%s" %
- (self.physical_id[0], self.physical_id[1],
- self.physical_id[2], self.physical_id[3]))
- val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
- (node_a, minor_a, node_b, minor_b, port, phy))
+ val += ("hosts=%s/%d-%s/%d, port=%s, " %
+ (node_a, minor_a, node_b, minor_b, port))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
else:
- val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
- (self.dev_type, self.logical_id, self.physical_id, self.children))
+ val = ("<Disk(type=%s, logical_id=%s, children=%s" %
+ (self.dev_type, self.logical_id, self.children))
if self.iv_name is None:
val += ", not visible"
else:
val += ", visible as /dev/%s" % self.iv_name
+ if self.spindles is not None:
+ val += ", spindles=%s" % self.spindles
if isinstance(self.size, int):
val += ", size=%dm)>" % self.size
else:
for child in self.children:
child.UpgradeConfig()
- if not self.params:
- self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
- else:
- self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
- self.params)
+ # FIXME: Make this configurable in Ganeti 2.7
+ self.params = {}
# add here config upgrade for this disk
+ # map of legacy device types (mapping differing LD constants to new
+ # DT constants)
+ LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
+ if self.dev_type in LEG_DEV_TYPE_MAP:
+ self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
+
+ @staticmethod
+ def ComputeLDParams(disk_template, disk_params):
+ """Computes Logical Disk parameters from Disk Template parameters.
+
+ @type disk_template: string
+ @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
+ @type disk_params: dict
+ @param disk_params: disk template parameters;
+ dict(template_name -> parameters)
+ @rtype: list(dict)
+ @return: a list of dicts, one for each node of the disk hierarchy. Each dict
+ contains the LD parameters of the node. The tree is flattened in-order.
+
+ """
+ if disk_template not in constants.DISK_TEMPLATES:
+ raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
+
+ assert disk_template in disk_params
+
+ result = list()
+ dt_params = disk_params[disk_template]
+ if disk_template == constants.DT_DRBD8:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
+ constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
+ constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
+ constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
+ constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
+ constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
+ constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
+ constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
+ constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
+ constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
+ constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
+ constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
+ constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
+ constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
+ }))
+
+ # data LV
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
+ }))
+
+ # metadata LV
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
+ }))
+
+ elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
+ result.append(constants.DISK_LD_DEFAULTS[disk_template])
+
+ elif disk_template == constants.DT_PLAIN:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
+ constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
+ }))
+
+ elif disk_template == constants.DT_BLOCK:
+ result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK])
+
+ elif disk_template == constants.DT_RBD:
+ result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], {
+ constants.LDP_POOL: dt_params[constants.RBD_POOL],
+ constants.LDP_ACCESS: dt_params[constants.RBD_ACCESS],
+ }))
+
+ elif disk_template == constants.DT_EXT:
+ result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT])
+
+ return result
+
class InstancePolicy(ConfigObject):
"""Config object representing instance policy limits dictionary.
-
Note that this object is not actually used in the config, it's just
used as a placeholder for a few functions.
"""
@classmethod
- def CheckParameterSyntax(cls, ipolicy):
+ def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
+ """Upgrades the ipolicy configuration."""
+ if constants.IPOLICY_DTS in ipolicy:
+ if not set(ipolicy[constants.IPOLICY_DTS]).issubset(
+ set(enabled_disk_templates)):
+ ipolicy[constants.IPOLICY_DTS] = list(
+ set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
+
+ @classmethod
+ def CheckParameterSyntax(cls, ipolicy, check_std):
""" Check the instance policy for validity.
+ @type ipolicy: dict
+ @param ipolicy: dictionary with min/max/std specs and policies
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @raise errors.ConfigurationError: when the policy is not legal
+
"""
- for param in constants.ISPECS_PARAMETERS:
- InstancePolicy.CheckISpecSyntax(ipolicy, param)
+ InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
if constants.IPOLICY_DTS in ipolicy:
InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
for key in constants.IPOLICY_PARAMETERS:
utils.CommaJoin(wrong_keys))
@classmethod
- def CheckISpecSyntax(cls, ipolicy, name):
- """Check the instance policy for validity on a given key.
+ def _CheckIncompleteSpec(cls, spec, keyname):
+ missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
+ if missing_params:
+ msg = ("Missing instance specs parameters for %s: %s" %
+ (keyname, utils.CommaJoin(missing_params)))
+ raise errors.ConfigurationError(msg)
- We check if the instance policy makes sense for a given key, that is
- if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
+ @classmethod
+ def CheckISpecSyntax(cls, ipolicy, check_std):
+ """Check the instance policy specs for validity.
@type ipolicy: dict
- @param ipolicy: dictionary with min, max, std specs
+ @param ipolicy: dictionary with min/max/std specs
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @raise errors.ConfigurationError: when specs are not valid
+
+ """
+ if constants.ISPECS_MINMAX not in ipolicy:
+ # Nothing to check
+ return
+
+ if check_std and constants.ISPECS_STD not in ipolicy:
+ msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
+ raise errors.ConfigurationError(msg)
+ stdspec = ipolicy.get(constants.ISPECS_STD)
+ if check_std:
+ InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
+
+ if not ipolicy[constants.ISPECS_MINMAX]:
+ raise errors.ConfigurationError("Empty minmax specifications")
+ std_is_good = False
+ for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
+ missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
+ if missing:
+ msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
+ raise errors.ConfigurationError(msg)
+ for (key, spec) in minmaxspecs.items():
+ InstancePolicy._CheckIncompleteSpec(spec, key)
+
+ spec_std_ok = True
+ for param in constants.ISPECS_PARAMETERS:
+ par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
+ param, check_std)
+ spec_std_ok = spec_std_ok and par_std_ok
+ std_is_good = std_is_good or spec_std_ok
+ if not std_is_good:
+ raise errors.ConfigurationError("Invalid std specifications")
+
+ @classmethod
+ def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
+ """Check the instance policy specs for validity on a given key.
+
+ We check if the instance specs make sense for a given key, that is
+ if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
+
+ @type minmaxspecs: dict
+ @param minmaxspecs: dictionary with min and max instance spec
+ @type stdspec: dict
+ @param stdspec: dictionary with standard instance spec
@type name: string
@param name: what are the limits for
- @raise errors.ConfigureError: when specs for given name are not valid
-
- """
- min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
- std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
- max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
- err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
- (name,
- ipolicy[constants.ISPECS_MIN].get(name, "-"),
- ipolicy[constants.ISPECS_MAX].get(name, "-"),
- ipolicy[constants.ISPECS_STD].get(name, "-")))
- if min_v > std_v or std_v > max_v:
+ @type check_std: bool
+ @param check_std: Whether to check std value or just assume compliance
+ @rtype: bool
+ @return: C{True} when specs are valid, C{False} when standard spec for the
+ given name is not valid
+ @raise errors.ConfigurationError: when min/max specs for the given name
+ are not valid
+
+ """
+ minspec = minmaxspecs[constants.ISPECS_MIN]
+ maxspec = minmaxspecs[constants.ISPECS_MAX]
+ min_v = minspec[name]
+ max_v = maxspec[name]
+
+ if min_v > max_v:
+ err = ("Invalid specification of min/max values for %s: %s/%s" %
+ (name, min_v, max_v))
raise errors.ConfigurationError(err)
+ elif check_std:
+ std_v = stdspec.get(name, min_v)
+ return std_v >= min_v and std_v <= max_v
+ else:
+ return True
@classmethod
def CheckDiskTemplates(cls, disk_templates):
"""Checks the disk templates for validity.
"""
+ if not disk_templates:
+ raise errors.ConfigurationError("Instance policy must contain" +
+ " at least one disk template")
wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
if wrong:
raise errors.ConfigurationError("Invalid disk template(s) %s" %
"nics",
"disks",
"disk_template",
+ "disks_active",
"network_port",
"serial_no",
] + _TIMESTAMPS + _UUID
return tuple(all_nodes)
secondary_nodes = property(_ComputeSecondaryNodes, None, None,
- "List of secondary nodes")
+ "List of names of secondary nodes")
def _ComputeAllNodes(self):
"""Compute the list of all nodes.
"""
def _Helper(nodes, device):
"""Recursively computes nodes given a top device."""
- if device.dev_type in constants.LDS_DRBD:
+ if device.dev_type in constants.DTS_DRBD:
nodea, nodeb = device.logical_id[:2]
nodes.add(nodea)
nodes.add(nodeb)
return tuple(all_nodes)
all_nodes = property(_ComputeAllNodes, None, None,
- "List of all nodes of the instance")
+ "List of names of all the nodes of the instance")
- def MapLVsByNode(self, lvmap=None, devs=None, node=None):
+ def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
"""Provide a mapping of nodes to LVs this instance owns.
This function figures out what logical volumes should belong on
which nodes, recursing through a device tree.
+ @type lvmap: dict
@param lvmap: optional dictionary to receive the
'node' : ['lv', ...] data.
-
+ @type devs: list of L{Disk}
+ @param devs: disks to get the LV name for. If None, all disks of this
+ instance are used.
+ @type node_uuid: string
+ @param node_uuid: UUID of the node to get the LV names for. If None, the
+ primary node of this instance is used.
@return: None if lvmap arg is given, otherwise, a dictionary of
- the form { 'nodename' : ['volume1', 'volume2', ...], ... };
+ the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
volumeN is of the form "vg_name/lv_name", compatible with
GetVolumeList()
"""
- if node == None:
- node = self.primary_node
+ if node_uuid is None:
+ node_uuid = self.primary_node
if lvmap is None:
lvmap = {
- node: [],
+ node_uuid: [],
}
ret = lvmap
else:
- if not node in lvmap:
- lvmap[node] = []
+ if not node_uuid in lvmap:
+ lvmap[node_uuid] = []
ret = None
if not devs:
devs = self.disks
for dev in devs:
- if dev.dev_type == constants.LD_LV:
- lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
+ if dev.dev_type == constants.DT_PLAIN:
+ lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
- elif dev.dev_type in constants.LDS_DRBD:
+ elif dev.dev_type in constants.DTS_DRBD:
+ # DRBD: the backing LVs exist on both peer nodes, whose identifiers
+ # are logical_id[0] and logical_id[1]; recurse once per peer
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
elif dev.children:
- self.MapLVsByNode(lvmap, dev.children, node)
+ self.MapLVsByNode(lvmap, dev.children, node_uuid)
return ret
for attr in "nics", "disks":
alist = bo.get(attr, None)
if alist:
- nlist = self._ContainerToDicts(alist)
+ nlist = outils.ContainerToDicts(alist)
else:
nlist = []
bo[attr] = nlist
if "admin_up" in val:
del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
- obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
- obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
+ obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
+ obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
return obj
def UpgradeConfig(self):
if self.osparams is None:
self.osparams = {}
UpgradeBeParams(self.beparams)
+ if self.disks_active is None:
+ self.disks_active = self.admin_state == constants.ADMINST_UP
class OS(ConfigObject):
return cls.SplitNameVariant(name)[1]
+class ExtStorage(ConfigObject):
+ """Config object representing an External Storage Provider.
+
+ Holds the provider's name and installation path, the paths of the
+ scripts implementing each storage operation (create, remove, grow,
+ attach, detach, setinfo, verify), and the parameters the provider
+ declares as supported.
+
+ """
+ __slots__ = [
+ "name",
+ "path",
+ "create_script",
+ "remove_script",
+ "grow_script",
+ "attach_script",
+ "detach_script",
+ "setinfo_script",
+ "verify_script",
+ "supported_parameters",
+ ]
+
+
class NodeHvState(ConfigObject):
"""Hypvervisor state on a node.
if self.ndparams is None:
self.ndparams = {}
+ # And remove any global parameter
+ for key in constants.NDC_GLOBALS:
+ if key in self.ndparams:
+ logging.warning("Ignoring %s node parameter for node %s",
+ key, self.name)
+ del self.ndparams[key]
if self.powered is None:
self.powered = True
hv_state = data.get("hv_state", None)
if hv_state is not None:
- data["hv_state"] = self._ContainerToDicts(hv_state)
+ data["hv_state"] = outils.ContainerToDicts(hv_state)
disk_state = data.get("disk_state", None)
if disk_state is not None:
data["disk_state"] = \
- dict((key, self._ContainerToDicts(value))
+ dict((key, outils.ContainerToDicts(value))
for (key, value) in disk_state.items())
return data
obj = super(Node, cls).FromDict(val)
if obj.hv_state is not None:
- obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
+ obj.hv_state = \
+ outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
if obj.disk_state is not None:
obj.disk_state = \
- dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
+ dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
for (key, value) in obj.disk_state.items())
return obj
"hv_state_static",
"disk_state_static",
"alloc_policy",
+ "networks",
] + _TIMESTAMPS + _UUID
def ToDict(self):
if self.mtime is None:
self.mtime = time.time()
- self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.diskparams is None:
+ self.diskparams = {}
if self.ipolicy is None:
self.ipolicy = MakeEmptyIPolicy()
+ if self.networks is None:
+ self.networks = {}
+
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
__slots__ = [
"serial_no",
"rsahostkeypub",
+ "dsahostkeypub",
"highest_used_port",
"tcpudp_port_pool",
"mac_prefix",
"prealloc_wipe_disks",
"hv_state_static",
"disk_state_static",
+ "enabled_disk_templates",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
# code can be removed once upgrading straight from 2.0 is deprecated.
if self.default_hypervisor is not None:
self.enabled_hypervisors = ([self.default_hypervisor] +
- [hvname for hvname in self.enabled_hypervisors
- if hvname != self.default_hypervisor])
+ [hvname for hvname in self.enabled_hypervisors
+ if hvname != self.default_hypervisor])
self.default_hypervisor = None
# maintain_node_health added after 2.1.1
if self.use_external_mip_script is None:
self.use_external_mip_script = False
- self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.diskparams:
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+ else:
+ self.diskparams = constants.DISK_DT_DEFAULTS.copy()
# instance policy added before 2.6
if self.ipolicy is None:
# we can either make sure to upgrade the ipolicy always, or only
# do it in some corner cases (e.g. missing keys); note that this
# will break any removal of keys from the ipolicy dict
+ wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
+ if wrongkeys:
+ # These keys would be silently removed by FillIPolicy()
+ msg = ("Cluster instance policy contains spurious keys: %s" %
+ utils.CommaJoin(wrongkeys))
+ raise errors.ConfigurationError(msg)
self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
@property
"""
mydict = super(Cluster, self).ToDict()
- mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
+
+ if self.tcpudp_port_pool is None:
+ tcpudp_port_pool = []
+ else:
+ tcpudp_port_pool = list(self.tcpudp_port_pool)
+
+ mydict["tcpudp_port_pool"] = tcpudp_port_pool
+
return mydict
@classmethod
"""
obj = super(Cluster, cls).FromDict(val)
- if not isinstance(obj.tcpudp_port_pool, set):
+
+ if obj.tcpudp_port_pool is None:
+ obj.tcpudp_port_pool = set()
+ elif not isinstance(obj.tcpudp_port_pool, set):
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
+
return obj
+ def SimpleFillDP(self, diskparams):
+ """Fill a given diskparams dict with cluster defaults.
+
+ @type diskparams: dict
+ @param diskparams: The diskparams
+ @rtype: dict
+ @return: The defaults dict
+
+ """
+ return FillDiskParams(self.diskparams, diskparams)
+
def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
"""Get the default hypervisor parameters for the cluster.
"""
return FillIPolicy(self.ipolicy, ipolicy)
+ def IsDiskTemplateEnabled(self, disk_template):
+ """Checks if a particular disk template is enabled.
+
+ @type disk_template: string
+ @param disk_template: the disk template to be checked
+ @rtype: bool
+
+ """
+ return utils.storage.IsDiskTemplateEnabled(
+ disk_template, self.enabled_disk_templates)
+
+ def IsFileStorageEnabled(self):
+ """Checks if file storage is enabled.
+
+ @rtype: bool
+
+ """
+ return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
+
+ def IsSharedFileStorageEnabled(self):
+ """Checks if shared file storage is enabled.
+
+ @rtype: bool
+
+ """
+ return utils.storage.IsSharedFileStorageEnabled(
+ self.enabled_disk_templates)
+
class BlockDevStatus(ConfigObject):
"""Config object representing the status of a block device."""
"""
mydict = super(_QueryResponseBase, self).ToDict()
- mydict["fields"] = self._ContainerToDicts(mydict["fields"])
+ mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
return mydict
@classmethod
"""
obj = super(_QueryResponseBase, cls).FromDict(val)
- obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
+ obj.fields = \
+ outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
return obj
@ivar fields: List of L{QueryFieldDefinition} objects
"""
- __slots__ = [
- ]
+ __slots__ = []
class MigrationStatus(ConfigObject):
return True
+class Network(TaggableObject):
+ """Object representing a network definition for ganeti.
+
+ """
+ __slots__ = [
+ "name",
+ "serial_no",
+ "mac_prefix",
+ "network",
+ "network6",
+ "gateway",
+ "gateway6",
+ "reservations",
+ "ext_reservations",
+ ] + _TIMESTAMPS + _UUID
+
+ def HooksDict(self, prefix=""):
+ """Export a dictionary used by hooks with a network's information.
+
+ @type prefix: string
+ @param prefix: Prefix to prepend to the dict entries
+ @rtype: dict
+ @return: environment-style dictionary; the subnet, gateway and MAC
+ prefix entries are included only when the respective field is set
+
+ """
+ result = {
+ "%sNETWORK_NAME" % prefix: self.name,
+ "%sNETWORK_UUID" % prefix: self.uuid,
+ "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
+ }
+ if self.network:
+ result["%sNETWORK_SUBNET" % prefix] = self.network
+ if self.gateway:
+ result["%sNETWORK_GATEWAY" % prefix] = self.gateway
+ if self.network6:
+ result["%sNETWORK_SUBNET6" % prefix] = self.network6
+ if self.gateway6:
+ result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
+ if self.mac_prefix:
+ result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
+
+ return result
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for networks.
+
+ Remove deprecated network_type and family.
+
+ @type val: dict
+ @param val: serialized network object
+
+ """
+ if "network_type" in val:
+ del val["network_type"]
+ if "family" in val:
+ del val["family"]
+ obj = super(Network, cls).FromDict(val)
+ return obj
+
+
+
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.
cfp = cls()
cfp.readfp(buf)
return cfp
+
+
+class LvmPvInfo(ConfigObject):
+ """Information about an LVM physical volume (PV).
+
+ @type name: string
+ @ivar name: name of the PV
+ @type vg_name: string
+ @ivar vg_name: name of the volume group containing the PV
+ @type size: float
+ @ivar size: size of the PV in MiB
+ @type free: float
+ @ivar free: free space in the PV, in MiB
+ @type attributes: string
+ @ivar attributes: PV attributes
+ @type lv_list: list of strings
+ @ivar lv_list: names of the LVs hosted on the PV
+ """
+ __slots__ = [
+ "name",
+ "vg_name",
+ "size",
+ "free",
+ "attributes",
+ "lv_list"
+ ]
+
+ def IsEmpty(self):
+ """Is this PV empty?
+
+ @rtype: bool
+
+ """
+ # Treat the PV as empty when everything but at most 1 MiB is free;
+ # the slack presumably accounts for PV metadata/rounding -- TODO confirm
+ return self.size <= (self.free + 1)
+
+ def IsAllocatable(self):
+ """Is this PV allocatable?
+
+ @rtype: bool
+
+ """
+ # LVM reports allocatable PVs with an "a" in the PV attribute string
+ return ("a" in self.attributes)