-#!/usr/bin/python
+#
#
# Copyright (C) 2006, 2007 Google Inc.
"""
-import cPickle
-from cStringIO import StringIO
import ConfigParser
import re
+import copy
+from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
as None instead of raising an error
Classes derived from this must always declare __slots__ (we use many
- config objects and the memory reduction is useful.
+ config objects and the memory reduction is useful)
"""
__slots__ = []
def __init__(self, **kwargs):
- for i in kwargs:
- setattr(self, i, kwargs[i])
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__slots__:
if name in self.__slots__:
setattr(self, name, state[name])
- @staticmethod
- def FindGlobal(module, name):
- """Function filtering the allowed classes to be un-pickled.
-
- Currently, we only allow the classes from this module which are
- derived from ConfigObject.
-
- """
- # Also support the old module name (ganeti.config)
- cls = None
- if module == "ganeti.config" or module == "ganeti.objects":
- if name == "ConfigData":
- cls = ConfigData
- elif name == "NIC":
- cls = NIC
- elif name == "Disk" or name == "BlockDev":
- cls = Disk
- elif name == "Instance":
- cls = Instance
- elif name == "OS":
- cls = OS
- elif name == "Node":
- cls = Node
- elif name == "Cluster":
- cls = Cluster
- elif module == "__builtin__":
- if name == "set":
- cls = set
- if cls is None:
- raise cPickle.UnpicklingError("Class %s.%s not allowed due to"
- " security concerns" % (module, name))
- return cls
-
- def Dump(self, fobj):
- """Dump this instance to a file object.
-
- Note that we use the HIGHEST_PROTOCOL, as it brings benefits for
- the new classes.
-
- """
- dumper = cPickle.Pickler(fobj, cPickle.HIGHEST_PROTOCOL)
- dumper.dump(self)
-
- @staticmethod
- def Load(fobj):
- """Unpickle data from the given stream.
-
- This uses the `FindGlobal` function to filter the allowed classes.
-
- """
- loader = cPickle.Unpickler(fobj)
- loader.find_global = ConfigObject.FindGlobal
- return loader.load()
-
- def Dumps(self):
- """Dump this instance and return the string representation."""
- buf = StringIO()
- self.Dump(buf)
- return buf.getvalue()
-
- @staticmethod
- def Loads(data):
- """Load data from a string."""
- return ConfigObject.Load(StringIO(data))
-
def ToDict(self):
"""Convert to a dict holding only standard python types.
if not isinstance(val, dict):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
- obj = cls(**val)
+ val_str = dict([(str(k), v) for k, v in val.iteritems()])
+ obj = cls(**val_str)
return obj
@staticmethod
if not isinstance(tag, basestring):
raise errors.TagError("Invalid tag type (not a string)")
if len(tag) > constants.MAX_TAG_LEN:
- raise errors.TagError("Tag too long (>%d)" % constants.MAX_TAG_LEN)
+ raise errors.TagError("Tag too long (>%d characters)" %
+ constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
- if not re.match("^[ \w.+*/:-]+$", tag):
+ if not re.match("^[\w.+*/:-]+$", tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
class ConfigData(ConfigObject):
"""Top-level config object."""
- __slots__ = ["cluster", "nodes", "instances"]
+ __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]
def ToDict(self):
"""Custom function for top-level config data.
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size"]
+ "children", "iv_name", "size", "mode"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
- return self.dev_type in ("drbd", "lvm")
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
- return self.dev_type in ("drbd", "lvm")
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
- return self.dev_type in ("lvm",)
+ return self.dev_type in (constants.LD_LV,)
+
+ def StaticDevPath(self):
+ """Return the device path if this device type has a static one.
+
+ Some devices (LVM for example) live always at the same /dev/ path,
+ irrespective of their status. For such devices, we return this
+ path, for others we return None.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
+ return None
+
+ def ChildrenNeeded(self):
+ """Compute the needed number of children for activation.
+
+ This method will return either -1 (all children) or a positive
+ number denoting the minimum number of children needed for
+ activation (only mirrored devices will usually return >=0).
+
+ Currently, only DRBD8 supports diskless activation (therefore we
+    return 0), for all others we keep the previous semantics and return
+ -1.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ return 0
+ return -1
def GetNodes(self, node):
"""This function returns the nodes this device lives on.
devices needs to (or can) be assembled.
"""
- if self.dev_type == "lvm" or self.dev_type == "md_raid1":
+ if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
result = [node]
- elif self.dev_type == "drbd":
+ elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
if node not in result:
raise errors.ConfigurationError("DRBD device passed unknown node")
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
- tree in the most compact way. For example, a md/drbd/lvm stack
- will be returned as (primary_node, md) and (secondary_node, drbd)
- which represents all the top-level devices on the nodes. This
- means that on the primary node we need to activate the the md (and
- recursively all its children) and on the secondary node we need to
- activate the drbd device (and its children, the two lvm volumes).
+ tree in the most compact way. For example, a drbd/lvm stack
+ will be returned as (primary_node, drbd) and (secondary_node, drbd)
+ which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node)
# be different)
return result
+ def RecordGrow(self, amount):
+ """Update the size of this disk after growth.
+
+    This method recurses over the disk's children and updates their
+    size correspondingly. The method needs to be kept in sync with the
+ actual algorithms from bdev.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ self.size += amount
+ elif self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].RecordGrow(amount)
+ self.size += amount
+ else:
+ raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
+ " disk type %s" % self.dev_type)
+
+ def SetPhysicalID(self, target_node, nodes_ip):
+ """Convert the logical ID to the physical ID.
+
+ This is used only for drbd, which needs ip/port configuration.
+
+ The routine descends down and updates its children also, because
+    this helps when only the top device is passed to the remote
+ node.
+
+ Arguments:
+ - target_node: the node we wish to configure for
+ - nodes_ip: a mapping of node name to ip
+
+    The target_node must exist in nodes_ip, and must be one of the
+ nodes in the logical ID for each of the DRBD devices encountered
+ in the disk tree.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.SetPhysicalID(target_node, nodes_ip)
+
+ if self.logical_id is None and self.physical_id is not None:
+ return
+ if self.dev_type in constants.LDS_DRBD:
+ pnode, snode, port, pminor, sminor, secret = self.logical_id
+ if target_node not in (pnode, snode):
+ raise errors.ConfigurationError("DRBD device not knowing node %s" %
+ target_node)
+ pnode_ip = nodes_ip.get(pnode, None)
+ snode_ip = nodes_ip.get(snode, None)
+ if pnode_ip is None or snode_ip is None:
+ raise errors.ConfigurationError("Can't find primary or secondary node"
+ " for %s" % str(self))
+ p_data = (pnode_ip, port)
+ s_data = (snode_ip, port)
+ if pnode == target_node:
+ self.physical_id = p_data + s_data + (pminor, secret)
+ else: # it must be secondary, we tested above
+ self.physical_id = s_data + p_data + (sminor, secret)
+ else:
+ self.physical_id = self.logical_id
+ return
+
def ToDict(self):
"""Disk-specific conversion to standard python types.
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
obj.physical_id = tuple(obj.physical_id)
+ if obj.dev_type in constants.LDS_DRBD:
+ # we need a tuple of length six here
+ if len(obj.logical_id) < 6:
+ obj.logical_id += (None,) * (6 - len(obj.logical_id))
return obj
+ def __str__(self):
+ """Custom str() formatter for disks.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
+ elif self.dev_type in constants.LDS_DRBD:
+ node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
+ val = "<DRBD8("
+ if self.physical_id is None:
+ phy = "unconfigured"
+ else:
+ phy = ("configured as %s:%s %s:%s" %
+ (self.physical_id[0], self.physical_id[1],
+ self.physical_id[2], self.physical_id[3]))
+
+ val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
+ (node_a, minor_a, node_b, minor_b, port, phy))
+ if self.children and self.children.count(None) == 0:
+ val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
+ else:
+ val += "no local storage"
+ else:
+ val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
+ (self.dev_type, self.logical_id, self.physical_id, self.children))
+ if self.iv_name is None:
+ val += ", not visible"
+ else:
+ val += ", visible as /dev/%s" % self.iv_name
+ if isinstance(self.size, int):
+ val += ", size=%dm)>" % self.size
+ else:
+ val += ", size='%s')>" % (self.size,)
+ return val
+
+ def Verify(self):
+ """Checks that this disk is correctly configured.
+
+ """
+ errors = []
+ if self.mode not in constants.DISK_ACCESS_SET:
+ errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
+ return errors
+
class Instance(TaggableObject):
"""Config object representing an instance."""
"name",
"primary_node",
"os",
- "status",
- "memory",
- "vcpus",
+ "hypervisor",
+ "hvparams",
+ "beparams",
+ "admin_up",
"nics",
"disks",
"disk_template",
+ "network_port",
+ "serial_no",
]
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
+ This is a simple wrapper over _ComputeAllNodes.
+
+ """
+ all_nodes = set(self._ComputeAllNodes())
+ all_nodes.discard(self.primary_node)
+ return tuple(all_nodes)
+
+ secondary_nodes = property(_ComputeSecondaryNodes, None, None,
+ "List of secondary nodes")
+
+ def _ComputeAllNodes(self):
+ """Compute the list of all nodes.
+
Since the data is already there (in the drbd disks), keeping it as
a separate normal attribute is redundant and if not properly
synchronised can cause problems. Thus it's better to compute it
dynamically.
"""
- def _Helper(primary, sec_nodes, device):
- """Recursively computes secondary nodes given a top device."""
- if device.dev_type == 'drbd':
- nodea, nodeb, dummy = device.logical_id
- if nodea == primary:
- candidate = nodeb
- else:
- candidate = nodea
- if candidate not in sec_nodes:
- sec_nodes.append(candidate)
+ def _Helper(nodes, device):
+ """Recursively computes nodes given a top device."""
+ if device.dev_type in constants.LDS_DRBD:
+ nodea, nodeb = device.logical_id[:2]
+ nodes.add(nodea)
+ nodes.add(nodeb)
if device.children:
for child in device.children:
- _Helper(primary, sec_nodes, child)
+ _Helper(nodes, child)
- secondary_nodes = []
+ all_nodes = set()
+ all_nodes.add(self.primary_node)
for device in self.disks:
- _Helper(self.primary_node, secondary_nodes, device)
- return tuple(secondary_nodes)
+ _Helper(all_nodes, device)
+ return tuple(all_nodes)
- secondary_nodes = property(_ComputeSecondaryNodes, None, None,
- "List of secondary nodes")
+ all_nodes = property(_ComputeAllNodes, None, None,
+ "List of all nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
- This function figures out what logical volumes should belong on which
- nodes, recursing through a device tree.
+ This function figures out what logical volumes should belong on
+ which nodes, recursing through a device tree.
- Args:
- lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.
+ @param lvmap: optional dictionary to receive the
+ 'node' : ['lv', ...] data.
- Returns:
- None if lvmap arg is given.
- Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }
+ @return: None if lvmap arg is given, otherwise, a dictionary
+ of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
"""
if node == None:
devs = self.disks
for dev in devs:
- if dev.dev_type == "lvm":
+ if dev.dev_type == constants.LD_LV:
lvmap[node].append(dev.logical_id[1])
- elif dev.dev_type == "drbd":
- if dev.logical_id[0] not in lvmap:
- lvmap[dev.logical_id[0]] = []
-
- if dev.logical_id[1] not in lvmap:
- lvmap[dev.logical_id[1]] = []
-
+ elif dev.dev_type in constants.LDS_DRBD:
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
return ret
- def FindDisk(self, name):
- """Find a disk given having a specified name.
+ def FindDisk(self, idx):
+ """Find a disk given having a specified index.
- This will return the disk which has the given iv_name.
+ This is just a wrapper that does validation of the index.
- """
- for disk in self.disks:
- if disk.iv_name == name:
- return disk
+ @type idx: int
+ @param idx: the disk index
+ @rtype: L{Disk}
+ @return: the corresponding disk
+ @raise errors.OpPrereqError: when the given index is not valid
- return None
+ """
+ try:
+ idx = int(idx)
+ return self.disks[idx]
+ except ValueError, err:
+ raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
+ except IndexError:
+ raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
+ " 0 to %d" % (idx, len(self.disks)))
def ToDict(self):
"""Instance-specific conversion to standard python types.
__slots__ = [
"name",
"path",
- "api_version",
+ "status",
+ "api_versions",
"create_script",
"export_script",
"import_script",
"rename_script",
]
+ @classmethod
+ def FromInvalidOS(cls, err):
+ """Create an OS from an InvalidOS error.
+
+ This routine knows how to convert an InvalidOS error to an OS
+ object representing the broken OS with a meaningful error message.
+
+ """
+ if not isinstance(err, errors.InvalidOS):
+ raise errors.ProgrammerError("Trying to initialize an OS from an"
+ " invalid object of type %s" % type(err))
+
+ return cls(name=err.args[0], path=err.args[1], status=err.args[2])
+
+ def __nonzero__(self):
+ return self.status == constants.OS_VALID_STATUS
+
+ __bool__ = __nonzero__
+
class Node(TaggableObject):
"""Config object representing a node."""
"name",
"primary_ip",
"secondary_ip",
+ "serial_no",
+ "master_candidate",
+ "offline",
+ "drained",
]
class Cluster(TaggableObject):
"""Config object representing the cluster."""
__slots__ = TaggableObject.__slots__ + [
- "config_version",
"serial_no",
"rsahostkeypub",
"highest_used_port",
"mac_prefix",
"volume_group_name",
"default_bridge",
+ "default_hypervisor",
+ "master_node",
+ "master_ip",
+ "master_netdev",
+ "cluster_name",
+ "file_storage_dir",
+ "enabled_hypervisors",
+ "hvparams",
+ "beparams",
+ "candidate_pool_size",
]
+ def ToDict(self):
+ """Custom function for cluster.
+
+ """
+ mydict = super(Cluster, self).ToDict()
+ mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
+ return mydict
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for cluster.
+
+ """
+ obj = super(Cluster, cls).FromDict(val)
+ if not isinstance(obj.tcpudp_port_pool, set):
+ obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
+ return obj
+
+ @staticmethod
+ def FillDict(defaults_dict, custom_dict):
+ """Basic function to apply settings on top a default dict.
+
+ @type defaults_dict: dict
+ @param defaults_dict: dictionary holding the default values
+ @type custom_dict: dict
+ @param custom_dict: dictionary holding customized value
+ @rtype: dict
+ @return: dict with the 'full' values
+
+ """
+ ret_dict = copy.deepcopy(defaults_dict)
+ ret_dict.update(custom_dict)
+ return ret_dict
+
+ def FillHV(self, instance):
+ """Fill an instance's hvparams dict.
+
+ @type instance: object
+ @param instance: the instance parameter to fill
+ @rtype: dict
+ @return: a copy of the instance's hvparams with missing keys filled from
+ the cluster defaults
+
+ """
+ return self.FillDict(self.hvparams.get(instance.hypervisor, {}),
+ instance.hvparams)
+
+ def FillBE(self, instance):
+ """Fill an instance's beparams dict.
+
+ @type instance: object
+ @param instance: the instance parameter to fill
+ @rtype: dict
+ @return: a copy of the instance's beparams with missing keys filled from
+ the cluster defaults
+
+ """
+ return self.FillDict(self.beparams.get(constants.BEGR_DEFAULT, {}),
+ instance.beparams)
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.