-#!/usr/bin/python
+#
#
# Copyright (C) 2006, 2007 Google Inc.
"""
-import cPickle
-from cStringIO import StringIO
import ConfigParser
+import re
+from cStringIO import StringIO
from ganeti import errors
+from ganeti import constants
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
__slots__ = []
def __init__(self, **kwargs):
- for i in kwargs:
- setattr(self, i, kwargs[i])
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__slots__:
- raise AttributeError, ("Invalid object attribute %s.%s" %
- (type(self).__name__, name))
+ raise AttributeError("Invalid object attribute %s.%s" %
+ (type(self).__name__, name))
return None
def __setitem__(self, key, value):
if key not in self.__slots__:
- raise KeyError, key
+ raise KeyError(key)
setattr(self, key, value)
def __getstate__(self):
if name in self.__slots__:
setattr(self, name, state[name])
- @staticmethod
- def FindGlobal(module, name):
- """Function filtering the allowed classes to be un-pickled.
+ def ToDict(self):
+ """Convert to a dict holding only standard python types.
- Currently, we only allow the classes from this module which are
- derived from ConfigObject.
+ The generic routine just dumps all of this object's attributes in
+  a dict. It does not work if the class has children that are
+ ConfigObjects themselves (e.g. the nics list in an Instance), in
+ which case the object should subclass the function in order to
+ make sure all objects returned are only standard python types.
"""
- # Also support the old module name (ganeti.config)
- cls = None
- if module == "ganeti.config" or module == "ganeti.objects":
- if name == "ConfigData":
- cls = ConfigData
- elif name == "NIC":
- cls = NIC
- elif name == "Disk" or name == "BlockDev":
- cls = Disk
- elif name == "Instance":
- cls = Instance
- elif name == "OS":
- cls = OS
- elif name == "Node":
- cls = Node
- elif name == "Cluster":
- cls = Cluster
- elif module == "__builtin__":
- if name == "set":
- cls = set
- if cls is None:
- raise cPickle.UnpicklingError, ("Class %s.%s not allowed due to"
- " security concerns" % (module, name))
- return cls
-
- def Dump(self, fobj):
- """Dump this instance to a file object.
-
- Note that we use the HIGHEST_PROTOCOL, as it brings benefits for
- the new classes.
+ return dict([(k, getattr(self, k, None)) for k in self.__slots__])
+
+ @classmethod
+ def FromDict(cls, val):
+ """Create an object from a dictionary.
+
+ This generic routine takes a dict, instantiates a new instance of
+ the given class, and sets attributes based on the dict content.
+
+ As for `ToDict`, this does not work if the class has children
+    that are ConfigObjects themselves (e.g. the nics list in an
+ Instance), in which case the object should subclass the function
+ and alter the objects.
"""
- dumper = cPickle.Pickler(fobj, cPickle.HIGHEST_PROTOCOL)
- dumper.dump(self)
+ if not isinstance(val, dict):
+ raise errors.ConfigurationError("Invalid object passed to FromDict:"
+ " expected dict, got %s" % type(val))
+ val_str = dict([(str(k), v) for k, v in val.iteritems()])
+ obj = cls(**val_str)
+ return obj
@staticmethod
- def Load(fobj):
- """Unpickle data from the given stream.
+ def _ContainerToDicts(container):
+ """Convert the elements of a container to standard python types.
- This uses the `FindGlobal` function to filter the allowed classes.
+ This method converts a container with elements derived from
+ ConfigData to standard python types. If the container is a dict,
+ we don't touch the keys, only the values.
"""
- loader = cPickle.Unpickler(fobj)
- loader.find_global = ConfigObject.FindGlobal
- return loader.load()
+ if isinstance(container, dict):
+ ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
+ elif isinstance(container, (list, tuple, set, frozenset)):
+ ret = [elem.ToDict() for elem in container]
+ else:
+ raise TypeError("Invalid type %s passed to _ContainerToDicts" %
+ type(container))
+ return ret
- def Dumps(self):
- """Dump this instance and return the string representation."""
- buf = StringIO()
- self.Dump(buf)
- return buf.getvalue()
+ @staticmethod
+ def _ContainerFromDicts(source, c_type, e_type):
+ """Convert a container from standard python types.
+
+ This method converts a container with standard python types to
+ ConfigData objects. If the container is a dict, we don't touch the
+ keys, only the values.
+
+ """
+ if not isinstance(c_type, type):
+ raise TypeError("Container type %s passed to _ContainerFromDicts is"
+ " not a type" % type(c_type))
+ if c_type is dict:
+ ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
+ elif c_type in (list, tuple, set, frozenset):
+ ret = c_type([e_type.FromDict(elem) for elem in source])
+ else:
+ raise TypeError("Invalid container type %s passed to"
+ " _ContainerFromDicts" % c_type)
+ return ret
+
+ def __repr__(self):
+ """Implement __repr__ for ConfigObjects."""
+ return repr(self.ToDict())
+
+
+class TaggableObject(ConfigObject):
+ """An generic class supporting tags.
+
+ """
+ __slots__ = ConfigObject.__slots__ + ["tags"]
@staticmethod
- def Loads(data):
- """Load data from a string."""
- return ConfigObject.Load(StringIO(data))
+ def ValidateTag(tag):
+ """Check if a tag is valid.
+
+ If the tag is invalid, an errors.TagError will be raised. The
+ function has no return value.
+
+ """
+ if not isinstance(tag, basestring):
+ raise errors.TagError("Invalid tag type (not a string)")
+ if len(tag) > constants.MAX_TAG_LEN:
+ raise errors.TagError("Tag too long (>%d characters)" %
+ constants.MAX_TAG_LEN)
+ if not tag:
+ raise errors.TagError("Tags cannot be empty")
+ if not re.match("^[ \w.+*/:-]+$", tag):
+ raise errors.TagError("Tag contains invalid characters")
+
+ def GetTags(self):
+ """Return the tags list.
+
+ """
+ tags = getattr(self, "tags", None)
+ if tags is None:
+ tags = self.tags = set()
+ return tags
+
+ def AddTag(self, tag):
+ """Add a new tag.
+
+ """
+ self.ValidateTag(tag)
+ tags = self.GetTags()
+ if len(tags) >= constants.MAX_TAGS_PER_OBJ:
+ raise errors.TagError("Too many tags")
+ self.GetTags().add(tag)
+
+ def RemoveTag(self, tag):
+ """Remove a tag.
+
+ """
+ self.ValidateTag(tag)
+ tags = self.GetTags()
+ try:
+ tags.remove(tag)
+ except KeyError:
+ raise errors.TagError("Tag not found")
+
+ def ToDict(self):
+ """Taggable-object-specific conversion to standard python types.
+
+ This replaces the tags set with a list.
+
+ """
+ bo = super(TaggableObject, self).ToDict()
+
+ tags = bo.get("tags", None)
+ if isinstance(tags, set):
+ bo["tags"] = list(tags)
+ return bo
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for instances.
+
+ """
+ obj = super(TaggableObject, cls).FromDict(val)
+ if hasattr(obj, "tags") and isinstance(obj.tags, list):
+ obj.tags = set(obj.tags)
+ return obj
class ConfigData(ConfigObject):
"""Top-level config object."""
__slots__ = ["cluster", "nodes", "instances"]
+ def ToDict(self):
+ """Custom function for top-level config data.
+
+ This just replaces the list of instances, nodes and the cluster
+ with standard python types.
+
+ """
+ mydict = super(ConfigData, self).ToDict()
+ mydict["cluster"] = mydict["cluster"].ToDict()
+ for key in "nodes", "instances":
+ mydict[key] = self._ContainerToDicts(mydict[key])
+
+ return mydict
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for top-level config data
+
+ """
+ obj = super(ConfigData, cls).FromDict(val)
+ obj.cluster = Cluster.FromDict(obj.cluster)
+ obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
+ obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
+ return obj
+
class NIC(ConfigObject):
"""Config object representing a network card."""
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
- return self.dev_type in ("drbd", "lvm")
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
- return self.dev_type in ("drbd", "lvm")
+ return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
- return self.dev_type in ("lvm",)
+ return self.dev_type in (constants.LD_LV,)
+
+ def StaticDevPath(self):
+ """Return the device path if this device type has a static one.
+
+    Some devices (LVM for example) always live at the same /dev/ path,
+ irrespective of their status. For such devices, we return this
+ path, for others we return None.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
+ return None
+
+ def ChildrenNeeded(self):
+ """Compute the needed number of children for activation.
+
+ This method will return either -1 (all children) or a positive
+ number denoting the minimum number of children needed for
+ activation (only mirrored devices will usually return >=0).
+
+ Currently, only DRBD8 supports diskless activation (therefore we
+    return 0), for all others we keep the previous semantics and return
+ -1.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ return 0
+ return -1
def GetNodes(self, node):
"""This function returns the nodes this device lives on.
devices needs to (or can) be assembled.
"""
- if self.dev_type == "lvm" or self.dev_type == "md_raid1":
+ if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
result = [node]
- elif self.dev_type == "drbd":
+ elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
if node not in result:
- raise errors.ConfigurationError, ("DRBD device passed unknown node")
+ raise errors.ConfigurationError("DRBD device passed unknown node")
else:
- raise errors.ProgrammerError, "Unhandled device type %s" % self.dev_type
+ raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
return result
def ComputeNodeTree(self, parent_node):
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
- tree in the most compact way. For example, a md/drbd/lvm stack
- will be returned as (primary_node, md) and (secondary_node, drbd)
- which represents all the top-level devices on the nodes. This
- means that on the primary node we need to activate the the md (and
- recursively all its children) and on the secondary node we need to
- activate the drbd device (and its children, the two lvm volumes).
+ tree in the most compact way. For example, a drbd/lvm stack
+ will be returned as (primary_node, drbd) and (secondary_node, drbd)
+ which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node)
# be different)
return result
+ def RecordGrow(self, amount):
+ """Update the size of this disk after growth.
+
+    This method recurses over the disk's children and updates their
+    size correspondingly. The method needs to be kept in sync with the
+ actual algorithms from bdev.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ self.size += amount
+ elif self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].RecordGrow(amount)
+ self.size += amount
+ else:
+ raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
+ " disk type %s" % self.dev_type)
+
+ def SetPhysicalID(self, target_node, nodes_ip):
+ """Convert the logical ID to the physical ID.
+
+ This is used only for drbd, which needs ip/port configuration.
+
+ The routine descends down and updates its children also, because
+    this helps when only the top device is passed to the remote
+ node.
+
+ Arguments:
+ - target_node: the node we wish to configure for
+ - nodes_ip: a mapping of node name to ip
+
+    The target_node must exist in nodes_ip, and must be one of the
+ nodes in the logical ID for each of the DRBD devices encountered
+ in the disk tree.
+
+ """
+ if self.children:
+ for child in self.children:
+ child.SetPhysicalID(target_node, nodes_ip)
+
+ if self.logical_id is None and self.physical_id is not None:
+ return
+ if self.dev_type in constants.LDS_DRBD:
+ pnode, snode, port = self.logical_id
+ if target_node not in (pnode, snode):
+ raise errors.ConfigurationError("DRBD device not knowing node %s" %
+ target_node)
+ pnode_ip = nodes_ip.get(pnode, None)
+ snode_ip = nodes_ip.get(snode, None)
+ if pnode_ip is None or snode_ip is None:
+ raise errors.ConfigurationError("Can't find primary or secondary node"
+ " for %s" % str(self))
+ if pnode == target_node:
+ self.physical_id = (pnode_ip, port,
+ snode_ip, port)
+ else: # it must be secondary, we tested above
+ self.physical_id = (snode_ip, port,
+ pnode_ip, port)
+ else:
+ self.physical_id = self.logical_id
+ return
+
+ def ToDict(self):
+ """Disk-specific conversion to standard python types.
-class Instance(ConfigObject):
+ This replaces the children lists of objects with lists of
+ standard python types.
+
+ """
+ bo = super(Disk, self).ToDict()
+
+ for attr in ("children",):
+ alist = bo.get(attr, None)
+ if alist:
+ bo[attr] = self._ContainerToDicts(alist)
+ return bo
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for Disks
+
+ """
+ obj = super(Disk, cls).FromDict(val)
+ if obj.children:
+ obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
+ if obj.logical_id and isinstance(obj.logical_id, list):
+ obj.logical_id = tuple(obj.logical_id)
+ if obj.physical_id and isinstance(obj.physical_id, list):
+ obj.physical_id = tuple(obj.physical_id)
+ return obj
+
+ def __str__(self):
+ """Custom str() formatter for disks.
+
+ """
+ if self.dev_type == constants.LD_LV:
+ val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
+ elif self.dev_type in constants.LDS_DRBD:
+ val = "<DRBD8("
+ if self.physical_id is None:
+ phy = "unconfigured"
+ else:
+ phy = ("configured as %s:%s %s:%s" %
+ (self.physical_id[0], self.physical_id[1],
+ self.physical_id[2], self.physical_id[3]))
+
+ val += ("hosts=%s-%s, port=%s, %s, " %
+ (self.logical_id[0], self.logical_id[1], self.logical_id[2],
+ phy))
+ if self.children and self.children.count(None) == 0:
+ val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
+ else:
+ val += "no local storage"
+ else:
+ val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
+ (self.dev_type, self.logical_id, self.physical_id, self.children))
+ if self.iv_name is None:
+ val += ", not visible"
+ else:
+ val += ", visible as /dev/%s" % self.iv_name
+ val += ", size=%dm)>" % self.size
+ return val
+
+
+class Instance(TaggableObject):
"""Config object representing an instance."""
- __slots__ = [
+ __slots__ = TaggableObject.__slots__ + [
"name",
"primary_node",
"os",
"nics",
"disks",
"disk_template",
+ "network_port",
+ "kernel_path",
+ "initrd_path",
+ "hvm_boot_order",
+ "hvm_acpi",
+ "hvm_pae",
+ "hvm_cdrom_image_path",
+ "hvm_nic_type",
+ "hvm_disk_type",
+ "vnc_bind_address",
]
def _ComputeSecondaryNodes(self):
"""
def _Helper(primary, sec_nodes, device):
"""Recursively computes secondary nodes given a top device."""
- if device.dev_type == 'drbd':
+ if device.dev_type in constants.LDS_DRBD:
nodea, nodeb, dummy = device.logical_id
if nodea == primary:
candidate = nodeb
devs = self.disks
for dev in devs:
- if dev.dev_type == "lvm":
+ if dev.dev_type == constants.LD_LV:
lvmap[node].append(dev.logical_id[1])
- elif dev.dev_type == "drbd":
+ elif dev.dev_type in constants.LDS_DRBD:
if dev.logical_id[0] not in lvmap:
lvmap[dev.logical_id[0]] = []
return ret
+ def FindDisk(self, name):
+ """Find a disk given having a specified name.
+
+ This will return the disk which has the given iv_name.
+
+ """
+ for disk in self.disks:
+ if disk.iv_name == name:
+ return disk
+
+ return None
+
+ def ToDict(self):
+ """Instance-specific conversion to standard python types.
+
+ This replaces the children lists of objects with lists of standard
+ python types.
+
+ """
+ bo = super(Instance, self).ToDict()
+
+ for attr in "nics", "disks":
+ alist = bo.get(attr, None)
+ if alist:
+ nlist = self._ContainerToDicts(alist)
+ else:
+ nlist = []
+ bo[attr] = nlist
+ return bo
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for instances.
+
+ """
+ obj = super(Instance, cls).FromDict(val)
+ obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
+ obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
+ return obj
+
class OS(ConfigObject):
"""Config object representing an operating system."""
__slots__ = [
"name",
"path",
+ "status",
"api_version",
"create_script",
"export_script",
- "import_script"
+ "import_script",
+ "rename_script",
]
+ @classmethod
+ def FromInvalidOS(cls, err):
+ """Create an OS from an InvalidOS error.
+
+ This routine knows how to convert an InvalidOS error to an OS
+ object representing the broken OS with a meaningful error message.
+
+ """
+ if not isinstance(err, errors.InvalidOS):
+ raise errors.ProgrammerError("Trying to initialize an OS from an"
+ " invalid object of type %s" % type(err))
+
+ return cls(name=err.args[0], path=err.args[1], status=err.args[2])
+
+ def __nonzero__(self):
+ return self.status == constants.OS_VALID_STATUS
+
+ __bool__ = __nonzero__
-class Node(ConfigObject):
+
+class Node(TaggableObject):
"""Config object representing a node."""
- __slots__ = ["name", "primary_ip", "secondary_ip"]
+ __slots__ = TaggableObject.__slots__ + [
+ "name",
+ "primary_ip",
+ "secondary_ip",
+ ]
-class Cluster(ConfigObject):
+class Cluster(TaggableObject):
"""Config object representing the cluster."""
- __slots__ = [
- "config_version",
+ __slots__ = TaggableObject.__slots__ + [
"serial_no",
"rsahostkeypub",
"highest_used_port",
"default_bridge",
]
+ def ToDict(self):
+ """Custom function for cluster.
+
+ """
+ mydict = super(Cluster, self).ToDict()
+ mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
+ return mydict
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for cluster.
+
+ """
+ obj = super(Cluster, cls).FromDict(val)
+ if not isinstance(obj.tcpudp_port_pool, set):
+ obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
+ return obj
+
+
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.