4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""
import ConfigParser
import copy
import re
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
39 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
40 "OS", "Node", "Cluster"]
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.items():
      setattr(self, k, v)

  def __getattr__(self, name):
    # only reached when normal attribute lookup fails, i.e. the name is
    # a declared but never-assigned slot (return None) or not a slot at
    # all (raise, to catch typos early)
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setitem__(self, key, value):
    # dict-style assignment, restricted to declared slots
    if key not in self.__slots__:
      raise KeyError(key)
    setattr(self, key, value)

  def __getstate__(self):
    # __slots__ classes have no __dict__, so pickling needs an explicit
    # state dict; unset slots are simply omitted
    state = {}
    for name in self.__slots__:
      if hasattr(self, name):
        state[name] = getattr(self, name)
    return state

  def __setstate__(self, state):
    # ignore state keys that are not (or no longer) slots, so pickles
    # from older/newer versions of a class can still be loaded
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    return dict([(k, getattr(self, k, None)) for k in self.__slots__])

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    @raise errors.ConfigurationError: if val is not a dict

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keys may arrive as unicode from serialization; keyword arguments
    # must be plain strings
    val_str = dict([(str(k), v) for k, v in val.items()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.items()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.items()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match("^[ \w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    # lazily create the set so unpickled/fresh objects work without it
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge"]
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device, the pairs above describe it completely
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # take care of this)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    val += ", size=%dm)>" % self.size
    return val
501 class Instance(TaggableObject):
502 """Config object representing an instance."""
503 __slots__ = TaggableObject.__slots__ + [
518 def _ComputeSecondaryNodes(self):
519 """Compute the list of secondary nodes.
521 This is a simple wrapper over _ComputeAllNodes.
524 all_nodes = set(self._ComputeAllNodes())
525 all_nodes.discard(self.primary_node)
526 return tuple(all_nodes)
528 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
529 "List of secondary nodes")
531 def _ComputeAllNodes(self):
532 """Compute the list of all nodes.
534 Since the data is already there (in the drbd disks), keeping it as
535 a separate normal attribute is redundant and if not properly
536 synchronised can cause problems. Thus it's better to compute it
540 def _Helper(nodes, device):
541 """Recursively computes nodes given a top device."""
542 if device.dev_type in constants.LDS_DRBD:
543 nodea, nodeb = device.logical_id[:2]
547 for child in device.children:
548 _Helper(nodes, child)
551 all_nodes.add(self.primary_node)
552 for device in self.disks:
553 _Helper(all_nodes, device)
554 return tuple(all_nodes)
556 all_nodes = property(_ComputeAllNodes, None, None,
557 "List of all nodes of the instance")
559 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
560 """Provide a mapping of nodes to LVs this instance owns.
562 This function figures out what logical volumes should belong on
563 which nodes, recursing through a device tree.
565 @param lvmap: optional dictionary to receive the
566 'node' : ['lv', ...] data.
568 @return: None if lvmap arg is given, otherwise, a dictionary
569 of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
573 node = self.primary_node
576 lvmap = { node : [] }
579 if not node in lvmap:
587 if dev.dev_type == constants.LD_LV:
588 lvmap[node].append(dev.logical_id[1])
590 elif dev.dev_type in constants.LDS_DRBD:
592 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
593 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
596 self.MapLVsByNode(lvmap, dev.children, node)
600 def FindDisk(self, idx):
601 """Find a disk given having a specified index.
603 This is just a wrapper that does validation of the index.
606 @param idx: the disk index
608 @return: the corresponding disk
609 @raise errors.OpPrereqError: when the given index is not valid
614 return self.disks[idx]
615 except ValueError, err:
616 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
618 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
619 " 0 to %d" % (idx, len(self.disks)))
622 """Instance-specific conversion to standard python types.
624 This replaces the children lists of objects with lists of standard
628 bo = super(Instance, self).ToDict()
630 for attr in "nics", "disks":
631 alist = bo.get(attr, None)
633 nlist = self._ContainerToDicts(alist)
640 def FromDict(cls, val):
641 """Custom function for instances.
644 obj = super(Instance, cls).FromDict(val)
645 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
646 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
class OS(ConfigObject):
  """Config object representing an operating system."""
  # NOTE(review): slot list reconstructed (missing from this view) --
  # verify against the canonical source
  __slots__ = [
    "name",
    "path",
    "status",
    "api_version",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]

  @classmethod
  def FromInvalidOS(cls, err):
    """Create an OS from an InvalidOS error.

    This routine knows how to convert an InvalidOS error to an OS
    object representing the broken OS with a meaningful error message.

    """
    if not isinstance(err, errors.InvalidOS):
      raise errors.ProgrammerError("Trying to initialize an OS from an"
                                   " invalid object of type %s" % type(err))

    return cls(name=err.args[0], path=err.args[1], status=err.args[2])

  def __nonzero__(self):
    return self.status == constants.OS_VALID_STATUS

  # python 3 truthiness alias
  __bool__ = __nonzero__
class Node(TaggableObject):
  """Config object representing a node."""
  # NOTE(review): slot list reconstructed (missing from this view) --
  # verify against the canonical source
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    ]
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  # NOTE(review): only part of the slot list is visible in this view;
  # the remainder was reconstructed -- verify against the canonical source
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "candidate_pool_size",
    ]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    # the port pool is a set, which is not json-serializable
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  @staticmethod
  def FillDict(defaults_dict, custom_dict):
    """Basic function to apply settings on top a default dict.

    @type defaults_dict: dict
    @param defaults_dict: dictionary holding the default values
    @type custom_dict: dict
    @param custom_dict: dictionary holding customized value
    @rtype: dict
    @return: dict with the 'full' values

    """
    # deepcopy so that nested default values are never shared/mutated
    ret_dict = copy.deepcopy(defaults_dict)
    ret_dict.update(custom_dict)
    return ret_dict

  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: object
    @param instance: the instance parameter to fill

    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.FillDict(self.hvparams.get(instance.hypervisor, {}),
                         instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: object
    @param instance: the instance parameter to fill

    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.FillDict(self.beparams.get(constants.BEGR_DEFAULT, {}),
                         instance.beparams)
778 class SerializableConfigParser(ConfigParser.SafeConfigParser):
779 """Simple wrapper over ConfigParse that allows serialization.
781 This class is basically ConfigParser.SafeConfigParser with two
782 additional methods that allow it to serialize/unserialize to/from a
787 """Dump this instance and return the string representation."""
790 return buf.getvalue()
794 """Load data from a string."""
796 cfp = SerializableConfigParser()