lib/objects.py @ 8b3fd458
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster"]

class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful).

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setitem__(self, key, value):
    if key not in self.__slots__:
      raise KeyError(key)
    setattr(self, key, value)

  def __getstate__(self):
    state = {}
    for name in self.__slots__:
      if hasattr(self, name):
        state[name] = getattr(self, name)
    return state

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children which are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    return dict([(k, getattr(self, k, None)) for k in self.__slots__])

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    which are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

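# Usage sketch (illustrative, values made up): ToDict/FromDict round-trip an
# object through plain python types, which is how these objects are meant to
# cross process boundaries (see the module docstring):
#
#   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0")
#   data = nic.ToDict()        # {'mac': 'aa:00:00:11:22:33', 'ip': None,
#                              #  'bridge': 'xen-br0'}
#   nic2 = NIC.FromDict(data)  # a NIC again; unset slots read as None
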
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match("^[ \w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj

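# Usage sketch (illustrative): tags are validated, kept in a set and turned
# into a list only for serialization, so duplicates are silently collapsed:
#
#   node = Node(name="node1.example.com")
#   node.AddTag("group:web")
#   node.AddTag("group:web")      # second add is a no-op (set semantics)
#   node.GetTags()                # set(['group:web'])
#   node.ToDict()["tags"]         # ['group:web']
#   node.RemoveTag("unknown")     # raises errors.TagError("Tag not found")
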
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

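# Usage sketch (illustrative, assumes an existing ConfigData instance named
# config_data): the top-level object serializes its children recursively, so
# a whole configuration can be flattened to builtin types and rebuilt with
# the proper classes on the other side:
#
#   plain = config_data.ToDict()           # nested dicts/lists/strings only
#   restored = ConfigData.FromDict(plain)  # cluster/nodes/instances are
#                                          # Cluster/Node/Instance objects
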
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge"]

class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and
    return -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

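  # Illustrative example: for a DRBD8 disk mirrored between node1 and node2
  # whose LV children live on those same nodes, ComputeNodeTree("node1")
  # yields just [(node1, drbd_disk), (node2, drbd_disk)]: the children's
  # single-node subtrees are already covered by their parent's entries.
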
  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

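  # Illustrative example (made-up values): for a DRBD8 disk with
  #   logical_id = ("node1", "node2", 11000, 0, 0, "secret")
  # and nodes_ip = {"node1": "192.0.2.1", "node2": "192.0.2.2"},
  # SetPhysicalID("node1", nodes_ip) sets
  #   physical_id = ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")
  # i.e. the local (ip, port) pair first, then the remote pair, the local
  # minor and the shared secret.
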
  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s-%s, port=%s, %s, " %
              (self.logical_id[0], self.logical_id[1], self.logical_id[2],
               phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    val += ", size=%dm)>" % self.size
    return val

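# Usage sketch (illustrative, values made up): a plain LVM-backed disk and
# what the helpers above return for it:
#
#   disk = Disk(dev_type=constants.LD_LV, size=1024,
#               logical_id=("xenvg", "disk0"), iv_name="sda")
#   disk.StaticDevPath()   # "/dev/xenvg/disk0"
#   disk.ChildrenNeeded()  # -1 (not DRBD8, so all children are needed)
#   str(disk)              # "<LogicalVolume(/dev/xenvg/disk0,
#                          #   visible as /dev/sda, size=1024m)>"
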
class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "status",
    "memory",
    "vcpus",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ]

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(primary, sec_nodes, device):
      """Recursively computes secondary nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb, dummy = device.logical_id[:3]
        if nodea == primary:
          candidate = nodeb
        else:
          candidate = nodea
        if candidate not in sec_nodes:
          sec_nodes.append(candidate)
      if device.children:
        for child in device.children:
          _Helper(primary, sec_nodes, child)

    secondary_nodes = []
    for device in self.disks:
      _Helper(self.primary_node, secondary_nodes, device)
    return tuple(secondary_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on which
    nodes, recursing through a device tree.

    Args:
      lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.

    Returns:
      None if lvmap arg is given.
      Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.logical_id[0] not in lvmap:
          lvmap[dev.logical_id[0]] = []

        if dev.logical_id[1] not in lvmap:
          lvmap[dev.logical_id[1]] = []

        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

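  # Illustrative example (made-up names): for an instance with one DRBD8
  # disk whose data and metadata LVs are mirrored between the primary and
  # one secondary node, MapLVsByNode() returns roughly
  #   { "node1": ["disk0_data", "disk0_meta"],
  #     "node2": ["disk0_data", "disk0_meta"] }
  # i.e. every LV name listed under each node that holds a copy of it.
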
  def FindDisk(self, name):
    """Find a disk with a specified name.

    This will return the disk which has the given iv_name.

    """
    for disk in self.disks:
      if disk.iv_name == name:
        return disk

    return None

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "status",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]

  @classmethod
  def FromInvalidOS(cls, err):
    """Create an OS from an InvalidOS error.

    This routine knows how to convert an InvalidOS error to an OS
    object representing the broken OS with a meaningful error message.

    """
    if not isinstance(err, errors.InvalidOS):
      raise errors.ProgrammerError("Trying to initialize an OS from an"
                                   " invalid object of type %s" % type(err))

    return cls(name=err.args[0], path=err.args[1], status=err.args[2])

  def __nonzero__(self):
    return self.status == constants.OS_VALID_STATUS

  __bool__ = __nonzero__


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    ]

class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    ]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  @staticmethod
  def FillDict(defaults_dict, custom_dict):
    """Basic function to apply settings on top of a default dict.

    @type defaults_dict: dict
    @param defaults_dict: dictionary holding the default values
    @type custom_dict: dict
    @param custom_dict: dictionary holding customized values
    @rtype: dict
    @return: dict with the 'full' values

    """
    ret_dict = copy.deepcopy(defaults_dict)
    ret_dict.update(custom_dict)
    return ret_dict

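  # Illustrative example (the dicts below are made up): customized values
  # win, keys missing from custom_dict keep their defaults:
  #
  #   Cluster.FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
  #   # -> {"memory": 512, "vcpus": 1}
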
  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: object
    @param instance: the instance for which to fill the parameters
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.FillDict(self.hvparams.get(instance.hypervisor, {}),
                         instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: object
    @param instance: the instance for which to fill the parameters
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.FillDict(self.beparams.get(constants.BEGR_DEFAULT, {}),
                         instance.beparams)

class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
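
# Usage sketch (illustrative, values made up): Dumps/Loads round-trip a
# parser through a plain string, so ini-style data can be shipped as text
# and rebuilt on the other side:
#
#   cfp = SerializableConfigParser()
#   cfp.add_section("instance")
#   cfp.set("instance", "name", "web1")
#   text = cfp.Dumps()                    # "[instance]\nname = web1\n\n"
#   cfp2 = SerializableConfigParser.Loads(text)
#   cfp2.get("instance", "name")          # "web1"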