root / lib / objects.py @ 644eeef9
History | View | Annotate | Download (12.4 kB)
1 |
#!/usr/bin/python
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
|
22 |
"""Transportable objects for Ganeti.
|
23 |
|
24 |
This module provides small, mostly data-only objects which are safe to
|
25 |
pass to and from external parties.
|
26 |
|
27 |
"""
|
28 |
|
29 |
|
30 |
import cPickle |
31 |
from cStringIO import StringIO |
32 |
import ConfigParser |
33 |
import re |
34 |
|
35 |
from ganeti import errors |
36 |
from ganeti import constants |
37 |
|
38 |
|
39 |
# Names exported by 'from ganeti.objects import *'; TaggableObject and
# SerializableConfigParser are deliberately not listed here.
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster"]
41 |
|
42 |
|
43 |
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful).

  """
  __slots__ = []

  def __init__(self, **kwargs):
    # Every keyword argument becomes an attribute; setattr enforces
    # that only declared slots can be set.
    for key, value in kwargs.items():
      setattr(self, key, value)

  def __getattr__(self, name):
    # Only invoked when normal attribute lookup fails: a declared but
    # unset slot reads as None, anything else is a hard error.
    if name in self.__slots__:
      return None
    raise AttributeError("Invalid object attribute %s.%s" %
                         (type(self).__name__, name))

  def __setitem__(self, key, value):
    # Dict-style assignment, restricted to declared slots.
    if key in self.__slots__:
      setattr(self, key, value)
    else:
      raise KeyError(key)

  def __getstate__(self):
    # Pickle support: capture only the slots that are actually set.
    return dict((name, getattr(self, name))
                for name in self.__slots__
                if hasattr(self, name))

  def __setstate__(self, state):
    # Unpickle support: silently drop entries for slots we no longer
    # declare, so old pickles keep loading after schema changes.
    for name, value in state.items():
      if name in self.__slots__:
        setattr(self, name, value)

  @staticmethod
  def FindGlobal(module, name):
    """Function filtering the allowed classes to be un-pickled.

    Currently, we only allow the classes from this module which are
    derived from ConfigObject.

    """
    cls = None
    # Also support the old module name (ganeti.config)
    if module in ("ganeti.config", "ganeti.objects"):
      allowed = {
        "ConfigData": ConfigData,
        "NIC": NIC,
        # "BlockDev" is the historical name of the Disk class
        "Disk": Disk,
        "BlockDev": Disk,
        "Instance": Instance,
        "OS": OS,
        "Node": Node,
        "Cluster": Cluster,
        }
      cls = allowed.get(name)
    elif module == "__builtin__" and name == "set":
      cls = set
    if cls is None:
      raise cPickle.UnpicklingError("Class %s.%s not allowed due to"
                                    " security concerns" % (module, name))
    return cls

  def Dump(self, fobj):
    """Dump this instance to a file object.

    Note that we use the HIGHEST_PROTOCOL, as it brings benefits for
    the new classes.

    """
    cPickle.Pickler(fobj, cPickle.HIGHEST_PROTOCOL).dump(self)

  @staticmethod
  def Load(fobj):
    """Unpickle data from the given stream.

    This uses the `FindGlobal` function to filter the allowed classes.

    """
    unpickler = cPickle.Unpickler(fobj)
    unpickler.find_global = ConfigObject.FindGlobal
    return unpickler.load()

  def Dumps(self):
    """Dump this instance and return the string representation."""
    sio = StringIO()
    self.Dump(sio)
    return sio.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    return ConfigObject.Load(StringIO(data))
|
149 |
|
150 |
|
151 |
class TaggableObject(object):
  """A generic mix-in class supporting tags.

  Objects gain a lazily-created ``tags`` set plus validation, addition
  and removal helpers.

  """
  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d)" % constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    # raw string so the character class isn't subject to escape processing
    if not re.match(r"^[ \w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    The ``tags`` attribute is created on first access, so unpickled or
    freshly-built objects without tags still work.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    Raises errors.TagError if the tag is invalid or the per-object
    tag limit has been reached.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    # reuse the set we already fetched instead of calling GetTags() again
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    Raises errors.TagError if the tag is not present.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")
201 |
|
202 |
|
203 |
class ConfigData(ConfigObject):
  """Top-level config object.

  Aggregates the cluster description with the node and instance data.

  """
  __slots__ = ["cluster", "nodes", "instances"]
206 |
|
207 |
|
208 |
class NIC(ConfigObject):
  """Config object representing a network card.

  Holds the MAC address, the IP and the bridge of the interface.

  """
  __slots__ = ["mac", "ip", "bridge"]
211 |
|
212 |
|
213 |
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in ("drbd", "lvm")

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in ("drbd", "lvm")

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in ("lvm",)

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    device_type = self.dev_type
    if device_type in ("lvm", "md_raid1"):
      # single-host devices live on the same node as their parent
      return [node]
    if device_type == "drbd":
      # the two mirror hosts are encoded in the logical id
      owners = [self.logical_id[0], self.logical_id[1]]
      if node not in owners:
        raise errors.ConfigurationError("DRBD device passed unknown node")
      return owners
    raise errors.ProgrammerError("Unhandled device type %s" % device_type)

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a md/drbd/lvm stack
    will be returned as (primary_node, md) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes. This
    means that on the primary node we need to activate the md (and
    recursively all its children) and on the secondary node we need to
    activate the drbd device (and its children, the two lvm volumes).

    """
    owner_nodes = self.GetNodes(parent_node)
    tree = [(owner, self) for owner in owner_nodes]
    if not self.children:
      # leaf device
      return tree
    for owner in owner_nodes:
      for child in self.children:
        sub_entries = child.ComputeNodeTree(owner)
        if len(sub_entries) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        # check if child nodes differ from my nodes; note that
        # subdisk can differ from the child itself, and be instead
        # one of its descendants
        for subnode, subdisk in sub_entries:
          if subnode not in owner_nodes:
            tree.append((subnode, subdisk))
          # otherwise child is under our own node, so we ignore this
          # entry (but probably the other results in the list will
          # be different)
    return tree
|
286 |
|
287 |
|
288 |
class Instance(ConfigObject, TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "status",
    "memory",
    "vcpus",
    "nics",
    "disks",
    "disk_template",
    "tags",
    ]

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(primary, sec_nodes, device):
      """Recursively computes secondary nodes given a top device."""
      if device.dev_type == 'drbd':
        nodea, nodeb, dummy = device.logical_id
        # whichever of the two drbd peers is not the primary is a secondary
        if nodea == primary:
          candidate = nodeb
        else:
          candidate = nodea
        if candidate not in sec_nodes:
          sec_nodes.append(candidate)
      if device.children:
        for child in device.children:
          _Helper(primary, sec_nodes, child)

    secondary_nodes = []
    for device in self.disks:
      _Helper(self.primary_node, secondary_nodes, device)
    return tuple(secondary_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on which
    nodes, recursing through a device tree.

    Args:
      lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.

    Returns:
      None if lvmap arg is given.
      Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    # identity test instead of '== None' (fixes the non-idiomatic check)
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {node: []}
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      # the caller owns the dict, so signal that via a None return
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == "lvm":
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type == "drbd":
        # ensure both drbd peers have an entry, even if empty
        if dev.logical_id[0] not in lvmap:
          lvmap[dev.logical_id[0]] = []

        if dev.logical_id[1] not in lvmap:
          lvmap[dev.logical_id[1]] = []

        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, name):
    """Find the disk which has the given iv_name.

    Returns the matching Disk object, or None if no disk matches.

    """
    for disk in self.disks:
      if disk.iv_name == name:
        return disk

    return None
393 |
|
394 |
|
395 |
class OS(ConfigObject):
  """Config object representing an operating system.

  Stores the location, API version and the per-operation scripts of
  an installable OS definition.

  """
  __slots__ = [
    "name",
    "path",
    "api_version",
    "create_script",
    "export_script",
    "import_script",
    ]
405 |
|
406 |
|
407 |
class Node(ConfigObject, TaggableObject):
  """Config object representing a node.

  Carries the node name, its two IP addresses and its tags.

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "tags",
    ]
410 |
|
411 |
|
412 |
class Cluster(ConfigObject, TaggableObject):
  """Config object representing the cluster.

  Holds the cluster-wide settings: config versioning, the public SSH
  host key, network port allocation and the default storage/network
  parameters.

  """
  __slots__ = [
    "config_version",
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "tags",
    ]
425 |
|
426 |
|
427 |
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParse that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    out = StringIO()
    self.write(out)
    return out.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    parser = SerializableConfigParser()
    parser.readfp(StringIO(data))
    return parser
|