lib/config.py @ 89ff8e15
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""

import os
import tempfile
import random

from ganeti import errors
from ganeti import logger
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects


class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name

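  # The methods below share a common access pattern: read-only accessors call
  # _OpenConfig() followed by _ReleaseLock(), while methods that modify the
  # configuration call _OpenConfig() and finish with _WriteConfig(), which
  # bumps the serial number and redistributes the file to the other nodes.
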
  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

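  # GenerateMAC() builds an address from the cluster MAC prefix plus three
  # random octets; e.g. a prefix of "aa:00:00" (illustrative value) would
  # yield something like "aa:00:00:3f:9c:01".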
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    self._OpenConfig()
    self._ReleaseLock()
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    return mac

  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    self._ReleaseLock()
    all_macs = self._AllMACs()
    return mac in all_macs

  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    """
    self._OpenConfig()
    self._ReleaseLock()
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique disk name.

    This checks the current node, instance and disk names for
    duplicates.

    Args:
      - exceptions: a list of additional names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

  def _AllMACs(self):
    """Return all MACs present in the config.

    """
    self._OpenConfig()
    self._ReleaseLock()

    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  def VerifyConfig(self):
    """Verify the configuration and return a list of error messages.

    """
    self._OpenConfig()
    self._ReleaseLock()

    result = []
    seen_macs = []
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
    return result

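  # For DRBD disks, SetDiskID() turns the logical (pnode, snode, port) triple
  # into a physical_id of the form (local_secondary_ip, port,
  # remote_secondary_ip, port), ordered from the point of view of node_name.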
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    if disk.children:
      for child in disk.children:
        self.SetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type in constants.LDS_DRBD:
      pnode, snode, port = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device doesn't know node %s" %
                                        node_name)
      pnode_info = self.GetNodeInfo(pnode)
      snode_info = self.GetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      if pnode == node_name:
        disk.physical_id = (pnode_info.secondary_ip, port,
                            snode_info.secondary_ip, port)
      else: # it must be secondary, we tested above
        disk.physical_id = (snode_info.secondary_ip, port,
                            pnode_info.secondary_ip, port)
    else:
      disk.physical_id = disk.logical_id
    return

  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.cluster.tcpudp_port_pool.copy()

  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    Args: None

    Returns: rsa hostkey
    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.cluster.rsahostkeypub

  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    Args:
      instance: the instance object
    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logger.Info("Instance '%s' DISK_LAYOUT: %s" % (instance.name, all_lvs))

    self._OpenConfig()
    self._config_data.instances[instance.name] = instance
    self._WriteConfig()

  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    instance.status = "up"
    self._WriteConfig()

  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._WriteConfig()

  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name
    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    instance = self._config_data.instances[instance_name]
    instance.status = "down"
    self._WriteConfig()

  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      a list of instance names, e.g.
      ['instance2.example.com', 'instance1.example.com']; this contains all
      instances, including the ones in Admin_down state

    """
    self._OpenConfig()
    self._ReleaseLock()

    return self._config_data.instances.keys()

  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()
    self._ReleaseLock()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())

  def GetInstanceInfo(self, instance_name):
    """Return information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    Args:
      instance_name: name of the instance, e.g. instance1.example.com

    Returns:
      the instance object

    """
    self._OpenConfig()
    self._ReleaseLock()

    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    self._OpenConfig()
    self._config_data.nodes[node.name] = node
    self._WriteConfig()

  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._WriteConfig()

  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()
    self._ReleaseLock()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args:
      node_name: the name of the node

    Returns: the node object

    """
    self._OpenConfig()
    self._ReleaseLock()

    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.nodes.keys()

  def DumpConfig(self):
    """Return the entire configuration of the cluster.
    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.cluster.serial_no += 1

  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime, size and inode as when we last read it, we skip the
    parsing of the file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return
    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.Load(f)
      except Exception, err:
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'config_version')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.config_version)")
    if data.cluster.config_version != constants.CONFIG_VERSION:
      raise errors.ConfigurationError("Cluster configuration version"
                                      " mismatch, got %s instead of %s" %
                                      (data.cluster.config_version,
                                       constants.CONFIG_VERSION))
    self._config_data = data
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino

  def _ReleaseLock(self):
    """Release the lock on the configuration file (currently a no-op).

    """

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True
    bad = False
    nodelist = self.GetNodeList()
    myhostname = self._my_hostname

    tgt_list = []
    for node in nodelist:
      nodeinfo = self.GetNodeInfo(node)
      if nodeinfo.name == myhostname:
        continue
      tgt_list.append(node)

    result = rpc.call_upload_file(tgt_list, self._cfg_file)
    for node in tgt_list:
      if not result[node]:
        logger.Error("copy of file %s to node %s failed" %
                     (self._cfg_file, node))
        bad = True
    return not bad

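  # _WriteConfig() below replaces the file atomically: the new contents are
  # written to a temporary file in the same directory, fsync()ed, and then
  # rename()d over the destination, so readers never see a partially written
  # configuration.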
  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    """
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      self._config_data.Dump(f)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
    os.rename(name, destination)
    self.write_count += 1
    # re-set our cache so as not to re-read the config file
    try:
      st = os.stat(destination)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
    # and redistribute the config file
    self._DistributeConfig()

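  # Illustrative call (all values below are made up for the example):
  #   cfg.InitConfig("node1.example.com", "192.0.2.1", None, pub_key,
  #                  "aa:00:00", "xenvg", "xen-br0")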
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host

    """
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(config_version=constants.CONFIG_VERSION,
                                   serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig)
    self._WriteConfig()

  def GetVGName(self):
    """Return the volume group name.

    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.cluster.volume_group_name

  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._OpenConfig()
    self._config_data.cluster.volume_group_name = vg_name
    self._WriteConfig()

  def GetDefBridge(self):
    """Return the default bridge.

    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.cluster.default_bridge

  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    self._OpenConfig()
    self._ReleaseLock()
    return self._config_data.cluster.mac_prefix

  def GetClusterInfo(self):
    """Return information about the cluster.

    Returns:
      the cluster object

    """
    self._OpenConfig()
    self._ReleaseLock()

    return self._config_data.cluster

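  # Update() only accepts objects that are still the ones held in the
  # in-memory configuration (the cluster object, or an entry of the node or
  # instance maps); passing a stale or unknown copy raises ConfigurationError.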
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo or GetClusterInfo) has been updated and
    the caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure has been saved.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    self._WriteConfig()
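
# Typical usage (a sketch only; the calls are the methods defined above, the
# node name and IP are made-up example values):
#
#   cfg = ConfigWriter()
#   if cfg.IsCluster():
#     port = cfg.AllocatePort()
#     mac = cfg.GenerateMAC()
#     node = cfg.GetNodeInfo(cfg.ExpandNodeName("node1"))
#     node.secondary_ip = "192.0.2.10"   # illustrative value
#     cfg.Update(node)                   # persists and redistributes the file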