# lib/config.py @ 48ce9fd9
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.

"""

import os
import tempfile
import random
import logging

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


_config_lock = locking.SharedLock()


def ValidateConfig():
  sstore = ssconf.SimpleStore()

  if sstore.GetConfigVersion() != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (sstore.GetConfigVersion(),
                                     constants.CONFIG_VERSION))

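# The public ConfigWriter methods below are wrapped with
# locking.ssynchronized(_config_lock): methods that only read the
# configuration take the module-level lock in shared mode (shared=1), while
# mutating methods take it exclusively.  Illustrative sketch of the pattern
# (hypothetical accessor, not part of this module):
#
#   @locking.ssynchronized(_config_lock, shared=1)
#   def GetSomething(self):
#     # the decorator acquires _config_lock around the call and releases
#     # it when the method returns
#     return self._config_data.something
#
# The _Unlocked* helpers below exist so that code paths which already hold
# the lock can reuse the same logic without re-acquiring it.
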
class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._config_time = None
    self._config_size = None
    self._config_inode = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = set()
    self._temporary_drbds = {}
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    self._OpenConfig()
    prefix = self._config_data.cluster.mac_prefix
    all_macs = self._AllMACs()
    retries = 64
    while retries > 0:
      byte1 = random.randrange(0, 256)
      byte2 = random.randrange(0, 256)
      byte3 = random.randrange(0, 256)
      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
      if mac not in all_macs:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Can't generate unique MAC")
    return mac

  @locking.ssynchronized(_config_lock, shared=1)
  def IsMacInUse(self, mac):
    """Predicate: check if the specified MAC is in use in the Ganeti cluster.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    self._OpenConfig()
    all_macs = self._AllMACs()
    return mac in all_macs

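  # Usage sketch (illustrative only, not executed as part of this module;
  # assumes a master node where the cluster configuration can be read):
  #
  #   cfg = ConfigWriter()
  #   mac = cfg.GenerateMAC()     # e.g. "aa:00:00:5f:c3:12" for prefix
  #                               # "aa:00:00"
  #   cfg.IsMacInUse(mac)         # False until an instance NIC uses it
  #
  # GenerateMAC only guards against duplicates within this cluster's own
  # configuration; collisions with MACs outside the cluster are not checked.
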
  def _ComputeAllLVs(self):
    """Compute the list of all LVs.

    """
    self._OpenConfig()
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, exceptions=None):
    """Generate a unique disk name.

    This checks the current node, instances and disk names for
    duplicates.

    Args:
      - exceptions: a list with some other names which should be checked
                    for uniqueness (used for example when you want to get
                    more than one id at one time without adding each one in
                    turn to the config file)

    Returns: the unique id as a string

    """
    existing = set()
    existing.update(self._temporary_ids)
    existing.update(self._ComputeAllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    if exceptions is not None:
      existing.update(exceptions)
    retries = 64
    while retries > 0:
      unique_id = utils.NewUUID()
      if unique_id not in existing and unique_id is not None:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able to generate a unique ID"
                                      " (last tried ID: %s)" % unique_id)
    self._temporary_ids.add(unique_id)
    return unique_id

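  # Usage sketch (illustrative; the LV names are hypothetical): names that
  # the caller has already reserved but not yet written to the configuration
  # can be passed via 'exceptions' so they are excluded from reuse:
  #
  #   lv_names = ["xenvg/disk0", "xenvg/disk1"]
  #   uid = cfg.GenerateUniqueID(exceptions=lv_names)
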
  def _AllMACs(self):
    """Return all MACs present in the config.

    """
    self._OpenConfig()

    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Stub verify function.
    """
    self._OpenConfig()

    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

    # cluster-wide pool of free ports
    for free_port in self._config_data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = ", ".join(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > self._config_data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (self._config_data.cluster.highest_used_port,
                       keys[-1]))

    return result

def _UnlockedSetDiskID(self, disk, node_name): |
253 |
"""Convert the unique ID to the ID needed on the target nodes.
|
254 |
|
255 |
This is used only for drbd, which needs ip/port configuration.
|
256 |
|
257 |
The routine descends down and updates its children also, because
|
258 |
this helps when the only the top device is passed to the remote
|
259 |
node.
|
260 |
|
261 |
This function is for internal use, when the config lock is already held.
|
262 |
|
263 |
"""
|
264 |
if disk.children:
|
265 |
for child in disk.children: |
266 |
self._UnlockedSetDiskID(child, node_name)
|
267 |
|
268 |
if disk.logical_id is None and disk.physical_id is not None: |
269 |
return
|
270 |
if disk.dev_type == constants.LD_DRBD8:
|
271 |
pnode, snode, port, pminor, sminor = disk.logical_id |
272 |
if node_name not in (pnode, snode): |
273 |
raise errors.ConfigurationError("DRBD device not knowing node %s" % |
274 |
node_name) |
275 |
pnode_info = self._UnlockedGetNodeInfo(pnode)
|
276 |
snode_info = self._UnlockedGetNodeInfo(snode)
|
277 |
if pnode_info is None or snode_info is None: |
278 |
raise errors.ConfigurationError("Can't find primary or secondary node" |
279 |
" for %s" % str(disk)) |
280 |
p_data = (pnode_info.secondary_ip, port) |
281 |
s_data = (snode_info.secondary_ip, port) |
282 |
if pnode == node_name:
|
283 |
disk.physical_id = p_data + s_data + (pminor,) |
284 |
else: # it must be secondary, we tested above |
285 |
disk.physical_id = s_data + p_data + (sminor,) |
286 |
else:
|
287 |
disk.physical_id = disk.logical_id |
288 |
return
|
289 |
|
290 |
@locking.ssynchronized(_config_lock)
|
291 |
def SetDiskID(self, disk, node_name): |
292 |
"""Convert the unique ID to the ID needed on the target nodes.
|
293 |
|
294 |
This is used only for drbd, which needs ip/port configuration.
|
295 |
|
296 |
The routine descends down and updates its children also, because
|
297 |
this helps when the only the top device is passed to the remote
|
298 |
node.
|
299 |
|
300 |
"""
|
301 |
return self._UnlockedSetDiskID(disk, node_name) |
302 |
|
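  # Sketch of the conversion performed above (field names as used in this
  # module, values illustrative): a DRBD8 disk stores
  #
  #   logical_id  = (pnode, snode, port, pminor, sminor)
  #
  # and SetDiskID rewrites physical_id for the node the disk is activated
  # on, using the nodes' secondary (replication) IP addresses:
  #
  #   on the primary:   (p_ip, port, s_ip, port, pminor)
  #   on the secondary: (s_ip, port, p_ip, port, sminor)
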
  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._OpenConfig()
    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    self._OpenConfig()
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    self._OpenConfig()

    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

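  # Usage sketch (illustrative only): AllocatePort first reuses a port from
  # the free pool and only extends the range when the pool is empty, while
  # AddTcpUdpPort returns a no-longer-needed port to the pool:
  #
  #   port = cfg.AllocatePort()   # a previously freed port, or
  #                               # highest_used_port + 1
  #   ...
  #   cfg.AddTcpUdpPort(port)     # give it back once unused
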
  def _ComputeDRBDMap(self, instance):
    """Compute the used DRBD minor/nodes.

    Return: dictionary of node_name: dict of minor: instance_name. The
    returned dict will have all the nodes in it (even if with an empty
    list).

    """
    def _AppendUsedPorts(instance_name, disk, used):
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) == 5:
        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id
        for node, port in ((nodeA, minorA), (nodeB, minorB)):
          assert node in used, "Instance node not found in node list"
          if port in used[node]:
            raise errors.ProgrammerError("DRBD minor already used:"
                                         " %s/%s, %s/%s" %
                                         (node, port, instance_name,
                                          used[node][port]))

          used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          _AppendUsedPorts(instance_name, child, used)

    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for (node, minor), instance in self._temporary_drbds.iteritems():
      my_dict[node][minor] = instance
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        _AppendUsedPorts(instance.name, disk, my_dict)
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    """
    self._OpenConfig()

    d_map = self._ComputeDRBDMap(instance)
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      result.append(minor)
      ndata[minor] = instance
      assert (nname, minor) not in self._temporary_drbds, \
             "Attempt to reuse reserved DRBD minor"
      self._temporary_drbds[(nname, minor)] = instance
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on both the error paths and on the success
    paths (after the instance has been added or updated).

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

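  # Usage sketch (illustrative only): minors are allocated per node, so a
  # node may appear several times in the request; the reservations are kept
  # in _temporary_drbds until ReleaseDRBDMinors is called:
  #
  #   minors = cfg.AllocateDRBDMinor(["node1", "node1", "node2"], "inst1")
  #   # e.g. [0, 1, 0] on a cluster with no other DRBD devices
  #   ...
  #   cfg.ReleaseDRBDMinors("inst1")   # on both success and error paths
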
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    Args: None

    Returns: rsa hostkey
    """
    self._OpenConfig()
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance):
    """Add an instance to the config.

    This should be used after creating a new instance.

    Args:
      instance: the instance object
    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    self._OpenConfig()
    instance.serial_no = 1
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    if status not in ("up", "down"):
      raise errors.ProgrammerError("Invalid status '%s' passed to"
                                   " ConfigWriter._SetInstanceStatus()" %
                                   status)
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.status != status:
      instance.status = status
      instance.serial_no += 1
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status to up in the config.

    """
    self._SetInstanceStatus(instance_name, "up")

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    self._OpenConfig()
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance to down in the configuration.

    """
    self._SetInstanceStatus(instance_name, "down")

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    Returns:
      array of instances, ex. ['instance2.example.com','instance1.example.com']
      this contains all the instances, including the ones in Admin_down state

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys())

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()

    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    Args:
      instance: name of the instance, ex instance1.example.com

    Returns:
      the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @returns: dict of (instance, instance_info), where instance_info is what
              would GetInstanceInfo return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node):
    """Add a node to the configuration.

    Args:
      node: an objects.Node instance

    """
    logging.info("Adding node %s to configuration" % node.name)

    self._OpenConfig()
    node.serial_no = 1
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration" % node_name)

    self._OpenConfig()
    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    self._OpenConfig()

    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys())

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already held.

    Args: node: nodename (tuple) of the node

    Returns: the node object

    """
    self._OpenConfig()

    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    Args: node: nodename (tuple) of the node

    Returns: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already held.

    """
    self._OpenConfig()
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @returns: dict of (node, node_info), where node_info is what
              would GetNodeInfo return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def DumpConfig(self):
    """Return the entire configuration of the cluster.
    """
    self._OpenConfig()
    return self._config_data

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1

  def _OpenConfig(self):
    """Read the config data from disk.

    In case we already have configuration data and the config file has
    the same mtime as when we read it, we skip the parsing of the
    file, since de-serialisation could be slow.

    """
    try:
      st = os.stat(self._cfg_file)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    if (self._config_data is not None and
        self._config_time is not None and
        self._config_time == st.st_mtime and
        self._config_size == st.st_size and
        self._config_inode == st.st_ino):
      # data is current, so skip loading of config file
      return

    # Make sure the configuration has the right version
    ValidateConfig()

    f = open(self._cfg_file, 'r')
    try:
      try:
        data = objects.ConfigData.FromDict(serializer.Load(f.read()))
      except Exception, err:
        raise errors.ConfigurationError(err)
    finally:
      f.close()
    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")
    self._config_data = data
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino

  def _DistributeConfig(self):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True
    bad = False
    nodelist = self._UnlockedGetNodeList()
    myhostname = self._my_hostname

    try:
      nodelist.remove(myhostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodelist, self._cfg_file)
    for node in nodelist:
      if not result[node]:
        logging.error("copy of file %s to node %s failed",
                      self._cfg_file, node)
        bad = True
    return not bad

  def _WriteConfig(self, destination=None):
    """Write the configuration data to persistent storage.

    """
    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())
    dir_name, file_name = os.path.split(destination)
    fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
    f = os.fdopen(fd, 'w')
    try:
      f.write(txt)
      os.fsync(f.fileno())
    finally:
      f.close()
    # we don't need to do os.close(fd) as f.close() did it
    os.rename(name, destination)
    self.write_count += 1
    # re-set our cache as not to re-read the config file
    try:
      st = os.stat(destination)
    except OSError, err:
      raise errors.ConfigurationError("Can't stat config file: %s" % err)
    self._config_time = st.st_mtime
    self._config_size = st.st_size
    self._config_inode = st.st_ino
    # and redistribute the config file
    self._DistributeConfig()

  @locking.ssynchronized(_config_lock)
  def InitConfig(self, node, primary_ip, secondary_ip,
                 hostkeypub, mac_prefix, vg_name, def_bridge):
    """Create the initial cluster configuration.

    It will contain the current node, which will also be the master
    node, and no instances or operating systems.

    Args:
      node: the nodename of the initial node
      primary_ip: the IP address of the current host
      secondary_ip: the secondary IP of the current host or None
      hostkeypub: the public hostkey of this host

    """
    hu_port = constants.FIRST_DRBD_PORT - 1
    globalconfig = objects.Cluster(serial_no=1,
                                   rsahostkeypub=hostkeypub,
                                   highest_used_port=hu_port,
                                   mac_prefix=mac_prefix,
                                   volume_group_name=vg_name,
                                   default_bridge=def_bridge,
                                   tcpudp_port_pool=set())
    if secondary_ip is None:
      secondary_ip = primary_ip
    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
                              secondary_ip=secondary_ip, serial_no=1)

    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
                                           instances={},
                                           cluster=globalconfig,
                                           serial_no=1)
    self._WriteConfig()

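  # Usage sketch (illustrative values; the key variable is hypothetical):
  # InitConfig writes the very first configuration file at cluster
  # initialisation time. Passing offline=True to the constructor skips the
  # RPC distribution step, which is pointless while the cluster has only
  # this one node:
  #
  #   cfg = ConfigWriter(offline=True)
  #   cfg.InitConfig("node1.example.com", "192.0.2.10", None,
  #                  rsa_pub_key, "aa:00:00", "xenvg", "xen-br0")
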
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    self._OpenConfig()
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._OpenConfig()
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefBridge(self):
    """Return the default bridge.

    """
    self._OpenConfig()
    return self._config_data.cluster.default_bridge

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    self._OpenConfig()
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster.

    Returns:
      the cluster object

    """
    self._OpenConfig()

    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetClusterInfo) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1

    self._WriteConfig()
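  # Usage sketch (illustrative only): the objects returned by the Get*Info
  # calls are the live configuration objects, so the update pattern is to
  # read, modify in place and then call Update to persist and redistribute:
  #
  #   node = cfg.GetNodeInfo("node1.example.com")
  #   node.secondary_ip = "192.0.2.20"
  #   cfg.Update(node)   # bumps node.serial_no and rewrites the file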