# lib/config.py @ 8d9c3bef
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Configuration management for Ganeti

This module provides the interface to the Ganeti cluster configuration.

The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.

Currently, the data storage format is JSON. YAML was slow and consumed too
much memory.

"""

import os
import random
import logging
import time

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer


_config_lock = locking.SharedLock()

# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"


def _ValidateConfig(data):
  """Verifies that a configuration object looks valid.

  This only verifies the version of the configuration.

  @raise errors.ConfigurationError: if the version differs from what
      we expect

  """
  if data.version != constants.CONFIG_VERSION:
    raise errors.ConfigurationError("Cluster configuration version"
                                    " mismatch, got %s instead of %s" %
                                    (data.version,
                                     constants.CONFIG_VERSION))


class TemporaryReservationManager:
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    self._ec_reserved = {}

  def Reserved(self, resource):
    # iterate over the reserved sets, not the (ec_id, set) pairs, so that
    # membership is tested against the resources themselves
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource: %s." %
                                    (resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      # decrement the counter so that, once exhausted, the while/else
      # clause below actually triggers instead of looping forever
      retries -= 1
    else:
      raise errors.ConfigurationError("Unable to generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource


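# A minimal usage sketch, assuming a hypothetical execution-context id
# "job-42" (names made up for illustration):
#
#   trm = TemporaryReservationManager()
#   trm.Reserve("job-42", "aa:00:00:11:22:33")   # claim a resource
#   trm.Reserved("aa:00:00:11:22:33")            # -> True
#   trm.GetReserved()                            # -> set with the MAC above
#   trm.DropECReservations("job-42")             # release when the job ends
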
class ConfigWriter:
  """The interface to the cluster configuration.

  """
  def __init__(self, cfg_file=None, offline=False):
    self.write_count = 0
    self._lock = _config_lock
    self._config_data = None
    self._offline = offline
    if cfg_file is None:
      self._cfg_file = constants.CLUSTER_CONF_FILE
    else:
      self._cfg_file = cfg_file
    self._temporary_ids = TemporaryReservationManager()
    self._temporary_drbds = {}
    self._temporary_macs = TemporaryReservationManager()
    self._temporary_secrets = TemporaryReservationManager()
    # Note: in order to prevent errors when resolving our name in
    # _DistributeConfig, we compute it here once and reuse it; it's
    # better to raise an error before starting to modify the config
    # file than after it was modified
    self._my_hostname = utils.HostInfo().name
    self._last_cluster_serial = -1
    self._OpenConfig()

  # this method needs to be static, so that we can call it on the class
  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(constants.CLUSTER_CONF_FILE)

  def _GenerateOneMAC(self):
    """Generate one mac address

    """
    prefix = self._config_data.cluster.mac_prefix
    byte1 = random.randrange(0, 256)
    byte2 = random.randrange(0, 256)
    byte3 = random.randrange(0, 256)
    mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
    return mac

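  # For illustration: with a (hypothetical) mac_prefix of "aa:00:00", the
  # format above yields MACs such as "aa:00:00:3f:07:c4" -- three random
  # bytes rendered as zero-padded lowercase hex.
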
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    existing = self._AllMACs()
    return self._temporary_ids.Generate(existing, self._GenerateOneMAC, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      # Reserve takes (ec_id, resource), in that order
      self._temporary_macs.Reserve(ec_id, mac)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateDRBDSecret(self, ec_id):
    """Generate a DRBD secret.

    This checks the current disks for duplicates.

    """
    return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
                                            utils.GenerateSecret,
                                            ec_id)

  def _AllLVs(self):
    """Compute the list of all LVs.

    """
    lvnames = set()
    for instance in self._config_data.instances.values():
      node_data = instance.MapLVsByNode()
      for lv_list in node_data.values():
        lvnames.update(lv_list)
    return lvnames

def _AllIDs(self, include_temporary): |
216 |
"""Compute the list of all UUIDs and names we have.
|
217 |
|
218 |
@type include_temporary: boolean
|
219 |
@param include_temporary: whether to include the _temporary_ids set
|
220 |
@rtype: set
|
221 |
@return: a set of IDs
|
222 |
|
223 |
"""
|
224 |
existing = set()
|
225 |
if include_temporary:
|
226 |
existing.update(self._temporary_ids.GetReserved())
|
227 |
existing.update(self._AllLVs())
|
228 |
existing.update(self._config_data.instances.keys())
|
229 |
existing.update(self._config_data.nodes.keys())
|
230 |
existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid]) |
231 |
return existing
|
232 |
|
233 |
def _GenerateUniqueID(self, ec_id): |
234 |
"""Generate an unique UUID.
|
235 |
|
236 |
This checks the current node, instances and disk names for
|
237 |
duplicates.
|
238 |
|
239 |
@rtype: string
|
240 |
@return: the unique id
|
241 |
|
242 |
"""
|
243 |
existing = self._AllIDs(include_temporary=False) |
244 |
return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id) |
245 |
|
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id of the job under which to reserve the id

    """
    return self._GenerateUniqueID(ec_id)

  def _AllMACs(self):
    """Return all MACs present in the config.

    @rtype: list
    @return: the list of all MACs

    """
    result = []
    for instance in self._config_data.instances.values():
      for nic in instance.nics:
        result.append(nic.mac)

    return result

  def _AllDRBDSecrets(self):
    """Return all DRBD secrets present in the config.

    @rtype: list
    @return: the list of all DRBD secrets

    """
    def helper(disk, result):
      """Recursively gather secrets from this disk."""
      # dev_type carries LD_* values, as in the other disk checks in
      # this module, so compare against LD_DRBD8 (not the DT_* template)
      if disk.dev_type == constants.LD_DRBD8:
        result.append(disk.logical_id[5])
      if disk.children:
        for child in disk.children:
          helper(child, result)

    result = []
    for instance in self._config_data.instances.values():
      for disk in instance.disks:
        helper(disk, result)

    return result

  def _CheckDiskIDs(self, disk, l_ids, p_ids):
    """Compute duplicate disk IDs

    @type disk: L{objects.Disk}
    @param disk: the disk at which to start searching
    @type l_ids: list
    @param l_ids: list of current logical ids
    @type p_ids: list
    @param p_ids: list of current physical ids
    @rtype: list
    @return: a list of error messages

    """
    result = []
    if disk.logical_id is not None:
      if disk.logical_id in l_ids:
        result.append("duplicate logical id %s" % str(disk.logical_id))
      else:
        l_ids.append(disk.logical_id)
    if disk.physical_id is not None:
      if disk.physical_id in p_ids:
        result.append("duplicate physical id %s" % str(disk.physical_id))
      else:
        p_ids.append(disk.physical_id)

    if disk.children:
      for child in disk.children:
        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
    return result

  def _UnlockedVerifyConfig(self):
    """Verify function.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    result = []
    seen_macs = []
    ports = {}
    data = self._config_data
    seen_lids = []
    seen_pids = []

    # global cluster checks
    if not data.cluster.enabled_hypervisors:
      result.append("enabled hypervisors list doesn't have any entries")
    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
      result.append("enabled hypervisors contains invalid entries: %s" %
                    invalid_hvs)

    if data.cluster.master_node not in data.nodes:
      result.append("cluster has invalid primary node '%s'" %
                    data.cluster.master_node)

    # per-instance checks
    for instance_name in data.instances:
      instance = data.instances[instance_name]
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance_name, instance.primary_node))
      for snode in instance.secondary_nodes:
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance_name, snode))
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance_name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)

      # gather the drbd ports for duplicate checks
      for dsk in instance.disks:
        if dsk.dev_type in constants.LDS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # instance disk verify
      for idx, disk in enumerate(instance.disks):
        result.extend(["instance '%s' disk %d error: %s" %
                       (instance.name, idx, msg) for msg in disk.Verify()])
        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))

    # cluster-wide pool of free ports
    for free_port in data.cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = ", ".join(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > data.cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (data.cluster.highest_used_port, keys[-1]))

    if not data.nodes[data.cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node in data.nodes.values():
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))

    # drbd minors check
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    for node, minor, instance_a, instance_b in duplicates:
      result.append("DRBD minor %d on node %s is assigned twice to instances"
                    " %s and %s" % (minor, node, instance_a, instance_b))

    # IP checks
    ips = { data.cluster.master_ip: ["cluster_ip"] }
    def _helper(ip, name):
      if ip in ips:
        ips[ip].append(name)
      else:
        ips[ip] = [name]

    for node in data.nodes.values():
      _helper(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _helper(node.secondary_ip, "node:%s/secondary" % node.name)

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, ", ".join(owners)))
    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

  def _UnlockedSetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    This function is for internal use, when the config lock is already held.

    """
    if disk.children:
      for child in disk.children:
        self._UnlockedSetDiskID(child, node_name)

    if disk.logical_id is None and disk.physical_id is not None:
      return
    if disk.dev_type == constants.LD_DRBD8:
      pnode, snode, port, pminor, sminor, secret = disk.logical_id
      if node_name not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device does not know node %s" %
                                        node_name)
      pnode_info = self._UnlockedGetNodeInfo(pnode)
      snode_info = self._UnlockedGetNodeInfo(snode)
      if pnode_info is None or snode_info is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(disk))
      p_data = (pnode_info.secondary_ip, port)
      s_data = (snode_info.secondary_ip, port)
      if pnode == node_name:
        disk.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        disk.physical_id = s_data + p_data + (sminor, secret)
    else:
      disk.physical_id = disk.logical_id
    return

  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_name):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_name)

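  # Worked example with made-up values: for a DRBD8 disk carrying
  #   logical_id = ("node1", "node2", 11000, 0, 1, "s3cr3t")
  # and secondary IPs 192.0.2.1 (node1) / 192.0.2.2 (node2), calling
  # SetDiskID(disk, "node1") sets
  #   physical_id = ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "s3cr3t")
  # i.e. (own ip, port, peer ip, port, own minor, secret).
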
  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

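  # A sketch of the allocation order, with assumed values: ports returned
  # to the pool via AddTcpUdpPort (say {11017}) are handed out first; only
  # when the pool is empty does highest_used_port advance (e.g. 11016 ->
  # 11017), up to the constants.LAST_DRBD_PORT limit.
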
  def _UnlockedComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    @rtype: (dict, list)
    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list), and a list of duplicates; if the duplicates
        list is not empty, the configuration is corrupted and its caller
        should raise an exception

    """
    def _AppendUsedPorts(instance_name, disk, used):
      duplicates = []
      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
        for node, port in ((node_a, minor_a), (node_b, minor_b)):
          assert node in used, ("Node '%s' of instance '%s' not found"
                                " in node list" % (node, instance_name))
          if port in used[node]:
            duplicates.append((node, port, instance_name, used[node][port]))
          else:
            used[node][port] = instance_name
      if disk.children:
        for child in disk.children:
          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
      return duplicates

    duplicates = []
    my_dict = dict((node, {}) for node in self._config_data.nodes)
    for instance in self._config_data.instances.itervalues():
      for disk in instance.disks:
        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
    for (node, minor), instance in self._temporary_drbds.iteritems():
      if minor in my_dict[node] and my_dict[node][minor] != instance:
        duplicates.append((node, minor, instance, my_dict[node][minor]))
      else:
        my_dict[node][minor] = instance
    return my_dict, duplicates

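  # Shape of the computed value, with made-up names and minors:
  #   my_dict    = {"node1": {0: "inst1", 1: "inst2"}, "node2": {0: "inst1"}}
  #   duplicates = []   # non-empty only if the configuration is corrupted
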
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_name: dict of minor: instance_name;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map

  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, nodes, instance):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type instance: string
    @param instance: the instance for which we allocate minors

    """
    assert isinstance(instance, basestring), \
           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nname in nodes:
      ndata = d_map[nname]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = instance
        self._temporary_drbds[(nname, 0)] = instance
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nname], \
             ("Attempt to reuse allocated DRBD minor %d on node %s,"
              " already allocated to instance %s" %
              (minor, nname, d_map[nname][minor]))
      ndata[minor] = instance
      # double-check minor against reservation
      r_key = (nname, minor)
      assert r_key not in self._temporary_drbds, \
             ("Attempt to reuse reserved DRBD minor %d on node %s,"
              " reserved for instance %s" %
              (minor, nname, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = instance
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  nodes, result)
    return result

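  # Minimal usage sketch, with hypothetical node and instance names; the
  # result order matches the input order, and the minors stay reserved
  # until the instance is added/updated or the error path releases them:
  #
  #   minors = cfg.AllocateDRBDMinor(["node1", "node2"], "inst1")
  #   # e.g. [0, 2] -- the first free minor on each node
  #   cfg.ReleaseDRBDMinors("inst1")   # on the error path only
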
  def _UnlockedReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    assert isinstance(instance, basestring), \
           "Invalid argument passed to ReleaseDRBDMinors"
    for key, name in self._temporary_drbds.items():
      if name == instance:
        del self._temporary_drbds[key]

  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, instance):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type instance: string
    @param instance: the instance for which temporary minors should be
                     released

    """
    self._UnlockedReleaseDRBDMinors(instance)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.

    @return: Master hostname

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    if instance.disk_template != constants.DT_DISKLESS:
      all_lvs = instance.MapLVsByNode()
      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)

    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._EnsureUUID(instance, ec_id)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.name] = instance
    self._config_data.cluster.serial_no += 1
    self._UnlockedReleaseDRBDMinors(instance.name)
    self._WriteConfig()

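  # Rough call sequence with hypothetical values: a logical unit creating
  # an instance would build the object first and then persist it here.
  #
  #   instance = objects.Instance(name="inst1.example.com", ...)
  #   cfg.AddInstance(instance, ec_id)   # writes and distributes the config
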
  def _EnsureUUID(self, item, ec_id):
    """Ensures a given object has a valid UUID.

    @param item: the instance or node to be checked
    @param ec_id: the execution context id for the uuid reservation

    """
    if not item.uuid:
      item.uuid = self._GenerateUniqueID(ec_id)
    elif item.uuid in self._AllIDs(include_temporary=True):
      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                      " in use" % (item.name, item.uuid))

  def _SetInstanceStatus(self, instance_name, status):
    """Set the instance's status to a given value.

    """
    assert isinstance(status, bool), \
           "Invalid status '%s' passed to SetInstanceStatus" % (status,)

    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" %
                                      instance_name)
    instance = self._config_data.instances[instance_name]
    if instance.admin_up != status:
      instance.admin_up = status
      instance.serial_no += 1
      instance.mtime = time.time()
      self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, instance_name):
    """Mark the instance status as up in the config.

    """
    self._SetInstanceStatus(instance_name, True)

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, instance_name):
    """Remove the instance from the configuration.

    """
    if instance_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
    del self._config_data.instances[instance_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, old_name, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if old_name not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
    inst = self._config_data.instances[old_name]
    del self._config_data.instances[old_name]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type == constants.LD_FILE:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.physical_id = disk.logical_id = (disk.logical_id[0],
                                              os.path.join(file_storage_dir,
                                                           inst.name,
                                                           disk.iv_name))

    self._config_data.instances[inst.name] = inst
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, instance_name):
    """Mark the status of an instance as down in the configuration.

    """
    self._SetInstanceStatus(instance_name, False)

  def _UnlockedGetInstanceList(self):
    """Get the list of instances.

    This function is for internal use, when the config lock is already held.

    """
    return self._config_data.instances.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2.example.com',
        'instance1.example.com']

    """
    return self._UnlockedGetInstanceList()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandInstanceName(self, short_name):
    """Attempt to expand an incomplete instance name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.instances.keys(),
                                    case_sensitive=False)

  def _UnlockedGetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    This function is for internal use, when the config lock is already held.

    """
    if instance_name not in self._config_data.instances:
      return None

    return self._config_data.instances[instance_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, instance_name):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information
    about an instance is taken from the live systems.

    @param instance_name: name of the instance, e.g.
        I{instance1.example.com}

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(instance_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
        GetInstanceInfo would return for the instance

    """
    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
                    for instance in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._config_data.nodes[node.name] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_name):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_name)

    if node_name not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_name)

    del self._config_data.nodes[node_name]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name.

    """
    return utils.MatchNameComponent(short_name,
                                    self._config_data.nodes.keys(),
                                    case_sensitive=False)

  def _UnlockedGetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This function is for internal use, when the config lock is already
    held.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    if node_name not in self._config_data.nodes:
      return None

    return self._config_data.nodes[node_name]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_name):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_name: the node name, e.g. I{node1.example.com}

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_name)

  def _UnlockedGetNodeList(self):
    """Return the list of nodes which are in the configuration.

    This function is for internal use, when the config lock is already
    held.

    @rtype: list

    """
    return self._config_data.nodes.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.name for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
        GetNodeInfo would return for the node

    """
    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
                    for node in self._UnlockedGetNodeList()])
    return my_dict

  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum desired and possible candidates.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    mc_now = mc_should = mc_max = 0
    for node in self._config_data.nodes.values():
      if exceptions and node.name in exceptions:
        continue
      if not (node.offline or node.drained):
        mc_max += 1
      if node.master_candidate:
        mc_now += 1
    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
    return (mc_now, mc_should, mc_max)

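  # Worked example under assumed numbers: on a 5-node cluster with one
  # offline and one drained node, mc_max = 3; with candidate_pool_size = 10,
  # mc_should = min(3, 10) = 3; mc_now simply counts the nodes currently
  # flagged master_candidate.
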
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired and possible, possible)

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)

  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exceptions):
    """Try to grow the candidate pool to the desired size.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for name in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[name]
        if (node.master_candidate or node.offline or node.drained or
            node.name in exceptions):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

  def _BumpSerialNo(self):
    """Bump up the serial number of the config.

    """
    self._config_data.serial_no += 1
    self._config_data.mtime = time.time()

  def _AllUUIDObjects(self):
    """Returns all objects with uuid attributes.

    """
    return (self._config_data.instances.values() +
            self._config_data.nodes.values() +
            [self._config_data.cluster])

  def _OpenConfig(self):
    """Read the config data from disk.

    """
    raw_data = utils.ReadFile(self._cfg_file)

    try:
      data = objects.ConfigData.FromDict(serializer.Load(raw_data))
    except Exception, err:
      raise errors.ConfigurationError(err)

    # Make sure the configuration has the right version
    _ValidateConfig(data)

    if (not hasattr(data, 'cluster') or
        not hasattr(data.cluster, 'rsahostkeypub')):
      raise errors.ConfigurationError("Incomplete configuration"
                                      " (missing cluster.rsahostkeypub)")

    # Upgrade configuration if needed
    data.UpgradeConfig()

    self._config_data = data
    # reset the last serial as -1 so that the next write will cause
    # ssconf update
    self._last_cluster_serial = -1

    # And finally run our (custom) config upgrade sequence
    self._UpgradeConfig()

  def _UpgradeConfig(self):
    """Run upgrade steps that cannot be done purely in the objects.

    This is because some data elements need uniqueness across the
    whole configuration, etc.

    @warning: this function will call L{_WriteConfig()}, so it needs
        to either be called with the lock held or from a safe place
        (the constructor)

    """
    modified = False
    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
        modified = True
    if modified:
      self._WriteConfig()
      # This is ok even if it acquires the internal lock, as _UpgradeConfig is
      # only called at config init time, without the lock held
      self.DropECReservations(_UPGRADE_CONFIG_JID)

  def _DistributeConfig(self, feedback_fn):
    """Distribute the configuration to the other nodes.

    Currently, this only copies the configuration file. In the future,
    it could be used to encapsulate the 2/3-phase update mechanism.

    """
    if self._offline:
      return True

    bad = False

    node_list = []
    addr_list = []
    myhostname = self._my_hostname
    # we can skip checking whether _UnlockedGetNodeInfo returns None
    # since the node list comes from _UnlockedGetNodeList, and we are
    # called with the lock held, so no modifications should take place
    # in between
    for node_name in self._UnlockedGetNodeList():
      if node_name == myhostname:
        continue
      node_info = self._UnlockedGetNodeInfo(node_name)
      if not node_info.master_candidate:
        continue
      node_list.append(node_info.name)
      addr_list.append(node_info.primary_ip)

    result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
                                            address_list=addr_list)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (self._cfg_file, to_node, msg))
        logging.error(msg)

        if feedback_fn:
          feedback_fn(msg)

        bad = True

    return not bad

  def _WriteConfig(self, destination=None, feedback_fn=None):
    """Write the configuration data to persistent storage.

    """
    assert feedback_fn is None or callable(feedback_fn)

    # Warn on config errors, but don't abort the save - the
    # configuration has already been modified, and we can't revert;
    # the best we can do is to warn the user and save as is, leaving
    # recovery to the user
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Configuration data is not consistent: %s" %
                (", ".join(config_errors)))
      logging.critical(errmsg)
      if feedback_fn:
        feedback_fn(errmsg)

    if destination is None:
      destination = self._cfg_file
    self._BumpSerialNo()
    txt = serializer.Dump(self._config_data.ToDict())

    utils.WriteFile(destination, data=txt)

    self.write_count += 1

    # and redistribute the config file to master candidates
    self._DistributeConfig(feedback_fn)

    # Write ssconf files on all nodes (including locally)
    if self._last_cluster_serial < self._config_data.cluster.serial_no:
      if not self._offline:
        result = rpc.RpcRunner.call_write_ssconf_files(
          self._UnlockedGetNodeList(),
          self._UnlockedGetSsconfValues())

        for nname, nresu in result.items():
          msg = nresu.fail_msg
          if msg:
            errmsg = ("Error while uploading ssconf files to"
                      " node %s: %s" % (nname, msg))
            logging.warning(errmsg)

            if feedback_fn:
              feedback_fn(errmsg)

      self._last_cluster_serial = self._config_data.cluster.serial_no

  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value

    """
    fn = "\n".join
    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
    node_names = utils.NiceSort(self._UnlockedGetNodeList())
    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_info]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_info]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_info if node.offline)
    on_data = fn(node.name for node in node_info if not node.offline)
    mc_data = fn(node.name for node in node_info if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_info
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)

    cluster = self._config_data.cluster
    cluster_tags = fn(cluster.GetTags())
    return {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NODE: cluster.master_node,
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      }

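  # Shape of the returned mapping, values abridged and names made up:
  #   {constants.SS_CLUSTER_NAME: "cluster.example.com",
  #    constants.SS_NODE_LIST: "node1.example.com\nnode2.example.com",
  #    ...}
  # Every value is a plain string; list-like values are newline-joined.
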
  @locking.ssynchronized(_config_lock, shared=1)
  def GetVGName(self):
    """Return the volume group name.

    """
    return self._config_data.cluster.volume_group_name

  @locking.ssynchronized(_config_lock)
  def SetVGName(self, vg_name):
    """Set the volume group name.

    """
    self._config_data.cluster.volume_group_name = vg_name
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMACPrefix(self):
    """Return the mac prefix.

    """
    return self._config_data.cluster.mac_prefix

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterInfo(self):
    """Returns information about the cluster

    @rtype: L{objects.Cluster}
    @return: the cluster object

    """
    return self._config_data.cluster

  @locking.ssynchronized(_config_lock)
  def Update(self, target, feedback_fn):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetClusterInfo) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure is saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which is existing in
        the cluster
    @param feedback_fn: Callable feedback function

    """
    if self._config_data is None:
      raise errors.ProgrammerError("Configuration file not read,"
                                   " cannot save.")
    update_serial = False
    if isinstance(target, objects.Cluster):
      test = target == self._config_data.cluster
    elif isinstance(target, objects.Node):
      test = target in self._config_data.nodes.values()
      update_serial = True
    elif isinstance(target, objects.Instance):
      test = target in self._config_data.instances.values()
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))
    if not test:
      raise errors.ConfigurationError("Configuration updated since object"
                                      " has been read or unknown object")
    target.serial_no += 1
    target.mtime = now = time.time()

    if update_serial:
      # for node updates, we need to increase the cluster serial too
      self._config_data.cluster.serial_no += 1
      self._config_data.cluster.mtime = now

    if isinstance(target, objects.Instance):
      self._UnlockedReleaseDRBDMinors(target.name)

    self._WriteConfig(feedback_fn=feedback_fn)

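  # Minimal usage sketch with hypothetical names: callers mutate an object
  # obtained from this ConfigWriter and then hand it back.
  #
  #   node = cfg.GetNodeInfo("node1.example.com")
  #   node.offline = True
  #   cfg.Update(node, feedback_fn)   # bumps serials and writes the config
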
  @locking.ssynchronized(_config_lock)
  def DropECReservations(self, ec_id):
    """Drop per-execution-context reservations

    """
    self._temporary_ids.DropECReservations(ec_id)
    self._temporary_macs.DropECReservations(ec_id)
    self._temporary_secrets.DropECReservations(ec_id)