#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions used by the node daemon

@var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
     the L{UploadFile} function
@var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
     in the L{_CleanDirectory} function

"""

# pylint: disable=E1103

# E1103: %s %r has no %r member (but some types could not be
# inferred), because the _TryOSFromDisk returns either (True, os_obj)
# or (False, "string") which confuses pylint


import os
import os.path
import shutil
import time
import stat
import errno
import re
import random
import logging
import tempfile
import zlib
import base64
import signal

from ganeti import errors
from ganeti import utils
from ganeti import ssh
from ganeti import hypervisor
from ganeti import constants
from ganeti import bdev
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
from ganeti import mcpu
from ganeti import compat


_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
_ALLOWED_CLEAN_DIRS = frozenset([
  constants.DATA_DIR,
  constants.JOB_QUEUE_ARCHIVE_DIR,
  constants.QUEUE_DIR,
  constants.CRYPTO_KEYS_DIR,
  ])
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"

#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6})\|?$")

# Actions for the master setup script
_MASTER_START = "start"
_MASTER_STOP = "stop"


class RPCFail(Exception):
  """Class denoting RPC failure.

  Its argument is the error message.

  """


def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  This exception is then handled specially in the ganeti daemon and
  turned into a 'failed' return type. As such, this function is a
  useful shortcut for logging the error and returning it to the master
  daemon.

  @type msg: string
  @param msg: the text of the exception
  @raise RPCFail

  """
  if args:
    msg = msg % args
  if "log" not in kwargs or kwargs["log"]:  # if we should log this error
    if "exc" in kwargs and kwargs["exc"]:
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)


def _GetConfig():
  """Simple wrapper to return a SimpleStore.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  return ssconf.SimpleStore()


def _GetSshRunner(cluster_name):
  """Simple wrapper to return an SshRunner.

  @type cluster_name: str
  @param cluster_name: the cluster name, which is needed
      by the SshRunner constructor
  @rtype: L{ssh.SshRunner}
  @return: an SshRunner instance

  """
  return ssh.SshRunner(cluster_name)


def _Decompress(data):
  """Unpacks data compressed by the RPC client.

  @type data: list or tuple
  @param data: Data sent by RPC client
  @rtype: str
  @return: Decompressed data

  """
  assert isinstance(data, (list, tuple))
  assert len(data) == 2
  (encoding, content) = data
  if encoding == constants.RPC_ENCODING_NONE:
    return content
  elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    return zlib.decompress(base64.b64decode(content))
  else:
    raise AssertionError("Unknown data encoding")
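
# Illustrative sketch (not part of the original module): a round trip
# through the two encodings _Decompress accepts; only the
# (encoding, content) tuple shape matters here.
#
#   plain = (constants.RPC_ENCODING_NONE, "hello")
#   packed = (constants.RPC_ENCODING_ZLIB_BASE64,
#             base64.b64encode(zlib.compress("hello")))
#   assert _Decompress(plain) == _Decompress(packed) == "hello"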


def _CleanDirectory(path, exclude=None):
  """Removes all regular files in a directory.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults
      to the empty list

  """
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return
  if exclude is None:
    exclude = []
  else:
    # Normalize excluded paths
    exclude = [os.path.normpath(i) for i in exclude]

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in exclude:
      continue
    if os.path.isfile(full_name) and not os.path.islink(full_name):
      utils.RemoveFile(full_name)


def _BuildUploadFileList():
  """Build the list of allowed upload files.

  This is abstracted so that it's built only once at module import time.

  """
  allowed_files = set([
    constants.CLUSTER_CONF_FILE,
    constants.ETC_HOSTS,
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.VNC_PASSWORD_FILE,
    constants.RAPI_CERT_FILE,
    constants.SPICE_CERT_FILE,
    constants.SPICE_CACERT_FILE,
    constants.RAPI_USERS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  for hv_name in constants.HYPER_TYPES:
    hv_class = hypervisor.GetHypervisorClass(hv_name)
    allowed_files.update(hv_class.GetAncillaryFiles()[0])

  return frozenset(allowed_files)


_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()


def JobQueuePurge():
  """Removes job queue files and archived jobs.

  @rtype: tuple
  @return: True, None

  """
  _CleanDirectory(constants.QUEUE_DIR, exclude=[constants.JOB_QUEUE_LOCK_FILE])
  _CleanDirectory(constants.JOB_QUEUE_ARCHIVE_DIR)


def GetMasterInfo():
  """Returns master information.

  This is a utility function to compute master information, either
  for consumption here or from the node daemon.

  @rtype: tuple
  @return: master_netdev, master_ip, master_name, primary_ip_family,
    master_netmask
  @raise RPCFail: in case of errors

  """
  try:
    cfg = _GetConfig()
    master_netdev = cfg.GetMasterNetdev()
    master_ip = cfg.GetMasterIP()
    master_netmask = cfg.GetMasterNetmask()
    master_node = cfg.GetMasterNode()
    primary_ip_family = cfg.GetPrimaryIPFamily()
  except errors.ConfigurationError, err:
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
  return (master_netdev, master_ip, master_node, primary_ip_family,
          master_netmask)
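
# Illustrative sketch (not part of the original module): callers unpack
# the result positionally, e.g.
#
#   (netdev, ip, node, family, netmask) = GetMasterInfo()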


def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator that runs hooks before and after the decorated function.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
    environment variables for the hooks. Will get all the parameters of the
    decorated function.
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      nodes = ([myself], [myself])  # these hooks run locally

      env_fn = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hr = HooksRunner()
      hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks,
                            None, env_fn, logging.warning, cfg.GetClusterName(),
                            cfg.GetMasterNode())

      hm.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hm.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator


def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Builds environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script (unused, but necessary per the implementation of the
    _RunLocalHooks decorator)

  """
  # pylint: disable=W0613
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
  env = {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ver),
    }

  return env
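
# Illustrative sketch (not part of the original module): for an IPv4
# cluster the resulting hooks environment looks roughly like (values
# made up):
#
#   {"MASTER_NETDEV": "eth0", "MASTER_IP": "192.0.2.1",
#    "MASTER_NETMASK": "24", "CLUSTER_IP_VERSION": "4"}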


def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script. Must be one of
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
    script

  """
  env = _BuildMasterIpEnv(master_params)

  if use_external_mip_script:
    setup_script = constants.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = constants.DEFAULT_MASTER_SETUP_SCRIPT

  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)

  if result.failed:
    _Fail("Failed to %s the master IP. Script return value: %s" %
          (action, result.exit_code), log=True)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START,
                        use_external_mip_script)


def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None

  """

  if no_voting:
    masterd_args = "--no-voting --yes-do-it"
  else:
    masterd_args = ""

  env = {
    "EXTRA_MASTERD_ARGS": masterd_args,
    }

  result = utils.RunCmd([constants.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    msg = "Can't start Ganeti master: %s" % result.output
    logging.error(msg)
    _Fail(msg)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP,
                        use_external_mip_script)


def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  @rtype: None

  """
  # TODO: log and report back to the caller the error failures; we
  # need to decide in which case we fail the RPC for this

  result = utils.RunCmd([constants.DAEMON_UTIL, "stop-master"])
  if result.failed:
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                  " and error %s",
                  result.cmd, result.exit_code, result.output)


def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device

  """
  if old_netmask == netmask:
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
                         "%s/%s" % (master_ip, netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not set the new netmask on the master IP address")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
                         "%s/%s" % (master_ip, old_netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not bring down the master IP address with the old netmask")
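
# Illustrative sketch (not part of the original module): with made-up
# values master_ip="192.0.2.1", netmask=25, old_netmask=24 and
# master_netdev="eth0", the two commands above amount to:
#
#   ip address add 192.0.2.1/25 dev eth0 label eth0:0
#   ip address del 192.0.2.1/24 dev eth0 label eth0:0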


def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate in. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry

  """
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not"
            " present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      _Fail("Mode 'remove' does not allow 'ip' parameter, but"
            " parameter is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    _Fail("Mode not supported")


def LeaveCluster(modify_ssh_setup):
  """Cleans up and removes the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shutdown the node daemon.

  @param modify_ssh_setup: boolean

  """
  _CleanDirectory(constants.DATA_DIR)
  _CleanDirectory(constants.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))

      utils.RemoveFile(priv_key)
      utils.RemoveFile(pub_key)
    except errors.OpExecError:
      logging.exception("Error while processing ssh files")

  try:
    utils.RemoveFile(constants.CONFD_HMAC_KEY)
    utils.RemoveFile(constants.RAPI_CERT_FILE)
    utils.RemoveFile(constants.SPICE_CERT_FILE)
    utils.RemoveFile(constants.SPICE_CACERT_FILE)
    utils.RemoveFile(constants.NODED_CERT_FILE)
  except:  # pylint: disable=W0702
    logging.exception("Error while removing cluster secrets")

  result = utils.RunCmd([constants.DAEMON_UTIL, "stop", constants.CONFD])
  if result.failed:
    logging.error("Command %s failed with exitcode %s and error %s",
                  result.cmd, result.exit_code, result.output)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")


def GetNodeInfo(vgname, hypervisor_type):
  """Gives back a hash with different information about the node.

  @type vgname: C{string}
  @param vgname: the name of the volume group to ask for disk space information
  @type hypervisor_type: C{str}
  @param hypervisor_type: the name of the hypervisor to ask for
      memory information
  @rtype: C{dict}
  @return: dictionary with the following keys:
      - vg_size is the size of the configured volume group in MiB
      - vg_free is the free size of the volume group in MiB
      - memory_dom0 is the memory allocated for domain0 in MiB
      - memory_free is the currently available (free) ram in MiB
      - memory_total is the total amount of ram in MiB
      - hv_version: the hypervisor version, if available

  """
  outputarray = {}

  if vgname is not None:
    vginfo = bdev.LogicalVolume.GetVGInfo([vgname])
    vg_free = vg_size = None
    if vginfo:
      vg_free = int(round(vginfo[0][0], 0))
      vg_size = int(round(vginfo[0][1], 0))
    outputarray["vg_size"] = vg_size
    outputarray["vg_free"] = vg_free

  if hypervisor_type is not None:
    hyper = hypervisor.GetHypervisor(hypervisor_type)
    hyp_info = hyper.GetNodeInfo()
    if hyp_info is not None:
      outputarray.update(hyp_info)

  outputarray["bootid"] = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")

  return outputarray


def VerifyNode(what, cluster_name):
  """Verify the status of the local node.

  Based on the input L{what} parameter, various checks are done on the
  local node.

  If the I{filelist} key is present, this list of
  files is checksummed and the file/checksum pairs are returned.

  If the I{nodelist} key is present, we check that we have
  connectivity via ssh with the target nodes (and check the hostname
  report).

  If the I{node-net-test} key is present, we check that we have
  connectivity to the given nodes via both primary IP and, if
  applicable, secondary IPs.

  @type what: C{dict}
  @param what: a dictionary of things to check:
      - filelist: list of files for which to compute checksums
      - nodelist: list of nodes we should check ssh communication with
      - node-net-test: list of nodes we should check node daemon port
        connectivity with
      - hypervisor: list with hypervisors to run the verify for
  @rtype: dict
  @return: a dictionary with the same keys as the input dict, and
      values representing the result of the checks

  """
  result = {}
  my_name = netutils.Hostname.GetSysName()
  port = netutils.GetDaemonPort(constants.NODED)
  vm_capable = my_name not in what.get(constants.NV_VMNODES, [])

  if constants.NV_HYPERVISOR in what and vm_capable:
    result[constants.NV_HYPERVISOR] = tmp = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      try:
        val = hypervisor.GetHypervisor(hv_name).Verify()
      except errors.HypervisorError, err:
        val = "Error while checking hypervisor: %s" % str(err)
      tmp[hv_name] = val

  if constants.NV_HVPARAMS in what and vm_capable:
    result[constants.NV_HVPARAMS] = tmp = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        hypervisor.GetHypervisor(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        tmp.append((source, hv_name, str(err)))

  if constants.NV_FILELIST in what:
    result[constants.NV_FILELIST] = utils.FingerprintFiles(
      what[constants.NV_FILELIST])

  if constants.NV_NODELIST in what:
    (nodes, bynode) = what[constants.NV_NODELIST]

    # Add nodes from other groups (different for each node)
    try:
      nodes.extend(bynode[my_name])
    except KeyError:
      pass

    # Use a random order
    random.shuffle(nodes)

    # Try to contact all nodes
    val = {}
    for node in nodes:
      success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
      if not success:
        val[node] = message

    result[constants.NV_NODELIST] = val

  if constants.NV_NODENETTEST in what:
    result[constants.NV_NODENETTEST] = tmp = {}
    my_pip = my_sip = None
    for name, pip, sip in what[constants.NV_NODENETTEST]:
      if name == my_name:
        my_pip = pip
        my_sip = sip
        break
    if not my_pip:
      tmp[my_name] = ("Can't find my own primary/secondary IP"
                      " in the node list")
    else:
      for name, pip, sip in what[constants.NV_NODENETTEST]:
        fail = []
        if not netutils.TcpPing(pip, port, source=my_pip):
          fail.append("primary")
        if sip != pip:
          if not netutils.TcpPing(sip, port, source=my_sip):
            fail.append("secondary")
        if fail:
          tmp[name] = ("failure using the %s interface(s)" %
                       " and ".join(fail))

  if constants.NV_MASTERIP in what:
    # FIXME: add checks on incoming data structures (here and in the
    # rest of the function)
    master_name, master_ip = what[constants.NV_MASTERIP]
    if master_name == my_name:
      source = constants.IP4_ADDRESS_LOCALHOST
    else:
      source = None
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
                                                     source=source)

  if constants.NV_USERSCRIPTS in what:
    result[constants.NV_USERSCRIPTS] = \
      [script for script in what[constants.NV_USERSCRIPTS]
       if not (os.path.exists(script) and os.access(script, os.X_OK))]

  if constants.NV_OOB_PATHS in what:
    result[constants.NV_OOB_PATHS] = tmp = []
    for path in what[constants.NV_OOB_PATHS]:
      try:
        st = os.stat(path)
      except OSError, err:
        tmp.append("error stat()'ing out of band helper: %s" % err)
      else:
        if stat.S_ISREG(st.st_mode):
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
            tmp.append(None)
          else:
            tmp.append("out of band helper %s is not executable" % path)
        else:
          tmp.append("out of band helper %s is not a file" % path)

  if constants.NV_LVLIST in what and vm_capable:
    try:
      val = GetVolumeList(utils.ListVolumeGroups().keys())
    except RPCFail, err:
      val = str(err)
    result[constants.NV_LVLIST] = val

  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST])
    except RPCFail, err:
      val = str(err)
    result[constants.NV_INSTANCELIST] = val

  if constants.NV_VGLIST in what and vm_capable:
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()

  if constants.NV_PVLIST in what and vm_capable:
    result[constants.NV_PVLIST] = \
      bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
                                   filter_allocatable=False)

  if constants.NV_VERSION in what:
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
                                    constants.RELEASE_VERSION)

  if constants.NV_HVINFO in what and vm_capable:
    hyper = hypervisor.GetHypervisor(what[constants.NV_HVINFO])
    result[constants.NV_HVINFO] = hyper.GetNodeInfo()

  if constants.NV_DRBDLIST in what and vm_capable:
    try:
      used_minors = bdev.DRBD8.GetUsedDevs().keys()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get used minors list", exc_info=True)
      used_minors = str(err)
    result[constants.NV_DRBDLIST] = used_minors

  if constants.NV_DRBDHELPER in what and vm_capable:
    status = True
    try:
      payload = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      logging.error("Can't get DRBD usermode helper: %s", str(err))
      status = False
      payload = str(err)
    result[constants.NV_DRBDHELPER] = (status, payload)

  if constants.NV_NODESETUP in what:
    result[constants.NV_NODESETUP] = tmpr = []
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
      tmpr.append("The sysfs filesystem doesn't seem to be mounted"
                  " under /sys, missing required directories /sys/block"
                  " and /sys/class/net")
    if (not os.path.isdir("/proc/sys") or
        not os.path.isfile("/proc/sysrq-trigger")):
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
                  " under /proc, missing required directory /proc/sys and"
                  " the file /proc/sysrq-trigger")

  if constants.NV_TIME in what:
    result[constants.NV_TIME] = utils.SplitTime(time.time())

  if constants.NV_OSLIST in what and vm_capable:
    result[constants.NV_OSLIST] = DiagnoseOS()

  if constants.NV_BRIDGES in what and vm_capable:
    result[constants.NV_BRIDGES] = [bridge
                                    for bridge in what[constants.NV_BRIDGES]
                                    if not utils.BridgeExists(bridge)]
  return result


def GetBlockDevSizes(devices):
  """Return the size of the given block devices.

  @type devices: list
  @param devices: list of block device nodes to query
  @rtype: dict
  @return:
    dictionary of all block devices under /dev (key). The value is their
    size in MiB.

    {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}

  """
  DEV_PREFIX = "/dev/"
  blockdevs = {}

  for devpath in devices:
    if not utils.IsBelowDir(DEV_PREFIX, devpath):
      continue

    try:
      st = os.stat(devpath)
    except EnvironmentError, err:
      logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
      continue

    if stat.S_ISBLK(st.st_mode):
      result = utils.RunCmd(["blockdev", "--getsize64", devpath])
      if result.failed:
        # We don't want to fail, just do not list this device as available
        logging.warning("Cannot get size for block device %s", devpath)
        continue

      size = int(result.stdout) / (1024 * 1024)
      blockdevs[devpath] = size
  return blockdevs
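
# Illustrative note (not part of the original module): "blockdev
# --getsize64" prints the size in bytes, so a 1 GiB device yields
# 1073741824, stored above as 1073741824 / (1024 * 1024) == 1024 MiB.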


def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
      dictionary of all partitions (key) with value being a tuple of
      their size (in MiB), inactive and online status::

        {'xenvg/test1': ('20.06', True, True)}

      in case of errors, a string is returned with the error
      details.

  """
  lvs = {}
  sep = "|"
  if not vg_names:
    vg_names = []
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  for line in result.stdout.splitlines():
    line = line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, name, size, attr = match.groups()
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    virtual = attr[0] == "v"
    if virtual:
      # we don't want to report such volumes as existing, since they
      # don't really hold data
      continue
    lvs[vg_name + "/" + name] = (size, inactive, online)

  return lvs
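
# Illustrative sketch (not part of the original module): a typical lvs
# output line and how _LVSLINE_REGEX picks it apart (attribute string
# made up):
#
#   "  xenvg|test1|20.06|-wi-ao"
#   match.groups() -> ("xenvg", "test1", "20.06", "-wi-ao")
#   inactive = "-wi-ao"[4] == "-"  -> False
#   online   = "-wi-ao"[5] == "o"  -> True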


def ListVolumeGroups():
  """List the volume groups and their size.

  @rtype: dict
  @return: dictionary with volume group names as keys and their
      size as values

  """
  return utils.ListVolumeGroups()


def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  def parse_dev(dev):
    return dev.split("(")[0]

  def handle_dev(dev):
    return [parse_dev(x) for x in dev.split(",")]

  def map_line(line):
    line = [v.strip() for v in line]
    return [{"name": line[0], "size": line[1],
             "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]

  all_devs = []
  for line in result.stdout.splitlines():
    if line.count("|") >= 3:
      all_devs.extend(map_line(line.split("|")))
    else:
      logging.warning("Strange line in the output from lvs: '%s'", line)
  return all_devs


def BridgesExist(bridges_list):
  """Check if a list of bridges exist on the current node.

  @raise RPCFail: in case any of the bridges is missing

  """
  missing = []
  for bridge in bridges_list:
    if not utils.BridgeExists(bridge):
      missing.append(bridge)

  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))


def GetInstanceList(hypervisor_list):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query for instances

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    try:
      names = hypervisor.GetHypervisor(hname).ListInstances()
      results.extend(names)
    except errors.HypervisorError, err:
      _Fail("Error enumerating instances (hypervisor %s): %s",
            hname, err, exc=True)

  return results


def GetInstanceInfo(instance, hname):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)

  """
  output = {}

  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
  if iinfo is not None:
    output["memory"] = iinfo[2]
    output["state"] = iinfo[4]
    output["time"] = iinfo[5]

  return output


def GetInstanceMigratable(instance):
  """Checks whether an instance can be migrated.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.

  @raise RPCFail: if the instance is not running; missing disk
      symlinks are only logged as warnings

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances():
    _Fail("Instance %s is not running", iname)

  for idx in range(len(instance.disks)):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)


def GetAllInstancesInfo(hypervisor_list):
  """Gather data about all instances.

  This is the equivalent of L{GetInstanceInfo}, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus

  """
  output = {}

  for hname in hypervisor_list:
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
    if iinfo:
      for name, _, memory, vcpus, state, times in iinfo:
        value = {
          "memory": memory,
          "vcpus": vcpus,
          "state": state,
          "time": times,
          }
        if name in output:
          # we only check static parameters, like memory and vcpus,
          # and not state and time which can change between the
          # invocations of the different hypervisors
          for key in "memory", "vcpus":
            if value[key] != output[name][key]:
              _Fail("Instance %s is running twice"
                    " with different parameters", name)
        output[name] = value

  return output
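
# Illustrative sketch (not part of the original module): the shape of
# the result, with made-up values:
#
#   {"instance1.example.com":
#     {"memory": 512, "vcpus": 1, "state": "-b----", "time": 12.3}}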


def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    c_msg = "-%s" % component
  else:
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
  return utils.PathJoin(constants.LOG_OS_DIR, base)
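
# Illustrative sketch (not part of the original module): assuming
# constants.LOG_OS_DIR is "/var/log/ganeti/os", importing disk 0 of an
# instance might log to something like
#
#   _InstanceLogName("import", "debootstrap", "inst1.example.com", "disk-0")
#   -> ".../os/import-debootstrap-inst1.example.com-disk-0-<ts>.log"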


def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if result.failed:
    logging.error("os create command '%s' returned error: %s, logfile: %s,"
                  " output: %s", result.cmd, result.fail_reason, logfile,
                  result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS create script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be renamed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: boolean
  @return: the success of the operation

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def _GetBlockDevSymlinkPath(instance_name, idx):
  return utils.PathJoin(constants.DISK_LINKS_DIR, "%s%s%d" %
                        (instance_name, constants.DISK_SEPARATOR, idx))
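
# Illustrative sketch (not part of the original module): assuming
# constants.DISK_LINKS_DIR is "/var/run/ganeti/instance-disks" and
# constants.DISK_SEPARATOR is ":", disk 0 of inst1.example.com maps to
#
#   /var/run/ganeti/instance-disks/inst1.example.com:0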


def _SymlinkBlockDev(instance_name, device_path, idx):
  """Set up symlinks to an instance's block device.

  This is an auxiliary function run when an instance is started (on the
  primary node) or when an instance is migrated (on the target node).


  @param instance_name: the name of the target instance
  @param device_path: path of the physical block device, on the node
  @param idx: the disk index
  @return: absolute path to the disk's symlink

  """
  link_name = _GetBlockDevSymlinkPath(instance_name, idx)
  try:
    os.symlink(device_path, link_name)
  except OSError, err:
    if err.errno == errno.EEXIST:
      if (not os.path.islink(link_name) or
          os.readlink(link_name) != device_path):
        os.remove(link_name)
        os.symlink(device_path, link_name)
    else:
      raise

  return link_name


def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  """
  for idx, _ in enumerate(disks):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if os.path.islink(link_name):
      try:
        os.remove(link_name)
      except OSError:
        logging.exception("Can't remove symlink '%s'", link_name)


def _GatherAndLinkBlockDevs(instance):
  """Set up an instance's block device(s).

  This is run on the primary node at instance startup. The block
  devices must be already assembled.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should assemble
  @rtype: list
  @return: list of (disk_object, device_path)

  """
  block_devices = []
  for idx, disk in enumerate(instance.disks):
    device = _RecursiveFindBD(disk)
    if device is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
                                    str(disk))
    device.Open()
    try:
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
    except OSError, e:
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
                                    e.strerror)

    block_devices.append((disk, link_name))

  return block_devices


def StartInstance(instance, startup_paused):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: whether to pause the instance at startup
  @rtype: None

  """
  running_instances = GetInstanceList([instance.hypervisor])

  if instance.name in running_instances:
    logging.info("Instance %s already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Hypervisor error: %s", err, exc=True)


def InstanceShutdown(instance, timeout):
  """Shut an instance down.

  @note: this function uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @rtype: None

  """
  hv_name = instance.hypervisor
  hyper = hypervisor.GetHypervisor(hv_name)
  iname = instance.name

  if instance.name not in hyper.ListInstances():
    logging.info("Instance %s not running, doing nothing", iname)
    return

  class _TryShutdown:
    def __init__(self):
      self.tried_once = False

    def __call__(self):
      if iname not in hyper.ListInstances():
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once)
      except errors.HypervisorError, err:
        if iname not in hyper.ListInstances():
          # if the instance is no longer existing, consider this a
          # success and go to cleanup
          return

        _Fail("Failed to stop instance %s: %s", iname, err)

      self.tried_once = True

      raise utils.RetryAgain()

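  # Illustrative note (not part of the original module): utils.Retry is
  # expected to keep invoking the callable above every 5 seconds until
  # "timeout" expires, retrying whenever it raises utils.RetryAgain;
  # returning normally ends the loop, and utils.RetryTimeout (handled
  # below) signals that the soft shutdown never succeeded.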
|
  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      if iname in hyper.ListInstances():
        # only raise an error if the instance still exists, otherwise
        # the error could simply be "instance ... unknown"!
        _Fail("Failed to force stop instance %s: %s", iname, err)

    time.sleep(1)

    if iname in hyper.ListInstances():
      _Fail("Could not shutdown instance %s even by destroy", iname)

  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(iname, instance.disks)


def InstanceReboot(instance, reboot_type, shutdown_timeout):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one of the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @rtype: None

  """
  running_instances = GetInstanceList([instance.hypervisor])

  if instance.name not in running_instances:
    _Fail("Cannot reboot instance %s that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      InstanceShutdown(instance, shutdown_timeout)
      return StartInstance(instance, False)
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: %s", reboot_type)


def MigrationInfo(instance):
  """Gather information about an instance to be migrated.

  @type instance: L{objects.Instance}
  @param instance: the instance definition

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    info = hyper.MigrationInfo(instance)
  except errors.HypervisorError, err:
    _Fail("Failed to fetch migration information: %s", err, exc=True)
  return info


def AcceptInstance(instance, info, target):
  """Prepare the node to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type target: string
  @param target: target host (usually ip), on this node

  """
  # TODO: why is this required only for DTS_EXT_MIRROR?
  if instance.disk_template in constants.DTS_EXT_MIRROR:
    # Create the symlinks, as the disks are not active
    # in any way
    try:
      _GatherAndLinkBlockDevs(instance)
    except errors.BlockDeviceError, err:
      _Fail("Block device error: %s", err, exc=True)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.AcceptInstance(instance, info, target)
  except errors.HypervisorError, err:
    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Failed to accept instance: %s", err, exc=True)


def FinalizeMigrationDst(instance, info, success):
  """Finalize any preparation to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type success: boolean
  @param success: whether the migration was a success or a failure

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.FinalizeMigrationDst(instance, info, success)
  except errors.HypervisorError, err:
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)


def MigrateInstance(instance, target, live):
  """Migrates an instance to another node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type target: string
  @param target: the target node name
  @type live: boolean
  @param live: whether the migration should be done live or not (the
      interpretation of this parameter is left to the hypervisor)
  @raise RPCFail: if migration fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.MigrateInstance(instance, target, live)
  except errors.HypervisorError, err:
    _Fail("Failed to migrate instance: %s", err, exc=True)


def FinalizeMigrationSource(instance, success, live):
  """Finalize the instance migration on the source node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition of the migrated instance
  @type success: bool
  @param success: whether the migration succeeded or not
  @type live: bool
  @param live: whether the user requested a live migration or not
  @raise RPCFail: If the execution fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.FinalizeMigrationSource(instance, success, live)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to finalize the migration on the source node: %s", err,
          exc=True)


def GetMigrationStatus(instance):
  """Get the migration status.

  @type instance: L{objects.Instance}
  @param instance: the instance that is being migrated
  @rtype: L{objects.MigrationStatus}
  @return: the status of the current migration (one of
           L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
           progress info that can be retrieved from the hypervisor
  @raise RPCFail: If the migration status cannot be retrieved

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    return hyper.GetMigrationStatus(instance)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to get migration status: %s", err, exc=True)


def BlockdevCreate(disk, size, owner, on_primary, info):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which disk is created,
      used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
      creation, used for example to set (LVM) tags on LVs

  @return: the new unique_id of the device (this can sometimes be
      computed only after creation), or None. On secondary nodes,
      it's not required to return anything.

  """
  # TODO: remove the obsolete "size" argument
  # pylint: disable=W0613
  clist = []
  if disk.children:
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has to
        # be assembled
        try:
          # pylint: disable=E1103
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk.dev_type, disk.physical_id, clist, disk.size)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  device.SetInfo(info)

  return device.unique_id


def _WipeDevice(path, offset, size):
  """This function actually wipes the device.

  @param path: The path to the device to wipe
  @param offset: The offset in MiB in the file
  @param size: The size in MiB to write

  """
  cmd = [constants.DD_CMD, "if=/dev/zero", "seek=%d" % offset,
         "bs=%d" % constants.WIPE_BLOCK_SIZE, "oflag=direct", "of=%s" % path,
         "count=%d" % size]
  result = utils.RunCmd(cmd)

  if result.failed:
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
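
# Illustrative sketch (not part of the original module): assuming
# constants.WIPE_BLOCK_SIZE is one MiB (dd's seek= and count= are in
# units of bs=), wiping 128 MiB at a 512 MiB offset runs roughly:
#
#   dd if=/dev/zero seek=512 bs=1048576 oflag=direct of=<path> count=128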
1554 |
|
1555 |
|
1556 |


def BlockdevWipe(disk, offset, size):
  """Wipes a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write

  """
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    rdev = None

  if not rdev:
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)

  # Cross-verify some of the parameters
  if offset > rdev.size:
    _Fail("Offset is bigger than device size")
  if (offset + size) > rdev.size:
    _Fail("The provided offset and size to wipe are bigger than device size")

  _WipeDevice(rdev.dev_path, offset, size)
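

# A minimal usage sketch (editor's illustration, never called by this module):
# how a caller could wipe a device in 256 MiB chunks via BlockdevWipe.
# "example_disk" is a hypothetical, already-created L{objects.Disk}; its size
# attribute is in MiB, matching the units used above.
def _ExampleChunkedWipe(example_disk):
  chunk_mib = 256
  for wipe_offset in range(0, example_disk.size, chunk_mib):
    # never wipe past the end of the device
    wipe_size = min(chunk_mib, example_disk.size - wipe_offset)
    BlockdevWipe(example_disk, wipe_offset, wipe_size)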


def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block device.

  @type disks: list of L{objects.Disk}
  @param disks: the disk objects whose sync we want to pause/resume
  @type pause: bool
  @param pause: Whether to pause or resume

  """
  success = []
  for disk in disks:
    try:
      rdev = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      rdev = None

    if not rdev:
      success.append((False, ("Cannot change sync for device %s:"
                              " device not found" % disk.iv_name)))
      continue

    result = rdev.PauseResumeSync(pause)

    if result:
      success.append((result, None))
    else:
      if pause:
        msg = "Pause"
      else:
        msg = "Resume"
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))

  return success
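

# Illustrative only (an editor's sketch): the per-disk (bool, msg) tuples
# returned above can be folded into a single error report on the caller's
# side, e.g.:
def _ExampleReportSyncFailures(disks, pause):
  failures = [msg for (ok, msg) in BlockdevPauseResumeSync(disks, pause)
              if not ok]
  if failures:
    logging.error("Sync pause/resume problems: %s", "; ".join(failures))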


def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation

  """
  msgs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None
  if rdev is not None:
    r_path = rdev.dev_path
    try:
      rdev.Remove()
    except errors.BlockDeviceError, err:
      msgs.append(str(err))
    if not msgs:
      DevCacheManager.RemoveCache(r_path)

  if disk.children:
    for child in disk.children:
      try:
        BlockdevRemove(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
      read/write

  @return: the assembled device or None (in case no device
      was assembled)
  @raise errors.BlockDeviceError: in case there is an error
      during the activation of the children or the device
      itself

  """
  children = []
  if disk.children:
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        if children.count(None) >= mcn:
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk.dev_type, disk.physical_id, children, disk.size)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)

  else:
    result = True
  return result


def BlockdevAssemble(disk, owner, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @rtype: str or boolean
  @return: a C{/dev/...} path for primary nodes, and
      C{True} for secondary nodes

  """
  try:
    result = _RecursiveAssembleBD(disk, owner, as_primary)
    if isinstance(result, bdev.BlockDev):
      # pylint: disable=E1103
      result = result.dev_path
      if as_primary:
        _SymlinkBlockDev(owner, result, idx)
  except errors.BlockDeviceError, err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError, err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return result
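

# A hedged sketch of the typical create-then-activate sequence on a primary
# node, for illustration only; "example_disk" and "example_instance" are
# hypothetical placeholders, and disk index 0 is assumed.
def _ExampleCreateAndActivate(example_disk, example_instance):
  BlockdevCreate(example_disk, example_disk.size, example_instance.name,
                 True, "gnt-example")
  # On the primary node this returns a /dev/... path usable by the hypervisor.
  return BlockdevAssemble(example_disk, example_instance.name, True, 0)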


def BlockdevShutdown(disk):
  """Shut down a block device.

  First, if the device is assembled (Attach() is successful), then
  the device is shut down. Then the children of the device are
  shut down.

  This function is called recursively. Note that we don't cache the
  children or such, as opposed to assemble: shutting down child
  devices doesn't require the upper device to have been active.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk we should
      shutdown
  @rtype: None

  """
  msgs = []
  r_dev = _RecursiveFindBD(disk)
  if r_dev is not None:
    r_path = r_dev.dev_path
    try:
      r_dev.Shutdown()
      DevCacheManager.RemoveCache(r_path)
    except errors.BlockDeviceError, err:
      msgs.append(str(err))

  if disk.children:
    for child in disk.children:
      try:
        BlockdevShutdown(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if new_bdevs.count(None) > 0:
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
  parent_bdev.AddChildren(new_bdevs)


def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
  devs = []
  for disk in new_cdevs:
    rpath = disk.StaticDevPath()
    if rpath is None:
      bd = _RecursiveFindBD(disk)
      if bd is None:
        _Fail("Can't find device %s while removing children", disk)
      else:
        devs.append(bd.dev_path)
    else:
      if not utils.IsNormAbsPath(rpath):
        _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
      devs.append(rpath)
  parent_bdev.RemoveChildren(devs)


def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list of L{objects.BlockDevStatus}
  @return: one status object for each disk
  @raise errors.BlockDeviceError: if any of the disks cannot be
      found

  """
  stats = []
  for dsk in disks:
    rbd = _RecursiveFindBD(dsk)
    if rbd is None:
      _Fail("Can't find device %s", dsk)

    stats.append(rbd.CombinedSyncStatus())

  return stats


def BlockdevGetmirrorstatusMulti(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list of tuples
  @return: List of tuples, (bool, status), one for each disk; bool denotes
      success/failure, status is L{objects.BlockDevStatus} on success, string
      otherwise

  """
  result = []
  for disk in disks:
    try:
      rbd = _RecursiveFindBD(disk)
      if rbd is None:
        result.append((False, "Can't find device %s" % disk))
        continue

      status = rbd.CombinedSyncStatus()
    except errors.BlockDeviceError, err:
      logging.exception("Error while getting disk status")
      result.append((False, str(err)))
    else:
      result.append((True, status))

  assert len(disks) == len(result)

  return result
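

# An editor's sketch of how the per-disk (bool, status) tuples might be
# consumed; purely illustrative, and it assumes the status objects expose an
# is_degraded attribute (an assumption about objects.BlockDevStatus, not
# restated from this module).
def _ExampleCheckMirrors(disks):
  degraded = False
  for (success, payload) in BlockdevGetmirrorstatusMulti(disks):
    if not success:
      logging.error("Mirror status query failed: %s", payload)
    elif payload.is_degraded:
      degraded = True
  return degraded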


def _RecursiveFindBD(disk):
  """Check if a device is activated.

  If so, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we need to find

  @return: None if the device can't be found,
      otherwise the device instance

  """
  children = []
  if disk.children:
    for chdisk in disk.children:
      children.append(_RecursiveFindBD(chdisk))

  return bdev.FindDevice(disk.dev_type, disk.physical_id, children, disk.size)


def _OpenRealBD(disk):
  """Opens the underlying block device of a disk.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to open

  """
  real_disk = _RecursiveFindBD(disk)
  if real_disk is None:
    _Fail("Block device '%s' is not set up", disk)

  real_disk.Open()

  return real_disk


def BlockdevFind(disk):
  """Check if a device is activated.

  If it is, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk to find
  @rtype: None or objects.BlockDevStatus
  @return: None if the disk cannot be found, otherwise its
      current sync status

  """
  try:
    rbd = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    _Fail("Failed to find device: %s", err, exc=True)

  if rbd is None:
    return None

  return rbd.GetSyncStatus()


def BlockdevGetsize(disks):
  """Computes the size of the given disks.

  If a disk is not found, returns None instead.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to compute the size for
  @rtype: list
  @return: list with elements None if the disk cannot be found,
      otherwise the size

  """
  result = []
  for cf in disks:
    try:
      rbd = _RecursiveFindBD(cf)
    except errors.BlockDeviceError:
      result.append(None)
      continue
    if rbd is None:
      result.append(None)
    else:
      result.append(rbd.GetActualSize())
  return result


def BlockdevExport(disk, dest_node, dest_path, cluster_name):
  """Export a block device to a remote node.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk to export
  @type dest_node: str
  @param dest_node: the destination node to export to
  @type dest_path: str
  @param dest_path: the destination path on the target node
  @type cluster_name: str
  @param cluster_name: the cluster name, needed for SSH hostalias
  @rtype: None

  """
  real_disk = _OpenRealBD(disk)

  # the block size on the read dd is 1MiB to match our units
  expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
                               "dd if=%s bs=1048576 count=%s",
                               real_disk.dev_path, str(disk.size))

  # we set here a smaller block size as, due to ssh buffering, more
  # than 64-128k will be mostly ignored; we use nocreat to fail if the
  # device is not already there or we pass a wrong path; we use
  # notrunc to not attempt truncation on an LV device; we use
  # oflag=dsync to not buffer too much memory; this means that at
  # best, we flush every 64k, which will not be very fast
  destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
                                " oflag=dsync", dest_path)

  remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
                                                   constants.GANETI_RUNAS,
                                                   destcmd)

  # all commands have been checked, so we're safe to combine them
  command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)])

  result = utils.RunCmd(["bash", "-c", command])

  if result.failed:
    _Fail("Disk copy command '%s' returned error: %s"
          " output: %s", command, result.fail_reason, result.output)
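

# For orientation, the shell pipeline assembled above ends up roughly like the
# following (device path, size and host are hypothetical examples):
#
#   set -e; set -o pipefail; dd if=/dev/xenvg/disk0 bs=1048576 count=1024 \
#     | ssh <dest_node> 'dd of=/mnt/target conv=nocreat,notrunc bs=65536 \
#       oflag=dsync'
#
# i.e. a large-block local read piped into a small-block, synchronous remote
# write, with pipefail so a failure on either side fails the whole command.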


def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.

  This allows the master to overwrite(!) a file. It will only perform
  the operation if the file belongs to a list of configuration files.

  @type file_name: str
  @param file_name: the target file name
  @type data: str
  @param data: the new contents of the file
  @type mode: int
  @param mode: the mode to give the file (can be None)
  @type uid: string
  @param uid: the owner of the file
  @type gid: string
  @param gid: the group of the file
  @type atime: float
  @param atime: the atime to set on the file (can be None)
  @type mtime: float
  @param mtime: the mtime to set on the file (can be None)
  @rtype: None

  """
  if not os.path.isabs(file_name):
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)

  if file_name not in _ALLOWED_UPLOAD_FILES:
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
          file_name)

  raw_data = _Decompress(data)

  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
    _Fail("Invalid username/groupname type")

  getents = runtime.GetEnts()
  uid = getents.LookupUser(uid)
  gid = getents.LookupGroup(gid)

  utils.SafeWriteFile(file_name, None,
                      data=raw_data, mode=mode, uid=uid, gid=gid,
                      atime=atime, mtime=mtime)


def RunOob(oob_program, command, node, timeout):
  """Executes oob_program with given command on given node.

  @param oob_program: The path to the executable oob_program
  @param command: The command to invoke on oob_program
  @param node: The node given as an argument to the program
  @param timeout: Timeout after which we kill the oob program

  @return: stdout
  @raise RPCFail: If execution fails for some reason

  """
  result = utils.RunCmd([oob_program, command, node], timeout=timeout)

  if result.failed:
    _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
          result.fail_reason, result.output)

  return result.stdout


def WriteSsconfFiles(values):
  """Update all ssconf files.

  Wrapper around the SimpleStore.WriteFiles.

  """
  ssconf.SimpleStore().WriteFiles(values)


def _ErrnoOrStr(err):
  """Format an EnvironmentError exception.

  If the L{err} argument has an errno attribute, it will be looked up
  and converted into a textual C{E...} description. Otherwise the
  string representation of the error will be returned.

  @type err: L{EnvironmentError}
  @param err: the exception to format

  """
  if hasattr(err, "errno"):
    detail = errno.errorcode[err.errno]
  else:
    detail = str(err)
  return detail
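

# A small illustration (editor's sketch) of the two formatting paths above:
# an exception carrying an errno renders as the symbolic E... name, while
# anything without an errno attribute falls back to str().
#
#   >>> _ErrnoOrStr(IOError(errno.ENOENT, "No such file"))
#   'ENOENT'
#   >>> _ErrnoOrStr(ValueError("boom"))
#   'boom'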


def _OSOndiskAPIVersion(os_dir):
  """Compute and return the API version of a given OS.

  This function will try to read the API version of the OS residing in
  the 'os_dir' directory.

  @type os_dir: str
  @param os_dir: the directory in which we should look for the OS
  @rtype: tuple
  @return: tuple (status, data) with status denoting the validity and
      data holding either the valid versions or an error message

  """
  api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)

  try:
    st = os.stat(api_file)
  except EnvironmentError, err:
    return False, ("Required file '%s' not found under path %s: %s" %
                   (constants.OS_API_FILE, os_dir, _ErrnoOrStr(err)))

  if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
    return False, ("File '%s' in %s is not a regular file" %
                   (constants.OS_API_FILE, os_dir))

  try:
    api_versions = utils.ReadFile(api_file).splitlines()
  except EnvironmentError, err:
    return False, ("Error while reading the API version file at %s: %s" %
                   (api_file, _ErrnoOrStr(err)))

  try:
    api_versions = [int(version.strip()) for version in api_versions]
  except (TypeError, ValueError), err:
    return False, ("API version(s) can't be converted to integer: %s" %
                   str(err))

  return True, api_versions
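

# Illustration only: for an OS directory whose API file (the file named by
# constants.OS_API_FILE) contains the two lines "15" and "20", the function
# above returns (True, [15, 20]); a missing file yields
# (False, "Required file ...").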


def DiagnoseOS(top_dirs=None):
  """Compute the validity for all OSes.

  @type top_dirs: list
  @param top_dirs: the list of directories in which to
      search (if not given defaults to
      L{constants.OS_SEARCH_PATH})
  @rtype: list
  @return: a list of tuples (name, path, status, diagnose, variants,
      parameters, api_version) for all (potential) OSes under all
      search paths, where:
          - name is the (potential) OS name
          - path is the full path to the OS
          - status True/False is the validity of the OS
          - diagnose is the error message for an invalid OS, otherwise empty
          - variants is a list of supported OS variants, if any
          - parameters is a list of (name, help) parameters, if any
          - api_version is a list of supported OS API versions

  """
  if top_dirs is None:
    top_dirs = constants.OS_SEARCH_PATH

  result = []
  for dir_name in top_dirs:
    if os.path.isdir(dir_name):
      try:
        f_names = utils.ListVisibleFiles(dir_name)
      except EnvironmentError, err:
        logging.exception("Can't list the OS directory %s: %s", dir_name, err)
        break
      for name in f_names:
        os_path = utils.PathJoin(dir_name, name)
        status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
        if status:
          diagnose = ""
          variants = os_inst.supported_variants
          parameters = os_inst.supported_parameters
          api_versions = os_inst.api_versions
        else:
          diagnose = os_inst
          variants = parameters = api_versions = []
        result.append((name, os_path, status, diagnose, variants,
                       parameters, api_versions))

  return result


def _TryOSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: tuple
  @return: success and either the OS instance if we find a valid one,
      or error message

  """
  if base_dir is None:
    os_dir = utils.FindFile(name, constants.OS_SEARCH_PATH, os.path.isdir)
  else:
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)

  if os_dir is None:
    return False, "Directory for OS %s not found in search path" % name

  status, api_versions = _OSOndiskAPIVersion(os_dir)
  if not status:
    # push the error up
    return status, api_versions

  if not constants.OS_API_VERSIONS.intersection(api_versions):
    return False, ("API version mismatch for path '%s': found %s, want %s." %
                   (os_dir, api_versions, constants.OS_API_VERSIONS))

  # OS Files dictionary: we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)

  if max(api_versions) >= constants.OS_API_V15:
    os_files[constants.OS_VARIANTS_FILE] = False

  if max(api_versions) >= constants.OS_API_V20:
    os_files[constants.OS_PARAMETERS_FILE] = True
  else:
    del os_files[constants.OS_SCRIPT_VERIFY]

  for (filename, required) in os_files.items():
    os_files[filename] = utils.PathJoin(os_dir, filename)

    try:
      st = os.stat(os_files[filename])
    except EnvironmentError, err:
      if err.errno == errno.ENOENT and not required:
        del os_files[filename]
        continue
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, os_dir, _ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, os_dir))

    if filename in constants.OS_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, os_dir))

  variants = []
  if constants.OS_VARIANTS_FILE in os_files:
    variants_file = os_files[constants.OS_VARIANTS_FILE]
    try:
      variants = utils.ReadFile(variants_file).splitlines()
    except EnvironmentError, err:
      # we accept missing files, but not other errors
      if err.errno != errno.ENOENT:
        return False, ("Error while reading the OS variants file at %s: %s" %
                       (variants_file, _ErrnoOrStr(err)))

  parameters = []
  if constants.OS_PARAMETERS_FILE in os_files:
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the OS parameters file at %s: %s" %
                     (parameters_file, _ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  os_obj = objects.OS(name=name, path=os_dir,
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
                                                 None),
                      supported_variants=variants,
                      supported_parameters=parameters,
                      api_versions=api_versions)
  return True, os_obj


def OSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name. Otherwise, it will raise an appropriate
  L{RPCFail} exception, detailing why this is not a valid OS.

  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
  an exception but returns true/false status data.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: L{objects.OS}
  @return: the OS instance if we find a valid one
  @raise RPCFail: if we don't find a valid OS

  """
  name_only = objects.OS.GetName(name)
  status, payload = _TryOSFromDisk(name_only, base_dir)

  if not status:
    _Fail(payload)

  return payload


def OSCoreEnv(os_name, inst_os, os_params, debug=0):
  """Calculate the basic environment for an os script.

  @type os_name: str
  @param os_name: full operating system name (including variant)
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type os_params: dict
  @param os_params: the OS parameters
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables

  """
  result = {}
  api_version = \
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
  result["OS_API_VERSION"] = "%d" % api_version
  result["OS_NAME"] = inst_os.name
  result["DEBUG_LEVEL"] = "%d" % debug

  # OS variants
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
    variant = objects.OS.GetVariant(os_name)
    if not variant:
      variant = inst_os.supported_variants[0]
  else:
    variant = ""
  result["OS_VARIANT"] = variant

  # OS params
  for pname, pvalue in os_params.items():
    result["OSP_%s" % pname.upper()] = pvalue

  return result
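

# Illustration (editor's sketch): for a hypothetical OS "debootstrap+wheezy"
# supporting API version 20 and a single parameter "mirror", the environment
# produced above would look roughly like:
#
#   {
#     "OS_API_VERSION": "20",
#     "OS_NAME": "debootstrap",
#     "OS_VARIANT": "wheezy",
#     "DEBUG_LEVEL": "0",
#     "OSP_MIRROR": "http://example.com/debian",
#   }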


def OSEnvironment(instance, inst_os, debug=0):
  """Calculate the environment for an os script.

  @type instance: L{objects.Instance}
  @param instance: target instance for the os script run
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)

  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))

  result["HYPERVISOR"] = instance.hypervisor
  result["DISK_COUNT"] = "%d" % len(instance.disks)
  result["NIC_COUNT"] = "%d" % len(instance.nics)
  result["INSTANCE_SECONDARY_NODES"] = \
      ("%s" % " ".join(instance.secondary_nodes))

  # Disks
  for idx, disk in enumerate(instance.disks):
    real_disk = _OpenRealBD(disk)
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
    result["DISK_%d_ACCESS" % idx] = disk.mode
    if constants.HV_DISK_TYPE in instance.hvparams:
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.LDS_BLOCK:
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
    elif disk.dev_type == constants.LD_FILE:
      result["DISK_%d_BACKEND_TYPE" % idx] = \
        "file:%s" % disk.physical_id[0]

  # NICs
  for idx, nic in enumerate(instance.nics):
    result["NIC_%d_MAC" % idx] = nic.mac
    if nic.ip:
      result["NIC_%d_IP" % idx] = nic.ip
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.nicparams[constants.NIC_LINK]:
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
    if constants.HV_NIC_TYPE in instance.hvparams:
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  # HV/BE params
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
    for key, value in source.items():
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)

  return result


def BlockdevGrow(disk, amount, dryrun):
  """Grow a stack of block devices.

  This function is called recursively, with the children being the
  first ones to resize.

  @type disk: L{objects.Disk}
  @param disk: the disk to be grown
  @type amount: integer
  @param amount: the amount (in mebibytes) to grow with
  @type dryrun: boolean
  @param dryrun: whether to execute the operation in simulation mode
      only, without actually increasing the size
  @rtype: None
  @raise RPCFail: in case of an error, with the error message

  """
  r_dev = _RecursiveFindBD(disk)
  if r_dev is None:
    _Fail("Cannot find block device %s", disk)

  try:
    r_dev.Grow(amount, dryrun)
  except errors.BlockDeviceError, err:
    _Fail("Failed to grow block device: %s", err, exc=True)


def BlockdevSnapshot(disk):
  """Create a snapshot copy of a block device.

  This function is called recursively, and the snapshot is actually created
  just for the leaf lvm backend device.

  @type disk: L{objects.Disk}
  @param disk: the disk to be snapshotted
  @rtype: string
  @return: snapshot disk ID as (vg, lv)

  """
  if disk.dev_type == constants.LD_DRBD8:
    if not disk.children:
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
            disk.unique_id)
    return BlockdevSnapshot(disk.children[0])
  elif disk.dev_type == constants.LD_LV:
    r_dev = _RecursiveFindBD(disk)
    if r_dev is not None:
      # FIXME: choose a saner value for the snapshot size
      # let's stay on the safe side and ask for the full size, for now
      return r_dev.Snapshot(disk.size)
    else:
      _Fail("Cannot find block device %s", disk)
  else:
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
          disk.unique_id, disk.dev_type)


def FinalizeExport(instance, snap_disks):
  """Write out the export configuration information.

  @type instance: L{objects.Instance}
  @param instance: the instance which we export, used for
      saving configuration
  @type snap_disks: list of L{objects.Disk}
  @param snap_disks: list of snapshot block devices, which
      will be used to get the actual name of the dump file

  @rtype: None

  """
  destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new")
  finaldestdir = utils.PathJoin(constants.EXPORT_DIR, instance.name)

  config = objects.SerializableConfigParser()

  config.add_section(constants.INISECT_EXP)
  config.set(constants.INISECT_EXP, "version", "0")
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
  config.set(constants.INISECT_EXP, "os", instance.os)
  config.set(constants.INISECT_EXP, "compression", "none")

  config.add_section(constants.INISECT_INS)
  config.set(constants.INISECT_INS, "name", instance.name)
  config.set(constants.INISECT_INS, "maxmem", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "minmem", "%d" %
             instance.beparams[constants.BE_MINMEM])
  # "memory" is deprecated, but useful for exporting to old ganeti versions
  config.set(constants.INISECT_INS, "memory", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "vcpus", "%d" %
             instance.beparams[constants.BE_VCPUS])
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))

  nic_total = 0
  for nic_count, nic in enumerate(instance.nics):
    nic_total += 1
    config.set(constants.INISECT_INS, "nic%d_mac" %
               nic_count, "%s" % nic.mac)
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
    for param in constants.NICS_PARAMETER_TYPES:
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                 "%s" % nic.nicparams.get(param, None))
  # TODO: redundant: on load can read nics until it doesn't exist
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)

  disk_total = 0
  for disk_count, disk in enumerate(snap_disks):
    if disk:
      disk_total += 1
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                 ("%s" % disk.iv_name))
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
                 ("%s" % disk.physical_id[1]))
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                 ("%d" % disk.size))

  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)

  # New-style hypervisor/backend parameters

  config.add_section(constants.INISECT_HYP)
  for name, value in instance.hvparams.items():
    if name not in constants.HVC_GLOBALS:
      config.set(constants.INISECT_HYP, name, str(value))

  config.add_section(constants.INISECT_BEP)
  for name, value in instance.beparams.items():
    config.set(constants.INISECT_BEP, name, str(value))

  config.add_section(constants.INISECT_OSP)
  for name, value in instance.osparams.items():
    config.set(constants.INISECT_OSP, name, str(value))

  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                  data=config.Dumps())
  shutil.rmtree(finaldestdir, ignore_errors=True)
  shutil.move(destdir, finaldestdir)
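

# For orientation, the config file written above serializes to an INI layout
# roughly like the following (section names are illustrative renderings of
# the INISECT_* constants; all values below are made-up examples):
#
#   [export]
#   version = 0
#   timestamp = 1325376000
#   source = node1.example.com
#   os = debootstrap
#   compression = none
#
#   [instance]
#   name = inst1.example.com
#   maxmem = 512
#   ...
#   nic_count = 1
#   disk_count = 1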


def ExportInfo(dest):
  """Get export configuration information.

  @type dest: str
  @param dest: directory containing the export

  @rtype: string
  @return: a serialized form of the config file containing the
      export info

  """
  cff = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)

  config = objects.SerializableConfigParser()
  config.read(cff)

  if (not config.has_section(constants.INISECT_EXP) or
      not config.has_section(constants.INISECT_INS)):
    _Fail("Export info file doesn't have the required fields")

  return config.Dumps()


def ListExports():
  """Return a list of exports currently available on this machine.

  @rtype: list
  @return: list of the exports

  """
  if os.path.isdir(constants.EXPORT_DIR):
    return sorted(utils.ListVisibleFiles(constants.EXPORT_DIR))
  else:
    _Fail("No exports directory")


def RemoveExport(export):
  """Remove an existing export from the node.

  @type export: str
  @param export: the name of the export to remove
  @rtype: None

  """
  target = utils.PathJoin(constants.EXPORT_DIR, export)

  try:
    shutil.rmtree(target)
  except EnvironmentError, err:
    _Fail("Error while removing the export: %s", err, exc=True)


def BlockdevRename(devlist):
  """Rename a list of block devices.

  @type devlist: list of tuples
  @param devlist: list of tuples of the form (disk, new_unique_id);
      disk is an L{objects.Disk} object describing the current disk,
      and new_unique_id is the name we rename it to
  @rtype: None
  @raise RPCFail: if any of the renames failed

  """
  msgs = []
  result = True
  for disk, unique_id in devlist:
    dev = _RecursiveFindBD(disk)
    if dev is None:
      msgs.append("Can't find device %s in rename" % str(disk))
      result = False
      continue
    try:
      old_rpath = dev.dev_path
      dev.Rename(unique_id)
      new_rpath = dev.dev_path
      if old_rpath != new_rpath:
        DevCacheManager.RemoveCache(old_rpath)
        # FIXME: we should add the new cache information here, like:
        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
        # but we don't have the owner here - maybe parse from existing
        # cache? for now, we only lose lvm data when we rename, which
        # is less critical than DRBD or MD
    except errors.BlockDeviceError, err:
      msgs.append("Can't rename device '%s' to '%s': %s" %
                  (dev, unique_id, err))
      logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
      result = False
  if not result:
    _Fail("; ".join(msgs))


def _TransformFileStorageDir(fs_dir):
  """Checks whether given file_storage_dir is valid.

  Checks whether the given fs_dir is within the cluster-wide default
  file_storage_dir or the shared_file_storage_dir, which are stored in
  SimpleStore. Only paths under those directories are allowed.

  @type fs_dir: str
  @param fs_dir: the path to check

  @return: the normalized path if valid
  @raise RPCFail: if the path is not valid

  """
  if not constants.ENABLE_FILE_STORAGE:
    _Fail("File storage disabled at configure time")
  cfg = _GetConfig()
  fs_dir = os.path.normpath(fs_dir)
  base_fstore = cfg.GetFileStorageDir()
  base_shared = cfg.GetSharedFileStorageDir()
  if not (utils.IsBelowDir(base_fstore, fs_dir) or
          utils.IsBelowDir(base_shared, fs_dir)):
    _Fail("File storage directory '%s' is not under base file"
          " storage directory '%s' or shared storage directory '%s'",
          fs_dir, base_fstore, base_shared)
  return fs_dir
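

# Illustration only (hypothetical paths): with a cluster-wide base of
# /srv/ganeti/file-storage, the check above accepts
# "/srv/ganeti/file-storage/inst1" but rejects "/tmp/inst1", and also
# "/srv/ganeti/file-storage/../../etc", since normpath plus IsBelowDir
# prevents escaping the base directory via "..".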


def CreateFileStorageDir(file_storage_dir):
  """Create file storage directory.

  @type file_storage_dir: str
  @param file_storage_dir: directory to create

  @rtype: None
  @raise RPCFail: if the directory cannot be created, or the path
      already exists and is not a directory

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified storage dir '%s' is not a directory",
            file_storage_dir)
  else:
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      _Fail("Cannot create file storage directory '%s': %s",
            file_storage_dir, err, exc=True)


def RemoveFileStorageDir(file_storage_dir):
  """Remove file storage directory.

  Remove it only if it's empty. If it is not, the RPC call fails.

  @type file_storage_dir: str
  @param file_storage_dir: the directory we should cleanup
  @rtype: None
  @raise RPCFail: if the directory is not empty or cannot be removed

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified storage directory '%s' is not a directory",
            file_storage_dir)
    # deletes dir only if empty, otherwise we want to fail the rpc call
    try:
      os.rmdir(file_storage_dir)
    except OSError, err:
      _Fail("Cannot remove file storage directory '%s': %s",
            file_storage_dir, err)


def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
  """Rename the file storage directory.

  @type old_file_storage_dir: str
  @param old_file_storage_dir: the current path
  @type new_file_storage_dir: str
  @param new_file_storage_dir: the name we should rename to
  @rtype: None
  @raise RPCFail: if the rename fails or both locations exist

  """
  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
  if not os.path.exists(new_file_storage_dir):
    if os.path.isdir(old_file_storage_dir):
      try:
        os.rename(old_file_storage_dir, new_file_storage_dir)
      except OSError, err:
        _Fail("Cannot rename '%s' to '%s': %s",
              old_file_storage_dir, new_file_storage_dir, err)
    else:
      _Fail("Specified storage dir '%s' is not a directory",
            old_file_storage_dir)
  else:
    if os.path.exists(old_file_storage_dir):
      _Fail("Cannot rename '%s' to '%s': both locations exist",
            old_file_storage_dir, new_file_storage_dir)


def _EnsureJobQueueFile(file_name):
  """Checks whether the given filename is in the queue directory.

  @type file_name: str
  @param file_name: the file name we should check
  @rtype: None
  @raises RPCFail: if the file is not valid

  """
  queue_dir = os.path.normpath(constants.QUEUE_DIR)
  result = (os.path.commonprefix([queue_dir, file_name]) == queue_dir)

  if not result:
    _Fail("Passed job queue file '%s' does not belong to"
          " the queue directory '%s'", file_name, queue_dir)


def JobQueueUpdate(file_name, content):
  """Updates a file in the queue directory.

  This is just a wrapper over L{utils.io.WriteFile}, with proper
  checking.

  @type file_name: str
  @param file_name: the job file name
  @type content: str
  @param content: the new job contents
  @rtype: None

  """
  _EnsureJobQueueFile(file_name)
  getents = runtime.GetEnts()

  # Write and replace the file atomically
  utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
                  gid=getents.masterd_gid)


def JobQueueRename(old, new):
  """Renames a job queue file.

  This is just a wrapper over os.rename with proper checking.

  @type old: str
  @param old: the old (actual) file name
  @type new: str
  @param new: the desired file name
  @rtype: None

  """
  _EnsureJobQueueFile(old)
  _EnsureJobQueueFile(new)

  getents = runtime.GetEnts()

  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0700,
                   dir_uid=getents.masterd_uid, dir_gid=getents.masterd_gid)


def BlockdevClose(instance_name, disks):
  """Closes the given block devices.

  This means they will be switched to secondary mode (in case of
  DRBD).

  @param instance_name: if the argument is not empty, the symlinks
      of this instance will be removed
  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to be closed
  @rtype: None
  @raise RPCFail: if any of the devices cannot be found or closed,
      with the error details in the message

  """
  bdevs = []
  for cf in disks:
    rd = _RecursiveFindBD(cf)
    if rd is None:
      _Fail("Can't find device %s", cf)
    bdevs.append(rd)

  msg = []
  for rd in bdevs:
    try:
      rd.Close()
    except errors.BlockDeviceError, err:
      msg.append(str(err))
  if msg:
    _Fail("Can't make devices secondary: %s", ",".join(msg))
  else:
    if instance_name:
      _RemoveBlockDevLinks(instance_name, disks)


def ValidateHVParams(hvname, hvparams):
  """Validates the given hypervisor parameters.

  @type hvname: string
  @param hvname: the hypervisor name
  @type hvparams: dict
  @param hvparams: the hypervisor parameters to be validated
  @rtype: None

  """
  try:
    hv_type = hypervisor.GetHypervisor(hvname)
    hv_type.ValidateParameters(hvparams)
  except errors.HypervisorError, err:
    _Fail(str(err), log=False)


def _CheckOSPList(os_obj, parameters):
  """Check whether a list of parameters is supported by the OS.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type parameters: list
  @param parameters: the list of parameters to check

  """
  supported = [v[0] for v in os_obj.supported_parameters]
  delta = frozenset(parameters).difference(supported)
  if delta:
    _Fail("The following parameters are not supported"
          " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))


def ValidateOS(required, osname, checks, osparams):
  """Validate the given OS' parameters.

  @type required: boolean
  @param required: whether absence of the OS should translate into
      failure or not
  @type osname: string
  @param osname: the OS to be validated
  @type checks: list
  @param checks: list of the checks to run (currently only 'parameters')
  @type osparams: dict
  @param osparams: dictionary with OS parameters
  @rtype: boolean
  @return: True if the validation passed, or False if the OS was not
      found and L{required} was false

  """
  if not constants.OS_VALIDATE_CALLS.issuperset(checks):
    _Fail("Unknown checks required for OS %s: %s", osname,
          set(checks).difference(constants.OS_VALIDATE_CALLS))

  name_only = objects.OS.GetName(osname)
  status, tbv = _TryOSFromDisk(name_only, None)

  if not status:
    if required:
      _Fail(tbv)
    else:
      return False

  if max(tbv.api_versions) < constants.OS_API_V20:
    return True

  if constants.OS_VALIDATE_PARAMETERS in checks:
    _CheckOSPList(tbv, osparams.keys())

  validate_env = OSCoreEnv(osname, tbv, osparams)
  result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
                        cwd=tbv.path, reset_env=True)
  if result.failed:
    logging.error("os validate command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    _Fail("OS validation script failed (%s), output: %s",
          result.fail_reason, result.output, log=False)

  return True


def DemoteFromMC():
  """Demotes the current node from master candidate role.

  """
  # try to ensure we're not the master by mistake
  master, myself = ssconf.GetMasterAndMyself()
  if master == myself:
    _Fail("ssconf status shows I'm the master node, will not demote")

  result = utils.RunCmd([constants.DAEMON_UTIL, "check", constants.MASTERD])
  if not result.failed:
    _Fail("The master daemon is running, will not demote")

  try:
    if os.path.isfile(constants.CLUSTER_CONF_FILE):
      utils.CreateBackup(constants.CLUSTER_CONF_FILE)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      _Fail("Error while backing up cluster file: %s", err, exc=True)

  utils.RemoveFile(constants.CLUSTER_CONF_FILE)


def _GetX509Filenames(cryptodir, name):
  """Returns the full paths for the private key and certificate.

  """
  return (utils.PathJoin(cryptodir, name),
          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))
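

# For example (hypothetical directory and certificate name; the "key" and
# "cert" leaf names come from the _X509_KEY_FILE/_X509_CERT_FILE constants):
#
#   >>> _GetX509Filenames("/var/lib/ganeti/crypto", "x509-20110101-abc123")
#   ('/var/lib/ganeti/crypto/x509-20110101-abc123',
#    '/var/lib/ganeti/crypto/x509-20110101-abc123/key',
#    '/var/lib/ganeti/crypto/x509-20110101-abc123/cert')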


def CreateX509Certificate(validity, cryptodir=constants.CRYPTO_KEYS_DIR):
  """Creates a new X509 certificate for SSL/TLS.

  @type validity: int
  @param validity: Validity in seconds
  @rtype: tuple; (string, string)
  @return: Certificate name and public part

  """
  (key_pem, cert_pem) = \
    utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
                                     min(validity, _MAX_SSL_CERT_VALIDITY))

  cert_dir = tempfile.mkdtemp(dir=cryptodir,
                              prefix="x509-%s-" % utils.TimestampForFilename())
  try:
    name = os.path.basename(cert_dir)
    assert len(name) > 5

    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

    utils.WriteFile(key_file, mode=0400, data=key_pem)
    utils.WriteFile(cert_file, mode=0400, data=cert_pem)

    # Never return private key as it shouldn't leave the node
    return (name, cert_pem)
  except Exception:
    shutil.rmtree(cert_dir, ignore_errors=True)
    raise


def RemoveX509Certificate(name, cryptodir=constants.CRYPTO_KEYS_DIR):
  """Removes an X509 certificate.

  @type name: string
  @param name: Certificate name

  """
  (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

  utils.RemoveFile(key_file)
  utils.RemoveFile(cert_file)

  try:
    os.rmdir(cert_dir)
  except EnvironmentError, err:
    _Fail("Cannot remove certificate directory '%s': %s",
          cert_dir, err)
def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
  """Returns the command for the requested input/output.

  @type instance: L{objects.Instance}
  @param instance: The instance object
  @param mode: Import/export mode
  @param ieio: Input/output type
  @param ieargs: Input/output arguments

  """
  assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)

  env = None
  prefix = None
  suffix = None
  exp_size = None

  if ieio == constants.IEIO_FILE:
    (filename, ) = ieargs

    if not utils.IsNormAbsPath(filename):
      _Fail("Path '%s' is not normalized or absolute", filename)

    real_filename = os.path.realpath(filename)
    directory = os.path.dirname(real_filename)

    if not utils.IsBelowDir(constants.EXPORT_DIR, real_filename):
      _Fail("File '%s' is not under exports directory '%s': %s",
            filename, constants.EXPORT_DIR, real_filename)

    # Create directory
    utils.Makedirs(directory, mode=0750)

    quoted_filename = utils.ShellQuote(filename)

    if mode == constants.IEM_IMPORT:
      suffix = "> %s" % quoted_filename
    elif mode == constants.IEM_EXPORT:
      suffix = "< %s" % quoted_filename

    # Retrieve file size
    try:
      st = os.stat(filename)
    except EnvironmentError, err:
      logging.error("Can't stat(2) %s: %s", filename, err)
    else:
      exp_size = utils.BytesToMebibyte(st.st_size)

  elif ieio == constants.IEIO_RAW_DISK:
    (disk, ) = ieargs

    real_disk = _OpenRealBD(disk)

    if mode == constants.IEM_IMPORT:
      # we set a smaller block size here since, due to transport
      # buffering, more than 64-128k will mostly be ignored; we use
      # nocreat to fail if the device is not already there or we pass a
      # wrong path; we use notrunc to not attempt truncation on an LV
      # device; we use oflag=dsync to not buffer too much memory; this
      # means that at best, we flush every 64k, which will not be very
      # fast
      suffix = utils.BuildShellCmd(("| dd of=%s conv=nocreat,notrunc"
                                    " bs=%s oflag=dsync"),
                                   real_disk.dev_path,
                                   str(64 * 1024))

    elif mode == constants.IEM_EXPORT:
      # the block size on the read dd is 1MiB to match our units
      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
                                   real_disk.dev_path,
                                   str(1024 * 1024), # 1 MiB
                                   str(disk.size))
      exp_size = disk.size

  elif ieio == constants.IEIO_SCRIPT:
    (disk, disk_index, ) = ieargs

    assert isinstance(disk_index, (int, long))

    real_disk = _OpenRealBD(disk)

    inst_os = OSFromDisk(instance.os)
    env = OSEnvironment(instance, inst_os)

    if mode == constants.IEM_IMPORT:
      env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
      env["IMPORT_INDEX"] = str(disk_index)
      script = inst_os.import_script

    elif mode == constants.IEM_EXPORT:
      env["EXPORT_DEVICE"] = real_disk.dev_path
      env["EXPORT_INDEX"] = str(disk_index)
      script = inst_os.export_script

    # TODO: Pass special environment only to script
    script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)

    if mode == constants.IEM_IMPORT:
      suffix = "| %s" % script_cmd

    elif mode == constants.IEM_EXPORT:
      prefix = "%s |" % script_cmd

    # Let script predict size
    exp_size = constants.IE_CUSTOM_SIZE

  else:
    _Fail("Invalid %s I/O mode %r", mode, ieio)

  return (env, prefix, suffix, exp_size)


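# Illustrative sketch only, not used by the code: for a file-based
# export, the tuple built by _GetImportExportIoCommand could look like
# the following (the path and size are hypothetical):
#
#   (None,                                           # env
#    None,                                           # prefix
#    "< /var/lib/ganeti/export/inst1.example.com",   # suffix
#    1024)                                           # exp_size, in MiB

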
def _CreateImportExportStatusDir(prefix):
  """Creates status directory for import/export.

  """
  return tempfile.mkdtemp(dir=constants.IMPORT_EXPORT_DIR,
                          prefix=("%s-%s-" %
                                  (prefix, utils.TimestampForFilename())))


def StartImportExportDaemon(mode, opts, host, port, instance, component,
                            ieio, ieioargs):
  """Starts an import or export daemon.

  @param mode: Import/export mode
  @type opts: L{objects.ImportExportOptions}
  @param opts: Daemon options
  @type host: string
  @param host: Remote host for export (None for import)
  @type port: int
  @param port: Remote port for export (None for import)
  @type instance: L{objects.Instance}
  @param instance: Instance object
  @type component: string
  @param component: which part of the instance is transferred now,
      e.g. 'disk/0'
  @param ieio: Input/output type
  @param ieioargs: Input/output arguments

  """
  if mode == constants.IEM_IMPORT:
    prefix = "import"

    if not (host is None and port is None):
      _Fail("Cannot specify host or port on import")

  elif mode == constants.IEM_EXPORT:
    prefix = "export"

    if host is None or port is None:
      _Fail("Host and port must be specified for an export")

  else:
    _Fail("Invalid mode %r", mode)

  if (opts.key_name is None) ^ (opts.ca_pem is None):
    _Fail("Cluster certificate can only be used for both key and CA")

  (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
    _GetImportExportIoCommand(instance, mode, ieio, ieioargs)

  if opts.key_name is None:
    # Use server.pem
    key_path = constants.NODED_CERT_FILE
    cert_path = constants.NODED_CERT_FILE
    assert opts.ca_pem is None
  else:
    (_, key_path, cert_path) = _GetX509Filenames(constants.CRYPTO_KEYS_DIR,
                                                 opts.key_name)
    assert opts.ca_pem is not None

  for i in [key_path, cert_path]:
    if not os.path.exists(i):
      _Fail("File '%s' does not exist" % i)

  status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
  try:
    status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
    pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
    ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)

    if opts.ca_pem is None:
      # Use server.pem
      ca = utils.ReadFile(constants.NODED_CERT_FILE)
    else:
      ca = opts.ca_pem

    # Write CA file
    utils.WriteFile(ca_file, data=ca, mode=0400)

    cmd = [
      constants.IMPORT_EXPORT_DAEMON,
      status_file, mode,
      "--key=%s" % key_path,
      "--cert=%s" % cert_path,
      "--ca=%s" % ca_file,
      ]

    if host:
      cmd.append("--host=%s" % host)

    if port:
      cmd.append("--port=%s" % port)

    if opts.ipv6:
      cmd.append("--ipv6")
    else:
      cmd.append("--ipv4")

    if opts.compress:
      cmd.append("--compress=%s" % opts.compress)

    if opts.magic:
      cmd.append("--magic=%s" % opts.magic)

    if exp_size is not None:
      cmd.append("--expected-size=%s" % exp_size)

    if cmd_prefix:
      cmd.append("--cmd-prefix=%s" % cmd_prefix)

    if cmd_suffix:
      cmd.append("--cmd-suffix=%s" % cmd_suffix)

    if mode == constants.IEM_EXPORT:
      # Retry connection a few times when connecting to remote peer
      cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
      cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
    elif opts.connect_timeout is not None:
      assert mode == constants.IEM_IMPORT
      # Overall timeout for establishing connection while listening
      cmd.append("--connect-timeout=%s" % opts.connect_timeout)

    logfile = _InstanceLogName(prefix, instance.os, instance.name, component)

    # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has
    # support for receiving a file descriptor for output
    utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
                      output=logfile)

    # The import/export name is simply the status directory name
    return os.path.basename(status_dir)

  except Exception:
    shutil.rmtree(status_dir, ignore_errors=True)
    raise


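# Hedged usage sketch, with hypothetical host, port and objects: the
# master side would typically start a daemon and use the returned name
# as a handle for the status/cleanup functions below:
#
#   name = StartImportExportDaemon(constants.IEM_EXPORT, opts,
#                                  "dest.example.com", 1234, instance,
#                                  "disk/0", constants.IEIO_RAW_DISK,
#                                  (instance.disks[0], ))
#   [status] = GetImportExportStatus([name])
#   CleanupImportExport(name)

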
def GetImportExportStatus(names):
  """Returns import/export daemon status.

  @type names: sequence
  @param names: List of names
  @rtype: List of dicts
  @return: a list with the state of each named import/export, or None for
      those whose status couldn't be read

  """
  result = []

  for name in names:
    status_file = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name,
                                 _IES_STATUS_FILE)

    try:
      data = utils.ReadFile(status_file)
    except EnvironmentError, err:
      if err.errno != errno.ENOENT:
        raise
      data = None

    if not data:
      result.append(None)
      continue

    result.append(serializer.LoadJson(data))

  return result


def AbortImportExport(name):
  """Sends SIGTERM to a running import/export daemon.

  """
  logging.info("Abort import/export %s", name)

  status_dir = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name)
  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is running with PID %s, sending SIGTERM",
                 name, pid)
    utils.IgnoreProcessNotFound(os.kill, pid, signal.SIGTERM)


def CleanupImportExport(name):
  """Cleanup after an import or export.

  If the import/export daemon is still running, it's killed. Afterwards the
  whole status directory is removed.

  """
  logging.info("Finalizing import/export %s", name)

  status_dir = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name)

  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is still running with PID %s",
                 name, pid)
    utils.KillProcess(pid, waitpid=False)

  shutil.rmtree(status_dir, ignore_errors=True)


def _FindDisks(nodes_ip, disks):
  """Sets the physical ID on disks and returns the block devices.

  """
  # set the correct physical ID
  my_name = netutils.Hostname.GetSysName()
  for cf in disks:
    cf.SetPhysicalID(my_name, nodes_ip)

  bdevs = []

  for cf in disks:
    rd = _RecursiveFindBD(cf)
    if rd is None:
      _Fail("Can't find device %s", cf)
    bdevs.append(rd)
  return bdevs


def DrbdDisconnectNet(nodes_ip, disks):
  """Disconnects the network on a list of drbd devices.

  """
  bdevs = _FindDisks(nodes_ip, disks)

  # disconnect disks
  for rd in bdevs:
    try:
      rd.DisconnectNet()
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration to standalone mode: %s",
            err, exc=True)


def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
  """Attaches the network on a list of drbd devices.

  """
  bdevs = _FindDisks(nodes_ip, disks)

  if multimaster:
    for idx, rd in enumerate(bdevs):
      try:
        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
      except EnvironmentError, err:
        _Fail("Can't create symlink: %s", err)
  # reconnect disks, switch to the new master configuration and, if
  # needed, to primary mode
  for rd in bdevs:
    try:
      rd.AttachNet(multimaster)
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration: %s", err)

  # wait until the disks are connected; we need to retry the re-attach
  # if the device becomes standalone, as this might happen if one
  # node disconnects and reconnects in a different mode before the
  # other node reconnects; in this case, one or both of the nodes will
  # decide it has the wrong configuration and switch to standalone

  def _Attach():
    all_connected = True

    for rd in bdevs:
      stats = rd.GetProcStatus()

      all_connected = (all_connected and
                       (stats.is_connected or stats.is_in_resync))

      if stats.is_standalone:
        # peer had different config info and this node became
        # standalone, even though this should not happen with the
        # new staged way of changing disk configs
        try:
          rd.AttachNet(multimaster)
        except errors.BlockDeviceError, err:
          _Fail("Can't change network configuration: %s", err)

    if not all_connected:
      raise utils.RetryAgain()

  try:
    # Start with a delay of 100 milliseconds and go up to 5 seconds
    utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
  except utils.RetryTimeout:
    _Fail("Timeout in disk reconnecting")

  if multimaster:
    # change to primary mode
    for rd in bdevs:
      try:
        rd.Open()
      except errors.BlockDeviceError, err:
        _Fail("Can't change to primary mode: %s", err)


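# Note on the retry schedule above: the (0.1, 1.5, 5.0) tuple passed to
# utils.Retry follows the (start, factor, cap) form, so _Attach is
# retried with delays of roughly 0.1s, 0.15s, 0.225s, ... capped at 5s,
# within an overall timeout of two minutes.

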
def DrbdWaitSync(nodes_ip, disks):
  """Wait until DRBDs have synchronized.

  """
  def _helper(rd):
    stats = rd.GetProcStatus()
    if not (stats.is_connected or stats.is_in_resync):
      raise utils.RetryAgain()
    return stats

  bdevs = _FindDisks(nodes_ip, disks)

  min_resync = 100
  alldone = True
  for rd in bdevs:
    try:
      # poll each second for 15 seconds
      stats = utils.Retry(_helper, 1, 15, args=[rd])
    except utils.RetryTimeout:
      stats = rd.GetProcStatus()
      # last check
      if not (stats.is_connected or stats.is_in_resync):
        _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
    alldone = alldone and (not stats.is_in_resync)
    if stats.sync_percent is not None:
      min_resync = min(min_resync, stats.sync_percent)

  return (alldone, min_resync)


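# Illustrative values: with two devices resyncing at 40.0% and 80.5%,
# DrbdWaitSync would return (False, 40.0); once all devices have
# finished syncing it returns (True, 100).

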
def GetDrbdUsermodeHelper():
  """Returns the DRBD usermode helper currently configured.

  """
  try:
    return bdev.BaseDRBD.GetUsermodeHelper()
  except errors.BlockDeviceError, err:
    _Fail(str(err))


def PowercycleNode(hypervisor_type):
  """Hard-powercycle the node.

  Because we need to return first, and schedule the powercycle in the
  background, we won't be able to report failures nicely.

  """
  hyper = hypervisor.GetHypervisor(hypervisor_type)
  try:
    pid = os.fork()
  except OSError:
    # if we can't fork, we'll pretend that we're in the child process
    pid = 0
  if pid > 0:
    return "Reboot scheduled in 5 seconds"
  # ensure the child is running in RAM
  try:
    utils.Mlockall()
  except Exception: # pylint: disable=W0703
    pass
  time.sleep(5)
  hyper.PowercycleNode()


class HooksRunner(object):
  """Hook runner.

  This class is instantiated on the node side (ganeti-noded) and not
  on the master side.

  """
  def __init__(self, hooks_base_dir=None):
    """Constructor for hooks runner.

    @type hooks_base_dir: str or None
    @param hooks_base_dir: if not None, this overrides the
        L{constants.HOOKS_BASE_DIR} (useful for unittests)

    """
    if hooks_base_dir is None:
      hooks_base_dir = constants.HOOKS_BASE_DIR
    # yeah, _BASE_DIR is not valid for attributes, we use it like a
    # constant
    self._BASE_DIR = hooks_base_dir # pylint: disable=C0103

  def RunLocalHooks(self, node_list, hpath, phase, env):
    """Check that the hooks will be run only locally and then run them.

    """
    assert len(node_list) == 1
    node = node_list[0]
    _, myself = ssconf.GetMasterAndMyself()
    assert node == myself

    results = self.RunHooks(hpath, phase, env)

    # Return values in the form expected by HooksMaster
    return {node: (None, False, results)}

  def RunHooks(self, hpath, phase, env):
    """Run the scripts in the hooks directory.

    @type hpath: str
    @param hpath: the path to the hooks directory which
        holds the scripts
    @type phase: str
    @param phase: either L{constants.HOOKS_PHASE_PRE} or
        L{constants.HOOKS_PHASE_POST}
    @type env: dict
    @param env: dictionary with the environment for the hook
    @rtype: list
    @return: list of 3-element tuples:
      - script path
      - script result, either L{constants.HKR_SUCCESS},
        L{constants.HKR_FAIL} or L{constants.HKR_SKIP}
      - output of the script

    @raise errors.ProgrammerError: for invalid input
        parameters

    """
    if phase == constants.HOOKS_PHASE_PRE:
      suffix = "pre"
    elif phase == constants.HOOKS_PHASE_POST:
      suffix = "post"
    else:
      _Fail("Unknown hooks phase '%s'", phase)

    subdir = "%s-%s.d" % (hpath, suffix)
    dir_name = utils.PathJoin(self._BASE_DIR, subdir)

    results = []

    if not os.path.isdir(dir_name):
      # for non-existing/non-dirs, we simply exit instead of logging a
      # warning at every operation
      return results

    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)

    for (relname, relstatus, runresult) in runparts_results:
      if relstatus == constants.RUNPARTS_SKIP:
        rrval = constants.HKR_SKIP
        output = ""
      elif relstatus == constants.RUNPARTS_ERR:
        rrval = constants.HKR_FAIL
        output = "Hook script execution error: %s" % runresult
      elif relstatus == constants.RUNPARTS_RUN:
        if runresult.failed:
          rrval = constants.HKR_FAIL
        else:
          rrval = constants.HKR_SUCCESS
        output = utils.SafeEncode(runresult.output.strip())
      results.append(("%s/%s" % (subdir, relname), rrval, output))

    return results


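# Hedged example with a hypothetical hook script: a successful run of
# HooksRunner().RunHooks("cluster-verify", constants.HOOKS_PHASE_POST, env)
# could return a list such as
#
#   [("cluster-verify-post.d/00-example", constants.HKR_SUCCESS, "OK")]
#
# where each tuple is ("<subdir>/<script>", result, output) as built above.

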
class IAllocatorRunner(object):
  """IAllocator runner.

  This class is instantiated on the node side (ganeti-noded) and not on
  the master side.

  """
  @staticmethod
  def Run(name, idata):
    """Run an iallocator script.

    @type name: str
    @param name: the iallocator script name
    @type idata: str
    @param idata: the allocator input data

    @rtype: str
    @return: the stdout of the allocator script; failures are reported
        through L{_Fail} instead of a return value

    """
    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      _Fail("iallocator module '%s' not found in the search path", name)

    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
    try:
      os.write(fd, idata)
      os.close(fd)
      result = utils.RunCmd([alloc_script, fin_name])
      if result.failed:
        _Fail("iallocator module '%s' failed: %s, output '%s'",
              name, result.fail_reason, result.output)
    finally:
      os.unlink(fin_name)

    return result.stdout


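# Hedged usage sketch; the allocator name and request contents are
# hypothetical, and the request must already be serialized to a string:
#
#   idata = serializer.DumpJson(request)
#   stdout = IAllocatorRunner.Run("hail", idata)

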
class DevCacheManager(object):
  """Simple class for managing a cache of block device information.

  """
  _DEV_PREFIX = "/dev/"
  _ROOT_DIR = constants.BDEV_CACHE_DIR

  @classmethod
  def _ConvertPath(cls, dev_path):
    """Converts a /dev/name path to the cache file name.

    This replaces slashes with underscores and strips the /dev
    prefix. It then returns the full path to the cache file.

    @type dev_path: str
    @param dev_path: the C{/dev/} path name
    @rtype: str
    @return: the converted path name

    """
    if dev_path.startswith(cls._DEV_PREFIX):
      dev_path = dev_path[len(cls._DEV_PREFIX):]
    dev_path = dev_path.replace("/", "_")
    fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
    return fpath

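  # Illustrative mapping, directly following the code above: the call
  # cls._ConvertPath("/dev/xenvg/disk0") returns
  # utils.PathJoin(cls._ROOT_DIR, "bdev_xenvg_disk0"): the "/dev/"
  # prefix is stripped and the remaining slash becomes an underscore.
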
  @classmethod
  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
    """Updates the cache information for a given device.

    @type dev_path: str
    @param dev_path: the pathname of the device
    @type owner: str
    @param owner: the owner (instance name) of the device
    @type on_primary: bool
    @param on_primary: whether this is the primary
        node or not
    @type iv_name: str
    @param iv_name: the instance-visible name of the
        device, as in objects.Disk.iv_name

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.UpdateCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    if on_primary:
      state = "primary"
    else:
      state = "secondary"
    if iv_name is None:
      iv_name = "not_visible"
    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
    try:
      utils.WriteFile(fpath, data=fdata)
    except EnvironmentError, err:
      logging.exception("Can't update bdev cache for %s: %s", dev_path, err)

  @classmethod
  def RemoveCache(cls, dev_path):
    """Remove data for a dev_path.

    This is just a wrapper over L{utils.io.RemoveFile} with a converted
    path name and logging.

    @type dev_path: str
    @param dev_path: the pathname of the device

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.RemoveCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    try:
      utils.RemoveFile(fpath)
    except EnvironmentError, err:
      logging.exception("Can't remove the bdev cache for %s: %s",
                        dev_path, err)