root / lib / client / gnt_cluster.py @ ae1a845c
History | View | Annotate | Download (43 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
"""Cluster related commands"""
|
22 |
|
23 |
# pylint: disable-msg=W0401,W0613,W0614,C0103
|
24 |
# W0401: Wildcard import ganeti.cli
|
25 |
# W0613: Unused argument, since all functions follow the same API
|
26 |
# W0614: Unused import %s from wildcard import (since we need cli)
|
27 |
# C0103: Invalid name gnt-cluster
|
28 |
|
29 |
import os.path |
30 |
import time |
31 |
import OpenSSL |
32 |
import itertools |
33 |
|
34 |
from ganeti.cli import * |
35 |
from ganeti import opcodes |
36 |
from ganeti import constants |
37 |
from ganeti import errors |
38 |
from ganeti import utils |
39 |
from ganeti import bootstrap |
40 |
from ganeti import ssh |
41 |
from ganeti import objects |
42 |
from ganeti import uidpool |
43 |
from ganeti import compat |
44 |
from ganeti import netutils |
45 |
|
46 |
|
47 |
# "gnt-cluster epo --on": power nodes back on (recover) instead of off
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

# Treat positional arguments as node group names rather than node names
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

# Tunables for polling node reachability during EPO operations
_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
59 |
|
60 |
@UsesRPC
|
61 |
def InitCluster(opts, args): |
62 |
"""Initialize the cluster.
|
63 |
|
64 |
@param opts: the command line options selected by the user
|
65 |
@type args: list
|
66 |
@param args: should contain only one element, the desired
|
67 |
cluster name
|
68 |
@rtype: int
|
69 |
@return: the desired exit code
|
70 |
|
71 |
"""
|
72 |
if not opts.lvm_storage and opts.vg_name: |
73 |
ToStderr("Options --no-lvm-storage and --vg-name conflict.")
|
74 |
return 1 |
75 |
|
76 |
vg_name = opts.vg_name |
77 |
if opts.lvm_storage and not opts.vg_name: |
78 |
vg_name = constants.DEFAULT_VG |
79 |
|
80 |
if not opts.drbd_storage and opts.drbd_helper: |
81 |
ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
|
82 |
return 1 |
83 |
|
84 |
drbd_helper = opts.drbd_helper |
85 |
if opts.drbd_storage and not opts.drbd_helper: |
86 |
drbd_helper = constants.DEFAULT_DRBD_HELPER |
87 |
|
88 |
master_netdev = opts.master_netdev |
89 |
if master_netdev is None: |
90 |
master_netdev = constants.DEFAULT_BRIDGE |
91 |
|
92 |
hvlist = opts.enabled_hypervisors |
93 |
if hvlist is None: |
94 |
hvlist = constants.DEFAULT_ENABLED_HYPERVISOR |
95 |
hvlist = hvlist.split(",")
|
96 |
|
97 |
hvparams = dict(opts.hvparams)
|
98 |
beparams = opts.beparams |
99 |
nicparams = opts.nicparams |
100 |
|
101 |
# prepare beparams dict
|
102 |
beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams) |
103 |
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES) |
104 |
|
105 |
# prepare nicparams dict
|
106 |
nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams) |
107 |
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES) |
108 |
|
109 |
# prepare ndparams dict
|
110 |
if opts.ndparams is None: |
111 |
ndparams = dict(constants.NDC_DEFAULTS)
|
112 |
else:
|
113 |
ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams) |
114 |
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES) |
115 |
|
116 |
# prepare hvparams dict
|
117 |
for hv in constants.HYPER_TYPES: |
118 |
if hv not in hvparams: |
119 |
hvparams[hv] = {} |
120 |
hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv]) |
121 |
utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES) |
122 |
|
123 |
if opts.candidate_pool_size is None: |
124 |
opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT |
125 |
|
126 |
if opts.mac_prefix is None: |
127 |
opts.mac_prefix = constants.DEFAULT_MAC_PREFIX |
128 |
|
129 |
uid_pool = opts.uid_pool |
130 |
if uid_pool is not None: |
131 |
uid_pool = uidpool.ParseUidPool(uid_pool) |
132 |
|
133 |
if opts.prealloc_wipe_disks is None: |
134 |
opts.prealloc_wipe_disks = False
|
135 |
|
136 |
try:
|
137 |
primary_ip_version = int(opts.primary_ip_version)
|
138 |
except (ValueError, TypeError), err: |
139 |
ToStderr("Invalid primary ip version value: %s" % str(err)) |
140 |
return 1 |
141 |
|
142 |
bootstrap.InitCluster(cluster_name=args[0],
|
143 |
secondary_ip=opts.secondary_ip, |
144 |
vg_name=vg_name, |
145 |
mac_prefix=opts.mac_prefix, |
146 |
master_netdev=master_netdev, |
147 |
file_storage_dir=opts.file_storage_dir, |
148 |
shared_file_storage_dir=opts.shared_file_storage_dir, |
149 |
enabled_hypervisors=hvlist, |
150 |
hvparams=hvparams, |
151 |
beparams=beparams, |
152 |
nicparams=nicparams, |
153 |
ndparams=ndparams, |
154 |
candidate_pool_size=opts.candidate_pool_size, |
155 |
modify_etc_hosts=opts.modify_etc_hosts, |
156 |
modify_ssh_setup=opts.modify_ssh_setup, |
157 |
maintain_node_health=opts.maintain_node_health, |
158 |
drbd_helper=drbd_helper, |
159 |
uid_pool=uid_pool, |
160 |
default_iallocator=opts.default_iallocator, |
161 |
primary_ip_version=primary_ip_version, |
162 |
prealloc_wipe_disks=opts.prealloc_wipe_disks, |
163 |
) |
164 |
op = opcodes.OpClusterPostInit() |
165 |
SubmitOpCode(op, opts=opts) |
166 |
return 0 |
167 |
|
168 |
|
169 |
@UsesRPC
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Irreversible operation; require the explicit --yes-do-it flag
  if not opts.yes_do_it:
    # Fixed grammar in the user-facing message ("want destroy" -> "want
    # to destroy")
    ToStderr("Destroying a cluster is irreversible. If you really want to"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
191 |
|
192 |
|
193 |
def RenameCluster(opts, args):
  """Change the name of the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()
  (old_name, ) = client.QueryConfigValues(["cluster_name"])
  target_name = args[0]

  # Renaming drops the cluster IP from the master node, so warn the
  # user unless --force was given
  if not opts.force:
    warning = ("This will rename the cluster from '%s' to '%s'. If you are"
               " connected over the network to the cluster name, the"
               " operation is very dangerous as the IP address will be"
               " removed from the node and the change may not go through."
               " Continue?") % (old_name, target_name)
    if not AskUser(warning):
      return 1

  rename_op = opcodes.OpClusterRename(name=target_name)
  outcome = SubmitOpCode(rename_op, opts=opts, cl=client)

  if outcome:
    ToStdout("Cluster renamed from '%s' to '%s'", old_name, outcome)

  return 0
224 |
|
225 |
|
226 |
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  # Build the redistribution opcode and hand it straight to the
  # submit-or-send-for-later machinery
  SubmitOrSend(opcodes.OpClusterRedistConf(), opts)
  return 0
239 |
|
240 |
|
241 |
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  info = GetClient().QueryClusterInfo()
  # (output format, cluster-info key) pairs, printed in this order
  for fmt, key in [("Software version: %s", "software_version"),
                   ("Internode protocol: %s", "protocol_version"),
                   ("Configuration format: %s", "config_version"),
                   ("OS api version: %s", "os_api_version"),
                   ("Export interface: %s", "export_version")]:
    ToStdout(fmt, info[key])
  return 0
259 |
|
260 |
|
261 |
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # The master name is resolved locally via the bootstrap module
  ToStdout(bootstrap.GetMaster())
  return 0
274 |
|
275 |
|
276 |
def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indention
  @type roman: bool
  @param roman: if True, integer values are rendered via
      L{compat.TryToRoman}

  """
  prefix = "  " * level
  for name, value in sorted(paramsdict.items()):
    if isinstance(value, dict):
      # Nested group: print its heading, then recurse one level deeper
      ToStdout("%s- %s:", prefix, name)
      _PrintGroupedParams(value, level=level + 1, roman=roman)
    elif roman and isinstance(value, int):
      ToStdout("%s  %s: %s", prefix, name, compat.TryToRoman(value))
    else:
      ToStdout("%s  %s: %s", prefix, name, value)
|
294 |
|
295 |
|
296 |
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  Queries the master for the full cluster info dict and pretty-prints
  it section by section (identity, hypervisors, OS lists, cluster
  parameters, and the default node/instance/nic parameter groups).

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  # Tags are sorted for stable output; empty set prints "(none)"
  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  ToStdout("Cluster parameters:")
  # Integers are optionally rendered as roman numerals (--roman)
  ToStdout("  - candidate pool size: %s",
           compat.TryToRoman(result["candidate_pool_size"],
                             convert=opts.roman_integers))
  ToStdout("  - master netdev: %s", result["master_netdev"])
  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout("  - file storage path: %s", result["file_storage_dir"])
  ToStdout("  - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout("  - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout("  - uid pool: %s",
           uidpool.FormatUidPool(result["uid_pool"],
                                 roman=opts.roman_integers))
  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])

  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  return 0
377 |
|
378 |
|
379 |
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  src_path = args[0]
  if not os.path.exists(src_path):
    raise errors.OpPrereqError("No such filename '%s'" % src_path,
                               errors.ECODE_INVAL)

  client = GetClient()

  cluster_name = client.QueryConfigValues(["cluster_name"])[0]

  # The master is excluded from the target list: it is the source
  target_nodes = GetOnlineNodes(nodes=opts.nodes, cl=client,
                                filter_master=True,
                                secondary_ips=opts.use_replication_network,
                                nodegroup=opts.nodegroup)

  runner = ssh.SshRunner(cluster_name=cluster_name)
  for target in target_nodes:
    if not runner.CopyFileToNode(target, src_path):
      # Report the failure but keep going with the remaining nodes
      ToStderr("Copy of file %s to node %s failed", src_path, target)

  return 0
409 |
|
410 |
|
411 |
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=client, nodegroup=opts.nodegroup)

  (cluster_name, master_node) = client.QueryConfigValues(["cluster_name",
                                                          "master_node"])

  runner = ssh.SshRunner(cluster_name=cluster_name)

  # Run on the master last, so a disruptive command does not cut off
  # access before the other nodes have been handled
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for node_name in nodes:
    cmd_result = runner.Run(node_name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", node_name)
    ToStdout("%s", cmd_result.output)
    ToStdout("return code = %s", cmd_result.exit_code)

  return 0
445 |
|
446 |
|
447 |
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  simulate_errors = opts.simulate_errors

  if opts.nodegroup is None:
    # No group restriction: verify the cluster-wide configuration first
    # and let the master report which node groups exist
    config_op = opcodes.OpClusterVerifyConfig(
      verbose=opts.verbose,
      error_codes=opts.error_codes,
      debug_simulate_errors=simulate_errors)
    (success, groups) = SubmitOpCode(config_op, opts=opts)
  else:
    (success, groups) = (True, [opts.nodegroup])

  skip_checks = []
  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  # One verification job per group, executed through the job executor
  executor = JobExecutor(opts=opts, verbose=False)

  for group_name in groups:
    group_op = opcodes.OpClusterVerifyGroup(
      group_name=group_name,
      skip_checks=skip_checks,
      verbose=opts.verbose,
      error_codes=opts.error_codes,
      debug_simulate_errors=simulate_errors)
    executor.QueueJob("group " + group_name, group_op)

  success &= compat.all(res[1][0] for res in executor.GetResults())

  if success:
    return constants.EXIT_SUCCESS
  return constants.EXIT_FAILURE
|
491 |
|
492 |
|
493 |
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  Submits the cluster-wide disk verification opcode, waits for the
  per-group jobs it spawns, then reports broken nodes, re-activates
  disks for instances that are not missing volumes, and lists the
  missing logical volumes of the remaining instances.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  for (status, result) in jex.GetResults():
    if not status:
      ToStdout("Job failed: %s", result)
      continue

    # Each successful job yields one (bad_nodes, instances, missing) tuple
    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      # Only the tail of the node's error text is shown, safely encoded
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    for iname in instances:
      if iname in missing:
        continue
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        # Fold the error's exit code into the overall return code and
        # continue with the next instance
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    if missing:
      for iname, ival in missing.iteritems():
        # If every missing volume sits on an already-broken node, the
        # instance cannot be meaningfully verified
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        if all_missing:
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        else:
          ToStdout("Instance %s has missing logical volumes:", iname)
          ival.sort()
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            else:
              ToStdout("\t%s /dev/%s", node, vol)

      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE

  return retcode
|
562 |
|
563 |
|
564 |
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
  # Return an explicit exit code, consistent with the other command
  # handlers in this module (previously this fell through returning None)
  return 0
576 |
|
577 |
|
578 |
@UsesRPC
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if opts.no_voting:
    # Failing over without a vote can split the cluster; require
    # explicit confirmation from the user
    confirmation = ("This will perform the failover even if most other nodes"
                    " are down, or if this node is outdated. This is dangerous"
                    " as it can lead to a non-consistent cluster. Check the"
                    " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(confirmation):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
|
602 |
|
603 |
|
604 |
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code (0 when the master answers, 1 otherwise)

  """
  try:
    GetClient().QueryClusterInfo()
  except Exception: # pylint: disable-msg=W0703
    # Any failure to reach/query the master means "not alive"
    return 1
  return 0
620 |
|
621 |
|
622 |
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    # No matches is treated as failure
    return 1
  # sorted() replaces the manual list()+sort() dance
  for path, tag in sorted(result):
    ToStdout("%s %s", path, tag)
  # Explicit success exit code, consistent with the other handlers
  return 0
|
640 |
|
641 |
|
642 |
def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
                 new_confd_hmac_key, new_cds, cds_filename,
                 force):
  """Renews cluster certificates, keys and secrets.

  Validates the mutually-exclusive option pairs, loads any
  user-supplied certificate/secret files, then stops the cluster,
  regenerates the requested material via
  L{bootstrap.GenerateClusterCrypto} and copies the affected files to
  all non-master nodes before restarting.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation

  """
  if new_rapi_cert and rapi_cert_filename:
    # NOTE(review): "certficate" below is a typo in the user-visible
    # message; left untouched here since this edit only adds comments
    ToStderr("Only one of the --new-rapi-certficate and --rapi-certificate"
             " options can be specified at the same time.")
    return 1

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             " the same time.")
    return 1

  if rapi_cert_filename:
    # Read and verify new certificate
    try:
      rapi_cert_pem = utils.ReadFile(rapi_cert_filename)

      OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                      rapi_cert_pem)
    except Exception, err: # pylint: disable-msg=W0703
      ToStderr("Can't load new RAPI certificate from %s: %s" %
               (rapi_cert_filename, str(err)))
      return 1

    # The same PEM file must also contain the matching private key
    try:
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
    except Exception, err: # pylint: disable-msg=W0703
      ToStderr("Can't load new RAPI private key from %s: %s" %
               (rapi_cert_filename, str(err)))
      return 1

  else:
    rapi_cert_pem = None

  if cds_filename:
    try:
      cds = utils.ReadFile(cds_filename)
    except Exception, err: # pylint: disable-msg=W0703
      ToStderr("Can't load new cluster domain secret from %s: %s" %
               (cds_filename, str(err)))
      return 1
  else:
    cds = None

  if not force:
    usertext = ("This requires all daemons on all nodes to be restarted and"
                " may take some time. Continue?")
    if not AskUser(usertext):
      return 1

  def _RenewCryptoInner(ctx):
    # Runs while the whole cluster is stopped (see
    # RunWhileClusterStopped below); regenerates the crypto material on
    # the master and pushes the changed files to all other nodes
    ctx.feedback_fn("Updating certificates and keys")
    bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
                                    new_confd_hmac_key,
                                    new_cds,
                                    rapi_cert_pem=rapi_cert_pem,
                                    cds=cds)

    files_to_copy = []

    if new_cluster_cert:
      files_to_copy.append(constants.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(constants.RAPI_CERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(constants.CONFD_HMAC_KEY)

    if new_cds or cds:
      files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)

    if files_to_copy:
      for node_name in ctx.nonmaster_nodes:
        ctx.feedback_fn("Copying %s to %s" %
                        (", ".join(files_to_copy), node_name))
        for file_name in files_to_copy:
          ctx.ssh.CopyFileToNode(node_name, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")

  return 0
747 |
|
748 |
|
749 |
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  Thin CLI wrapper: unpacks the relevant command line options and
  delegates the actual work to L{_RenewCrypto}.

  """
  return _RenewCrypto(opts.new_cluster_cert, opts.new_rapi_cert,
                      opts.rapi_cert, opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret, opts.force)
760 |
|
761 |
|
762 |
def SetClusterParams(opts, args):
  """Modify the cluster.

  Validates that at least one modifiable parameter was given, prepares
  and type-checks each parameter group, then submits a single
  OpClusterSetParams opcode.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Refuse to run if no parameter at all was supplied; every clause
  # below corresponds to one of the options handled further down
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or opts.ndparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.prealloc_wipe_disks is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  # An empty vg_name signals "disable LVM storage" to the opcode
  if not opts.lvm_storage:
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  # Likewise, an empty helper signals "disable DRBD storage"
  if not opts.drbd_storage:
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  mnh = opts.maintain_node_health

  # UID pool arguments are parsed from their string form into ranges
  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  # "" clears the reserved-LV list; otherwise split on unescaped commas
  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  reserved_lvs=opts.reserved_lvs)
  SubmitOpCode(op, opts=opts)
  return 0
862 |
|
863 |
|
864 |
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  subcommand = args[0]
  client = GetClient()

  if subcommand == "drain" or subcommand == "undrain":
    # True drains the queue, False re-enables it
    client.SetQueueDrainFlag(subcommand == "drain")
  elif subcommand == "info":
    (drained, ) = client.QueryConfigValues(["drain_flag"])
    if drained:
      state = "set"
    else:
      state = "unset"
    ToStdout("The drain flag is %s" % state)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % subcommand,
                               errors.ECODE_INVAL)

  return 0
891 |
|
892 |
|
893 |
def _ShowWatcherPause(until):
  """Print whether, and until when, the watcher is paused.

  @param until: pause deadline as a UNIX timestamp, or None when not
      paused

  """
  # A deadline in the past is equivalent to not being paused at all
  if until is not None and until >= time.time():
    ToStdout("The watcher is paused until %s.", time.ctime(until))
  else:
    ToStdout("The watcher is not paused.")
|
898 |
|
899 |
|
900 |
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  subcommand = args[0]
  client = GetClient()

  if subcommand == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif subcommand == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    # Pause until "now" plus the user-supplied duration
    until = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(until)

  elif subcommand == "info":
    (pause_end, ) = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(pause_end)

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % subcommand,
                               errors.ECODE_INVAL)

  return 0
933 |
|
934 |
|
935 |
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  command = constants.OOB_POWER_ON if power else constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)

  errs = 0
  # Each result entry is ((status, node_name), (data_status, data))
  for ((_, node_name), (data_status, _)) in SubmitOpCode(op, opts=opts):
    if data_status != constants.RS_NORMAL:
      # ignore_status=True above means RS_UNAVAIL cannot be returned
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  return errs == 0
970 |
|
971 |
|
972 |
def _InstanceStart(opts, inst_list, start):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    (text_submit, text_success, text_failed) = \
      ("startup", "started", "starting")
  else:
    # Bind the shutdown timeout once so the loop below can treat both
    # opcodes uniformly
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout)
    (text_submit, text_success, text_failed) = \
      ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for instance in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, instance)
    jex.QueueJob(instance, opcls(instance_name=instance))

  results = jex.GetResults()
  bad_cnt = sum(1 for (success, _) in results if not success)

  if bad_cnt:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  ToStdout("All instances have been %s successfully", text_success)
  return True
1008 |
|
1009 |
|
1010 |
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    # Nodes migrate from "down" to "up" as the pings succeed
    self.down = set(node_list)
    self.up = set()
    self.success = True
    self.action_cb = action_cb
    self.node2ip = node2ip
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    # A single failed callback makes the whole operation unsuccessful,
    # but we keep retrying until every node is reachable
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()

    return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    deadline = time.time() + secs
    for node in self.down:
      reachable = self._ping_fn(self.node2ip[node], self.port,
                                timeout=_EPO_PING_TIMEOUT,
                                live_port_needed=True)
      if not reachable:
        continue
      self.feedback_fn("Node %s became available" % node)
      self.up.add(node)
      self.down -= self.up
      # If we have a node available there is the possibility to run the
      # action callback successfully, therefore we don't wait and return
      return

    self._sleep_fn(max(0.0, deadline - time.time()))
1072 |
|
1073 |
|
1074 |
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    # Use the concrete IPv4 class, mirroring the IPv6 branch below; the
    # plain IPAddress base class does not carry a usable address family
    family = netutils.IP4Address.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n - %s", " - ".join(helper.down))
    return False
1103 |
|
1104 |
|
1105 |
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  # An instance is startable once every node it lives on is back online
  start_inst_list = [inst for (inst, nodes) in inst_map.items()
                     if not (nodes - nodes_online)]

  # Remove them from the pending map so they are not started twice
  for inst in start_inst_list:
    del inst_map[inst]

  if not start_inst_list:
    return True

  return _instance_start_fn(opts, start_inst_list, True)
1129 |
|
1130 |
|
1131 |
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Power the nodes *on* (True); the previous code passed False here, which
  # sent OOB_POWER_OFF while trying to recover from an EPO
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
|
1155 |
|
1156 |
|
1157 |
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Instances have to be stopped before the nodes lose power
  if not _InstanceStart(opts, inst_map.keys(), False):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # With no OOB-capable nodes there is nothing left to power off
  if node_list and not _OobPower(opts, node_list, False):
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
|
1177 |
|
1178 |
|
1179 |
def Epo(opts, args):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    ToStderr("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    ToStderr("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  client = GetClient()

  if opts.groups:
    # Materialize the chain into a list: the names are re-assigned by index
    # below, which would raise TypeError on a bare iterator
    node_query_list = \
      list(itertools.chain(*client.QueryGroups(names=args,
                                               fields=["node_list"],
                                               use_locking=False)))
  else:
    # Work on a copy so the caller's "args" list is not mutated in place
    node_query_list = list(args)

  result = client.QueryNodes(names=node_query_list,
                             fields=["name", "master", "pinst_list",
                                     "sinst_list", "powered", "offline"],
                             use_locking=False)
  node_list = []
  inst_map = {}
  for (idx, (node, master, pinsts, sinsts, powered,
             offline)) in enumerate(result):
    # Normalize the node_query_list as well
    if not opts.show_all:
      node_query_list[idx] = node
    if not offline:
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not master:
            inst_map[inst].add(node)
        elif master:
          # Instances on the master are tracked with an empty node set so
          # they are considered startable as soon as the EPO is recovered
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      ToStderr("%s is the master node, please do a master-failover to another"
               " node not affected by the EPO or use --all if you intend to"
               " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      ToStdout("Node %s does not support out-of-band handling, it can not be"
               " handled in a fully automated manner", node)
    elif powered == opts.on:
      ToStdout("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
    return constants.EXIT_FAILURE

  if opts.on:
    return _EpoOn(opts, node_query_list, node_list, inst_map)
  else:
    return _EpoOff(opts, node_list, inst_map)
|
1250 |
|
1251 |
|
1252 |
#: Command dispatch table: name -> (handler, positional-arg spec, options,
#: usage synopsis, one-line description), consumed by GenericMain below
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
     NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  }
1354 |
|
1355 |
|
1356 |
#: dictionary with aliases for commands
aliases = {
  # Historical single-word spelling, kept for backward compatibility
  "masterfailover": "master-failover",
  }
1360 |
|
1361 |
|
1362 |
def Main():
  """Entry point for the gnt-cluster tool.

  Dispatches to the command table above, tagging operations with the
  cluster tag type.

  """
  return GenericMain(commands,
                     override={"tag_type": constants.TAG_CLUSTER},
                     aliases=aliases)