root / lib / client / gnt_node.py @ 2af8b9c9
History | View | Annotate | Download (37.7 kB)
1 |
#
|
---|---|
2 |
#
|
3 |
|
4 |
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
|
5 |
#
|
6 |
# This program is free software; you can redistribute it and/or modify
|
7 |
# it under the terms of the GNU General Public License as published by
|
8 |
# the Free Software Foundation; either version 2 of the License, or
|
9 |
# (at your option) any later version.
|
10 |
#
|
11 |
# This program is distributed in the hope that it will be useful, but
|
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
# General Public License for more details.
|
15 |
#
|
16 |
# You should have received a copy of the GNU General Public License
|
17 |
# along with this program; if not, write to the Free Software
|
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
19 |
# 02110-1301, USA.
|
20 |
|
21 |
"""Node related commands"""
|
22 |
|
23 |
# pylint: disable=W0401,W0613,W0614,C0103
|
24 |
# W0401: Wildcard import ganeti.cli
|
25 |
# W0613: Unused argument, since all functions follow the same API
|
26 |
# W0614: Unused import %s from wildcard import (since we need cli)
|
27 |
# C0103: Invalid name gnt-node
|
28 |
|
29 |
import itertools |
30 |
import errno |
31 |
|
32 |
from ganeti.cli import * |
33 |
from ganeti import cli |
34 |
from ganeti import bootstrap |
35 |
from ganeti import opcodes |
36 |
from ganeti import utils |
37 |
from ganeti import constants |
38 |
from ganeti import errors |
39 |
from ganeti import netutils |
40 |
from ganeti import pathutils |
41 |
from ganeti import ssh |
42 |
from ganeti import compat |
43 |
|
44 |
from ganeti import confd |
45 |
from ganeti.confd import client as confd_client |
46 |
|
47 |
#: default list of field for L{ListNodes}
_LIST_DEF_FIELDS = [
  "name", "dtotal", "dfree",
  "mtotal", "mnode", "mfree",
  "pinst_cnt", "sinst_cnt",
  ]


#: Default field list for L{ListVolumes}
_LIST_VOL_DEF_FIELDS = ["node", "phys", "vg", "name", "size", "instance"]


#: default list of field for L{ListStorage}
_LIST_STOR_DEF_FIELDS = [
  constants.SF_NODE,
  constants.SF_TYPE,
  constants.SF_NAME,
  constants.SF_SIZE,
  constants.SF_USED,
  constants.SF_FREE,
  constants.SF_ALLOCATABLE,
  ]


#: default list of power commands (subcommands accepted by L{PowerNode})
_LIST_POWER_COMMANDS = ["on", "off", "cycle", "status"]


#: headers (and full field list) for L{ListStorage}
_LIST_STOR_HEADERS = {
  constants.SF_NODE: "Node",
  constants.SF_TYPE: "Type",
  constants.SF_NAME: "Name",
  constants.SF_SIZE: "Size",
  constants.SF_USED: "Used",
  constants.SF_FREE: "Free",
  constants.SF_ALLOCATABLE: "Allocatable",
  }


#: User-facing storage unit types
_USER_STORAGE_TYPE = {
  constants.ST_FILE: "file",
  constants.ST_LVM_PV: "lvm-pv",
  constants.ST_LVM_VG: "lvm-vg",
  constants.ST_SHARED_FILE: "sharedfile",
  }

#: Command-line option for selecting the storage type to operate on
_STORAGE_TYPE_OPT = \
  cli_option("-t", "--storage-type",
             dest="user_storage_type",
             choices=_USER_STORAGE_TYPE.keys(),
             default=None,
             metavar="STORAGE_TYPE",
             help=("Storage type (%s)" %
                   utils.CommaJoin(_USER_STORAGE_TYPE.keys())))

#: Storage types supporting the "fix consistency" (repair) operation
_REPAIRABLE_STORAGE_TYPES = \
  [st for st, so in constants.VALID_STORAGE_OPERATIONS.iteritems()
   if constants.SO_FIX_CONSISTENCY in so]

#: Storage types with modifiable fields (see L{ModifyStorage})
_MODIFIABLE_STORAGE_TYPES = constants.MODIFIABLE_STORAGE_FIELDS.keys()

#: OOB power commands that are destructive and therefore need confirmation
_OOB_COMMAND_ASK = compat.UniqueFrozenset([
  constants.OOB_POWER_OFF,
  constants.OOB_POWER_CYCLE,
  ])

# NOTE(review): presumably consumed by the cli framework to allow
# environment overrides for the "list" command -- confirm against ganeti.cli
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])

NONODE_SETUP_OPT = cli_option("--no-node-setup", default=True,
                              action="store_false", dest="node_setup",
                              help=("Do not make initial SSH setup on remote"
                                    " node (needs to be done manually)"))

IGNORE_STATUS_OPT = cli_option("--ignore-status", default=False,
                               action="store_true", dest="ignore_status",
                               help=("Ignore the Node(s) offline status"
                                     " (potentially DANGEROUS)"))
|
126 |
|
127 |
|
128 |
def ConvertStorageType(user_storage_type): |
129 |
"""Converts a user storage type to its internal name.
|
130 |
|
131 |
"""
|
132 |
try:
|
133 |
return _USER_STORAGE_TYPE[user_storage_type]
|
134 |
except KeyError: |
135 |
raise errors.OpPrereqError("Unknown storage type: %s" % user_storage_type, |
136 |
errors.ECODE_INVAL) |
137 |
|
138 |
|
139 |
def _TryReadFile(path):
  """Tries to read a file.

  If the file is not found, C{None} is returned.

  @type path: string
  @param path: Filename
  @rtype: None or string
  @return: the file contents, or C{None} if the file does not exist
  @todo: Consider adding a generic ENOENT wrapper

  """
  try:
    return utils.ReadFile(path)
  except EnvironmentError as err:
    # Only a missing file is tolerated; any other error is propagated
    if err.errno != errno.ENOENT:
      raise
    return None
|
157 |
|
158 |
|
159 |
def _ReadSshKeys(keyfiles, _tostderr_fn=ToStderr):
  """Reads SSH keys according to C{keyfiles}.

  @type keyfiles: dict
  @param keyfiles: Dictionary with keys of L{constants.SSHK_ALL} and two-values
    tuples (private and public key file)
  @rtype: list
  @return: List of three-values tuples (L{constants.SSHK_ALL}, private and
    public key as strings)

  """
  complete_sets = []

  for (kind, (private_file, public_file)) in keyfiles.items():
    priv = _TryReadFile(private_file)
    pub = _TryReadFile(public_file)

    if priv and pub:
      complete_sets.append((kind, priv, pub))
    elif priv or pub:
      # Only half the key pair was found; warn rather than use it
      _tostderr_fn("Couldn't find a complete set of keys for kind '%s'; files"
                   " '%s' and '%s'", kind, private_file, public_file)

  return complete_sets
|
183 |
|
184 |
|
185 |
def _SetupSSH(options, cluster_name, node, ssh_port):
  """Configures a destination node's SSH daemon.

  @param options: Command line options
  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Destination node name
  @type ssh_port: int
  @param ssh_port: Destination node ssh port

  """
  # --force-join is accepted for backwards compatibility but has no effect
  if options.force_join:
    ToStderr("The \"--force-join\" option is no longer supported and will be"
             " ignored.")

  host_keys = _ReadSshKeys(constants.SSH_DAEMON_KEYFILES)

  (_, root_keyfiles) = \
    ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)

  root_keys = _ReadSshKeys(root_keyfiles)

  (_, cert_pem) = \
    utils.ExtractX509Certificate(utils.ReadFile(pathutils.NODED_CERT_FILE))

  # Payload handed to the remote node-setup helper
  data = {
    constants.SSHS_CLUSTER_NAME: cluster_name,
    constants.SSHS_NODE_DAEMON_CERTIFICATE: cert_pem,
    constants.SSHS_SSH_HOST_KEY: host_keys,
    constants.SSHS_SSH_ROOT_KEY: root_keys,
    }

  # NOTE(review): options.ssh_key_check is passed twice -- presumably one
  # positional parameter is "ask key" and the other "strict host check";
  # confirm against bootstrap.RunNodeSetupCmd's signature
  bootstrap.RunNodeSetupCmd(cluster_name, node, pathutils.PREPARE_NODE_JOIN,
                            options.debug, options.verbose, False,
                            options.ssh_key_check, options.ssh_key_check,
                            ssh_port, data)
222 |
|
223 |
|
224 |
@UsesRPC
def AddNode(opts, args):
  """Add a node to the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new node name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  node = netutils.GetHostname(name=args[0]).name
  readd = opts.readd

  # Retrieve relevant parameters of the node group.
  ssh_port = None
  try:
    # Passing [] to QueryGroups means query the default group:
    node_groups = [opts.nodegroup] if opts.nodegroup is not None else []
    output = cl.QueryGroups(names=node_groups, fields=["ndp/ssh_port"],
                            use_locking=False)
    (ssh_port, ) = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    # Best effort: leave ssh_port as None (or the node query below may
    # still supply it)
    pass

  try:
    output = cl.QueryNodes(names=[node],
                           fields=["name", "sip", "master",
                                   "ndp/ssh_port"],
                           use_locking=False)
    # If the node is already known, its own ssh port overrides the group's
    node_exists, sip, is_master, ssh_port = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    # Node not known to the cluster (the usual case for a fresh add)
    node_exists = ""
    sip = None

  if readd:
    if not node_exists:
      ToStderr("Node %s not in the cluster"
               " - please retry without '--readd'", node)
      return 1
    if is_master:
      ToStderr("Node %s is the master, cannot readd", node)
      return 1
  else:
    if node_exists:
      ToStderr("Node %s already in the cluster (as %s)"
               " - please retry with '--readd'", node, node_exists)
      return 1
    # For a fresh add the secondary IP comes from the command line
    sip = opts.secondary_ip

  # read the cluster name from the master
  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  if not readd and opts.node_setup:
    ToStderr("-- WARNING -- \n"
             "Performing this operation is going to replace the ssh daemon"
             " keypair\n"
             "on the target machine (%s) with the ones of the"
             " current one\n"
             "and grant full intra-cluster ssh root access to/from it\n", node)

  if opts.node_setup:
    _SetupSSH(opts, cluster_name, node, ssh_port)

  bootstrap.SetupNodeDaemon(opts, cluster_name, node, ssh_port)

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpNodeAdd(node_name=args[0], secondary_ip=sip,
                         readd=opts.readd, group=opts.nodegroup,
                         vm_capable=opts.vm_capable, ndparams=opts.ndparams,
                         master_capable=opts.master_capable,
                         disk_state=disk_state,
                         hv_state=hv_state)
  SubmitOpCode(op, opts=opts)
305 |
|
306 |
|
307 |
def ListNodes(opts, args):
  """List nodes and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: nodes to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  fields = ParseFields(opts.output, _LIST_DEF_FIELDS)

  # List-valued fields are rendered comma-joined and not treated as numeric
  joiner = (",".join, False)
  fmtoverride = {
    "pinst_list": joiner,
    "sinst_list": joiner,
    "tags": joiner,
    }

  return GenericList(constants.QR_NODE, fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     format_override=fmtoverride, verbose=opts.verbose,
                     force_filter=opts.force_filter, cl=GetClient())
328 |
|
329 |
|
330 |
def ListNodeFields(opts, args):
  """List node fields.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: fields to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  show_headers = not opts.no_headers
  return GenericListFields(constants.QR_NODE, args, opts.separator,
                           show_headers, cl=GetClient())
|
344 |
|
345 |
|
346 |
def EvacuateNode(opts, args):
  """Relocate all secondary instance from a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if opts.dst_node is not None:
    ToStderr("New secondary node given (disabling iallocator), hence evacuating"
             " secondary instances only.")
    opts.secondary_only = True
    opts.primary_only = False

  if opts.secondary_only and opts.primary_only:
    raise errors.OpPrereqError("Only one of the --primary-only and"
                               " --secondary-only options can be passed",
                               errors.ECODE_INVAL)

  # Map the (exclusive) flags onto an evacuation mode
  if opts.primary_only:
    mode = constants.NODE_EVAC_PRI
  elif opts.secondary_only:
    mode = constants.NODE_EVAC_SEC
  else:
    mode = constants.NODE_EVAC_ALL

  # Query only the instance lists relevant for the chosen mode
  fields = []
  if not opts.secondary_only:
    fields.append("pinst_list")
  if not opts.primary_only:
    fields.append("sinst_list")

  cl = GetClient()

  qcl = GetClient()
  rows = qcl.QueryNodes(names=args, fields=fields, use_locking=False)
  qcl.Close()

  # Flatten node rows -> field values -> instance names
  flat = itertools.chain.from_iterable
  instances = set(flat(flat(rows)))

  if not instances:
    ToStderr("No instances to evacuate on node(s) %s, exiting.",
             utils.CommaJoin(args))
    return constants.EXIT_SUCCESS

  confirmed = opts.force or \
    AskUser("Relocate instance(s) %s from node(s) %s?" %
            (utils.CommaJoin(utils.NiceSort(instances)),
             utils.CommaJoin(args)))
  if not confirmed:
    return constants.EXIT_CONFIRMATION

  op = opcodes.OpNodeEvacuate(node_name=args[0], mode=mode,
                              remote_node=opts.dst_node,
                              iallocator=opts.iallocator,
                              early_release=opts.early_release)
  submit_result = SubmitOrSend(op, opts, cl=cl)

  # Follow the jobs the evacuation opcode spawned
  jex = JobExecutor(cl=cl, opts=opts)
  for (status, job_id) in submit_result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  bad_cnt = sum(1 for row in jex.GetResults() if not row[0])
  if bad_cnt:
    ToStdout("There were %s errors during the evacuation.", bad_cnt)
    return constants.EXIT_FAILURE

  ToStdout("All instances evacuated successfully.")
  return constants.EXIT_SUCCESS
|
424 |
|
425 |
|
426 |
def FailoverNode(opts, args):
  """Failover all primary instance on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code (0, or 2 if the user declined)

  """
  cl = GetClient()

  # Static data only, so no locking is requested for the query
  qcl = GetClient()
  rows = qcl.QueryNodes(names=args, fields=["name", "pinst_list"],
                        use_locking=False)
  qcl.Close()
  (node, pinst) = rows[0]

  if not pinst:
    ToStderr("No primary instances on node %s, exiting.", node)
    return 0

  pinst = utils.NiceSort(pinst)

  quoted = ",".join("'%s'" % name for name in pinst)
  if not opts.force and not AskUser("Fail over instance(s) %s?" % quoted):
    return 2

  jex = JobExecutor(cl=cl, opts=opts)
  for instance_name in pinst:
    jex.QueueJob(instance_name,
                 opcodes.OpInstanceFailover(
                   instance_name=instance_name,
                   ignore_consistency=opts.ignore_consistency,
                   iallocator=opts.iallocator))
  results = jex.GetResults()

  failures = sum(1 for row in results if not row[0])
  if failures:
    ToStdout("There were errors during the failover:\n"
             "%d error(s) out of %d instance(s).", failures, len(results))
  else:
    ToStdout("All %d instance(s) failed over successfully.", len(results))
  # Errors are only reported; the exit code stays 0 either way
  return 0
|
474 |
|
475 |
|
476 |
def MigrateNode(opts, args):
  """Migrate all primary instance on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the name of the node to migrate from
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  qcl = GetClient()
  rows = qcl.QueryNodes(names=args, fields=["name", "pinst_list"],
                        use_locking=False)
  qcl.Close()
  ((node, pinst), ) = rows

  if not pinst:
    ToStdout("No primary instances on node %s, exiting." % node)
    return 0

  pinst = utils.NiceSort(pinst)

  confirmed = opts.force or \
    AskUser("Migrate instance(s) %s?" %
            utils.CommaJoin(utils.NiceSort(pinst)))
  if not confirmed:
    return constants.EXIT_CONFIRMATION

  # this should be removed once --non-live is deprecated
  if not opts.live and opts.migration_mode is not None:
    raise errors.OpPrereqError("Only one of the --non-live and "
                               "--migration-mode options can be passed",
                               errors.ECODE_INVAL)

  if opts.live:
    mode = opts.migration_mode
  else:
    # --non-live was passed
    mode = constants.HT_MIGRATION_NONLIVE

  op = opcodes.OpNodeMigrate(node_name=args[0], mode=mode,
                             iallocator=opts.iallocator,
                             target_node=opts.dst_node,
                             allow_runtime_changes=opts.allow_runtime_chgs,
                             ignore_ipolicy=opts.ignore_ipolicy)

  submit_result = SubmitOrSend(op, opts, cl=cl)

  # Follow the jobs the migration opcode spawned
  jex = JobExecutor(cl=cl, opts=opts)
  for (status, job_id) in submit_result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  bad_cnt = sum(1 for row in jex.GetResults() if not row[0])
  if bad_cnt:
    ToStdout("There were %s errors during the node migration.", bad_cnt)
    return constants.EXIT_FAILURE

  ToStdout("All instances migrated successfully.")
  return constants.EXIT_SUCCESS
|
534 |
|
535 |
|
536 |
def _FormatNodeInfo(node_info):
  """Format node information for L{cli.PrintGenericInfo()}.

  @param node_info: 13-element tuple as produced by the node query in
    L{ShowNodeConfig}
  @return: list of (label, value) pairs

  """
  (name, primary_ip, secondary_ip, pinst, sinst, is_mc, drained, offline,
   master_capable, vm_capable, powered, ndparams, ndparams_custom) = node_info

  info = [
    ("Node name", name),
    ("primary ip", primary_ip),
    ("secondary ip", secondary_ip),
    ("master candidate", is_mc),
    ("drained", drained),
    ("offline", offline),
    ]
  # Power state is only shown when it is known
  if powered is not None:
    info.append(("powered", powered))
  info.append(("master_capable", master_capable))
  info.append(("vm_capable", vm_capable))
  if vm_capable:
    info.append(("primary for instances", list(utils.NiceSort(pinst))))
    info.append(("secondary for instances", list(utils.NiceSort(sinst))))
  info.append(("node parameters",
               FormatParamsDictInfo(ndparams_custom, ndparams)))
  return info
|
566 |
|
567 |
|
568 |
def ShowNodeConfig(opts, args):
  """Show node information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we show information about all nodes, or should contain
      a list of nodes to be queried for information
  @rtype: int
  @return: the desired exit code

  """
  # Field order must match the tuple unpacked in _FormatNodeInfo
  fields = ["name", "pip", "sip",
            "pinst_list", "sinst_list",
            "master_candidate", "drained", "offline",
            "master_capable", "vm_capable", "powered",
            "ndparams", "custom_ndparams"]

  cl = GetClient()
  node_data = cl.QueryNodes(fields=fields, names=args, use_locking=False)

  PrintGenericInfo([_FormatNodeInfo(row) for row in node_data])
  return 0
592 |
|
593 |
|
594 |
def RemoveNode(opts, args):
  """Remove a node from the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the node to be removed
  @rtype: int
  @return: the desired exit code

  """
  SubmitOpCode(opcodes.OpNodeRemove(node_name=args[0]), opts=opts)
  return 0
608 |
|
609 |
|
610 |
def PowercycleNode(opts, args):
  """Hard powercycle a node, asking for confirmation first.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the node to be powercycled
  @rtype: int
  @return: the desired exit code (2 if the user declined)

  """
  node = args[0]
  # Ask for confirmation unless the user pre-confirmed on the command line
  if (not opts.confirm and
      not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
    return 2

  op = opcodes.OpNodePowercycle(node_name=node, force=opts.force)
  result = SubmitOrSend(op, opts)
  # Any textual result from the opcode is reported to the user
  if result:
    ToStderr(result)
  return 0
631 |
|
632 |
|
633 |
def PowerNode(opts, args):
  """Change/ask power state of a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: the first element is the power subcommand ("on", "off",
      "cycle" or "status"); the remaining elements are node names
  @rtype: int
  @return: the desired exit code

  """
  command = args.pop(0)

  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "status": "Status"}

  if command not in _LIST_POWER_COMMANDS:
    ToStderr("power subcommand %s not supported." % command)
    return constants.EXIT_FAILURE

  oob_command = "power-%s" % command

  # Destructive commands require node names and an explicit confirmation
  if oob_command in _OOB_COMMAND_ASK:
    if not args:
      ToStderr("Please provide at least one node for this command")
      return constants.EXIT_FAILURE
    elif not opts.force and not ConfirmOperation(args, "nodes",
                                                "power %s" % command):
      return constants.EXIT_FAILURE
    assert len(args) > 0

  opcodelist = []
  if not opts.ignore_status and oob_command == constants.OOB_POWER_OFF:
    # Offline each node before powering it off
    # TODO: This is a little ugly as we can't catch and revert
    for node in args:
      opcodelist.append(opcodes.OpNodeSetParams(node_name=node, offline=True,
                                                auto_promote=opts.auto_promote))

  opcodelist.append(opcodes.OpOobCommand(node_names=args,
                                         command=oob_command,
                                         ignore_status=opts.ignore_status,
                                         timeout=opts.oob_timeout,
                                         power_delay=opts.power_delay))

  cli.SetGenericOpcodeOpts(opcodelist, opts)

  job_id = cli.SendJob(opcodelist)

  # We just want the OOB Opcode status
  # If it fails PollJob gives us the error message in it
  result = cli.PollJob(job_id)[-1]

  errs = 0
  data = []
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, data_node) = data_tuple
    if data_status == constants.RS_NORMAL:
      if oob_command == constants.OOB_POWER_STATUS:
        # "status" replies carry the power state of each node
        if data_node[constants.OOB_POWER_STATUS_POWERED]:
          text = "powered"
        else:
          text = "unpowered"
        data.append([node_name, text])
      else:
        # We don't expect data here, so we just say, it was successfully invoked
        data.append([node_name, "invoked"])
    else:
      errs += 1
      data.append([node_name, cli.FormatResultError(data_status, True)])

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=["node", "status"], data=data)

  for line in data:
    ToStdout(line)

  if errs:
    return constants.EXIT_FAILURE
  else:
    return constants.EXIT_SUCCESS
|
717 |
|
718 |
|
719 |
def Health(opts, args):
  """Show health of a node using OOB.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: the names of the nodes whose health should be queried
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpOobCommand(node_names=args, command=constants.OOB_HEALTH,
                            timeout=opts.oob_timeout)
  result = SubmitOpCode(op, opts=opts)

  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "status": "Status"}

  errs = 0
  data = []
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, data_node) = data_tuple
    if data_status == constants.RS_NORMAL:
      # First (item, status) pair goes on the node's own row ...
      data.append([node_name, "%s=%s" % tuple(data_node[0])])
      # ... subsequent pairs on continuation rows with an empty node column
      for item, status in data_node[1:]:
        data.append(["", "%s=%s" % (item, status)])
    else:
      errs += 1
      data.append([node_name, cli.FormatResultError(data_status, True)])

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=["node", "status"], data=data)

  for line in data:
    ToStdout(line)

  if errs:
    return constants.EXIT_FAILURE
  else:
    return constants.EXIT_SUCCESS
|
763 |
|
764 |
|
765 |
def ListVolumes(opts, args):
  """List logical volumes on node(s).

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we list data for all nodes, or contain a list of nodes
      to display data only for those
  @rtype: int
  @return: the desired exit code

  """
  fields = ParseFields(opts.output, _LIST_VOL_DEF_FIELDS)

  output = SubmitOpCode(opcodes.OpNodeQueryvols(nodes=args,
                                                output_fields=fields),
                        opts=opts)

  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "phys": "PhysDev",
               "vg": "VG", "name": "Name",
               "size": "Size", "instance": "Instance"}

  # "size" is the only unit-scaled, numeric column
  table = GenerateTable(separator=opts.separator, headers=headers,
                        fields=fields, unitfields=["size"],
                        numfields=["size"], data=output, units=opts.units)

  for line in table:
    ToStdout(line)

  return 0
801 |
|
802 |
|
803 |
def ListStorage(opts, args):
  """List physical volumes on node(s).

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we list data for all nodes, or contain a list of nodes
      to display data only for those
  @rtype: int
  @return: the desired exit code

  """
  fields = ParseFields(opts.output, _LIST_STOR_DEF_FIELDS)

  op = opcodes.OpNodeQueryStorage(nodes=args,
                                  storage_type=opts.user_storage_type,
                                  output_fields=fields)
  output = SubmitOpCode(op, opts=opts)

  if opts.no_headers:
    headers = None
  else:
    headers = {
      constants.SF_NODE: "Node",
      constants.SF_TYPE: "Type",
      constants.SF_NAME: "Name",
      constants.SF_SIZE: "Size",
      constants.SF_USED: "Used",
      constants.SF_FREE: "Free",
      constants.SF_ALLOCATABLE: "Allocatable",
      }

  # Size-like columns are unit-scaled and numeric
  size_fields = [constants.SF_SIZE, constants.SF_USED, constants.SF_FREE]

  # change raw values to nicer strings
  for row in output:
    for idx, field in enumerate(fields):
      if field == constants.SF_ALLOCATABLE:
        row[idx] = "Y" if row[idx] else "N"
      else:
        row[idx] = str(row[idx])

  table = GenerateTable(separator=opts.separator, headers=headers,
                        fields=fields, unitfields=size_fields,
                        numfields=size_fields, data=output, units=opts.units)

  for line in table:
    ToStdout(line)

  return 0
857 |
|
858 |
|
859 |
def ModifyStorage(opts, args):
  """Modify storage volume on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain 3 items: node name, storage type and volume name
  @rtype: int
  @return: the desired exit code

  """
  (node_name, user_storage_type, volume_name) = args

  storage_type = ConvertStorageType(user_storage_type)

  # Collect the requested field changes; currently only "allocatable"
  changes = {}
  if opts.allocatable is not None:
    changes[constants.SF_ALLOCATABLE] = opts.allocatable

  if not changes:
    ToStderr("No changes to perform, exiting.")
    return

  op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                   storage_type=storage_type,
                                   name=volume_name,
                                   changes=changes)
  SubmitOrSend(op, opts)
|
886 |
|
887 |
|
888 |
def RepairStorage(opts, args):
  """Repairs a storage volume on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain 3 items: node name, storage type and volume name
  @rtype: int
  @return: the desired exit code

  """
  (node_name, user_storage_type, volume_name) = args

  internal_type = ConvertStorageType(user_storage_type)

  op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                   storage_type=internal_type,
                                   name=volume_name,
                                   ignore_consistency=opts.ignore_consistency)
  SubmitOrSend(op, opts)
907 |
|
908 |
|
909 |
def SetNodeParams(opts, args):
  """Modifies a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the node name
  @rtype: int
  @return: the desired exit code

  """
  # At least one modifiable parameter must have been supplied
  simple_changes = [opts.master_candidate, opts.drained, opts.offline,
                    opts.master_capable, opts.vm_capable, opts.secondary_ip,
                    opts.ndparams]
  if (all(change is None for change in simple_changes) and
      not (opts.hv_state or opts.disk_state)):
    ToStderr("Please give at least one of the parameters.")
    return 1

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  op = opcodes.OpNodeSetParams(node_name=args[0],
                               master_candidate=opts.master_candidate,
                               offline=opts.offline,
                               drained=opts.drained,
                               master_capable=opts.master_capable,
                               vm_capable=opts.vm_capable,
                               secondary_ip=opts.secondary_ip,
                               force=opts.force,
                               ndparams=opts.ndparams,
                               auto_promote=opts.auto_promote,
                               powered=opts.node_powered,
                               hv_state=dict(opts.hv_state),
                               disk_state=disk_state)

  # even if here we process the result, we allow submit only
  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Modified node %s", args[0])
    for param, data in result:
      ToStdout(" - %-5s -> %s", param, data)
  return 0
956 |
|
957 |
|
958 |
def RestrictedCommand(opts, args):
  """Runs a remote command on node(s).

  @param opts: Command line options selected by user
  @type args: list
  @param args: Command line arguments
  @rtype: int
  @return: Exit code

  """
  client = GetClient()

  # Either explicit node names (after the command) or a node group must
  # be given to determine where the command should run
  if not (len(args) > 1 or opts.nodegroup):
    raise errors.OpPrereqError("Node group or node names must be given",
                               errors.ECODE_INVAL)

  # Expand node names
  nodes = GetOnlineNodes(nodes=args[1:], cl=client, nodegroup=opts.nodegroup)

  op = opcodes.OpRestrictedCommand(command=args[0], nodes=nodes,
                                   use_locking=opts.do_locking)
  result = SubmitOrSend(op, opts, cl=client)

  rcode = constants.EXIT_SUCCESS

  # Per-node results come back in the same order as the node list
  for (node, (success, text)) in zip(nodes, result):
    ToStdout("------------------------------------------------")
    if not success:
      rcode = constants.EXIT_FAILURE
      ToStdout(text)
    elif opts.show_machine_names:
      for line in text.splitlines():
        ToStdout("%s: %s", node, line)
    else:
      ToStdout("Node: %s", node)
      ToStdout(text)

  return rcode
|
997 |
|
998 |
|
999 |
class ReplyStatus(object):
  """Class holding a reply status for synchronous confd clients.

  """
  def __init__(self):
    # Start pessimistic: until a valid reply is processed, the query is
    # considered failed and no answer is available
    (self.failure, self.answer) = (True, False)
1006 |
|
1007 |
|
1008 |
def ListDrbd(opts, args):
  """Lists the DRBD minors in use on a node.

  The data is obtained by querying the local confd daemon rather than
  the master daemon.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the node name
  @rtype: int
  @return: the desired exit code

  """
  if len(args) != 1:
    ToStderr("Please give one (and only one) node.")
    return constants.EXIT_FAILURE

  # This command relies entirely on confd, so without it there is
  # nothing useful to do
  if not constants.ENABLE_CONFD:
    ToStderr("Error: this command requires confd support, but it has not"
             " been enabled at build time.")
    return constants.EXIT_FAILURE

  # Mutable holder for the query outcome; filled in by the callback below
  status = ReplyStatus()

  def ListDrbdConfdCallback(reply):
    """Callback for confd queries"""
    if reply.type == confd_client.UPCALL_REPLY:
      answer = reply.server_reply.answer
      reqtype = reply.orig_request.type
      if reqtype == constants.CONFD_REQ_NODE_DRBD:
        if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK:
          ToStderr("Query gave non-ok status '%s': %s" %
                   (reply.server_reply.status,
                    reply.server_reply.answer))
          status.failure = True
          return
        # Validate that the answer has the expected structure
        if not confd.HTNodeDrbd(answer):
          ToStderr("Invalid response from server: expected %s, got %s",
                   confd.HTNodeDrbd, answer)
          status.failure = True
        else:
          status.failure = False
          status.answer = answer
      else:
        # Only CONFD_REQ_NODE_DRBD requests are ever sent from here
        ToStderr("Unexpected reply %s!?", reqtype)
        status.failure = True

  node = args[0]
  hmac = utils.ReadFile(pathutils.CONFD_HMAC_KEY)
  # Callback chain: the filter callback discards duplicate/stale replies,
  # the counting callback tracks when all expected replies have arrived
  filter_callback = confd_client.ConfdFilterCallback(ListDrbdConfdCallback)
  counting_callback = confd_client.ConfdCountingCallback(filter_callback)
  cf_client = confd_client.ConfdClient(hmac, [constants.IP4_ADDRESS_LOCALHOST],
                                       counting_callback)
  req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_NODE_DRBD,
                                        query=node)

  def DoConfdRequestReply(req):
    # Send the request synchronously and loop until every registered
    # query has been answered (or no more replies can be received)
    counting_callback.RegisterQuery(req.rsalt)
    cf_client.SendRequest(req, async=False)
    while not counting_callback.AllAnswered():
      if not cf_client.ReceiveReply():
        ToStderr("Did not receive all expected confd replies")
        break

  DoConfdRequestReply(req)

  if status.failure:
    return constants.EXIT_FAILURE

  fields = ["node", "minor", "instance", "disk", "role", "peer"]
  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "minor": "Minor", "instance": "Instance",
               "disk": "Disk", "role": "Role", "peer": "PeerNode"}

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=fields, data=sorted(status.answer),
                       numfields=["minor"])
  for line in data:
    ToStdout(line)

  return constants.EXIT_SUCCESS
|
1088 |
|
1089 |
|
1090 |
#: Dispatch table for the gnt-node script; maps each sub-command name to a
#: tuple of (handler function, positional argument definitions, option
#: definitions, usage synopsis, one-line description)
commands = {
  "add": (
    AddNode, [ArgHost(min=1, max=1)],
    [SECONDARY_IP_OPT, READD_OPT, NOSSH_KEYCHECK_OPT, NODE_FORCE_JOIN_OPT,
     NONODE_SETUP_OPT, VERBOSE_OPT, NODEGROUP_OPT, PRIORITY_OPT,
     CAPAB_MASTER_OPT, CAPAB_VM_OPT, NODE_PARAMS_OPT, HV_STATE_OPT,
     DISK_STATE_OPT],
    "[-s ip] [--readd] [--no-ssh-key-check] [--force-join]"
    " [--no-node-setup] [--verbose] [--network] <node_name>",
    "Add a node to the cluster"),
  "evacuate": (
    EvacuateNode, ARGS_ONE_NODE,
    [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
     PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT] + SUBMIT_OPTS,
    "[-f] {-I <iallocator> | -n <dst>} [-p | -s] [options...] <node>",
    "Relocate the primary and/or secondary instances from a node"),
  "failover": (
    FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT,
                                  IALLOCATOR_OPT, PRIORITY_OPT],
    "[-f] <node>",
    "Stops the primary instances on a node and start them on their"
    " secondary node (only for instances with drbd disk template)"),
  "migrate": (
    MigrateNode, ARGS_ONE_NODE,
    [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, DST_NODE_OPT,
     IALLOCATOR_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT,
     NORUNTIME_CHGS_OPT] + SUBMIT_OPTS,
    "[-f] <node>",
    "Migrate all the primary instance on a node away from it"
    " (only for instances of type drbd)"),
  "info": (
    ShowNodeConfig, ARGS_MANY_NODES, [],
    "[<node_name>...]", "Show information about the node(s)"),
  "list": (
    ListNodes, ARGS_MANY_NODES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
     FORCE_FILTER_OPT],
    "[nodes...]",
    "Lists the nodes in the cluster. The available fields can be shown using"
    " the \"list-fields\" command (see the man page for details)."
    " The default field list is (in order): %s." %
    utils.CommaJoin(_LIST_DEF_FIELDS)),
  "list-fields": (
    ListNodeFields, [ArgUnknown()],
    [NOHDR_OPT, SEP_OPT],
    "[fields...]",
    "Lists all available fields for nodes"),
  "modify": (
    SetNodeParams, ARGS_ONE_NODE,
    [FORCE_OPT] + SUBMIT_OPTS +
    [MC_OPT, DRAINED_OPT, OFFLINE_OPT,
     CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT,
     AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NODE_PARAMS_OPT,
     NODE_POWERED_OPT, HV_STATE_OPT, DISK_STATE_OPT],
    "<node_name>", "Alters the parameters of a node"),
  "powercycle": (
    PowercycleNode, ARGS_ONE_NODE,
    [FORCE_OPT, CONFIRM_OPT, DRY_RUN_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<node_name>", "Tries to forcefully powercycle a node"),
  "power": (
    PowerNode,
    [ArgChoice(min=1, max=1, choices=_LIST_POWER_COMMANDS),
     ArgNode()],
    SUBMIT_OPTS +
    [AUTO_PROMOTE_OPT, PRIORITY_OPT,
     IGNORE_STATUS_OPT, FORCE_OPT, NOHDR_OPT, SEP_OPT, OOB_TIMEOUT_OPT,
     POWER_DELAY_OPT],
    "on|off|cycle|status [nodes...]",
    "Change power state of node by calling out-of-band helper."),
  "remove": (
    RemoveNode, ARGS_ONE_NODE, [DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name>", "Removes a node from the cluster"),
  "volumes": (
    ListVolumes, [ArgNode()],
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, PRIORITY_OPT],
    "[<node_name>...]", "List logical volumes on node(s)"),
  "list-storage": (
    ListStorage, ARGS_MANY_NODES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT,
     PRIORITY_OPT],
    "[<node_name>...]", "List physical volumes on node(s). The available"
    " fields are (see the man page for details): %s." %
    (utils.CommaJoin(_LIST_STOR_HEADERS))),
  "modify-storage": (
    ModifyStorage,
    [ArgNode(min=1, max=1),
     ArgChoice(min=1, max=1, choices=_MODIFIABLE_STORAGE_TYPES),
     ArgFile(min=1, max=1)],
    [ALLOCATABLE_OPT, DRY_RUN_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<node_name> <storage_type> <name>", "Modify storage volume on a node"),
  "repair-storage": (
    RepairStorage,
    [ArgNode(min=1, max=1),
     ArgChoice(min=1, max=1, choices=_REPAIRABLE_STORAGE_TYPES),
     ArgFile(min=1, max=1)],
    [IGNORE_CONSIST_OPT, DRY_RUN_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<node_name> <storage_type> <name>",
    "Repairs a storage volume on a node"),
  "list-tags": (
    ListTags, ARGS_ONE_NODE, [],
    "<node_name>", "List the tags of the given node"),
  "add-tags": (
    AddTags, [ArgNode(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<node_name> tag...", "Add tags to the given node"),
  "remove-tags": (
    RemoveTags, [ArgNode(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<node_name> tag...", "Remove tags from the given node"),
  "health": (
    Health, ARGS_MANY_NODES,
    [NOHDR_OPT, SEP_OPT, PRIORITY_OPT, OOB_TIMEOUT_OPT],
    "[<node_name>...]", "List health of node(s) using out-of-band"),
  "list-drbd": (
    ListDrbd, ARGS_ONE_NODE,
    [NOHDR_OPT, SEP_OPT],
    "[<node_name>]", "Query the list of used DRBD minors on the given node"),
  "restricted-command": (
    RestrictedCommand, [ArgUnknown(min=1, max=1)] + ARGS_MANY_NODES,
    [SYNC_OPT, PRIORITY_OPT] + SUBMIT_OPTS + [SHOW_MACHINE_OPT, NODEGROUP_OPT],
    "<command> <node_name> [<node_name>...]",
    "Executes a restricted command on node(s)"),
  }
1213 |
|
1214 |
#: dictionary with aliases for commands
|
1215 |
# "gnt-node show" is accepted as a synonym for "gnt-node info"
aliases = dict(show="info")
1218 |
|
1219 |
|
1220 |
def Main():
  """Entry point of the gnt-node script.

  @rtype: int
  @return: exit code as computed by L{GenericMain}

  """
  # Tag operations in this script always act on node tags
  overrides = {"tag_type": constants.TAG_NODE}
  return GenericMain(commands, aliases=aliases, override=overrides,
                     env_override=_ENV_OVERRIDE)