root / scripts / gnt-node @ f91e255a
History | View | Annotate | Download (23.5 kB)
1 |
#!/usr/bin/python |
---|---|
2 |
# |
3 |
|
4 |
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. |
5 |
# |
6 |
# This program is free software; you can redistribute it and/or modify |
7 |
# it under the terms of the GNU General Public License as published by |
8 |
# the Free Software Foundation; either version 2 of the License, or |
9 |
# (at your option) any later version. |
10 |
# |
11 |
# This program is distributed in the hope that it will be useful, but |
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 |
# General Public License for more details. |
15 |
# |
16 |
# You should have received a copy of the GNU General Public License |
17 |
# along with this program; if not, write to the Free Software |
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
19 |
# 02110-1301, USA. |
20 |
|
21 |
"""Node related commands""" |
22 |
|
23 |
# pylint: disable-msg=W0401,W0613,W0614,C0103 |
24 |
# W0401: Wildcard import ganeti.cli |
25 |
# W0613: Unused argument, since all functions follow the same API |
26 |
# W0614: Unused import %s from wildcard import (since we need cli) |
27 |
# C0103: Invalid name gnt-node |
28 |
|
29 |
import sys |
30 |
|
31 |
from ganeti.cli import * |
32 |
from ganeti import bootstrap |
33 |
from ganeti import opcodes |
34 |
from ganeti import utils |
35 |
from ganeti import constants |
36 |
from ganeti import compat |
37 |
from ganeti import errors |
38 |
from ganeti import netutils |
39 |
|
40 |
|
41 |
#: Default list of fields for L{ListNodes}
_LIST_DEF_FIELDS = [
  "name", "dtotal", "dfree",
  "mtotal", "mnode", "mfree",
  "pinst_cnt", "sinst_cnt",
  ]


#: Default field list for L{ListVolumes}
_LIST_VOL_DEF_FIELDS = ["node", "phys", "vg", "name", "size", "instance"]


#: Default list of fields for L{ListStorage}
_LIST_STOR_DEF_FIELDS = [
  constants.SF_NODE,
  constants.SF_TYPE,
  constants.SF_NAME,
  constants.SF_SIZE,
  constants.SF_USED,
  constants.SF_FREE,
  constants.SF_ALLOCATABLE,
  ]


#: Headers (and full field list) for L{ListNodes}
_LIST_HEADERS = {
  "name": "Node", "pinst_cnt": "Pinst", "sinst_cnt": "Sinst",
  "pinst_list": "PriInstances", "sinst_list": "SecInstances",
  "pip": "PrimaryIP", "sip": "SecondaryIP",
  "dtotal": "DTotal", "dfree": "DFree",
  "mtotal": "MTotal", "mnode": "MNode", "mfree": "MFree",
  "bootid": "BootID",
  "ctotal": "CTotal", "cnodes": "CNodes", "csockets": "CSockets",
  "tags": "Tags",
  "serial_no": "SerialNo",
  "master_candidate": "MasterC",
  "master": "IsMaster",
  "offline": "Offline", "drained": "Drained",
  "role": "Role",
  "ctime": "CTime", "mtime": "MTime", "uuid": "UUID",
  "master_capable": "MasterCapable", "vm_capable": "VMCapable",
  }


#: Headers (and full field list) for L{ListStorage}
_LIST_STOR_HEADERS = {
  constants.SF_NODE: "Node",
  constants.SF_TYPE: "Type",
  constants.SF_NAME: "Name",
  constants.SF_SIZE: "Size",
  constants.SF_USED: "Used",
  constants.SF_FREE: "Free",
  constants.SF_ALLOCATABLE: "Allocatable",
  }


#: User-facing storage unit types
_USER_STORAGE_TYPE = {
  constants.ST_FILE: "file",
  constants.ST_LVM_PV: "lvm-pv",
  constants.ST_LVM_VG: "lvm-vg",
  }

#: Command line option selecting the storage type for the storage commands
_STORAGE_TYPE_OPT = \
  cli_option("-t", "--storage-type",
             dest="user_storage_type",
             choices=_USER_STORAGE_TYPE.keys(),
             default=None,
             metavar="STORAGE_TYPE",
             help=("Storage type (%s)" %
                   utils.CommaJoin(_USER_STORAGE_TYPE.keys())))

#: Storage types which support the "repair" operation (L{RepairStorage})
_REPAIRABLE_STORAGE_TYPES = \
  [st for st, so in constants.VALID_STORAGE_OPERATIONS.iteritems()
   if constants.SO_FIX_CONSISTENCY in so]

#: Storage types whose fields can be modified (L{ModifyStorage})
_MODIFIABLE_STORAGE_TYPES = constants.MODIFIABLE_STORAGE_FIELDS.keys()


#: Option to skip the external SSH setup when adding a node (L{AddNode})
NONODE_SETUP_OPT = cli_option("--no-node-setup", default=True,
                              action="store_false", dest="node_setup",
                              help=("Do not make initial SSH setup on remote"
                                    " node (needs to be done manually)"))
124 |
|
125 |
|
126 |
def ConvertStorageType(user_storage_type):
  """Converts a user storage type to its internal name.

  @type user_storage_type: string
  @param user_storage_type: one of the keys of L{_USER_STORAGE_TYPE}
  @rtype: string
  @return: the internal storage type name
  @raise errors.OpPrereqError: if the given type is not known

  """
  if user_storage_type not in _USER_STORAGE_TYPE:
    raise errors.OpPrereqError("Unknown storage type: %s" % user_storage_type,
                               errors.ECODE_INVAL)
  return _USER_STORAGE_TYPE[user_storage_type]
135 |
|
136 |
|
137 |
def _RunSetupSSH(options, nodes):
  """Wrapper around utils.RunCmd to call setup-ssh

  @param options: The command line options
  @param nodes: The nodes to setup
  @raise errors.OpExecError: if the external script exits with an error

  """
  args = [constants.SETUP_SSH]

  # Forward --debug/--verbose to the external script if set on our own
  # invocation; --debug takes precedence over --verbose
  if options.debug:
    args.append("--debug")
  elif options.verbose:
    args.append("--verbose")
  if not options.ssh_key_check:
    args.append("--no-ssh-key-check")

  args.extend(nodes)

  result = utils.RunCmd(args, interactive=True)

  if not result.failed:
    return

  raise errors.OpExecError("Command '%s' failed with exit code %s; output %r" %
                           (result.cmd, result.exit_code, result.output))
163 |
|
164 |
|
165 |
@UsesRPC
def AddNode(opts, args):
  """Add a node to the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new node name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  # Resolve the name the user gave to a fully-qualified host name
  node = netutils.GetHostname(name=args[0]).name
  readd = opts.readd

  # Probe whether the node is already part of the cluster; a query
  # failure is treated as "not in the cluster"
  try:
    output = cl.QueryNodes(names=[node], fields=['name', 'sip'],
                           use_locking=False)
    # node_exists is the cluster's name for the node, sip its known
    # secondary IP
    node_exists, sip = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    node_exists = ""
    sip = None

  if readd:
    # --readd requires the node to be known already
    if not node_exists:
      ToStderr("Node %s not in the cluster"
               " - please retry without '--readd'", node)
      return 1
  else:
    # a fresh add must not clash with an existing node
    if node_exists:
      ToStderr("Node %s already in the cluster (as %s)"
               " - please retry with '--readd'", node, node_exists)
      return 1
    # for a fresh add, the secondary IP comes from the command line
    sip = opts.secondary_ip

  # read the cluster name from the master
  output = cl.QueryConfigValues(['cluster_name'])
  cluster_name = output[0]

  # warn about the destructive SSH key replacement (fresh adds only)
  if not readd and opts.node_setup:
    ToStderr("-- WARNING -- \n"
             "Performing this operation is going to replace the ssh daemon"
             " keypair\n"
             "on the target machine (%s) with the ones of the"
             " current one\n"
             "and grant full intra-cluster ssh root access to/from it\n", node)

  # run the external SSH setup script before starting the node daemon
  if opts.node_setup:
    _RunSetupSSH(opts, [node])

  bootstrap.SetupNodeDaemon(cluster_name, node, opts.ssh_key_check)

  op = opcodes.OpAddNode(node_name=args[0], secondary_ip=sip,
                         readd=opts.readd, group=opts.nodegroup)
  SubmitOpCode(op, opts=opts)
220 |
|
221 |
|
222 |
def ListNodes(opts, args):
  """List nodes and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)

  output = GetClient().QueryNodes(args, selected_fields, opts.do_locking)

  if opts.no_headers:
    headers = None
  else:
    headers = _LIST_HEADERS

  unitfields = ["dtotal", "dfree", "mtotal", "mnode", "mfree"]

  numfields = ["dtotal", "dfree",
               "mtotal", "mnode", "mfree",
               "pinst_cnt", "sinst_cnt",
               "ctotal", "serial_no"]

  # fields whose raw value is a list of names
  list_type_fields = ("pinst_list", "sinst_list", "tags")
  # fields whose raw value is a boolean flag
  bool_fields = ('master', 'master_candidate', 'offline', 'drained',
                 'master_capable', 'vm_capable')

  # change raw values to nicer strings
  for row in output:
    for idx, field in enumerate(selected_fields):
      val = row[idx]
      if field in list_type_fields:
        val = ",".join(val)
      elif field in bool_fields:
        if val:
          val = 'Y'
        else:
          val = 'N'
      elif field in ("ctime", "mtime"):
        val = utils.FormatTime(val)
      elif val is None:
        val = "?"
      elif opts.roman_integers and isinstance(val, int):
        val = compat.TryToRoman(val)
      row[idx] = str(val)

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=selected_fields, unitfields=unitfields,
                       numfields=numfields, data=output, units=opts.units)
  for line in data:
    ToStdout(line)

  return 0
276 |
|
277 |
|
278 |
def EvacuateNode(opts, args):
  """Relocate all secondary instance from a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  force = opts.force

  dst_node = opts.dst_node
  iallocator = opts.iallocator

  # Ask the master for an evacuation plan: a list of (instance, new
  # secondary node, ...) rows, computed either via the iallocator or
  # from the explicitly given destination node
  op = opcodes.OpNodeEvacuationStrategy(nodes=args,
                                        iallocator=iallocator,
                                        remote_node=dst_node)

  result = SubmitOpCode(op, cl=cl, opts=opts)
  if not result:
    # no instances to migrate
    ToStderr("No secondary instances on node(s) %s, exiting.",
             utils.CommaJoin(args))
    return constants.EXIT_SUCCESS

  if not force and not AskUser("Relocate instance(s) %s from node(s) %s?" %
                               (",".join("'%s'" % name[0] for name in result),
                                utils.CommaJoin(args))):
    return constants.EXIT_CONFIRMATION

  # Submit one replace-disks job per instance and collect all results
  jex = JobExecutor(cl=cl, opts=opts)
  for row in result:
    iname = row[0]
    node = row[1]
    ToStdout("Will relocate instance %s to node %s", iname, node)
    op = opcodes.OpReplaceDisks(instance_name=iname,
                                remote_node=node, disks=[],
                                mode=constants.REPLACE_DISK_CHG,
                                early_release=opts.early_release)
    jex.QueueJob(iname, op)
  results = jex.GetResults()
  # each result row is (success, ...); count the failed jobs
  bad_cnt = len([row for row in results if not row[0]])
  if bad_cnt == 0:
    ToStdout("All %d instance(s) failed over successfully.", len(results))
    rcode = constants.EXIT_SUCCESS
  else:
    ToStdout("There were errors during the failover:\n"
             "%d error(s) out of %d instance(s).", bad_cnt, len(results))
    rcode = constants.EXIT_FAILURE
  return rcode
330 |
|
331 |
|
332 |
def FailoverNode(opts, args):
  """Failover all primary instance on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  force = opts.force
  selected_fields = ["name", "pinst_list"]

  # these fields are static data anyway, so it doesn't matter, but
  # locking=True should be safer
  result = cl.QueryNodes(names=args, fields=selected_fields,
                         use_locking=False)
  node, pinst = result[0]

  if not pinst:
    ToStderr("No primary instances on node %s, exiting.", node)
    return 0

  pinst = utils.NiceSort(pinst)

  if not force and not AskUser("Fail over instance(s) %s?" %
                               (",".join("'%s'" % name for name in pinst))):
    return 2

  # Submit one failover job per primary instance and collect the results
  jex = JobExecutor(cl=cl, opts=opts)
  for iname in pinst:
    op = opcodes.OpFailoverInstance(instance_name=iname,
                                    ignore_consistency=opts.ignore_consistency)
    jex.QueueJob(iname, op)
  results = jex.GetResults()
  bad_cnt = len([row for row in results if not row[0]])
  if bad_cnt == 0:
    ToStdout("All %d instance(s) failed over successfully.", len(results))
    retcode = constants.EXIT_SUCCESS
  else:
    ToStdout("There were errors during the failover:\n"
             "%d error(s) out of %d instance(s).", bad_cnt, len(results))
    # Previously the return code was initialized to 0 and never updated,
    # so the command reported success even when some failovers failed;
    # report failure like L{EvacuateNode} does
    retcode = constants.EXIT_FAILURE
  return retcode
377 |
|
378 |
|
379 |
def MigrateNode(opts, args):
  """Migrate all primary instance on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the node name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  force = opts.force
  selected_fields = ["name", "pinst_list"]

  result = cl.QueryNodes(names=args, fields=selected_fields, use_locking=False)
  node, pinst = result[0]

  if not pinst:
    ToStdout("No primary instances on node %s, exiting." % node)
    return 0

  pinst = utils.NiceSort(pinst)

  # Ask for confirmation unless --force was given
  if not force:
    names = ",".join("'%s'" % name for name in pinst)
    if not AskUser("Migrate instance(s) %s?" % names):
      return 2

  # this should be removed once --non-live is deprecated
  if not opts.live and opts.migration_mode is not None:
    raise errors.OpPrereqError("Only one of the --non-live and "
                               "--migration-mode options can be passed",
                               errors.ECODE_INVAL)
  if opts.live:
    mode = opts.migration_mode
  else:
    # --non-live passed
    mode = constants.HT_MIGRATION_NONLIVE
  op = opcodes.OpMigrateNode(node_name=args[0], mode=mode)
  SubmitOpCode(op, cl=cl, opts=opts)
411 |
|
412 |
|
413 |
def ShowNodeConfig(opts, args):
  """Show node information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we show information about all nodes, or should contain
      a list of nodes to be queried for information
  @rtype: int
  @return: the desired exit code

  """
  fields = ["name", "pip", "sip",
            "pinst_list", "sinst_list",
            "master_candidate", "drained", "offline"]
  result = GetClient().QueryNodes(fields=fields, names=args,
                                  use_locking=False)

  for (name, primary_ip, secondary_ip, pinst, sinst,
       is_mc, drained, offline) in result:
    ToStdout("Node name: %s", name)
    ToStdout("  primary ip: %s", primary_ip)
    ToStdout("  secondary ip: %s", secondary_ip)
    ToStdout("  master candidate: %s", is_mc)
    ToStdout("  drained: %s", drained)
    ToStdout("  offline: %s", offline)
    # the primary and secondary instance lists are printed identically
    for (kind, instances) in [("primary", pinst), ("secondary", sinst)]:
      if instances:
        ToStdout("  %s for instances:" % kind)
        for iname in utils.NiceSort(instances):
          ToStdout("    - %s", iname)
      else:
        ToStdout("  %s for no instances" % kind)

  return 0
453 |
|
454 |
|
455 |
def RemoveNode(opts, args):
  """Remove a node from the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the node to be removed
  @rtype: int
  @return: the desired exit code

  """
  SubmitOpCode(opcodes.OpRemoveNode(node_name=args[0]), opts=opts)
  return 0
469 |
|
470 |
|
471 |
def PowercycleNode(opts, args):
  """Forcefully powercycles a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the node to be powercycled
  @rtype: int
  @return: the desired exit code

  """
  node = args[0]
  # powercycling is destructive, so ask the user unless --yes was given
  if (not opts.confirm and
      not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
    return 2

  op = opcodes.OpPowercycleNode(node_name=node, force=opts.force)
  result = SubmitOpCode(op, opts=opts)
  if result:
    ToStderr(result)
  return 0
492 |
|
493 |
|
494 |
def ListVolumes(opts, args):
  """List logical volumes on node(s).

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we list data for all nodes, or contain a list of nodes
      to display data only for those
  @rtype: int
  @return: the desired exit code

  """
  selected_fields = ParseFields(opts.output, _LIST_VOL_DEF_FIELDS)

  op = opcodes.OpQueryNodeVolumes(nodes=args, output_fields=selected_fields)
  output = SubmitOpCode(op, opts=opts)

  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "phys": "PhysDev",
               "vg": "VG", "name": "Name",
               "size": "Size", "instance": "Instance"}

  # only the size column is numeric and unit-formatted
  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=selected_fields, unitfields=["size"],
                       numfields=["size"], data=output, units=opts.units)

  for line in data:
    ToStdout(line)

  return 0
530 |
|
531 |
|
532 |
def ListStorage(opts, args):
  """List physical volumes on node(s).

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should either be an empty list, in which case
      we list data for all nodes, or contain a list of nodes
      to display data only for those
  @rtype: int
  @return: the desired exit code

  """
  # TODO: Default to ST_FILE if LVM is disabled on the cluster
  if opts.user_storage_type is None:
    opts.user_storage_type = constants.ST_LVM_PV

  storage_type = ConvertStorageType(opts.user_storage_type)

  selected_fields = ParseFields(opts.output, _LIST_STOR_DEF_FIELDS)

  op = opcodes.OpQueryNodeStorage(nodes=args,
                                  storage_type=storage_type,
                                  output_fields=selected_fields)
  output = SubmitOpCode(op, opts=opts)

  if not opts.no_headers:
    # use the module-level header definitions instead of duplicating
    # the same dictionary inline
    headers = _LIST_STOR_HEADERS
  else:
    headers = None

  unitfields = [constants.SF_SIZE, constants.SF_USED, constants.SF_FREE]
  numfields = [constants.SF_SIZE, constants.SF_USED, constants.SF_FREE]

  # change raw values to nicer strings
  for row in output:
    for idx, field in enumerate(selected_fields):
      val = row[idx]
      if field == constants.SF_ALLOCATABLE:
        # the allocatable flag is shown as Y/N
        if val:
          val = "Y"
        else:
          val = "N"
      row[idx] = str(val)

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=selected_fields, unitfields=unitfields,
                       numfields=numfields, data=output, units=opts.units)

  for line in data:
    ToStdout(line)

  return 0
592 |
|
593 |
|
594 |
def ModifyStorage(opts, args):
  """Modify storage volume on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain 3 items: node name, storage type and volume name
  @rtype: int
  @return: the desired exit code

  """
  (node_name, user_storage_type, volume_name) = args

  storage_type = ConvertStorageType(user_storage_type)

  # Collect the requested field changes; currently only the
  # allocatable flag can be modified
  changes = {}
  if opts.allocatable is not None:
    changes[constants.SF_ALLOCATABLE] = opts.allocatable

  if not changes:
    ToStderr("No changes to perform, exiting.")
    return

  op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                   storage_type=storage_type,
                                   name=volume_name,
                                   changes=changes)
  SubmitOpCode(op, opts=opts)
621 |
|
622 |
|
623 |
def RepairStorage(opts, args):
  """Repairs a storage volume on a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain 3 items: node name, storage type and volume name
  @rtype: int
  @return: the desired exit code

  """
  (node_name, user_storage_type, volume_name) = args

  # translate the user-visible storage type to the internal name first,
  # so an unknown type is rejected before submitting the opcode
  storage_type = ConvertStorageType(user_storage_type)

  SubmitOpCode(opcodes.OpRepairNodeStorage(
                 node_name=node_name,
                 storage_type=storage_type,
                 name=volume_name,
                 ignore_consistency=opts.ignore_consistency),
               opts=opts)
642 |
|
643 |
|
644 |
def SetNodeParams(opts, args):
  """Modifies a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the node name
  @rtype: int
  @return: the desired exit code

  """
  # All per-node flags that this command can change; previously
  # master_capable was missing from this check, so using only
  # --master-capable was wrongly rejected even though the opcode
  # supports it (and the 'modify' command exposes CAPAB_MASTER_OPT)
  all_changes = [opts.master_candidate, opts.drained, opts.offline,
                 opts.master_capable]
  if all_changes.count(None) == len(all_changes):
    ToStderr("Please give at least one of the parameters.")
    return 1

  op = opcodes.OpSetNodeParams(node_name=args[0],
                               master_candidate=opts.master_candidate,
                               offline=opts.offline,
                               drained=opts.drained,
                               master_capable=opts.master_capable,
                               force=opts.force,
                               auto_promote=opts.auto_promote)

  # even if here we process the result, we allow submit only
  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Modified node %s", args[0])
    for param, data in result:
      ToStdout(" - %-5s -> %s", param, data)
  return 0
674 |
|
675 |
|
676 |
#: Dispatch table for GenericMain: maps each gnt-node sub-command to
#: (handler function, argument spec, option list, usage string, description)
commands = {
  'add': (
    AddNode, [ArgHost(min=1, max=1)],
    [SECONDARY_IP_OPT, READD_OPT, NOSSH_KEYCHECK_OPT, NONODE_SETUP_OPT,
     VERBOSE_OPT, NODEGROUP_OPT, PRIORITY_OPT],
    "[-s ip] [--readd] [--no-ssh-key-check] [--no-node-setup] [--verbose] "
    " <node_name>",
    "Add a node to the cluster"),
  'evacuate': (
    EvacuateNode, [ArgNode(min=1)],
    [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
     PRIORITY_OPT],
    "[-f] {-I <iallocator> | -n <dst>} <node>",
    "Relocate the secondary instances from a node"
    " to other nodes (only for instances with drbd disk template)"),
  'failover': (
    FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT, PRIORITY_OPT],
    "[-f] <node>",
    "Stops the primary instances on a node and start them on their"
    " secondary node (only for instances with drbd disk template)"),
  'migrate': (
    MigrateNode, ARGS_ONE_NODE,
    [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, PRIORITY_OPT],
    "[-f] <node>",
    "Migrate all the primary instance on a node away from it"
    " (only for instances of type drbd)"),
  'info': (
    ShowNodeConfig, ARGS_MANY_NODES, [],
    "[<node_name>...]", "Show information about the node(s)"),
  'list': (
    ListNodes, ARGS_MANY_NODES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, SYNC_OPT, ROMAN_OPT],
    "[nodes...]",
    "Lists the nodes in the cluster. The available fields are (see the man"
    " page for details): %s. The default field list is (in order): %s." %
    (utils.CommaJoin(_LIST_HEADERS), utils.CommaJoin(_LIST_DEF_FIELDS))),
  'modify': (
    SetNodeParams, ARGS_ONE_NODE,
    [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT, CAPAB_MASTER_OPT,
     AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name>", "Alters the parameters of a node"),
  'powercycle': (
    PowercycleNode, ARGS_ONE_NODE,
    [FORCE_OPT, CONFIRM_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name>", "Tries to forcefully powercycle a node"),
  'remove': (
    RemoveNode, ARGS_ONE_NODE, [DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name>", "Removes a node from the cluster"),
  'volumes': (
    ListVolumes, [ArgNode()],
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, PRIORITY_OPT],
    "[<node_name>...]", "List logical volumes on node(s)"),
  'list-storage': (
    ListStorage, ARGS_MANY_NODES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT,
     PRIORITY_OPT],
    "[<node_name>...]", "List physical volumes on node(s). The available"
    " fields are (see the man page for details): %s." %
    (utils.CommaJoin(_LIST_STOR_HEADERS))),
  'modify-storage': (
    ModifyStorage,
    [ArgNode(min=1, max=1),
     ArgChoice(min=1, max=1, choices=_MODIFIABLE_STORAGE_TYPES),
     ArgFile(min=1, max=1)],
    [ALLOCATABLE_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name> <storage_type> <name>", "Modify storage volume on a node"),
  'repair-storage': (
    RepairStorage,
    [ArgNode(min=1, max=1),
     ArgChoice(min=1, max=1, choices=_REPAIRABLE_STORAGE_TYPES),
     ArgFile(min=1, max=1)],
    [IGNORE_CONSIST_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<node_name> <storage_type> <name>",
    "Repairs a storage volume on a node"),
  'list-tags': (
    ListTags, ARGS_ONE_NODE, [],
    "<node_name>", "List the tags of the given node"),
  'add-tags': (
    AddTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "<node_name> tag...", "Add tags to the given node"),
  'remove-tags': (
    RemoveTags, [ArgNode(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT],
    "<node_name> tag...", "Remove tags from the given node"),
  }
761 |
|
762 |
|
763 |
if __name__ == '__main__':
  # the override makes the generic tag commands operate on node tags
  sys.exit(GenericMain(commands, override={"tag_type": constants.TAG_NODE}))