root / scripts / gnt-cluster @ 2f79bd34
History | View | Annotate | Download (19.4 kB)
1 |
#!/usr/bin/python |
---|---|
2 |
# |
3 |
|
4 |
# Copyright (C) 2006, 2007 Google Inc. |
5 |
# |
6 |
# This program is free software; you can redistribute it and/or modify |
7 |
# it under the terms of the GNU General Public License as published by |
8 |
# the Free Software Foundation; either version 2 of the License, or |
9 |
# (at your option) any later version. |
10 |
# |
11 |
# This program is distributed in the hope that it will be useful, but |
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 |
# General Public License for more details. |
15 |
# |
16 |
# You should have received a copy of the GNU General Public License |
17 |
# along with this program; if not, write to the Free Software |
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
19 |
# 02110-1301, USA. |
20 |
|
21 |
|
22 |
# pylint: disable-msg=W0401,W0614 |
23 |
# W0401: Wildcard import ganeti.cli |
24 |
# W0614: Unused import %s from wildcard import (since we need cli) |
25 |
|
26 |
import sys |
27 |
from optparse import make_option |
28 |
import os.path |
29 |
|
30 |
from ganeti.cli import * |
31 |
from ganeti import opcodes |
32 |
from ganeti import constants |
33 |
from ganeti import errors |
34 |
from ganeti import utils |
35 |
from ganeti import bootstrap |
36 |
from ganeti import ssh |
37 |
|
38 |
|
39 |
def InitCluster(opts, args):
  """Initialize the cluster.

  Validates the option combination, completes the hypervisor and backend
  parameter dictionaries with cluster defaults, and delegates the actual
  cluster creation to bootstrap.InitCluster.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [clustername]

  Returns:
    0 on success, 1 on invalid options or parameters

  """
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    # lvm enabled but no explicit group given: use the cluster default
    vg_name = constants.DEFAULT_VG

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")
  else:
    # NOTE(review): assumes this constant is a sequence of hypervisor
    # names; if it is a single name (a string), the validation loop
    # below would iterate over its characters -- confirm in constants.py
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR

  hvparams = opts.hvparams
  if hvparams:
    # a list of (name, dict) we can pass directly to dict()
    hvparams = dict(opts.hvparams)
  else:
    # otherwise init as empty dict
    hvparams = {}

  beparams = opts.beparams
  # check for invalid parameters
  for parameter in beparams:
    if parameter not in constants.BES_PARAMETERS:
      # report on stderr, consistently with the other error paths here
      ToStderr("Invalid backend parameter: %s", parameter)
      return 1

  # fill any missing backend parameter with its cluster-wide default
  for parameter in constants.BES_PARAMETERS:
    if parameter not in beparams:
      beparams[parameter] = constants.BEC_DEFAULTS[parameter]

  # type wrangling: the command line hands us strings
  try:
    beparams[constants.BE_VCPUS] = int(beparams[constants.BE_VCPUS])
  except ValueError:
    ToStderr("%s must be an integer", constants.BE_VCPUS)
    return 1

  beparams[constants.BE_MEMORY] = utils.ParseUnit(beparams[constants.BE_MEMORY])

  # prepare hvparams dict: complete every hypervisor with its defaults
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    for parameter in constants.HVC_DEFAULTS[hv]:
      if parameter not in hvparams[hv]:
        hvparams[hv][parameter] = constants.HVC_DEFAULTS[hv][parameter]

  for hv in hvlist:
    if hv not in constants.HYPER_TYPES:
      ToStderr("invalid hypervisor: %s", hv)
      return 1

  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        def_bridge=opts.def_bridge,
                        master_netdev=opts.master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams)
  return 0
114 |
|
115 |
|
116 |
def DestroyCluster(opts, args):
  """Destroy the cluster.

  Refuses to act unless the --yes-do-it safety option was supplied, then
  submits the destroy opcode and shuts down the daemons on the master.

  Args:
    opts - class with options as members

  Returns:
    0 on success, 1 if the safety option was not given

  """
  if not opts.yes_do_it:
    # message grammar fixed: "really want to destroy"
    ToStderr("Destroying a cluster is irreversible. If you really want to"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpDestroyCluster()
  master = SubmitOpCode(op)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
134 |
|
135 |
|
136 |
def RenameCluster(opts, args):
  """Rename the cluster.

  Unless --force was given, asks the user for confirmation first, since
  renaming moves the cluster IP address off the current name.

  Args:
    opts - class with options as members, we use force only
    args - list of arguments, expected to be [new_name]

  """
  new_name = args[0]

  if not opts.force:
    warning = ("This will rename the cluster to '%s'. If you are connected"
               " over the network to the cluster name, the operation is very"
               " dangerous as the IP address will be removed from the node"
               " and the change may not go through. Continue?") % new_name
    if not AskUser(warning):
      return 1

  SubmitOpCode(opcodes.OpRenameCluster(name=new_name))
  return 0
156 |
|
157 |
|
158 |
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  Args:
    opts - class with options as members

  """
  info = SubmitOpCode(opcodes.OpQueryClusterInfo())
  # (label, result key) pairs, printed in this fixed order
  for label, key in (("Software version", "software_version"),
                     ("Internode protocol", "protocol_version"),
                     ("Configuration format", "config_version"),
                     ("OS api version", "os_api_version"),
                     ("Export interface", "export_version")):
    ToStdout("%s: %s", label, info[key])
  return 0
173 |
|
174 |
|
175 |
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  Args:
    opts - class with options as members

  """
  # single-value config query; the result is a one-element list
  master_name = GetClient().QueryConfigValues(["master_node"])[0]
  ToStdout("%s", master_name)
  return 0
184 |
|
185 |
|
186 |
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  Prints the cluster name, master, architecture, hypervisor setup and
  the per-group hypervisor/backend parameters.

  """
  def _PrintGroupedParams(groups):
    # one "- name:" header per group, then its key/value pairs
    for group_name, group_params in groups.items():
      ToStdout(" - %s:", group_name)
      for key, value in group_params.iteritems():
        ToStdout(" %s: %s", key, value)

  result = SubmitOpCode(opcodes.OpQueryClusterInfo())

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Master node: %s", result["master"])
  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])
  ToStdout("Default hypervisor: %s", result["hypervisor_type"])
  ToStdout("Enabled hypervisors: %s", ", ".join(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("Cluster parameters:")
  _PrintGroupedParams(result["beparams"])

  return 0
216 |
|
217 |
|
218 |
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  Args:
    opts - class with options as members
    args - list containing a single element, the file name
  Opts used:
    nodes - list containing the name of target nodes; if empty, all nodes

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename)

  cl = GetClient()
  my_name = utils.HostInfo().name
  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  # resolve the requested node names, leaving out this (master) node
  op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  targets = [row[0] for row in SubmitOpCode(op, cl=cl) if row[0] != my_name]

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in targets:
    # failures are reported but do not abort the remaining copies
    if not srun.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
247 |
|
248 |
|
249 |
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  Args:
    opts - class with options as members
    args - the command list as a list
  Opts used:
    nodes: list containing the name of target nodes; if empty, all nodes

  """
  cl = GetClient()
  command = " ".join(args)

  node_query = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  node_list = [row[0] for row in SubmitOpCode(node_query, cl=cl)]

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # run on the master last, so e.g. a shutdown command reaches the
  # other nodes before it takes out the node we are driving this from
  if master_node in node_list:
    node_list.remove(master_node)
    node_list.append(master_node)

  for node_name in node_list:
    cmd_result = srun.Run(node_name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", node_name)
    ToStdout("%s", cmd_result.output)
    ToStdout("return code = %s", cmd_result.exit_code)

  return 0
283 |
|
284 |
|
285 |
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  Args:
    opts - class with options as members

  """
  skip_checks = []
  if opts.skip_nplusone_mem:
    # the N+1 memory redundancy test can be expensive; allow skipping it
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  verify_result = SubmitOpCode(opcodes.OpVerifyCluster(skip_checks=skip_checks))
  if not verify_result:
    return 1
  return 0
300 |
|
301 |
|
302 |
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  Submits OpVerifyDisks, reports the four result groups (bad nodes, LVM
  errors, offline instances, instances with missing volumes) and tries to
  re-activate disks for the fixable instances.

  Args:
    opts - class with options as members

  """
  op = opcodes.OpVerifyDisks()
  result = SubmitOpCode(op)
  # the opcode is expected to return exactly four result groups
  if not isinstance(result, (list, tuple)) or len(result) != 4:
    raise errors.ProgrammerError("Unknown result type for OpVerifyDisks")

  nodes, nlvm, instances, missing = result

  if nodes:
    ToStdout("Nodes unreachable or with bad data:")
    for name in nodes:
      ToStdout("\t%s", name)
  retcode = constants.EXIT_SUCCESS

  if nlvm:
    for node, text in nlvm.iteritems():
      # only show the tail of the (possibly long) LVM error output
      ToStdout("Error on node %s: LVM error: %s",
               node, text[-400:].encode('string_escape'))
      retcode |= 1
      ToStdout("You need to fix these nodes first before fixing instances")

  if instances:
    for iname in instances:
      if iname in missing:
        # instances with missing volumes are handled separately below
        continue
      op = opcodes.OpActivateInstanceDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op)
      except errors.GenericError, err:
        # fold the error's exit code into our own and keep going
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

  if missing:
    for iname, ival in missing.iteritems():
      # volumes living on a node that itself has LVM errors cannot be
      # checked reliably
      all_missing = utils.all(ival, lambda x: x[0] in nlvm)
      if all_missing:
        ToStdout("Instance %s cannot be verified as it lives on"
                 " broken nodes", iname)
      else:
        ToStdout("Instance %s has missing logical volumes:", iname)
        ival.sort()
        for node, vol in ival:
          if node in nlvm:
            ToStdout("\tbroken node %s /dev/xenvg/%s", node, vol)
          else:
            ToStdout("\t%s /dev/xenvg/%s", node, vol)
    ToStdout("You need to run replace_disks for all the above"
             " instances, if this message persist after fixing nodes.")
    retcode |= 1

  return retcode
361 |
|
362 |
|
363 |
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  """
  # all the work (and the resulting exit code) is in the bootstrap library
  return bootstrap.MasterFailover()
372 |
|
373 |
|
374 |
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [pattern]

  Returns:
    0 if matches were found and printed, 1 if nothing matched

  """
  op = opcodes.OpSearchTags(pattern=args[0])
  result = SubmitOpCode(op)
  if not result:
    return 1
  # sort for stable, readable output
  for path, tag in sorted(result):
    ToStdout("%s %s", path, tag)
  # explicit success exit code, consistent with the other handlers
  return 0
386 |
|
387 |
|
388 |
def SetClusterParams(opts, args):
  """Modify the cluster.

  Requires at least one parameter option; validates the option
  combination and submits an OpSetClusterParams opcode.

  Args:
    opts - class with options as members

  Returns:
    0 on success, 1 on invalid option combinations

  """
  if not (not opts.lvm_storage or opts.vg_name or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams):
    ToStderr("Please give at least one of the parameters.")
    return 1

  if not opts.lvm_storage and opts.vg_name:
    # error messages belong on stderr (InitCluster already does this
    # for the identical check)
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  hvparams = opts.hvparams
  if hvparams:
    # a list of (name, dict) we can pass directly to dict()
    hvparams = dict(opts.hvparams)

  beparams = opts.beparams

  op = opcodes.OpSetClusterParams(vg_name=opts.vg_name,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  beparams=beparams)
  SubmitOpCode(op)
  return 0
423 |
|
424 |
|
425 |
def QueueOps(opts, args):
  """Queue operations.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [drain|undrain|info]

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    # pass val as a formatting argument, consistent with every other
    # ToStdout call in this file (identical output)
    ToStdout("The drain flag is %s", val)
  return 0
442 |
|
443 |
# this is an option common to more than one command, so we declare
# it here and reuse it
# -n/--node appends into opts.nodes; an empty list means "all nodes"
node_option = make_option("-n", "--node", action="append", dest="nodes",
                          help="Node to copy to (if not given, all nodes),"
                          " can be given multiple times",
                          metavar="<node>", default=[])
449 |
|
450 |
# command dispatch table consumed by GenericMain below: each entry maps a
# CLI sub-command name to a tuple of (handler function, argument spec,
# option list, usage synopsis, one-line description)
commands = {
  'init': (InitCluster, ARGS_ONE,
           [DEBUG_OPT,
            make_option("-s", "--secondary-ip", dest="secondary_ip",
                        help="Specify the secondary ip for this node;"
                        " if given, the entire cluster must have secondary"
                        " addresses",
                        metavar="ADDRESS", default=None),
            make_option("-m", "--mac-prefix", dest="mac_prefix",
                        help="Specify the mac prefix for the instance IP"
                        " addresses, in the format XX:XX:XX",
                        metavar="PREFIX",
                        default="aa:00:00",),
            make_option("-g", "--vg-name", dest="vg_name",
                        help="Specify the volume group name "
                        " (cluster-wide) for disk allocation [xenvg]",
                        metavar="VG",
                        default=None,),
            make_option("-b", "--bridge", dest="def_bridge",
                        help="Specify the default bridge name (cluster-wide)"
                        " to connect the instances to [%s]" %
                        constants.DEFAULT_BRIDGE,
                        metavar="BRIDGE",
                        default=constants.DEFAULT_BRIDGE,),
            make_option("--master-netdev", dest="master_netdev",
                        help="Specify the node interface (cluster-wide)"
                        " on which the master IP address will be added "
                        " [%s]" % constants.DEFAULT_BRIDGE,
                        metavar="NETDEV",
                        default=constants.DEFAULT_BRIDGE,),
            make_option("--file-storage-dir", dest="file_storage_dir",
                        help="Specify the default directory (cluster-wide)"
                        " for storing the file-based disks [%s]" %
                        constants.DEFAULT_FILE_STORAGE_DIR,
                        metavar="DIR",
                        default=constants.DEFAULT_FILE_STORAGE_DIR,),
            make_option("--no-lvm-storage", dest="lvm_storage",
                        help="No support for lvm based instances"
                        " (cluster-wide)",
                        action="store_false", default=True,),
            make_option("--enabled-hypervisors", dest="enabled_hypervisors",
                        help="Comma-separated list of hypervisors",
                        type="string", default=None),
            ikv_option("-H", "--hypervisor-parameters", dest="hvparams",
                       help="Hypervisor and hypervisor options, in the"
                       " format"
                       " hypervisor:option=value,option=value,...",
                       default=[],
                       action="append",
                       type="identkeyval"),
            keyval_option("-B", "--backend-parameters", dest="beparams",
                          type="keyval", default={},
                          help="Backend parameters"),
            ],
           "[opts...] <cluster_name>",
           "Initialises a new cluster configuration"),
  'destroy': (DestroyCluster, ARGS_NONE,
              [DEBUG_OPT,
               make_option("--yes-do-it", dest="yes_do_it",
                           help="Destroy cluster",
                           action="store_true"),
               ],
              "", "Destroy cluster"),
  'rename': (RenameCluster, ARGS_ONE, [DEBUG_OPT, FORCE_OPT],
             "<new_name>",
             "Renames the cluster"),
  'verify': (VerifyCluster, ARGS_NONE, [DEBUG_OPT,
             make_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                         help="Skip N+1 memory redundancy tests",
                         action="store_true",
                         default=False,),
             ],
             "", "Does a check on the cluster configuration"),
  'verify-disks': (VerifyDisks, ARGS_NONE, [DEBUG_OPT],
                   "", "Does a check on the cluster disk status"),
  'masterfailover': (MasterFailover, ARGS_NONE, [DEBUG_OPT],
                     "", "Makes the current node the master"),
  'version': (ShowClusterVersion, ARGS_NONE, [DEBUG_OPT],
              "", "Shows the cluster version"),
  'getmaster': (ShowClusterMaster, ARGS_NONE, [DEBUG_OPT],
                "", "Shows the cluster master"),
  'copyfile': (ClusterCopyFile, ARGS_ONE, [DEBUG_OPT, node_option],
               "[-n node...] <filename>",
               "Copies a file to all (or only some) nodes"),
  'command': (RunClusterCommand, ARGS_ATLEAST(1), [DEBUG_OPT, node_option],
              "[-n node...] <command>",
              "Runs a command on all (or only some) nodes"),
  'info': (ShowClusterConfig, ARGS_NONE, [DEBUG_OPT],
           "", "Show cluster configuration"),
  'list-tags': (ListTags, ARGS_NONE,
                [DEBUG_OPT], "", "List the tags of the cluster"),
  'add-tags': (AddTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
               "tag...", "Add tags to the cluster"),
  'remove-tags': (RemoveTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
                  "tag...", "Remove tags from the cluster"),
  'search-tags': (SearchTags, ARGS_ONE,
                  [DEBUG_OPT], "", "Searches the tags on all objects on"
                  " the cluster for a given pattern (regex)"),
  'queue': (QueueOps, ARGS_ONE, [DEBUG_OPT],
            "drain|undrain|info", "Change queue properties"),
  'modify': (SetClusterParams, ARGS_NONE,
             [DEBUG_OPT,
              make_option("-g", "--vg-name", dest="vg_name",
                          help="Specify the volume group name "
                          " (cluster-wide) for disk allocation "
                          "and enable lvm based storage",
                          metavar="VG",),
              make_option("--no-lvm-storage", dest="lvm_storage",
                          help="Disable support for lvm based instances"
                          " (cluster-wide)",
                          action="store_false", default=True,),
              make_option("--enabled-hypervisors", dest="enabled_hypervisors",
                          help="Comma-separated list of hypervisors",
                          type="string", default=None),
              ikv_option("-H", "--hypervisor-parameters", dest="hvparams",
                         help="Hypervisor and hypervisor options, in the"
                         " format"
                         " hypervisor:option=value,option=value,...",
                         default=[],
                         action="append",
                         type="identkeyval"),
              keyval_option("-B", "--backend-parameters", dest="beparams",
                            type="keyval", default={},
                            help="Backend parameters"),
              ],
             "[opts...]",
             "Alters the parameters of the cluster"),
  }
578 |
|
579 |
if __name__ == '__main__':
  # generic CLI driver; the override makes the tag sub-commands operate
  # on cluster-level tags
  sys.exit(GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER}))