root / scripts / gnt-cluster @ 3a24c527
History | View | Annotate | Download (19.3 kB)
1 |
#!/usr/bin/python |
---|---|
2 |
# |
3 |
|
4 |
# Copyright (C) 2006, 2007 Google Inc. |
5 |
# |
6 |
# This program is free software; you can redistribute it and/or modify |
7 |
# it under the terms of the GNU General Public License as published by |
8 |
# the Free Software Foundation; either version 2 of the License, or |
9 |
# (at your option) any later version. |
10 |
# |
11 |
# This program is distributed in the hope that it will be useful, but |
12 |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 |
# General Public License for more details. |
15 |
# |
16 |
# You should have received a copy of the GNU General Public License |
17 |
# along with this program; if not, write to the Free Software |
18 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
19 |
# 02110-1301, USA. |
20 |
|
21 |
|
22 |
import sys |
23 |
from optparse import make_option |
24 |
import os.path |
25 |
|
26 |
from ganeti.cli import * |
27 |
from ganeti import opcodes |
28 |
from ganeti import constants |
29 |
from ganeti import errors |
30 |
from ganeti import utils |
31 |
from ganeti import bootstrap |
32 |
from ganeti import ssh |
33 |
from ganeti import ssconf |
34 |
|
35 |
|
36 |
def InitCluster(opts, args):
  """Initialize the cluster.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [clustername]

  """
  # --no-lvm-storage and an explicit volume group are mutually exclusive
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    vg_name = constants.DEFAULT_VG

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")
  else:
    # NOTE(review): if DEFAULT_ENABLED_HYPERVISOR is a plain string, the
    # membership loop below would iterate its characters — verify against
    # constants.py whether this should be wrapped in a list
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR

  hvparams = opts.hvparams
  if hvparams:
    # a list of (name, dict) we can pass directly to dict()
    hvparams = dict(opts.hvparams)
  else:
    # otherwise init as empty dict
    hvparams = {}

  beparams = opts.beparams
  # check for invalid parameters; errors go to stderr via ToStderr,
  # consistent with the rest of this script (was a bare py2 "print")
  for parameter in beparams:
    if parameter not in constants.BES_PARAMETERS:
      ToStderr("Invalid backend parameter: %s", parameter)
      return 1

  # prepare beparams dict: fill cluster-wide defaults for missing keys
  for parameter in constants.BES_PARAMETERS:
    if parameter not in beparams:
      beparams[parameter] = constants.BEC_DEFAULTS[parameter]

  # type wrangling
  try:
    beparams[constants.BE_VCPUS] = int(beparams[constants.BE_VCPUS])
  except ValueError:
    ToStderr("%s must be an integer", constants.BE_VCPUS)
    return 1

  beparams[constants.BE_MEMORY] = utils.ParseUnit(beparams[constants.BE_MEMORY])

  # prepare hvparams dict: fill defaults for every known hypervisor
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    for parameter in constants.HVC_DEFAULTS[hv]:
      if parameter not in hvparams[hv]:
        hvparams[hv][parameter] = constants.HVC_DEFAULTS[hv][parameter]

  for hv in hvlist:
    if hv not in constants.HYPER_TYPES:
      ToStderr("invalid hypervisor: %s", hv)
      return 1

  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        def_bridge=opts.def_bridge,
                        master_netdev=opts.master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams)
  return 0
111 |
|
112 |
|
113 |
def DestroyCluster(opts, args):
  """Destroy the cluster.

  Args:
    opts - class with options as members

  """
  if not opts.yes_do_it:
    # fixed grammar in the warning: "really want destroy" -> "really
    # want to destroy"
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " to destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpDestroyCluster()
  master = SubmitOpCode(op)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
131 |
|
132 |
|
133 |
def RenameCluster(opts, args):
  """Rename the cluster.

  Args:
    opts - class with options as members, we use force only
    args - list of arguments, expected to be [new_name]

  """
  new_name = args[0]

  # unless forced, warn the user about the risk of renaming while
  # connected via the cluster address
  if not opts.force:
    warning = ("This will rename the cluster to '%s'. If you are connected"
               " over the network to the cluster name, the operation is very"
               " dangerous as the IP address will be removed from the node"
               " and the change may not go through. Continue?") % new_name
    if not AskUser(warning):
      return 1

  SubmitOpCode(opcodes.OpRenameCluster(name=new_name))
  return 0
153 |
|
154 |
|
155 |
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  Args:
    opts - class with options as members

  """
  # query the master for the cluster-wide version information
  info = SubmitOpCode(opcodes.OpQueryClusterInfo())
  ToStdout("Software version: %s", info["software_version"])
  ToStdout("Internode protocol: %s", info["protocol_version"])
  ToStdout("Configuration format: %s", info["config_version"])
  ToStdout("OS api version: %s", info["os_api_version"])
  ToStdout("Export interface: %s", info["export_version"])
  return 0
170 |
|
171 |
|
172 |
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  Args:
    opts - class with options as members

  """
  client = GetClient()
  # QueryConfigValues returns a list, one entry per requested field
  (master,) = client.QueryConfigValues(["master_node"])
  ToStdout("%s", master)
  return 0
181 |
|
182 |
|
183 |
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  Args:
    opts - class with options as members

  """
  op = opcodes.OpQueryClusterInfo()
  result = SubmitOpCode(op)

  ToStdout("Cluster name: %s", result["name"])

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  ToStdout("Default hypervisor: %s", result["hypervisor_type"])
  ToStdout("Enabled hypervisors: %s", ", ".join(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  for hv_name, hv_dict in result["hvparams"].items():
    ToStdout(" - %s:", hv_name)
    # use .items() here too; the inner loops used .iteritems(),
    # inconsistent with the outer loops in this same function
    for item, val in hv_dict.items():
      ToStdout(" %s: %s", item, val)

  ToStdout("Cluster parameters:")
  for gr_name, gr_dict in result["beparams"].items():
    ToStdout(" - %s:", gr_name)
    for item, val in gr_dict.items():
      ToStdout(" %s: %s", item, val)

  return 0
213 |
|
214 |
|
215 |
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  Args:
    opts - class with options as members
    args - list containing a single element, the file name
  Opts used:
    nodes - list containing the name of target nodes; if empty, all nodes

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename)

  cl = GetClient()

  myname = utils.HostInfo().name

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  # resolve the target node list, leaving out the node we run on
  op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  targets = []
  for row in SubmitOpCode(op, cl=cl):
    if row[0] != myname:
      targets.append(row[0])

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in targets:
    if not srun.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
244 |
|
245 |
|
246 |
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  Args:
    opts - class with options as members
    args - the command list as a list
  Opts used:
    nodes: list containing the name of target nodes; if empty, all nodes

  """
  client = GetClient()

  cmd = " ".join(args)
  query = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  node_list = [row[0] for row in SubmitOpCode(query, cl=client)]

  cluster_name, master_node = client.QueryConfigValues(["cluster_name",
                                                        "master_node"])

  runner = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end, i.e. the command runs on it
  # last, so a disruptive command doesn't cut us off from the others
  if master_node in node_list:
    node_list.remove(master_node)
    node_list.append(master_node)

  for node in node_list:
    result = runner.Run(node, "root", cmd)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", node)
    ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
280 |
|
281 |
|
282 |
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  Args:
    opts - class with options as members

  """
  # collect the names of checks the user asked to skip
  skip = []
  if opts.skip_nplusone_mem:
    skip.append(constants.VERIFY_NPLUSONE_MEM)

  verify_ok = SubmitOpCode(opcodes.OpVerifyCluster(skip_checks=skip))
  if not verify_ok:
    return 1
  return 0
297 |
|
298 |
|
299 |
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  Args:
    opts - class with options as members

  """
  op = opcodes.OpVerifyDisks()
  result = SubmitOpCode(op)
  # the opcode must return a 4-tuple; anything else means this client
  # and the master are out of sync
  if not isinstance(result, (list, tuple)) or len(result) != 4:
    raise errors.ProgrammerError("Unknown result type for OpVerifyDisks")

  # nodes: unreachable/bad-data nodes; nlvm: node -> LVM error text;
  # instances: instances whose disks need (re)activation;
  # missing: instance -> list of (node, volume) pairs not found
  nodes, nlvm, instances, missing = result

  if nodes:
    ToStdout("Nodes unreachable or with bad data:")
    for name in nodes:
      ToStdout("\t%s", name)
  retcode = constants.EXIT_SUCCESS

  if nlvm:
    for node, text in nlvm.iteritems():
      # only show the tail of a potentially very long error message,
      # escaped so control characters don't mangle the terminal
      ToStdout("Error on node %s: LVM error: %s",
               node, text[-400:].encode('string_escape'))
      retcode |= 1
      ToStdout("You need to fix these nodes first before fixing instances")

  if instances:
    for iname in instances:
      # skip instances with missing volumes; they are handled below
      if iname in missing:
        continue
      op = opcodes.OpActivateInstanceDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op)
      except errors.GenericError, err:
        # fold the per-instance failure into the overall exit code but
        # keep going for the remaining instances
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

  if missing:
    for iname, ival in missing.iteritems():
      # if every missing volume lives on a broken node, nothing can be
      # verified or fixed for this instance right now
      all_missing = utils.all(ival, lambda x: x[0] in nlvm)
      if all_missing:
        ToStdout("Instance %s cannot be verified as it lives on"
                 " broken nodes", iname)
      else:
        ToStdout("Instance %s has missing logical volumes:", iname)
        ival.sort()
        for node, vol in ival:
          if node in nlvm:
            ToStdout("\tbroken node %s /dev/xenvg/%s", node, vol)
          else:
            ToStdout("\t%s /dev/xenvg/%s", node, vol)
    ToStdout("You need to run replace_disks for all the above"
             " instances, if this message persist after fixing nodes.")
    retcode |= 1

  return retcode
358 |
|
359 |
|
360 |
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  Args:
    opts - class with options as members (unused here)
    args - list of arguments (unused here)

  """
  # all the actual work lives in the bootstrap module; its return
  # value becomes our exit code
  return bootstrap.MasterFailover()
369 |
|
370 |
|
371 |
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [pattern]

  """
  op = opcodes.OpSearchTags(pattern=args[0])
  result = SubmitOpCode(op)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)
  # explicit success exit code, consistent with the sibling commands
  # (previously fell off the end and returned None)
  return 0
383 |
|
384 |
|
385 |
def SetClusterParams(opts, args):
  """Modify the cluster.

  Args:
    opts - class with options as members

  """
  # require at least one modification, otherwise the call is pointless
  if not (not opts.lvm_storage or opts.vg_name or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    # error messages belong on stderr (was ToStdout), matching InitCluster
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1
  # NOTE(review): when --no-lvm-storage is given alone, vg_name stays
  # None and no volume-group change reaches the opcode — verify that
  # OpSetClusterParams actually disables lvm storage in that case

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  hvparams = opts.hvparams
  if hvparams:
    # a list of (name, dict) we can pass directly to dict()
    hvparams = dict(opts.hvparams)

  beparams = opts.beparams

  # pass the local vg_name (same value as opts.vg_name today); the
  # original passed opts.vg_name and left the local variable dead
  op = opcodes.OpSetClusterParams(vg_name=vg_name,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  beparams=beparams)
  SubmitOpCode(op)
  return 0
420 |
|
421 |
|
422 |
def QueueOps(opts, args):
  """Queue operations.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [drain|undrain|info]

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    # pass val as a lazy argument instead of inline %-formatting,
    # consistent with every other ToStdout call in this script
    ToStdout("The drain flag is %s", val)
  return 0
439 |
|
440 |
# this is an option common to more than one command, so we declare
# it here and reuse it
node_option = make_option("-n", "--node", action="append", dest="nodes",
                          help="Node to copy to (if not given, all nodes),"
                          " can be given multiple times",
                          metavar="<node>", default=[])

# command name -> (handler, argument spec, option list, usage string,
# description); this table is consumed by GenericMain below
commands = {
  'init': (InitCluster, ARGS_ONE,
           [DEBUG_OPT,
            make_option("-s", "--secondary-ip", dest="secondary_ip",
                        help="Specify the secondary ip for this node;"
                        " if given, the entire cluster must have secondary"
                        " addresses",
                        metavar="ADDRESS", default=None),
            make_option("-m", "--mac-prefix", dest="mac_prefix",
                        help="Specify the mac prefix for the instance IP"
                        " addresses, in the format XX:XX:XX",
                        metavar="PREFIX",
                        default="aa:00:00",),
            make_option("-g", "--vg-name", dest="vg_name",
                        help="Specify the volume group name "
                        " (cluster-wide) for disk allocation [xenvg]",
                        metavar="VG",
                        default=None,),
            make_option("-b", "--bridge", dest="def_bridge",
                        help="Specify the default bridge name (cluster-wide)"
                        " to connect the instances to [%s]" %
                        constants.DEFAULT_BRIDGE,
                        metavar="BRIDGE",
                        default=constants.DEFAULT_BRIDGE,),
            make_option("--master-netdev", dest="master_netdev",
                        help="Specify the node interface (cluster-wide)"
                        " on which the master IP address will be added "
                        " [%s]" % constants.DEFAULT_BRIDGE,
                        metavar="NETDEV",
                        default=constants.DEFAULT_BRIDGE,),
            make_option("--file-storage-dir", dest="file_storage_dir",
                        help="Specify the default directory (cluster-wide)"
                        " for storing the file-based disks [%s]" %
                        constants.DEFAULT_FILE_STORAGE_DIR,
                        metavar="DIR",
                        default=constants.DEFAULT_FILE_STORAGE_DIR,),
            make_option("--no-lvm-storage", dest="lvm_storage",
                        help="No support for lvm based instances"
                        " (cluster-wide)",
                        action="store_false", default=True,),
            make_option("--enabled-hypervisors", dest="enabled_hypervisors",
                        help="Comma-separated list of hypervisors",
                        type="string", default=None),
            ikv_option("-H", "--hypervisor-parameters", dest="hvparams",
                       help="Hypervisor and hypervisor options, in the"
                       " format"
                       " hypervisor:option=value,option=value,...",
                       default=[],
                       action="append",
                       type="identkeyval"),
            keyval_option("-B", "--backend-parameters", dest="beparams",
                          type="keyval", default={},
                          help="Backend parameters"),
            ],
           "[opts...] <cluster_name>",
           "Initialises a new cluster configuration"),
  'destroy': (DestroyCluster, ARGS_NONE,
              [DEBUG_OPT,
               make_option("--yes-do-it", dest="yes_do_it",
                           help="Destroy cluster",
                           action="store_true"),
              ],
              "", "Destroy cluster"),
  'rename': (RenameCluster, ARGS_ONE, [DEBUG_OPT, FORCE_OPT],
             "<new_name>",
             "Renames the cluster"),
  'verify': (VerifyCluster, ARGS_NONE, [DEBUG_OPT,
             make_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                         help="Skip N+1 memory redundancy tests",
                         action="store_true",
                         default=False,),
             ],
             "", "Does a check on the cluster configuration"),
  'verify-disks': (VerifyDisks, ARGS_NONE, [DEBUG_OPT],
                   "", "Does a check on the cluster disk status"),
  'masterfailover': (MasterFailover, ARGS_NONE, [DEBUG_OPT],
                     "", "Makes the current node the master"),
  'version': (ShowClusterVersion, ARGS_NONE, [DEBUG_OPT],
              "", "Shows the cluster version"),
  'getmaster': (ShowClusterMaster, ARGS_NONE, [DEBUG_OPT],
                "", "Shows the cluster master"),
  'copyfile': (ClusterCopyFile, ARGS_ONE, [DEBUG_OPT, node_option],
               "[-n node...] <filename>",
               "Copies a file to all (or only some) nodes"),
  'command': (RunClusterCommand, ARGS_ATLEAST(1), [DEBUG_OPT, node_option],
              "[-n node...] <command>",
              "Runs a command on all (or only some) nodes"),
  'info': (ShowClusterConfig, ARGS_NONE, [DEBUG_OPT],
           "", "Show cluster configuration"),
  # the tag handlers (ListTags/AddTags/RemoveTags) come from ganeti.cli
  # via the star import at the top of the file
  'list-tags': (ListTags, ARGS_NONE,
                [DEBUG_OPT], "", "List the tags of the cluster"),
  'add-tags': (AddTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
               "tag...", "Add tags to the cluster"),
  'remove-tags': (RemoveTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
                  "tag...", "Remove tags from the cluster"),
  'search-tags': (SearchTags, ARGS_ONE,
                  [DEBUG_OPT], "", "Searches the tags on all objects on"
                  " the cluster for a given pattern (regex)"),
  'queue': (QueueOps, ARGS_ONE, [DEBUG_OPT],
            "drain|undrain|info", "Change queue properties"),
  'modify': (SetClusterParams, ARGS_NONE,
             [DEBUG_OPT,
              make_option("-g", "--vg-name", dest="vg_name",
                          help="Specify the volume group name "
                          " (cluster-wide) for disk allocation "
                          "and enable lvm based storage",
                          metavar="VG",),
              make_option("--no-lvm-storage", dest="lvm_storage",
                          help="Disable support for lvm based instances"
                          " (cluster-wide)",
                          action="store_false", default=True,),
              make_option("--enabled-hypervisors", dest="enabled_hypervisors",
                          help="Comma-separated list of hypervisors",
                          type="string", default=None),
              ikv_option("-H", "--hypervisor-parameters", dest="hvparams",
                         help="Hypervisor and hypervisor options, in the"
                         " format"
                         " hypervisor:option=value,option=value,...",
                         default=[],
                         action="append",
                         type="identkeyval"),
              keyval_option("-B", "--backend-parameters", dest="beparams",
                            type="keyval", default={},
                            help="Backend parameters"),
              ],
             "[opts...]",
             "Alters the parameters of the cluster"),
  }

if __name__ == '__main__':
  # the generic tag code needs to know it operates on cluster-level tags
  sys.exit(GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER}))