4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 from optparse import make_option
27 from ganeti.cli import *
28 from ganeti import opcodes
29 from ganeti import constants
30 from ganeti import errors
31 from ganeti import utils
32 from ganeti import bootstrap
33 from ganeti import ssh
34 from ganeti import ssconf
def InitCluster(opts, args):
  """Initialize the cluster.

  Args:
    opts - class with options as members
    args - list of arguments, expected to be [clustername]

  """
  if not opts.lvm_storage and opts.vg_name:
    print ("Options --no-lvm-storage and --vg-name conflict.")
    # NOTE(review): no early return is visible after this message in this
    # chunk, so execution appears to continue despite the conflict --
    # confirm against the full file.

  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    # LVM storage requested but no group named: fall back to the default.
    vg_name = constants.DEFAULT_VG

  # NOTE(review): the computed vg_name local is not passed in the visible
  # arguments below (presumably a vg_name=vg_name line is missing from
  # this chunk) -- verify.
  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        hypervisor_type=opts.hypervisor_type,
                        mac_prefix=opts.mac_prefix,
                        def_bridge=opts.def_bridge,
                        master_netdev=opts.master_netdev,
                        file_storage_dir=opts.file_storage_dir)
def DestroyCluster(opts, args):
  """Destroy the cluster.

  Args:
    opts - class with options as members; only the yes_do_it
           confirmation flag is used
    args - list of arguments, unused

  Returns:
    1 if the --yes-do-it confirmation is missing, 0 on success.

  """
  if not opts.yes_do_it:
    # Refuse to proceed without the explicit confirmation flag; without
    # this early return the cluster would be destroyed anyway.
    # (Also fixed grammar in the message: "is irreversible", "want to
    # destroy".)
    print ("Destroying a cluster is irreversible. If you really want to"
           " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpDestroyCluster()
  master = SubmitOpCode(op)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
def RenameCluster(opts, args):
  """Rename the cluster.

  Args:
    opts - class with options as members, we use force only
    args - list of arguments, expected to be [new_name]

  """
  # NOTE(review): `name` is used below but its assignment (presumably
  # name = args[0]) and the force-flag handling are not visible in this
  # chunk -- confirm against the full file.
  usertext = ("This will rename the cluster to '%s'. If you are connected"
              " over the network to the cluster name, the operation is very"
              " dangerous as the IP address will be removed from the node"
              " and the change may not go through. Continue?") % name
  # NOTE(review): the body of this abort branch (presumably a return) is
  # not visible in this chunk.
  if not AskUser(usertext):

  op = opcodes.OpRenameCluster(name=name)
  # NOTE(review): submission of `op` is not visible in this chunk.
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  Args:
    opts - class with options as members

  """
  # One cluster-info query supplies all of the version fields printed
  # below.
  cluster_info = SubmitOpCode(opcodes.OpQueryClusterInfo())
  print ("Software version: %s" % cluster_info["software_version"])
  print ("Internode protocol: %s" % cluster_info["protocol_version"])
  print ("Configuration format: %s" % cluster_info["config_version"])
  print ("OS api version: %s" % cluster_info["os_api_version"])
  print ("Export interface: %s" % cluster_info["export_version"])
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  Args:
    opts - class with options as members

  """
  # Reads the master name from the local ssconf store rather than
  # submitting an opcode.
  sstore = ssconf.SimpleStore()
  print sstore.GetMasterNode()
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  """
  # Same query as the `version` command; here we print the identity and
  # topology fields instead.
  op = opcodes.OpQueryClusterInfo()
  result = SubmitOpCode(op)

  # NOTE(review): this chunk appears to be missing some lines between the
  # prints below -- there may be additional output fields in the full file.
  print ("Cluster name: %s" % result["name"])

  print ("Master node: %s" % result["master"])

  print ("Architecture (this node): %s (%s)" %
         (result["architecture"][0], result["architecture"][1]))

  print ("Cluster hypervisor: %s" % result["hypervisor_type"])
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  Args:
    opts - class with options as members
    args - list containing a single element, the file name
  Opts used:
    nodes - list containing the name of target nodes; if empty, all nodes

  """
  # NOTE(review): `filename` is used below but its assignment (presumably
  # filename = args[0]) is not visible in this chunk -- confirm.
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename)

  # Exclude ourselves from the copy targets.
  myname = utils.HostInfo().name

  op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  results = [row[0] for row in SubmitOpCode(op) if row[0] != myname]
  srun = ssh.SshRunner()
  # NOTE(review): the loop header iterating `node` over `results` is not
  # visible in this chunk; the lines below appear to be its body, and the
  # final print statement is truncated mid-expression here.
  if not srun.CopyFileToNode(node, filename):
    print >> sys.stderr, ("Copy of file %s to node %s failed" %
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  Args:
    opts - class with options as members
    args - the command list as a list
  Opts used:
    nodes: list containing the name of target nodes; if empty, all nodes

  """
  command = " ".join(args)
  op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
  nodes = [row[0] for row in SubmitOpCode(op)]

  sstore = ssconf.SimpleStore()
  master_node = sstore.GetMasterNode()
  srun = ssh.SshRunner(sstore=sstore)

  # Move the master to the end of the node list, presumably so a command
  # that disrupts the master runs everywhere else first -- confirm.
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  # NOTE(review): the loop header (presumably "for name in nodes:") is
  # not visible in this chunk; the lines below appear to be its body.
  result = srun.Run(name, "root", command)
  print ("------------------------------------------------")
  print ("node: %s" % name)
  print ("%s" % result.output)
  print ("return code = %s" % result.exit_code)
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  Args:
    opts - class with options as members

  """
  # NOTE(review): skip_checks is appended to below but its initialization
  # (presumably skip_checks = []) is not visible in this chunk.
  if opts.skip_nplusone_mem:
    # Caller asked to skip the N+1 memory redundancy check.
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
  op = opcodes.OpVerifyCluster(skip_checks=skip_checks)
  # NOTE(review): submission of `op` and the return value are not visible
  # in this chunk.
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  Args:
    opts - class with options as members

  """
  # NOTE(review): this chunk is missing several guard, loop and `try`
  # lines from this function; the indentation below is a best-effort
  # reconstruction -- verify against the full file.
  op = opcodes.OpVerifyDisks()
  result = SubmitOpCode(op)
  if not isinstance(result, (list, tuple)) or len(result) != 4:
    raise errors.ProgrammerError("Unknown result type for OpVerifyDisks")

  # Unpack: unreachable nodes, per-node LVM errors, instances needing
  # disk activation, and instances with missing logical volumes.
  nodes, nlvm, instances, missing = result

  print "Nodes unreachable or with bad data:"

  retcode = constants.EXIT_SUCCESS

  for node, text in nlvm.iteritems():
    # Truncate to the last 400 chars and escape control characters
    # (py2-only 'string_escape' codec).
    print ("Error on node %s: LVM error: %s" %
           (node, text[-400:].encode('string_escape')))
    print "You need to fix these nodes first before fixing instances"

  for iname in instances:
    op = opcodes.OpActivateInstanceDisks(instance_name=iname)
    print "Activating disks for instance '%s'" % iname
    # NOTE(review): the matching `try:` for this handler is not visible
    # in this chunk.
    except errors.GenericError, err:
      nret, msg = FormatError(err)
      # NOTE(review): this print's continuation is truncated here.
      print >> sys.stderr, ("Error activating disks for instance %s: %s" %

  for iname, ival in missing.iteritems():
    # True when every missing volume sits on a node that itself has LVM
    # problems -- that instance cannot be checked further.
    all_missing = utils.all(ival, lambda x: x[0] in nlvm)
    print ("Instance %s cannot be verified as it lives on"
           " broken nodes" % iname)
    print "Instance %s has missing logical volumes:" % iname
    for node, vol in ival:
      print ("\tbroken node %s /dev/xenvg/%s" % (node, vol))
      print ("\t%s /dev/xenvg/%s" % (node, vol))
    print ("You need to run replace_disks for all the above"
           " instances, if this message persist after fixing nodes.")
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  """
  # The whole promotion/demotion sequence lives in the bootstrap module;
  # just propagate its status to our caller.
  failover_status = bootstrap.MasterFailover()
  return failover_status
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  """
  # args[0] is the pattern to search for.
  op = opcodes.OpSearchTags(pattern=args[0])
  result = SubmitOpCode(op)
  # NOTE(review): lines are missing here (presumably an empty-result
  # check and/or sorting of the result) -- confirm against the full file.
  result = list(result)
  for path, tag in result:
    print "%s %s" % (path, tag)
def SetClusterParams(opts, args):
  """Modify the cluster.

  Args:
    opts - class with options as members

  """
  # Equivalent to: opts.lvm_storage (still default True) and no vg_name
  # given, i.e. the user supplied no parameter at all.
  if not (not opts.lvm_storage or opts.vg_name):
    print "Please give at least one of the parameters."
    # NOTE(review): no early return is visible after this message in this
    # chunk -- confirm.

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    print ("Options --no-lvm-storage and --vg-name conflict.")
    # NOTE(review): no early return is visible here either.

  # NOTE(review): the vg_name local computed above is unused -- the op is
  # built from opts.vg_name directly; submission of `op` is also not
  # visible in this chunk.
  op = opcodes.OpSetClusterParams(vg_name=opts.vg_name)
# This option is shared by several commands, so it is built once here
# and referenced from the command table below.
node_option = make_option("-n", "--node",
                          dest="nodes", action="append", default=[],
                          metavar="<node>",
                          help="Node to copy to (if not given, all nodes),"
                          " can be given multiple times")
# NOTE(review): the opening of this command table (presumably
# "commands = {") and several bracket/option lines are not visible in
# this chunk.  Each entry maps a command name to a tuple of
# (handler function, argument spec, option list, usage, description).
  'init': (InitCluster, ARGS_ONE,
           make_option("-s", "--secondary-ip", dest="secondary_ip",
                       help="Specify the secondary ip for this node;"
                       " if given, the entire cluster must have secondary"
                       metavar="ADDRESS", default=None),
           make_option("-t", "--hypervisor-type", dest="hypervisor_type",
                       help="Specify the hypervisor type "
                       "(xen-3.0, kvm, fake, xen-hvm-3.1)",
                       metavar="TYPE", choices=["xen-3.0",
           make_option("-m", "--mac-prefix", dest="mac_prefix",
                       help="Specify the mac prefix for the instance IP"
                       " addresses, in the format XX:XX:XX",
                       default="aa:00:00",),
           make_option("-g", "--vg-name", dest="vg_name",
                       help="Specify the volume group name "
                       " (cluster-wide) for disk allocation [xenvg]",
           make_option("-b", "--bridge", dest="def_bridge",
                       help="Specify the default bridge name (cluster-wide)"
                       " to connect the instances to [%s]" %
                       constants.DEFAULT_BRIDGE,
                       default=constants.DEFAULT_BRIDGE,),
           make_option("--master-netdev", dest="master_netdev",
                       help="Specify the node interface (cluster-wide)"
                       " on which the master IP address will be added "
                       " [%s]" % constants.DEFAULT_BRIDGE,
                       default=constants.DEFAULT_BRIDGE,),
           make_option("--file-storage-dir", dest="file_storage_dir",
                       help="Specify the default directory (cluster-wide)"
                       " for storing the file-based disks [%s]" %
                       constants.DEFAULT_FILE_STORAGE_DIR,
                       default=constants.DEFAULT_FILE_STORAGE_DIR,),
           make_option("--no-lvm-storage", dest="lvm_storage",
                       help="No support for lvm based instances"
                       action="store_false", default=True,),
           "[opts...] <cluster_name>",
           "Initialises a new cluster configuration"),
  # Destructive command: guarded by the --yes-do-it flag (see
  # DestroyCluster above).
  'destroy': (DestroyCluster, ARGS_NONE,
              make_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster",
                          action="store_true"),
              "", "Destroy cluster"),
  'rename': (RenameCluster, ARGS_ONE, [DEBUG_OPT, FORCE_OPT],
             "Renames the cluster"),
  'verify': (VerifyCluster, ARGS_NONE, [DEBUG_OPT,
             make_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                         help="Skip N+1 memory redundancy tests",
             "", "Does a check on the cluster configuration"),
  'verify-disks': (VerifyDisks, ARGS_NONE, [DEBUG_OPT],
                   "", "Does a check on the cluster disk status"),
  'masterfailover': (MasterFailover, ARGS_NONE, [DEBUG_OPT],
                     "", "Makes the current node the master"),
  'version': (ShowClusterVersion, ARGS_NONE, [DEBUG_OPT],
              "", "Shows the cluster version"),
  'getmaster': (ShowClusterMaster, ARGS_NONE, [DEBUG_OPT],
                "", "Shows the cluster master"),
  # The next two commands share the node_option defined above.
  'copyfile': (ClusterCopyFile, ARGS_ONE, [DEBUG_OPT, node_option],
               "[-n node...] <filename>",
               "Copies a file to all (or only some) nodes"),
  'command': (RunClusterCommand, ARGS_ATLEAST(1), [DEBUG_OPT, node_option],
              "[-n node...] <command>",
              "Runs a command on all (or only some) nodes"),
  'info': (ShowClusterConfig, ARGS_NONE, [DEBUG_OPT],
           "", "Show cluster configuration"),
  # NOTE(review): ListTags/AddTags/RemoveTags are not defined in this
  # chunk -- presumably provided by the ganeti.cli star import.
  'list-tags': (ListTags, ARGS_NONE,
                [DEBUG_OPT], "", "List the tags of the cluster"),
  'add-tags': (AddTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
               "tag...", "Add tags to the cluster"),
  'remove-tags': (RemoveTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
                  "tag...", "Remove tags from the cluster"),
  'search-tags': (SearchTags, ARGS_ONE,
                  [DEBUG_OPT], "", "Searches the tags on all objects on"
                  " the cluster for a given pattern (regex)"),
  'modify': (SetClusterParams, ARGS_NONE,
             make_option("-g", "--vg-name", dest="vg_name",
                         help="Specify the volume group name "
                         " (cluster-wide) for disk allocation "
                         "and enable lvm based storage",
             make_option("--no-lvm-storage", dest="lvm_storage",
                         help="Disable support for lvm based instances"
                         action="store_false", default=True,),
             "Alters the parameters of the cluster"),
if __name__ == '__main__':
  # Dispatch through the generic CLI driver (presumably provided by the
  # ganeti.cli star import), overriding the tag type so tag commands act
  # on the cluster object.
  sys.exit(GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER}))