import sys
from optparse import make_option
import pprint
+import os.path
from ganeti.cli import *
from ganeti import opcodes
+from ganeti import constants
+from ganeti import errors
+from ganeti import utils
+from ganeti import bootstrap
+from ganeti import ssh
+from ganeti import ssconf
def InitCluster(opts, args):
args - list of arguments, expected to be [clustername]
"""
- op = opcodes.OpInitCluster(cluster_name=args[0],
- secondary_ip=opts.secondary_ip,
- hypervisor_type=opts.hypervisor_type,
- vg_name=opts.vg_name,
- mac_prefix=opts.mac_prefix,
- def_bridge=opts.def_bridge)
- SubmitOpCode(op)
+ if not opts.lvm_storage and opts.vg_name:
+ print ("Options --no-lvm-storage and --vg-name conflict.")
+ return 1
+
+ vg_name = opts.vg_name
+ if opts.lvm_storage and not opts.vg_name:
+ vg_name = constants.DEFAULT_VG
+
+ bootstrap.InitCluster(cluster_name=args[0],
+ secondary_ip=opts.secondary_ip,
+ hypervisor_type=opts.hypervisor_type,
+ vg_name=vg_name,
+ mac_prefix=opts.mac_prefix,
+ def_bridge=opts.def_bridge,
+ master_netdev=opts.master_netdev,
+ file_storage_dir=opts.file_storage_dir)
return 0
Args:
opts - class with options as members
+
"""
if not opts.yes_do_it:
print ("Destroying a cluster is irreversibly. If you really want destroy"
- "this cluster, supply the --yes-do-it option.")
+ " this cluster, supply the --yes-do-it option.")
return 1
op = opcodes.OpDestroyCluster()
+ master = SubmitOpCode(op)
+ # if we reached this, the opcode didn't fail; we can proceed to
+ # shutdown all the daemons
+ bootstrap.FinalizeClusterDestroy(master)
+ return 0
+
+
+def RenameCluster(opts, args):
+  """Rename the cluster.
+
+  Args:
+    opts - class with options as members, we use force only
+    args - list of arguments, expected to be [new_name]
+
+  Returns:
+    0 - rename opcode submitted
+    1 - user declined the confirmation prompt
+
+  """
+  name = args[0]
+  if not opts.force:
+    # Renaming removes the cluster IP from the master node, so a session
+    # reaching the cluster through its name can be cut off mid-operation;
+    # require explicit confirmation unless --force was given.
+    usertext = ("This will rename the cluster to '%s'. If you are connected"
+                " over the network to the cluster name, the operation is very"
+                " dangerous as the IP address will be removed from the node"
+                " and the change may not go through. Continue?") % name
+    if not AskUser(usertext):
+      return 1
+
+  op = opcodes.OpRenameCluster(name=name)
   SubmitOpCode(op)
   return 0
opts - class with options as members
"""
- op = opcodes.OpQueryClusterInfo()
- result = SubmitOpCode(op)
- print (result["master"])
+ sstore = ssconf.SimpleStore()
+ print sstore.GetMasterNode()
return 0
print ("Cluster name: %s" % result["name"])
- print ("Architecture: %s (%s)" %
- (result["architecture"][0], result["architecture"][1]))
-
print ("Master node: %s" % result["master"])
- print ("Instances:")
- for name, node in result["instances"]:
- print (" - %s (on %s)" % (name, node))
- print ("Nodes:")
- for name in result["nodes"]:
- print (" - %s" % name)
+ print ("Architecture (this node): %s (%s)" %
+ (result["architecture"][0], result["architecture"][1]))
+
+ print ("Cluster hypervisor: %s" % result["hypervisor_type"])
return 0
nodes - list containing the name of target nodes; if empty, all nodes
"""
- op = opcodes.OpClusterCopyFile(filename=args[0], nodes=opts.nodes)
- SubmitOpCode(op)
+ filename = args[0]
+ if not os.path.exists(filename):
+ raise errors.OpPrereqError("No such filename '%s'" % filename)
+
+ myname = utils.HostInfo().name
+
+ op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
+ results = [row[0] for row in SubmitOpCode(op) if row[0] != myname]
+ srun = ssh.SshRunner()
+ for node in results:
+ if not srun.CopyFileToNode(node, filename):
+ print >> sys.stderr, ("Copy of file %s to node %s failed" %
+ (filename, node))
+
return 0
"""
command = " ".join(args)
- nodes = opts.nodes
- op = opcodes.OpRunClusterCommand(command=command, nodes=nodes)
- result = SubmitOpCode(op)
- for node, sshcommand, output, exit_code in result:
+ op = opcodes.OpQueryNodes(output_fields=["name"], names=opts.nodes)
+ nodes = [row[0] for row in SubmitOpCode(op)]
+
+ sstore = ssconf.SimpleStore()
+ master_node = sstore.GetMasterNode()
+ srun = ssh.SshRunner(sstore=sstore)
+
+ if master_node in nodes:
+ nodes.remove(master_node)
+ nodes.append(master_node)
+
+ for name in nodes:
+ result = srun.Run(name, "root", command)
print ("------------------------------------------------")
- print ("node: %s" % node)
- print ("command: %s" % sshcommand)
- print ("%s" % output)
- print ("return code = %s" % exit_code)
+ print ("node: %s" % name)
+ print ("%s" % result.output)
+ print ("return code = %s" % result.exit_code)
+
+ return 0
def VerifyCluster(opts, args):
opts - class with options as members
"""
- op = opcodes.OpVerifyCluster()
+ skip_checks = []
+ if opts.skip_nplusone_mem:
+ skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
+ op = opcodes.OpVerifyCluster(skip_checks=skip_checks)
+ if SubmitOpCode(op):
+ return 0
+ else:
+ return 1
+
+
+def VerifyDisks(opts, args):
+  """Verify integrity of cluster disks.
+
+  Args:
+    opts - class with options as members (unused here)
+
+  Returns an exit code: EXIT_SUCCESS when everything is consistent,
+  non-zero when broken nodes or instances with missing volumes were found.
+
+  """
+  op = opcodes.OpVerifyDisks()
   result = SubmitOpCode(op)
-  return result
+  # The opcode must return exactly four items; anything else is a
+  # programming error, not a user/cluster error.
+  if not isinstance(result, (list, tuple)) or len(result) != 4:
+    raise errors.ProgrammerError("Unknown result type for OpVerifyDisks")
+
+  # nodes: unreachable/bad-data nodes; nlvm: per-node LVM error text;
+  # instances: instances whose disks need (re)activation; missing: map of
+  # instance name -> list of (node, volume) pairs for absent logical volumes
+  nodes, nlvm, instances, missing = result
+
+  if nodes:
+    print "Nodes unreachable or with bad data:"
+    for name in nodes:
+      print "\t%s" % name
+  retcode = constants.EXIT_SUCCESS
+
+  if nlvm:
+    for node, text in nlvm.iteritems():
+      # clamp the error text to its last 400 chars and escape control
+      # characters so a broken node cannot garble the terminal
+      print ("Error on node %s: LVM error: %s" %
+             (node, text[-400:].encode('string_escape')))
+      retcode |= 1
+      print "You need to fix these nodes first before fixing instances"
+
+  if instances:
+    # (re)activate disks for degraded instances; the ones listed in
+    # 'missing' are reported below instead of being activated
+    for iname in instances:
+      if iname in missing:
+        continue
+      op = opcodes.OpActivateInstanceDisks(instance_name=iname)
+      try:
+        print "Activating disks for instance '%s'" % iname
+        SubmitOpCode(op)
+      except errors.GenericError, err:
+        nret, msg = FormatError(err)
+        retcode |= nret
+        print >> sys.stderr, ("Error activating disks for instance %s: %s" %
+                              (iname, msg))
+
+  if missing:
+    for iname, ival in missing.iteritems():
+      # if every missing volume sits on a node that already failed the LVM
+      # check, nothing more can be said about this instance
+      all_missing = utils.all(ival, lambda x: x[0] in nlvm)
+      if all_missing:
+        print ("Instance %s cannot be verified as it lives on"
+               " broken nodes" % iname)
+      else:
+        print "Instance %s has missing logical volumes:" % iname
+        ival.sort()
+        for node, vol in ival:
+          # NOTE(review): the volume group is hard-coded as "xenvg" here,
+          # although --vg-name is configurable at init time -- confirm
+          if node in nlvm:
+            print ("\tbroken node %s /dev/xenvg/%s" % (node, vol))
+          else:
+            print ("\t%s /dev/xenvg/%s" % (node, vol))
+    # TODO(review): "persist" should read "persists" in the message below
+    print ("You need to run replace_disks for all the above"
+           " instances, if this message persist after fixing nodes.")
+    retcode |= 1
+
+  return retcode
def MasterFailover(opts, args):
master.
"""
- op = opcodes.OpMasterFailover()
+ return bootstrap.MasterFailover()
+
+
+def SearchTags(opts, args):
+  """Searches the tags on all the cluster.
+
+  Args:
+    opts - class with options as members (unused here)
+    args - list of arguments, expected to be [pattern]
+
+  Returns 1 when no tag matched the pattern; on success the function falls
+  off the end (returns None -- no explicit "return 0").
+
+  """
+  op = opcodes.OpSearchTags(pattern=args[0])
+  result = SubmitOpCode(op)
+  if not result:
+    # no tag matched the pattern: report failure via the exit code
+    return 1
+  # sort the (path, tag) pairs for deterministic, readable output
+  result = list(result)
+  result.sort()
+  for path, tag in result:
+    print "%s %s" % (path, tag)
+
+
+def SetClusterParams(opts, args):
+  """Modify the cluster.
+
+  Args:
+    opts - class with options as members
+
+  Returns 0 on success, 1 on bad/conflicting option combinations.
+
+  """
+  # Guard: with lvm storage enabled (the default) and no --vg-name given,
+  # there is nothing to modify.
+  if not (not opts.lvm_storage or opts.vg_name):
+    print "Please give at least one of the parameters."
+    return 1
+
+  # NOTE(review): vg_name is assigned but never used below -- the opcode is
+  # built from opts.vg_name directly; confirm whether --no-lvm-storage was
+  # meant to clear the volume group through this variable.
+  vg_name = opts.vg_name
+  if not opts.lvm_storage and opts.vg_name:
+    print ("Options --no-lvm-storage and --vg-name conflict.")
+    return 1
+
+  op = opcodes.OpSetClusterParams(vg_name=opts.vg_name)
   SubmitOpCode(op)
+  return 0
# this is an option common to more than one command, so we declare
# it here and reuse it
node_option = make_option("-n", "--node", action="append", dest="nodes",
- help="Node to copy to (if not given, all nodes)"
- ", can be given multiple times", metavar="<node>",
- default=[])
+ help="Node to copy to (if not given, all nodes),"
+ " can be given multiple times",
+ metavar="<node>", default=[])
commands = {
'init': (InitCluster, ARGS_ONE,
" addresses",
metavar="ADDRESS", default=None),
make_option("-t", "--hypervisor-type", dest="hypervisor_type",
- help="Specify the hypervisor type (xen-3.0, fake)",
- metavar="TYPE", choices=["xen-3.0", "fake"],
+ help="Specify the hypervisor type "
+ "(xen-3.0, kvm, fake, xen-hvm-3.1)",
+ metavar="TYPE", choices=["xen-3.0",
+ "kvm",
+ "fake",
+ "xen-hvm-3.1"],
default="xen-3.0",),
make_option("-m", "--mac-prefix", dest="mac_prefix",
help="Specify the mac prefix for the instance IP"
help="Specify the volume group name "
" (cluster-wide) for disk allocation [xenvg]",
metavar="VG",
- default="xenvg",),
+ default=None,),
make_option("-b", "--bridge", dest="def_bridge",
help="Specify the default bridge name (cluster-wide)"
- " to connect the instances to [xen-br0]",
+ " to connect the instances to [%s]" %
+ constants.DEFAULT_BRIDGE,
metavar="BRIDGE",
- default="xen-br0",),
+ default=constants.DEFAULT_BRIDGE,),
+ make_option("--master-netdev", dest="master_netdev",
+ help="Specify the node interface (cluster-wide)"
+                      " on which the master IP address will be added"
+                      " [%s]" % constants.DEFAULT_BRIDGE,
+ metavar="NETDEV",
+ default=constants.DEFAULT_BRIDGE,),
+ make_option("--file-storage-dir", dest="file_storage_dir",
+ help="Specify the default directory (cluster-wide)"
+ " for storing the file-based disks [%s]" %
+ constants.DEFAULT_FILE_STORAGE_DIR,
+ metavar="DIR",
+ default=constants.DEFAULT_FILE_STORAGE_DIR,),
+ make_option("--no-lvm-storage", dest="lvm_storage",
+ help="No support for lvm based instances"
+ " (cluster-wide)",
+ action="store_false", default=True,),
],
"[opts...] <cluster_name>",
"Initialises a new cluster configuration"),
action="store_true"),
],
"", "Destroy cluster"),
- 'verify': (VerifyCluster, ARGS_NONE, [DEBUG_OPT],
+ 'rename': (RenameCluster, ARGS_ONE, [DEBUG_OPT, FORCE_OPT],
+ "<new_name>",
+ "Renames the cluster"),
+ 'verify': (VerifyCluster, ARGS_NONE, [DEBUG_OPT,
+ make_option("--no-nplus1-mem", dest="skip_nplusone_mem",
+ help="Skip N+1 memory redundancy tests",
+ action="store_true",
+ default=False,),
+ ],
"", "Does a check on the cluster configuration"),
+ 'verify-disks': (VerifyDisks, ARGS_NONE, [DEBUG_OPT],
+ "", "Does a check on the cluster disk status"),
'masterfailover': (MasterFailover, ARGS_NONE, [DEBUG_OPT],
"", "Makes the current node the master"),
'version': (ShowClusterVersion, ARGS_NONE, [DEBUG_OPT],
"Runs a command on all (or only some) nodes"),
'info': (ShowClusterConfig, ARGS_NONE, [DEBUG_OPT],
"", "Show cluster configuration"),
+ 'list-tags': (ListTags, ARGS_NONE,
+ [DEBUG_OPT], "", "List the tags of the cluster"),
+ 'add-tags': (AddTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
+ "tag...", "Add tags to the cluster"),
+ 'remove-tags': (RemoveTags, ARGS_ANY, [DEBUG_OPT, TAG_SRC_OPT],
+ "tag...", "Remove tags from the cluster"),
+ 'search-tags': (SearchTags, ARGS_ONE,
+ [DEBUG_OPT], "", "Searches the tags on all objects on"
+ " the cluster for a given pattern (regex)"),
+ 'modify': (SetClusterParams, ARGS_NONE,
+ [DEBUG_OPT,
+ make_option("-g", "--vg-name", dest="vg_name",
+ help="Specify the volume group name "
+ " (cluster-wide) for disk allocation "
+ "and enable lvm based storage",
+ metavar="VG",),
+ make_option("--no-lvm-storage", dest="lvm_storage",
+ help="Disable support for lvm based instances"
+ " (cluster-wide)",
+ action="store_false", default=True,),
+ ],
+ "[opts...]",
+ "Alters the parameters of the cluster"),
}
 if __name__ == '__main__':
-  retcode = GenericMain(commands)
-  sys.exit(retcode)
+  # The override dict presets the tag_type option so the tag subcommands
+  # (add-tags/remove-tags/list-tags) operate on cluster-level tags;
+  # presumably GenericMain injects these values into opts -- TODO confirm
+  sys.exit(GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER}))