#!/usr/bin/python
#
-# Copyright (C) 2006, 2007, 2008 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Node related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-node
import sys
-from optparse import make_option
from ganeti.cli import *
-from ganeti import cli
from ganeti import opcodes
from ganeti import utils
from ganeti import constants
+from ganeti import compat
from ganeti import errors
from ganeti import bootstrap
+from ganeti import netutils
#: default list of fields for L{ListNodes}
"pinst_cnt", "sinst_cnt",
]
+
+#: default list of fields for L{ListStorage}
+_LIST_STOR_DEF_FIELDS = [
+ constants.SF_NODE,
+ constants.SF_TYPE,
+ constants.SF_NAME,
+ constants.SF_SIZE,
+ constants.SF_USED,
+ constants.SF_FREE,
+ constants.SF_ALLOCATABLE,
+ ]
+
+
#: headers (and full field list) for L{ListNodes}
_LIST_HEADERS = {
"name": "Node", "pinst_cnt": "Pinst", "sinst_cnt": "Sinst",
"master": "IsMaster",
"offline": "Offline", "drained": "Drained",
"role": "Role",
+ "ctime": "CTime", "mtime": "MTime", "uuid": "UUID"
}
+#: headers (and full field list) for L{ListStorage}
+_LIST_STOR_HEADERS = {
+ constants.SF_NODE: "Node",
+ constants.SF_TYPE: "Type",
+ constants.SF_NAME: "Name",
+ constants.SF_SIZE: "Size",
+ constants.SF_USED: "Used",
+ constants.SF_FREE: "Free",
+ constants.SF_ALLOCATABLE: "Allocatable",
+ }
+
+
+#: User-facing storage unit types
+_USER_STORAGE_TYPE = {
+  constants.ST_FILE: "file",
+  constants.ST_LVM_PV: "lvm-pv",
+  constants.ST_LVM_VG: "lvm-vg",
+  }
+
+#: Command line option for selecting one of the storage types above
+_STORAGE_TYPE_OPT = \
+  cli_option("-t", "--storage-type",
+             dest="user_storage_type",
+             choices=_USER_STORAGE_TYPE.keys(),
+             default=None,
+             metavar="STORAGE_TYPE",
+             help=("Storage type (%s)" %
+                   utils.CommaJoin(_USER_STORAGE_TYPE.keys())))
+
+#: Storage types supporting the "fix consistency" (repair) operation
+_REPAIRABLE_STORAGE_TYPES = \
+  [st for st, so in constants.VALID_STORAGE_OPERATIONS.iteritems()
+   if constants.SO_FIX_CONSISTENCY in so]
+
+#: Storage types having at least one user-modifiable field
+_MODIFIABLE_STORAGE_TYPES = constants.MODIFIABLE_STORAGE_FIELDS.keys()
+
+
+def ConvertStorageType(user_storage_type):
+  """Converts a user storage type to its internal name.
+
+  @type user_storage_type: string
+  @param user_storage_type: storage type name as accepted on the command
+      line (an entry of L{_USER_STORAGE_TYPE})
+  @return: the matching internal storage type
+  @raise errors.OpPrereqError: if the given storage type is not known
+
+  """
+  try:
+    return _USER_STORAGE_TYPE[user_storage_type]
+  except KeyError:
+    raise errors.OpPrereqError("Unknown storage type: %s" % user_storage_type,
+                               errors.ECODE_INVAL)
+
+
@UsesRPC
def AddNode(opts, args):
"""Add a node to the cluster.
"""
cl = GetClient()
- dns_data = utils.HostInfo(args[0])
- node = dns_data.name
+ node = netutils.GetHostname(name=args[0]).name
readd = opts.readd
try:
op = opcodes.OpAddNode(node_name=args[0], secondary_ip=sip,
readd=opts.readd)
- SubmitOpCode(op)
+ SubmitOpCode(op, opts=opts)
def ListNodes(opts, args):
val = 'Y'
else:
val = 'N'
+ elif field == "ctime" or field == "mtime":
+ val = utils.FormatTime(val)
elif val is None:
val = "?"
+ elif opts.roman_integers and isinstance(val, int):
+ val = compat.TryToRoman(val)
row[idx] = str(val)
data = GenerateTable(separator=opts.separator, headers=headers,
dst_node = opts.dst_node
iallocator = opts.iallocator
- cnt = [dst_node, iallocator].count(None)
- if cnt != 1:
- raise errors.OpPrereqError("One and only one of the -n and -i"
- " options must be passed")
-
- selected_fields = ["name", "sinst_list"]
- src_node = args[0]
+ op = opcodes.OpNodeEvacuationStrategy(nodes=args,
+ iallocator=iallocator,
+ remote_node=dst_node)
- result = cl.QueryNodes(names=[src_node], fields=selected_fields,
- use_locking=False)
- src_node, sinst = result[0]
-
- if not sinst:
- ToStderr("No secondary instances on node %s, exiting.", src_node)
+ result = SubmitOpCode(op, cl=cl, opts=opts)
+ if not result:
+ # no instances to migrate
+ ToStderr("No secondary instances on node(s) %s, exiting.",
+ utils.CommaJoin(args))
return constants.EXIT_SUCCESS
- if dst_node is not None:
- result = cl.QueryNodes(names=[dst_node], fields=["name"],
- use_locking=False)
- dst_node = result[0][0]
-
- if src_node == dst_node:
- raise errors.OpPrereqError("Evacuate node needs different source and"
- " target nodes (node %s given twice)" %
- src_node)
- txt_msg = "to node %s" % dst_node
- else:
- txt_msg = "using iallocator %s" % iallocator
-
- sinst = utils.NiceSort(sinst)
-
- if not force and not AskUser("Relocate instance(s) %s from node\n"
- " %s %s?" %
- (",".join("'%s'" % name for name in sinst),
- src_node, txt_msg)):
+ if not force and not AskUser("Relocate instance(s) %s from node(s) %s?" %
+ (",".join("'%s'" % name[0] for name in result),
+ utils.CommaJoin(args))):
return constants.EXIT_CONFIRMATION
- op = opcodes.OpEvacuateNode(node_name=args[0], remote_node=dst_node,
- iallocator=iallocator)
- SubmitOpCode(op, cl=cl)
+ jex = JobExecutor(cl=cl, opts=opts)
+ for row in result:
+ iname = row[0]
+ node = row[1]
+ ToStdout("Will relocate instance %s to node %s", iname, node)
+ op = opcodes.OpReplaceDisks(instance_name=iname,
+ remote_node=node, disks=[],
+ mode=constants.REPLACE_DISK_CHG,
+ early_release=opts.early_release)
+ jex.QueueJob(iname, op)
+ results = jex.GetResults()
+ bad_cnt = len([row for row in results if not row[0]])
+ if bad_cnt == 0:
+ ToStdout("All %d instance(s) failed over successfully.", len(results))
+ rcode = constants.EXIT_SUCCESS
+ else:
+ ToStdout("There were errors during the failover:\n"
+ "%d error(s) out of %d instance(s).", bad_cnt, len(results))
+ rcode = constants.EXIT_FAILURE
+ return rcode
def FailoverNode(opts, args):
(",".join("'%s'" % name for name in pinst))):
return 2
- jex = JobExecutor(cl=cl)
+ jex = JobExecutor(cl=cl, opts=opts)
for iname in pinst:
op = opcodes.OpFailoverInstance(instance_name=iname,
ignore_consistency=opts.ignore_consistency)
pinst = utils.NiceSort(pinst)
- retcode = 0
-
if not force and not AskUser("Migrate instance(s) %s?" %
(",".join("'%s'" % name for name in pinst))):
return 2
- jex = JobExecutor(cl=cl)
- for iname in pinst:
- op = opcodes.OpMigrateInstance(instance_name=iname, live=opts.live,
- cleanup=False)
- jex.QueueJob(iname, op)
-
- results = jex.GetResults()
- bad_cnt = len([row for row in results if not row[0]])
- if bad_cnt == 0:
- ToStdout("All %d instance(s) migrated successfully.", len(results))
+ # this should be removed once --non-live is deprecated
+ if not opts.live and opts.migration_mode is not None:
+ raise errors.OpPrereqError("Only one of the --non-live and "
+ "--migration-mode options can be passed",
+ errors.ECODE_INVAL)
+ if not opts.live: # --non-live passed
+ mode = constants.HT_MIGRATION_NONLIVE
else:
- ToStdout("There were errors during the migration:\n"
- "%d error(s) out of %d instance(s).", bad_cnt, len(results))
- return retcode
+ mode = opts.migration_mode
+ op = opcodes.OpMigrateNode(node_name=args[0], mode=mode)
+ SubmitOpCode(op, cl=cl, opts=opts)
def ShowNodeConfig(opts, args):
"""
op = opcodes.OpRemoveNode(node_name=args[0])
- SubmitOpCode(op)
+ SubmitOpCode(op, opts=opts)
return 0
return 2
op = opcodes.OpPowercycleNode(node_name=node, force=opts.force)
- result = SubmitOpCode(op)
+ result = SubmitOpCode(op, opts=opts)
ToStderr(result)
return 0
selected_fields = opts.output.split(",")
op = opcodes.OpQueryNodeVolumes(nodes=args, output_fields=selected_fields)
- output = SubmitOpCode(op)
+ output = SubmitOpCode(op, opts=opts)
if not opts.no_headers:
headers = {"node": "Node", "phys": "PhysDev",
return 0
+def ListStorage(opts, args):
+  """List physical volumes on node(s).
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should either be an empty list, in which case
+      we list data for all nodes, or contain a list of nodes
+      to display data only for those
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  # TODO: Default to ST_FILE if LVM is disabled on the cluster
+  if opts.user_storage_type is None:
+    opts.user_storage_type = constants.ST_LVM_PV
+
+  storage_type = ConvertStorageType(opts.user_storage_type)
+
+  # "-o +field,..." appends to the default field list instead of replacing it
+  if opts.output is None:
+    selected_fields = _LIST_STOR_DEF_FIELDS
+  elif opts.output.startswith("+"):
+    selected_fields = _LIST_STOR_DEF_FIELDS + opts.output[1:].split(",")
+  else:
+    selected_fields = opts.output.split(",")
+
+  op = opcodes.OpQueryNodeStorage(nodes=args,
+                                  storage_type=storage_type,
+                                  output_fields=selected_fields)
+  output = SubmitOpCode(op, opts=opts)
+
+  if not opts.no_headers:
+    # NOTE(review): this literal duplicates _LIST_STOR_HEADERS defined at
+    # module level; the constant could be reused here instead
+    headers = {
+      constants.SF_NODE: "Node",
+      constants.SF_TYPE: "Type",
+      constants.SF_NAME: "Name",
+      constants.SF_SIZE: "Size",
+      constants.SF_USED: "Used",
+      constants.SF_FREE: "Free",
+      constants.SF_ALLOCATABLE: "Allocatable",
+      }
+  else:
+    headers = None
+
+  unitfields = [constants.SF_SIZE, constants.SF_USED, constants.SF_FREE]
+  numfields = [constants.SF_SIZE, constants.SF_USED, constants.SF_FREE]
+
+  # change raw values to nicer strings (booleans shown as Y/N)
+  for row in output:
+    for idx, field in enumerate(selected_fields):
+      val = row[idx]
+      if field == constants.SF_ALLOCATABLE:
+        if val:
+          val = "Y"
+        else:
+          val = "N"
+      row[idx] = str(val)
+
+  data = GenerateTable(separator=opts.separator, headers=headers,
+                       fields=selected_fields, unitfields=unitfields,
+                       numfields=numfields, data=output, units=opts.units)
+
+  for line in data:
+    ToStdout(line)
+
+  return 0
+
+
+def ModifyStorage(opts, args):
+  """Modify storage volume on a node.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should contain 3 items: node name, storage type and volume name
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  (node_name, user_storage_type, volume_name) = args
+
+  storage_type = ConvertStorageType(user_storage_type)
+
+  changes = {}
+
+  # currently only the "allocatable" flag is handled here
+  if opts.allocatable is not None:
+    changes[constants.SF_ALLOCATABLE] = opts.allocatable
+
+  if changes:
+    op = opcodes.OpModifyNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=volume_name,
+                                     changes=changes)
+    SubmitOpCode(op, opts=opts)
+  else:
+    # nothing requested on the command line, don't submit an empty opcode
+    ToStderr("No changes to perform, exiting.")
+
+
+def RepairStorage(opts, args):
+  """Repairs a storage volume on a node.
+
+  @param opts: the command line options selected by the user
+  @type args: list
+  @param args: should contain 3 items: node name, storage type and volume name
+  @rtype: int
+  @return: the desired exit code
+
+  """
+  (node_name, user_storage_type, volume_name) = args
+
+  # translate the user-visible storage type name into the internal constant
+  storage_type = ConvertStorageType(user_storage_type)
+
+  op = opcodes.OpRepairNodeStorage(node_name=node_name,
+                                   storage_type=storage_type,
+                                   name=volume_name,
+                                   ignore_consistency=opts.ignore_consistency)
+  SubmitOpCode(op, opts=opts)
+
+
def SetNodeParams(opts, args):
"""Modifies a node.
ToStderr("Please give at least one of the parameters.")
return 1
- if opts.master_candidate is not None:
- candidate = opts.master_candidate == 'yes'
- else:
- candidate = None
- if opts.offline is not None:
- offline = opts.offline == 'yes'
- else:
- offline = None
-
- if opts.drained is not None:
- drained = opts.drained == 'yes'
- else:
- drained = None
op = opcodes.OpSetNodeParams(node_name=args[0],
- master_candidate=candidate,
- offline=offline,
- drained=drained,
- force=opts.force)
+ master_candidate=opts.master_candidate,
+ offline=opts.offline,
+ drained=opts.drained,
+ force=opts.force,
+ auto_promote=opts.auto_promote)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
commands = {
- 'add': (AddNode, ARGS_ONE,
- [DEBUG_OPT,
- make_option("-s", "--secondary-ip", dest="secondary_ip",
- help="Specify the secondary ip for the node",
- metavar="ADDRESS", default=None),
- make_option("--readd", dest="readd",
- default=False, action="store_true",
- help="Readd old node after replacing it"),
- make_option("--no-ssh-key-check", dest="ssh_key_check",
- default=True, action="store_false",
- help="Disable SSH key fingerprint checking"),
- ],
- "[-s ip] [--readd] [--no-ssh-key-check] <node_name>",
- "Add a node to the cluster"),
- 'evacuate': (EvacuateNode, ARGS_ONE,
- [DEBUG_OPT, FORCE_OPT,
- make_option("-n", "--new-secondary", dest="dst_node",
- help="New secondary node", metavar="NODE",
- default=None),
- make_option("-I", "--iallocator", metavar="<NAME>",
- help="Select new secondary for the instance"
- " automatically using the"
- " <NAME> iallocator plugin",
- default=None, type="string"),
- ],
- "[-f] {-I <iallocator> | -n <dst>} <node>",
- "Relocate the secondary instances from a node"
- " to other nodes (only for instances with drbd disk template)"),
- 'failover': (FailoverNode, ARGS_ONE,
- [DEBUG_OPT, FORCE_OPT,
- make_option("--ignore-consistency", dest="ignore_consistency",
- action="store_true", default=False,
- help="Ignore the consistency of the disks on"
- " the secondary"),
- ],
- "[-f] <node>",
- "Stops the primary instances on a node and start them on their"
- " secondary node (only for instances with drbd disk template)"),
- 'migrate': (MigrateNode, ARGS_ONE,
- [DEBUG_OPT, FORCE_OPT,
- make_option("--non-live", dest="live",
- default=True, action="store_false",
- help="Do a non-live migration (this usually means"
- " freeze the instance, save the state,"
- " transfer and only then resume running on the"
- " secondary node)"),
- ],
- "[-f] <node>",
- "Migrate all the primary instance on a node away from it"
- " (only for instances of type drbd)"),
- 'info': (ShowNodeConfig, ARGS_ANY, [DEBUG_OPT],
- "[<node_name>...]", "Show information about the node(s)"),
- 'list': (ListNodes, ARGS_ANY,
- [DEBUG_OPT, NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, SYNC_OPT],
- "[nodes...]",
- "Lists the nodes in the cluster. The available fields"
- " are (see the man page for details): %s"
- " The default field list is (in order): %s." %
- (", ".join(_LIST_HEADERS), ", ".join(_LIST_DEF_FIELDS))),
- 'modify': (SetNodeParams, ARGS_ONE,
- [DEBUG_OPT, FORCE_OPT,
- SUBMIT_OPT,
- make_option("-C", "--master-candidate", dest="master_candidate",
- choices=('yes', 'no'), default=None,
- metavar="yes|no",
- help="Set the master_candidate flag on the node"),
-
- make_option("-O", "--offline", dest="offline", metavar="yes|no",
- choices=('yes', 'no'), default=None,
- help="Set the offline flag on the node"),
- make_option("-D", "--drained", dest="drained", metavar="yes|no",
- choices=('yes', 'no'), default=None,
- help="Set the drained flag on the node"),
- ],
- "<instance>", "Alters the parameters of an instance"),
- 'powercycle': (PowercycleNode, ARGS_ONE, [DEBUG_OPT, FORCE_OPT, CONFIRM_OPT],
- "<node_name>", "Tries to forcefully powercycle a node"),
- 'remove': (RemoveNode, ARGS_ONE, [DEBUG_OPT],
- "<node_name>", "Removes a node from the cluster"),
- 'volumes': (ListVolumes, ARGS_ANY,
- [DEBUG_OPT, NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT],
- "[<node_name>...]", "List logical volumes on node(s)"),
- 'list-tags': (ListTags, ARGS_ONE, [DEBUG_OPT],
- "<node_name>", "List the tags of the given node"),
- 'add-tags': (AddTags, ARGS_ATLEAST(1), [DEBUG_OPT, TAG_SRC_OPT],
- "<node_name> tag...", "Add tags to the given node"),
- 'remove-tags': (RemoveTags, ARGS_ATLEAST(1), [DEBUG_OPT, TAG_SRC_OPT],
- "<node_name> tag...", "Remove tags from the given node"),
+ 'add': (
+ AddNode, [ArgHost(min=1, max=1)],
+ [SECONDARY_IP_OPT, READD_OPT, NOSSH_KEYCHECK_OPT],
+ "[-s ip] [--readd] [--no-ssh-key-check] <node_name>",
+ "Add a node to the cluster"),
+ 'evacuate': (
+ EvacuateNode, [ArgNode(min=1)],
+ [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT],
+ "[-f] {-I <iallocator> | -n <dst>} <node>",
+ "Relocate the secondary instances from a node"
+ " to other nodes (only for instances with drbd disk template)"),
+ 'failover': (
+ FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT],
+ "[-f] <node>",
+ "Stops the primary instances on a node and start them on their"
+ " secondary node (only for instances with drbd disk template)"),
+ 'migrate': (
+ MigrateNode, ARGS_ONE_NODE, [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT],
+ "[-f] <node>",
+ "Migrate all the primary instance on a node away from it"
+ " (only for instances of type drbd)"),
+ 'info': (
+ ShowNodeConfig, ARGS_MANY_NODES, [],
+ "[<node_name>...]", "Show information about the node(s)"),
+ 'list': (
+ ListNodes, ARGS_MANY_NODES,
+ [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, SYNC_OPT, ROMAN_OPT],
+ "[nodes...]",
+ "Lists the nodes in the cluster. The available fields are (see the man"
+ " page for details): %s. The default field list is (in order): %s." %
+ (utils.CommaJoin(_LIST_HEADERS), utils.CommaJoin(_LIST_DEF_FIELDS))),
+ 'modify': (
+ SetNodeParams, ARGS_ONE_NODE,
+ [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT,
+ AUTO_PROMOTE_OPT],
+ "<node_name>", "Alters the parameters of a node"),
+ 'powercycle': (
+ PowercycleNode, ARGS_ONE_NODE,
+ [FORCE_OPT, CONFIRM_OPT],
+ "<node_name>", "Tries to forcefully powercycle a node"),
+ 'remove': (
+ RemoveNode, ARGS_ONE_NODE, [],
+ "<node_name>", "Removes a node from the cluster"),
+ 'volumes': (
+ ListVolumes, [ArgNode()],
+ [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT],
+ "[<node_name>...]", "List logical volumes on node(s)"),
+ 'list-storage': (
+ ListStorage, ARGS_MANY_NODES,
+ [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT],
+ "[<node_name>...]", "List physical volumes on node(s). The available"
+ " fields are (see the man page for details): %s." %
+ (utils.CommaJoin(_LIST_STOR_HEADERS))),
+ 'modify-storage': (
+ ModifyStorage,
+ [ArgNode(min=1, max=1),
+ ArgChoice(min=1, max=1, choices=_MODIFIABLE_STORAGE_TYPES),
+ ArgFile(min=1, max=1)],
+ [ALLOCATABLE_OPT],
+ "<node_name> <storage_type> <name>", "Modify storage volume on a node"),
+ 'repair-storage': (
+ RepairStorage,
+ [ArgNode(min=1, max=1),
+ ArgChoice(min=1, max=1, choices=_REPAIRABLE_STORAGE_TYPES),
+ ArgFile(min=1, max=1)],
+ [IGNORE_CONSIST_OPT],
+ "<node_name> <storage_type> <name>",
+ "Repairs a storage volume on a node"),
+ 'list-tags': (
+ ListTags, ARGS_ONE_NODE, [],
+ "<node_name>", "List the tags of the given node"),
+ 'add-tags': (
+ AddTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT],
+ "<node_name> tag...", "Add tags to the given node"),
+ 'remove-tags': (
+ RemoveTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT],
+ "<node_name> tag...", "Remove tags from the given node"),
}