"ALLOCATABLE_OPT",
"ALLOC_POLICY_OPT",
"ALL_OPT",
+ "ALLOW_FAILOVER_OPT",
"AUTO_PROMOTE_OPT",
"AUTO_REPLACE_OPT",
"BACKEND_OPT",
"DRAINED_OPT",
"DRY_RUN_OPT",
"DRBD_HELPER_OPT",
+ "DST_NODE_OPT",
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
"ERROR_CODES_OPT",
"FIELDS_OPT",
"FILESTORE_DIR_OPT",
"FILESTORE_DRIVER_OPT",
+ "FORCE_FILTER_OPT",
"FORCE_OPT",
"FORCE_VARIANT_OPT",
"GLOBAL_FILEDIR_OPT",
"OS_OPT",
"OS_SIZE_OPT",
"OOB_TIMEOUT_OPT",
+ "POWER_DELAY_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"PRIORITY_OPT",
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
-ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
+# TODO
+ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
kind = opts.tag_type
if kind == constants.TAG_CLUSTER:
retval = kind, kind
- elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
+ elif kind in (constants.TAG_NODEGROUP,
+ constants.TAG_NODE,
+ constants.TAG_INSTANCE):
if not args:
raise errors.OpPrereqError("no arguments passed to the command")
name = args.pop(0)
help="Ignore the consistency of the disks on"
" the secondary")
+ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
+ dest="allow_failover",
+ action="store_true", default=False,
+                                  help="If migration is not possible, fall back"
+                                  " to failover")
+
NONLIVE_OPT = cli_option("--non-live", dest="live",
default=True, action="store_false",
help="Do a non-live migration (this usually means"
action="store_true", default=False,
help="Remove the instance from the cluster")
+DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
+ help="Specifies the new node for the instance",
+ metavar="NODE", default=None,
+ completion_suggest=OPT_COMPL_ONE_NODE)
+
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
default=False, action="store_true",
- help="Force the joining of a node,"
- " needed when merging clusters")
+ help="Force the joining of a node")
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
type="bool", default=None, metavar=_YORNO,
default=constants.OOB_TIMEOUT,
help="Maximum time to wait for out-of-band helper")
+POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
+ default=constants.OOB_POWER_DELAY,
+ help="Time in seconds to wait between power-ons")
+
+FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
+ action="store_true", default=False,
+                              help=("Whether the command argument should be"
+                                    " treated as a filter"))
+
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
"%s" % msg)
elif isinstance(err, errors.JobLost):
obuf.write("Error checking job status: %s" % msg)
+ elif isinstance(err, errors.QueryFilterParseError):
+ obuf.write("Error while parsing query filter: %s\n" % err.args[0])
+ obuf.write("\n".join(err.GetDetails()))
elif isinstance(err, errors.GenericError):
obuf.write("Unhandled Ganeti error: %s" % msg)
elif isinstance(err, JobSubmittedException):
raise errors.OpPrereqError("Please use either the '--disk' or"
" '-s' option")
if opts.sd_size is not None:
- opts.disks = [(0, {"size": opts.sd_size})]
+ opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
if opts.disks:
try:
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg)
- elif "size" in ddict:
- if "adopt" in ddict:
+ elif constants.IDISK_SIZE in ddict:
+ if constants.IDISK_ADOPT in ddict:
raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
" (disk %d)" % didx)
try:
- ddict["size"] = utils.ParseUnit(ddict["size"])
+ ddict[constants.IDISK_SIZE] = \
+ utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err))
- elif "adopt" in ddict:
+ elif constants.IDISK_ADOPT in ddict:
if mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for instance"
" import")
- ddict["size"] = 0
+ ddict[constants.IDISK_SIZE] = 0
else:
raise errors.OpPrereqError("Missing size or adoption source for"
" disk %d" % didx)
def GenericList(resource, fields, names, unit, separator, header, cl=None,
- format_override=None, verbose=False):
+ format_override=None, verbose=False, force_filter=False):
"""Generic implementation for listing all items of a resource.
- @param resource: One of L{constants.QR_OP_LUXI}
+ @param resource: One of L{constants.QR_VIA_LUXI}
@type fields: list of strings
@param fields: List of fields to query for
@type names: list of strings
@param separator: String used to separate fields
@type header: bool
@param header: Whether to show header row
+ @type force_filter: bool
+ @param force_filter: Whether to always treat names as filter
@type format_override: dict
@param format_override: Dictionary for overriding field formatting functions,
indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
if not names:
names = None
- response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
+ if (force_filter or
+ (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
+ try:
+ (filter_text, ) = names
+ except ValueError:
+ raise errors.OpPrereqError("Exactly one argument must be given as a"
+ " filter")
+
+ logging.debug("Parsing '%s' as filter", filter_text)
+ filter_ = qlang.ParseFilter(filter_text)
+ else:
+ filter_ = qlang.MakeSimpleFilter("name", names)
+
+ response = cl.Query(resource, fields, filter_)
found_unknown = _WarnUnknownFields(response.fields)
def GenericListFields(resource, fields, separator, header, cl=None):
"""Generic implementation for listing fields for a resource.
- @param resource: One of L{constants.QR_OP_LUXI}
+ @param resource: One of L{constants.QR_VIA_LUXI}
@type fields: list of strings
@param fields: List of fields to query for
@type separator: string or None