import os.path
import time
import logging
+import errno
+import itertools
from cStringIO import StringIO
from ganeti import utils
"ALLOCATABLE_OPT",
"ALLOC_POLICY_OPT",
"ALL_OPT",
+ "ALLOW_FAILOVER_OPT",
"AUTO_PROMOTE_OPT",
"AUTO_REPLACE_OPT",
"BACKEND_OPT",
"DRAINED_OPT",
"DRY_RUN_OPT",
"DRBD_HELPER_OPT",
+ "DST_NODE_OPT",
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
"ERROR_CODES_OPT",
"FIELDS_OPT",
"FILESTORE_DIR_OPT",
"FILESTORE_DRIVER_OPT",
+ "FORCE_FILTER_OPT",
"FORCE_OPT",
"FORCE_VARIANT_OPT",
"GLOBAL_FILEDIR_OPT",
"HID_OS_OPT",
+ "GLOBAL_SHARED_FILEDIR_OPT",
"HVLIST_OPT",
"HVOPTS_OPT",
"HYPERVISOR_OPT",
"NEW_RAPI_CERT_OPT",
"NEW_SECONDARY_OPT",
"NIC_PARAMS_OPT",
+ "NODE_FORCE_JOIN_OPT",
"NODE_LIST_OPT",
"NODE_PLACEMENT_OPT",
"NODEGROUP_OPT",
"NOSTART_OPT",
"NOSSH_KEYCHECK_OPT",
"NOVOTING_OPT",
+ "NO_REMEMBER_OPT",
"NWSYNC_OPT",
"ON_PRIMARY_OPT",
"ON_SECONDARY_OPT",
"OSPARAMS_OPT",
"OS_OPT",
"OS_SIZE_OPT",
+ "OOB_TIMEOUT_OPT",
+ "POWER_DELAY_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"PRIORITY_OPT",
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
-ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
+# TODO
+ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
kind = opts.tag_type
if kind == constants.TAG_CLUSTER:
retval = kind, kind
- elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
+ elif kind in (constants.TAG_NODEGROUP,
+ constants.TAG_NODE,
+ constants.TAG_INSTANCE):
if not args:
raise errors.OpPrereqError("no arguments passed to the command")
name = args.pop(0)
help="Don't wait for sync (DANGEROUS!)")
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
- help="Custom disk setup (diskless, file,"
- " plain or drbd)",
+ help=("Custom disk setup (%s)" %
+ utils.CommaJoin(constants.DISK_TEMPLATES)),
default=None, metavar="TEMPL",
choices=list(constants.DISK_TEMPLATES))
help="Ignore the consistency of the disks on"
" the secondary")
+# "--allow-failover": boolean flag (default False).  When set, an instance
+# migration that turns out not to be possible may fall back to a failover
+# instead of aborting (per the help text below).
+ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
+                                dest="allow_failover",
+                                action="store_true", default=False,
+                                help="If migration is not possible fallback to"
+                                " failover")
+
NONLIVE_OPT = cli_option("--non-live", dest="live",
default=True, action="store_false",
help="Do a non-live migration (this usually means"
action="store_true", default=False,
help="Remove the instance from the cluster")
+# "-n"/"--target-node": destination node for an instance (node-name
+# completion enabled).  NOTE(review): uses the same short option "-n" and the
+# same dest ("dst_node") as NEW_SECONDARY_OPT below, so a single command
+# should accept only one of the two — confirm at the call sites.
+DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
+                          help="Specifies the new node for the instance",
+                          metavar="NODE", default=None,
+                          completion_suggest=OPT_COMPL_ONE_NODE)
+
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
default=False, action="store_true",
help="Replace the disk(s) on the primary"
- " node (only for the drbd template)")
+ " node (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
default=False, action="store_true",
help="Replace the disk(s) on the secondary"
- " node (only for the drbd template)")
+ " node (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
default=False, action="store_true",
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
default=False, action="store_true",
help="Automatically replace faulty disks"
- " (only for the drbd template)")
+ " (applies only to internally mirrored"
+ " disk templates, e.g. %s)" %
+ utils.CommaJoin(constants.DTS_INT_MIRROR))
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
default=False, action="store_true",
default=True, action="store_false",
help="Disable SSH key fingerprint checking")
+# "--force-join": boolean flag (default False) forcing the joining of a node
+# (per the help text below).
+NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
+                                 default=False, action="store_true",
+                                 help="Force the joining of a node")
+
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
type="bool", default=None, metavar=_YORNO,
help="Set the master_candidate flag on the node")
metavar="DIR",
default=constants.DEFAULT_FILE_STORAGE_DIR)
+GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
+ dest="shared_file_storage_dir",
+ help="Specify the default directory (cluster-"
+ "wide) for storing the shared file-based"
+ " disks [%s]" %
+ constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
+ metavar="SHAREDDIR",
+ default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
+
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
help="Don't modify /etc/hosts",
action="store_false", default=True)
dest="node_powered",
help="Specify if the SoR for node is powered")
+# "--oob-timeout": integer, maximum number of seconds to wait for the
+# out-of-band helper; defaults to constants.OOB_TIMEOUT.
+OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
+                             default=constants.OOB_TIMEOUT,
+                             help="Maximum time to wait for out-of-band helper")
+
+# "--power-delay": float, number of seconds to wait between successive
+# power-on operations; defaults to constants.OOB_POWER_DELAY.
+POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
+                             default=constants.OOB_POWER_DELAY,
+                             help="Time in seconds to wait between power-ons")
+
+# "-F"/"--filter": boolean flag (default False) requesting that the positional
+# command argument be treated as a query filter rather than a plain name.
+FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
+                              action="store_true", default=False,
+                              help=("Whether command argument should be treated"
+                                    " as filter"))
+
+# "--no-remember": boolean flag (default False); perform the requested change
+# but do not record it in the configuration (per the help text below).
+NO_REMEMBER_OPT = cli_option("--no-remember",
+                             dest="no_remember",
+                             action="store_true", default=False,
+                             help="Perform but do not record the change"
+                             " in the configuration")
+
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
]
-_RSTATUS_TO_TEXT = {
- constants.RS_UNKNOWN: "(unknown)",
- constants.RS_NODATA: "(nodata)",
- constants.RS_UNAVAIL: "(unavail)",
- constants.RS_OFFLINE: "(offline)",
- }
-
-
def _ParseArgs(argv, commands, aliases):
"""Parser for the command line arguments.
"%s" % msg)
elif isinstance(err, errors.JobLost):
obuf.write("Error checking job status: %s" % msg)
+ elif isinstance(err, errors.QueryFilterParseError):
+ obuf.write("Error while parsing query filter: %s\n" % err.args[0])
+ obuf.write("\n".join(err.GetDetails()))
elif isinstance(err, errors.GenericError):
obuf.write("Unhandled Ganeti error: %s" % msg)
elif isinstance(err, JobSubmittedException):
for key, val in override.iteritems():
setattr(options, key, val)
- utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
- stderr_logging=True, program=binary)
+ utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
+ stderr_logging=True)
if old_cmdline:
logging.info("run with arguments '%s'", old_cmdline)
result, err_msg = FormatError(err)
logging.exception("Error during command processing")
ToStderr(err_msg)
+ except KeyboardInterrupt:
+ result = constants.EXIT_FAILURE
+ ToStderr("Aborted. Note that if the operation created any jobs, they"
+ " might have been submitted and"
+ " will continue to run in the background.")
+ except IOError, err:
+ if err.errno == errno.EPIPE:
+ # our terminal went away, we'll exit
+ sys.exit(constants.EXIT_FAILURE)
+ else:
+ raise
return result
raise errors.OpPrereqError("Please use either the '--disk' or"
" '-s' option")
if opts.sd_size is not None:
- opts.disks = [(0, {"size": opts.sd_size})]
+ opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
if opts.disks:
try:
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg)
- elif "size" in ddict:
- if "adopt" in ddict:
+ elif constants.IDISK_SIZE in ddict:
+ if constants.IDISK_ADOPT in ddict:
raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
" (disk %d)" % didx)
try:
- ddict["size"] = utils.ParseUnit(ddict["size"])
+ ddict[constants.IDISK_SIZE] = \
+ utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err))
- elif "adopt" in ddict:
+ elif constants.IDISK_ADOPT in ddict:
if mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for instance"
" import")
- ddict["size"] = 0
+ ddict[constants.IDISK_SIZE] = 0
else:
raise errors.OpPrereqError("Missing size or adoption source for"
" disk %d" % didx)
"""Callable class for formatting fields of a query.
"""
- def __init__(self, fn, status_fn):
+ def __init__(self, fn, status_fn, verbose):
"""Initializes this class.
@type fn: callable
@param fn: Formatting function
@type status_fn: callable
@param status_fn: Function to report fields' status
+ @type verbose: boolean
+ @param verbose: whether to use verbose field descriptions or not
"""
self._fn = fn
self._status_fn = status_fn
+ self._verbose = verbose
def __call__(self, data):
"""Returns a field's string representation.
assert value is None, \
"Found value %r for abnormal status %s" % (value, status)
- return FormatResultError(status)
+ return FormatResultError(status, self._verbose)
-def FormatResultError(status):
+def FormatResultError(status, verbose):
"""Formats result status other than L{constants.RS_NORMAL}.
@param status: The result status
+ @type verbose: boolean
+ @param verbose: Whether to return the verbose text
@return: Text of result status
"""
assert status != constants.RS_NORMAL, \
- "FormatResultError called with status equals to constants.RS_NORMAL"
+ "FormatResultError called with status equal to constants.RS_NORMAL"
try:
- return _RSTATUS_TO_TEXT[status]
+ (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
except KeyError:
raise NotImplementedError("Unknown status %s" % status)
+ else:
+ if verbose:
+ return verbose_text
+ return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
- header=False):
+ header=False, verbose=False):
"""Formats data in L{objects.QueryResponse}.
@type result: L{objects.QueryResponse}
@param separator: String used to separate fields
@type header: bool
@param header: Whether to output header row
+ @type verbose: boolean
+ @param verbose: whether to use verbose field descriptions or not
"""
if unit is None:
assert fdef.title and fdef.name
(fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
columns.append(TableColumn(fdef.title,
- _QueryColumnFormatter(fn, _RecordStatus),
+ _QueryColumnFormatter(fn, _RecordStatus,
+ verbose),
align_right))
table = FormatTable(result.data, columns, header, separator)
def GenericList(resource, fields, names, unit, separator, header, cl=None,
- format_override=None):
+ format_override=None, verbose=False, force_filter=False):
"""Generic implementation for listing all items of a resource.
- @param resource: One of L{constants.QR_OP_LUXI}
+ @param resource: One of L{constants.QR_VIA_LUXI}
@type fields: list of strings
@param fields: List of fields to query for
@type names: list of strings
@param separator: String used to separate fields
@type header: bool
@param header: Whether to show header row
+ @type force_filter: bool
+ @param force_filter: Whether to always treat names as filter
@type format_override: dict
@param format_override: Dictionary for overriding field formatting functions,
indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
+ @type verbose: boolean
+ @param verbose: whether to use verbose field descriptions or not
"""
if cl is None:
if not names:
names = None
- response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
+ if (force_filter or
+ (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
+ try:
+ (filter_text, ) = names
+ except ValueError:
+ raise errors.OpPrereqError("Exactly one argument must be given as a"
+ " filter")
+
+ logging.debug("Parsing '%s' as filter", filter_text)
+ filter_ = qlang.ParseFilter(filter_text)
+ else:
+ filter_ = qlang.MakeSimpleFilter("name", names)
+
+ response = cl.Query(resource, fields, filter_)
found_unknown = _WarnUnknownFields(response.fields)
(status, data) = FormatQueryResult(response, unit=unit, separator=separator,
header=header,
- format_override=format_override)
+ format_override=format_override,
+ verbose=verbose)
for line in data:
ToStdout(line)
def GenericListFields(resource, fields, separator, header, cl=None):
"""Generic implementation for listing fields for a resource.
- @param resource: One of L{constants.QR_OP_LUXI}
+ @param resource: One of L{constants.QR_VIA_LUXI}
@type fields: list of strings
@param fields: List of fields to query for
@type separator: string or None
columns = [
TableColumn("Name", str, False),
TableColumn("Title", str, False),
- # TODO: Add field description to master daemon
+ TableColumn("Description", str, False),
]
- rows = [[fdef.name, fdef.title] for fdef in response.fields]
+ rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
for line in FormatTable(rows, columns, header, separator):
ToStdout(line)
@param txt: the message
"""
- if args:
- args = tuple(args)
- stream.write(txt % args)
- else:
- stream.write(txt)
- stream.write('\n')
- stream.flush()
+ try:
+ if args:
+ args = tuple(args)
+ stream.write(txt % args)
+ else:
+ stream.write(txt)
+ stream.write('\n')
+ stream.flush()
+ except IOError, err:
+ if err.errno == errno.EPIPE:
+ # our terminal went away, we'll exit
+ sys.exit(constants.EXIT_FAILURE)
+ else:
+ raise
def ToStdout(txt, *args):
self.jobs = []
self.opts = opts
self.feedback_fn = feedback_fn
+ self._counter = itertools.count()
+
+ @staticmethod
+ def _IfName(name, fmt):
+ """Helper function for formatting name.
+
+ """
+ if name:
+ return fmt % name
+
+ return ""
def QueueJob(self, name, *ops):
"""Record a job for later submit.
@type name: string
@param name: a description of the job, will be used in WaitJobSet
+
"""
SetGenericOpcodeOpts(ops, self.opts)
- self.queue.append((name, ops))
+ self.queue.append((self._counter.next(), name, ops))
+
+ def AddJobId(self, name, status, job_id):
+ """Adds a job ID to the internal queue.
+
+ """
+ self.jobs.append((self._counter.next(), status, job_id, name))
def SubmitPending(self, each=False):
"""Submit all pending jobs.
"""
if each:
results = []
- for row in self.queue:
+ for (_, _, ops) in self.queue:
# SubmitJob will remove the success status, but raise an exception if
# the submission fails, so we'll notice that anyway.
- results.append([True, self.cl.SubmitJob(row[1])])
+ results.append([True, self.cl.SubmitJob(ops)])
else:
- results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
- for (idx, ((status, data), (name, _))) in enumerate(zip(results,
- self.queue)):
+ results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
+ for ((status, data), (idx, name, _)) in zip(results, self.queue):
self.jobs.append((idx, status, data, name))
def _ChooseJob(self):
# first, remove any non-submitted jobs
self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
for idx, _, jid, name in failures:
- ToStderr("Failed to submit job for %s: %s", name, jid)
+ ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
results.append((idx, False, jid))
while self.jobs:
(idx, _, jid, name) = self._ChooseJob()
- ToStdout("Waiting for job %s for %s...", jid, name)
+ ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
try:
job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
success = True
except errors.JobLost, err:
_, job_result = FormatError(err)
- ToStderr("Job %s for %s has been archived, cannot check its result",
- jid, name)
+ ToStderr("Job %s%s has been archived, cannot check its result",
+ jid, self._IfName(name, " for %s"))
success = False
except (errors.GenericError, luxi.ProtocolError), err:
_, job_result = FormatError(err)
success = False
# the error message will always be shown, verbose or not
- ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
+ ToStderr("Job %s%s has failed: %s",
+ jid, self._IfName(name, " for %s"), job_result)
results.append((idx, success, job_result))