from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
+from ganeti import compat
from optparse import (OptionParser, TitledHelpFormatter,
Option, OptionValueError)
__all__ = [
# Command line options
+ "ADD_UIDS_OPT",
"ALLOCATABLE_OPT",
"ALL_OPT",
"AUTO_PROMOTE_OPT",
"AUTO_REPLACE_OPT",
"BACKEND_OPT",
"CLEANUP_OPT",
+ "CLUSTER_DOMAIN_SECRET_OPT",
"CONFIRM_OPT",
"CP_SIZE_OPT",
"DEBUG_OPT",
"HVOPTS_OPT",
"HYPERVISOR_OPT",
"IALLOCATOR_OPT",
+ "IDENTIFY_DEFAULTS_OPT",
"IGNORE_CONSIST_OPT",
"IGNORE_FAILURES_OPT",
+ "IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"IGNORE_SIZE_OPT",
"MAC_PREFIX_OPT",
+ "MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETDEV_OPT",
"MC_OPT",
"NET_OPT",
+ "NEW_CLUSTER_CERT_OPT",
+ "NEW_CLUSTER_DOMAIN_SECRET_OPT",
+ "NEW_CONFD_HMAC_KEY_OPT",
+ "NEW_RAPI_CERT_OPT",
"NEW_SECONDARY_OPT",
"NIC_PARAMS_OPT",
"NODE_LIST_OPT",
"NODE_PLACEMENT_OPT",
"NOHDR_OPT",
"NOIPCHECK_OPT",
+ "NO_INSTALL_OPT",
"NONAMECHECK_OPT",
"NOLVM_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"OFFLINE_OPT",
"OS_OPT",
"OS_SIZE_OPT",
+ "RAPI_CERT_OPT",
"READD_OPT",
"REBOOT_TYPE_OPT",
+ "REMOVE_INSTANCE_OPT",
+ "REMOVE_UIDS_OPT",
"SECONDARY_IP_OPT",
"SELECT_OS_OPT",
"SEP_OPT",
"SYNC_OPT",
"TAG_SRC_OPT",
"TIMEOUT_OPT",
+ "UIDPOOL_OPT",
"USEUNITS_OPT",
+ "USE_REPL_NET_OPT",
"VERBOSE_OPT",
"VG_NAME_OPT",
"YES_DOIT_OPT",
return _SplitKeyVal(opt, value)
+def check_bool(option, opt, value): # pylint: disable-msg=W0613
+  """Custom parser for yes/no options.
+
+  This will store the parsed value as either True or False.
+
+  @param option: the Option instance (unused, required by the optparse
+      type-checker calling convention)
+  @param opt: the option name string (unused)
+  @type value: string
+  @param value: value to parse; accepted (after lower-casing) are
+      C{constants.VALUE_TRUE}/"yes" and C{constants.VALUE_FALSE}/"no"
+  @rtype: bool
+  @return: True or False for the parsed value
+  @raise errors.ParameterError: if the value is not a recognized boolean
+
+  """
+  value = value.lower()
+  if value == constants.VALUE_FALSE or value == "no":
+    return False
+  elif value == constants.VALUE_TRUE or value == "yes":
+    return True
+  else:
+    raise errors.ParameterError("Invalid boolean value '%s'" % value)
+
+
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
"identkeyval",
"keyval",
"unit",
+ "bool",
)
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
TYPE_CHECKER["identkeyval"] = check_ident_key_val
TYPE_CHECKER["keyval"] = check_key_val
TYPE_CHECKER["unit"] = check_unit
+ TYPE_CHECKER["bool"] = check_bool
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
-_YESNO = ("yes", "no")
_YORNO = "yes|no"
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
action="store_true", default=False,
help="Force an unknown variant")
+NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
+ action="store_true", default=False,
+ help="Do not install the OS (will"
+ " enable no-start)")
+
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
" configuration even if there are failures"
" during the removal process")
+IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
+ dest="ignore_remove_failures",
+ action="store_true", default=False,
+ help="Remove the instance from the"
+ " cluster configuration even if there"
+ " are failures during the removal"
+ " process")
+
+REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
+ action="store_true", default=False,
+ help="Remove the instance from the cluster")
+
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
- choices=_YESNO, default=None, metavar=_YORNO,
+ type="bool", default=None, metavar=_YORNO,
help="Set the master_candidate flag on the node")
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
- choices=_YESNO, default=None,
+ type="bool", default=None,
help="Set the offline flag on the node")
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
- choices=_YESNO, default=None,
+ type="bool", default=None,
help="Set the drained flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
- choices=_YESNO, default=None, metavar=_YORNO,
+ type="bool", default=None, metavar=_YORNO,
help="Set the allocatable flag on a volume")
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
metavar="NETDEV",
default=constants.DEFAULT_BRIDGE)
-
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the file-based disks [%s]" %
help="Release the locks on the secondary"
" node(s) early")
+NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
+ dest="new_cluster_cert",
+ default=False, action="store_true",
+ help="Generate a new cluster certificate")
+
+RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
+ default=None,
+ help="File containing new RAPI certificate")
+
+NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
+ default=None, action="store_true",
+ help=("Generate a new self-signed RAPI"
+ " certificate"))
+
+NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
+ dest="new_confd_hmac_key",
+ default=False, action="store_true",
+ help=("Create a new HMAC key for %s" %
+ constants.CONFD))
+
+CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
+ dest="cluster_domain_secret",
+ default=None,
+ help=("Load new new cluster domain"
+ " secret from file"))
+
+NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
+ dest="new_cluster_domain_secret",
+ default=False, action="store_true",
+ help=("Create a new cluster domain"
+ " secret"))
+
+USE_REPL_NET_OPT = cli_option("--use-replication-network",
+ dest="use_replication_network",
+ help="Whether to use the replication network"
+ " for talking to the nodes",
+ action="store_true", default=False)
+
+MAINTAIN_NODE_HEALTH_OPT = \
+ cli_option("--maintain-node-health", dest="maintain_node_health",
+ metavar=_YORNO, default=None, type="bool",
+ help="Configure the cluster to automatically maintain node"
+ " health, by shutting down unknown instances, shutting down"
+ " unknown DRBD devices, etc.")
+
+IDENTIFY_DEFAULTS_OPT = \
+ cli_option("--identify-defaults", dest="identify_defaults",
+ default=False, action="store_true",
+ help="Identify which saved instance parameters are equal to"
+ " the current cluster defaults and set them as such, instead"
+ " of marking them as overridden")
+
+UIDPOOL_OPT = cli_option("--uid-pool", default=None,
+ action="store", dest="uid_pool",
+ help=("A list of user-ids or user-id"
+ " ranges separated by commas"))
+
+ADD_UIDS_OPT = cli_option("--add-uids", default=None,
+ action="store", dest="add_uids",
+ help=("A list of user-ids or user-id"
+ " ranges separated by commas, to be"
+ " added to the user-id pool"))
+
+REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
+ action="store", dest="remove_uids",
+ help=("A list of user-ids or user-id"
+ " ranges separated by commas, to be"
+ " removed from the user-id pool"))
+
def _ParseArgs(argv, commands, aliases):
"""Parser for the command line arguments.
obuf.write("Parameter Error: %s" % msg)
elif isinstance(err, errors.ParameterError):
obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
- elif isinstance(err, errors.GenericError):
- obuf.write("Unhandled Ganeti error: %s" % msg)
elif isinstance(err, luxi.NoMasterError):
obuf.write("Cannot communicate with the master daemon.\nIs it running"
" and listening for connections?")
elif isinstance(err, luxi.ProtocolError):
obuf.write("Unhandled protocol error while talking to the master daemon:\n"
"%s" % msg)
+ elif isinstance(err, errors.GenericError):
+ obuf.write("Unhandled Ganeti error: %s" % msg)
elif isinstance(err, JobSubmittedException):
obuf.write("JobID: %s\n" % err.args[0])
retcode = 0
elif opts.no_nics:
# no nics
nics = []
- else:
+ elif mode == constants.INSTANCE_CREATE:
# default of one nic, all auto
nics = [{}]
+ else:
+ # mode == import
+ nics = []
if opts.disk_template == constants.DT_DISKLESS:
if opts.disks or opts.sd_size is not None:
" information passed")
disks = []
else:
- if not opts.disks and not opts.sd_size:
+ if (not opts.disks and not opts.sd_size
+ and mode == constants.INSTANCE_CREATE):
raise errors.OpPrereqError("No disk information specified")
if opts.disks and opts.sd_size is not None:
raise errors.OpPrereqError("Please use either the '--disk' or"
" '-s' option")
if opts.sd_size is not None:
opts.disks = [(0, {"size": opts.sd_size})]
- try:
- disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
- except ValueError, err:
- raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
- disks = [{}] * disk_max
+
+ if opts.disks:
+ try:
+ disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
+ except ValueError, err:
+ raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
+ disks = [{}] * disk_max
+ else:
+ disks = []
for didx, ddict in opts.disks:
didx = int(didx)
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg)
- elif "size" not in ddict:
- raise errors.OpPrereqError("Missing size for disk %d" % didx)
- try:
- ddict["size"] = utils.ParseUnit(ddict["size"])
- except ValueError, err:
- raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
- (didx, err))
+ elif "size" in ddict:
+ if "adopt" in ddict:
+ raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
+ " (disk %d)" % didx)
+ try:
+ ddict["size"] = utils.ParseUnit(ddict["size"])
+ except ValueError, err:
+ raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+ (didx, err))
+ elif "adopt" in ddict:
+ if mode == constants.INSTANCE_IMPORT:
+ raise errors.OpPrereqError("Disk adoption not allowed for instance"
+ " import")
+ ddict["size"] = 0
+ else:
+ raise errors.OpPrereqError("Missing size or adoption source for"
+ " disk %d" % didx)
disks[didx] = ddict
utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
os_type = opts.os
src_node = None
src_path = None
+ no_install = opts.no_install
+ identify_defaults = False
elif mode == constants.INSTANCE_IMPORT:
start = False
os_type = None
src_node = opts.src_node
src_path = opts.src_dir
+ no_install = None
+ identify_defaults = opts.identify_defaults
else:
raise errors.ProgrammerError("Invalid creation mode %s" % mode)
start=start,
os_type=os_type,
src_node=src_node,
- src_path=src_path)
+ src_path=src_path,
+ no_install=no_install,
+ identify_defaults=identify_defaults)
SubmitOrSend(op, opts)
return 0
# All daemons are shut down now
try:
return fn(self, *args)
- except Exception:
+ except Exception, err:
+ _, errmsg = FormatError(err)
logging.exception("Caught exception")
+ self.feedback_fn(errmsg)
raise
finally:
# Start cluster again, master node last
return value
-def GetOnlineNodes(nodes, cl=None, nowarn=False):
+def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
+ filter_master=False):
"""Returns the names of online nodes.
This function will also log a warning on stderr with the names of
@param nowarn: by default, this function will output a note with the
offline nodes that are skipped; if this parameter is True the
note is not displayed
+ @type secondary_ips: boolean
+ @param secondary_ips: if True, return the secondary IPs instead of the
+ names, useful for doing network traffic over the replication interface
+ (if any)
+ @type filter_master: boolean
+ @param filter_master: if True, do not return the master node in the list
+ (useful in coordination with secondary_ips where we cannot check our
+ node name against the list)
"""
if cl is None:
cl = GetClient()
- result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
+ if secondary_ips:
+ name_idx = 2
+ else:
+ name_idx = 0
+
+ if filter_master:
+ master_node = cl.QueryConfigValues(["master_node"])[0]
+ filter_fn = lambda x: x != master_node
+ else:
+ filter_fn = lambda _: True
+
+ result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
use_locking=False)
offline = [row[0] for row in result if row[1]]
if offline and not nowarn:
ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
- return [row[0] for row in result if not row[1]]
+ return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
def _ToStream(stream, txt, *args):
ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
# first, remove any non-submitted jobs
- self.jobs, failures = utils.partition(self.jobs, lambda x: x[1])
+ self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
for idx, _, jid, name in failures:
ToStderr("Failed to submit job for %s: %s", name, jid)
results.append((idx, False, jid))
else:
if not self.jobs:
self.SubmitPending()
- for status, result, name in self.jobs:
+ for _, status, result, name in self.jobs:
if status:
ToStdout("%s: %s", result, name)
else: