X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/25a8792cb981df1979ce383248499ed207d203a4..5d97d6ddf838c40a52cc81eadd9513e5743ef4f5:/lib/cli.py

diff --git a/lib/cli.py b/lib/cli.py
index bd717d3..b7722e2 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -37,6 +37,7 @@ from ganeti import luxi
 from ganeti import ssconf
 from ganeti import rpc
 from ganeti import ssh
+from ganeti import compat
 
 from optparse import (OptionParser, TitledHelpFormatter, Option,
                       OptionValueError)
@@ -44,12 +45,14 @@ from optparse import (OptionParser, TitledHelpFormatter,
 
 __all__ = [
   # Command line options
+  "ADD_UIDS_OPT",
   "ALLOCATABLE_OPT",
   "ALL_OPT",
   "AUTO_PROMOTE_OPT",
   "AUTO_REPLACE_OPT",
   "BACKEND_OPT",
   "CLEANUP_OPT",
+  "CLUSTER_DOMAIN_SECRET_OPT",
   "CONFIRM_OPT",
   "CP_SIZE_OPT",
   "DEBUG_OPT",
@@ -71,15 +74,19 @@ __all__ = [
   "HVOPTS_OPT",
   "HYPERVISOR_OPT",
   "IALLOCATOR_OPT",
+  "IDENTIFY_DEFAULTS_OPT",
   "IGNORE_CONSIST_OPT",
   "IGNORE_FAILURES_OPT",
+  "IGNORE_REMOVE_FAILURES_OPT",
   "IGNORE_SECONDARIES_OPT",
   "IGNORE_SIZE_OPT",
   "MAC_PREFIX_OPT",
+  "MAINTAIN_NODE_HEALTH_OPT",
   "MASTER_NETDEV_OPT",
   "MC_OPT",
   "NET_OPT",
   "NEW_CLUSTER_CERT_OPT",
+  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
   "NEW_CONFD_HMAC_KEY_OPT",
   "NEW_RAPI_CERT_OPT",
   "NEW_SECONDARY_OPT",
@@ -109,6 +116,8 @@ __all__ = [
   "RAPI_CERT_OPT",
   "READD_OPT",
   "REBOOT_TYPE_OPT",
+  "REMOVE_INSTANCE_OPT",
+  "REMOVE_UIDS_OPT",
   "SECONDARY_IP_OPT",
   "SELECT_OS_OPT",
   "SEP_OPT",
@@ -122,7 +131,9 @@ __all__ = [
   "SYNC_OPT",
   "TAG_SRC_OPT",
   "TIMEOUT_OPT",
+  "UIDPOOL_OPT",
   "USEUNITS_OPT",
+  "USE_REPL_NET_OPT",
   "VERBOSE_OPT",
   "VG_NAME_OPT",
   "YES_DOIT_OPT",
@@ -456,6 +467,21 @@ def check_key_val(option, opt, value): # pylint: disable-msg=W0613
   return _SplitKeyVal(opt, value)
 
 
+def check_bool(option, opt, value): # pylint: disable-msg=W0613
+  """Custom parser for yes/no options.
+
+  This will store the parsed value as either True or False.
+
+  """
+  value = value.lower()
+  if value == constants.VALUE_FALSE or value == "no":
+    return False
+  elif value == constants.VALUE_TRUE or value == "yes":
+    return True
+  else:
+    raise errors.ParameterError("Invalid boolean value '%s'" % value)
+
+
 # completion_suggestion is normally a list. Using numeric values not evaluating
 # to False for dynamic completion.
 (OPT_COMPL_MANY_NODES,
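
The check_bool parser added above only takes effect once it is registered as an optparse option type, which the next hunk does through CliOption.TYPE_CHECKER. The sketch below is a minimal, self-contained illustration of that optparse pattern, not a copy of the Ganeti code: it uses literal "true"/"false" where Ganeti uses constants.VALUE_TRUE/constants.VALUE_FALSE, and it raises optparse's own OptionValueError instead of errors.ParameterError.

# Minimal sketch of a yes/no option type for optparse, mirroring the
# check_bool/TYPE_CHECKER pattern introduced in this diff.
from optparse import Option, OptionParser, OptionValueError


def check_bool(option, opt, value):
  # Accept the same spellings as the Ganeti parser: true/false and yes/no.
  value = value.lower()
  if value in ("false", "no"):
    return False
  elif value in ("true", "yes"):
    return True
  raise OptionValueError("option %s: invalid boolean value %r" % (opt, value))


class BoolOption(Option):
  # Register "bool" as an additional option type next to optparse's built-ins.
  TYPES = Option.TYPES + ("bool",)
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["bool"] = check_bool


parser = OptionParser(option_class=BoolOption)
parser.add_option("-O", "--offline", dest="offline", type="bool",
                  default=None, metavar="yes|no")
opts, args = parser.parse_args(["--offline", "yes"])
print(opts.offline)  # True
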
@@ -486,18 +512,19 @@ class CliOption(Option):
     "identkeyval",
     "keyval",
     "unit",
+    "bool",
     )
   TYPE_CHECKER = Option.TYPE_CHECKER.copy()
   TYPE_CHECKER["identkeyval"] = check_ident_key_val
   TYPE_CHECKER["keyval"] = check_key_val
   TYPE_CHECKER["unit"] = check_unit
+  TYPE_CHECKER["bool"] = check_bool
 
 
 # optparse.py sets make_option, so we do it for our own option class, too
 cli_option = CliOption
 
 
-_YESNO = ("yes", "no")
 _YORNO = "yes|no"
 
 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
@@ -706,6 +733,18 @@ IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                  " configuration even if there are failures"
                                  " during the removal process")
 
+IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
+                                        dest="ignore_remove_failures",
+                                        action="store_true", default=False,
+                                        help="Remove the instance from the"
+                                        " cluster configuration even if there"
+                                        " are failures during the removal"
+                                        " process")
+
+REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
+                                 action="store_true", default=False,
+                                 help="Remove the instance from the cluster")
+
 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                                help="Specifies the new secondary node",
                                metavar="NODE", default=None,
@@ -758,19 +797,19 @@ NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
 
 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
-                    choices=_YESNO, default=None, metavar=_YORNO,
+                    type="bool", default=None, metavar=_YORNO,
                     help="Set the master_candidate flag on the node")
 
 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
-                         choices=_YESNO, default=None,
+                         type="bool", default=None,
                          help="Set the offline flag on the node")
 
 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
-                         choices=_YESNO, default=None,
+                         type="bool", default=None,
                          help="Set the drained flag on the node")
 
 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
-                             choices=_YESNO, default=None, metavar=_YORNO,
+                             type="bool", default=None, metavar=_YORNO,
                              help="Set the allocatable flag on a volume")
 
 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
@@ -816,7 +855,6 @@ MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                                metavar="NETDEV",
                                default=constants.DEFAULT_BRIDGE)
 
-
 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                 help="Specify the default directory (cluster-"
                                 "wide) for storing the file-based disks [%s]" %
@@ -890,6 +928,55 @@ NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                     help=("Create a new HMAC key for %s" %
                                           constants.CONFD))
 
+CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
+                                       dest="cluster_domain_secret",
+                                       default=None,
+                                       help=("Load new cluster domain"
+                                             " secret from file"))
+
+NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
+                                           dest="new_cluster_domain_secret",
+                                           default=False, action="store_true",
+                                           help=("Create a new cluster domain"
+                                                 " secret"))
+
+USE_REPL_NET_OPT = cli_option("--use-replication-network",
+                              dest="use_replication_network",
+                              help="Whether to use the replication network"
+                              " for talking to the nodes",
+                              action="store_true", default=False)
+
+MAINTAIN_NODE_HEALTH_OPT = \
+    cli_option("--maintain-node-health", dest="maintain_node_health",
+               metavar=_YORNO, default=None, type="bool",
+               help="Configure the cluster to automatically maintain node"
+               " health, by shutting down unknown instances, shutting down"
+               " unknown DRBD devices, etc.")
+
+IDENTIFY_DEFAULTS_OPT = \
+    cli_option("--identify-defaults", dest="identify_defaults",
+               default=False, action="store_true",
+               help="Identify which saved instance parameters are equal to"
+               " the current cluster defaults and set them as such, instead"
+               " of marking them as overridden")
+
+UIDPOOL_OPT = cli_option("--uid-pool", default=None,
+                         action="store", dest="uid_pool",
+                         help=("A list of user-ids or user-id"
+                               " ranges separated by commas"))
+
+ADD_UIDS_OPT = cli_option("--add-uids", default=None,
+                          action="store", dest="add_uids",
+                          help=("A list of user-ids or user-id"
+                                " ranges separated by commas, to be"
+                                " added to the user-id pool"))
+
+REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
+                             action="store", dest="remove_uids",
+                             help=("A list of user-ids or user-id"
+                                   " ranges separated by commas, to be"
+                                   " removed from the user-id pool"))
+
 
 def _ParseArgs(argv, commands, aliases):
   """Parser for the command line arguments.
@@ -1391,8 +1478,6 @@ def FormatError(err):
     obuf.write("Parameter Error: %s" % msg)
   elif isinstance(err, errors.ParameterError):
     obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
-  elif isinstance(err, errors.GenericError):
-    obuf.write("Unhandled Ganeti error: %s" % msg)
   elif isinstance(err, luxi.NoMasterError):
     obuf.write("Cannot communicate with the master daemon.\nIs it running"
                " and listening for connections?")
@@ -1402,6 +1487,8 @@
   elif isinstance(err, luxi.ProtocolError):
     obuf.write("Unhandled protocol error while talking to the master daemon:\n"
                "%s" % msg)
+  elif isinstance(err, errors.GenericError):
+    obuf.write("Unhandled Ganeti error: %s" % msg)
   elif isinstance(err, JobSubmittedException):
     obuf.write("JobID: %s\n" % err.args[0])
     retcode = 0
@@ -1505,9 +1592,12 @@ def GenericInstanceCreate(mode, opts, args):
   elif opts.no_nics:
     # no nics
     nics = []
-  else:
+  elif mode == constants.INSTANCE_CREATE:
     # default of one nic, all auto
     nics = [{}]
+  else:
+    # mode == import
+    nics = []
 
   if opts.disk_template == constants.DT_DISKLESS:
     if opts.disks or opts.sd_size is not None:
       raise errors.OpPrereqError(" " information passed")
     disks = []
   else:
-    if not opts.disks and not opts.sd_size:
+    if (not opts.disks and not opts.sd_size
+        and mode == constants.INSTANCE_CREATE):
       raise errors.OpPrereqError("No disk information specified")
     if opts.disks and opts.sd_size is not None:
       raise errors.OpPrereqError("Please use either the '--disk' or"
                                  " '-s' option")
     if opts.sd_size is not None:
       opts.disks = [(0, {"size": opts.sd_size})]
-    try:
-      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
-    except ValueError, err:
-      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
-    disks = [{}] * disk_max
+
+    if opts.disks:
+      try:
+        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
+      except ValueError, err:
+        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
+      disks = [{}] * disk_max
+    else:
+      disks = []
 
   for didx, ddict in opts.disks:
     didx = int(didx)
     if not isinstance(ddict, dict):
@@ -1560,12 +1655,14 @@ def GenericInstanceCreate(mode, opts, args):
     src_node = None
     src_path = None
     no_install = opts.no_install
+    identify_defaults = False
   elif mode == constants.INSTANCE_IMPORT:
     start = False
     os_type = None
     src_node = opts.src_node
     src_path = opts.src_dir
     no_install = None
+    identify_defaults = opts.identify_defaults
   else:
     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
 
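
The two FormatError hunks above move the errors.GenericError branch below the luxi-specific checks. A plausible reason, not stated in the diff itself, is that a base-class isinstance() test placed too early shadows the more specific branches; the snippet below only illustrates that effect, with invented class names rather than Ganeti's real exception hierarchy.

# Illustration only: when exception classes share a base class, isinstance()
# dispatch must test the subclass before the base class, otherwise the
# generic branch wins for every subclass instance.
class BaseError(Exception):
  pass


class SpecificError(BaseError):
  pass


def describe(err):
  if isinstance(err, BaseError):        # matches SpecificError as well
    return "generic handler"
  elif isinstance(err, SpecificError):  # never reached in this order
    return "specific handler"


print(describe(SpecificError()))  # prints "generic handler"
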
@@ -1588,7 +1685,8 @@ def GenericInstanceCreate(mode, opts, args):
                                 os_type=os_type,
                                 src_node=src_node,
                                 src_path=src_path,
-                                no_install=no_install)
+                                no_install=no_install,
+                                identify_defaults=identify_defaults)
 
   SubmitOrSend(op, opts)
   return 0
@@ -1883,7 +1981,8 @@ def ParseTimespec(value):
   return value
 
 
-def GetOnlineNodes(nodes, cl=None, nowarn=False):
+def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
+                   filter_master=False):
   """Returns the names of online nodes.
 
   This function will also log a warning on stderr with the names of
@@ -1896,17 +1995,36 @@
   @param nowarn: by default, this function will output a note with the
       offline nodes that are skipped; if this parameter is True the note
       is not displayed
+  @type secondary_ips: boolean
+  @param secondary_ips: if True, return the secondary IPs instead of the
+      names, useful for doing network traffic over the replication interface
+      (if any)
+  @type filter_master: boolean
+  @param filter_master: if True, do not return the master node in the list
+      (useful in coordination with secondary_ips where we cannot check our
+      node name against the list)
 
   """
   if cl is None:
     cl = GetClient()
 
-  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
+  if secondary_ips:
+    name_idx = 2
+  else:
+    name_idx = 0
+
+  if filter_master:
+    master_node = cl.QueryConfigValues(["master_node"])[0]
+    filter_fn = lambda x: x != master_node
+  else:
+    filter_fn = lambda _: True
+
+  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                          use_locking=False)
   offline = [row[0] for row in result if row[1]]
   if offline and not nowarn:
     ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
-  return [row[0] for row in result if not row[1]]
+  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
 
 
 def _ToStream(stream, txt, *args):
@@ -2026,7 +2144,7 @@ class JobExecutor(object):
       ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
 
     # first, remove any non-submitted jobs
-    self.jobs, failures = utils.partition(self.jobs, lambda x: x[1])
+    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
     for idx, _, jid, name in failures:
       ToStderr("Failed to submit job for %s: %s", name, jid)
       results.append((idx, False, jid))
@@ -2063,7 +2181,7 @@ class JobExecutor(object):
     else:
       if not self.jobs:
         self.SubmitPending()
-      for status, result, name in self.jobs:
+      for _, status, result, name in self.jobs:
         if status:
           ToStdout("%s: %s", result, name)
         else:
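
The last two hunks switch JobExecutor from utils.partition to the new compat.partition and start carrying the submission index as the first element of each job tuple. compat.partition itself is not part of this diff; the sketch below only illustrates the behaviour the call sites above rely on, a predicate split that returns the matching items first, using made-up job tuples.

# Sketch of a partition helper consistent with the JobExecutor call sites in
# this diff: it splits a sequence into (items matching pred, remaining items).
def partition(seq, pred):
  matching = []
  rest = []
  for item in seq:
    if pred(item):
      matching.append(item)
    else:
      rest.append(item)
  return (matching, rest)


# Job entries shaped like the ones used above:
# (index, submit status, job id or error message, name).
jobs = [
  (0, True, "job-123", "instance1"),
  (1, False, "submit error", "instance2"),
  (2, True, "job-124", "instance3"),
  ]

jobs, failures = partition(jobs, lambda x: x[1])
for idx, _, jid, name in failures:
  print("Failed to submit job for %s: %s" % (name, jid))
for _, status, result, name in jobs:
  print("%s: %s" % (result, name))
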