4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
import sys

from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
164 "SECONDARY_ONLY_OPT",
168 "SHUTDOWN_TIMEOUT_OPT",
170 "SPECS_CPU_COUNT_OPT",
171 "SPECS_DISK_COUNT_OPT",
172 "SPECS_DISK_SIZE_OPT",
173 "SPECS_MEM_SIZE_OPT",
174 "SPECS_NIC_COUNT_OPT",
175 "IPOLICY_DISK_TEMPLATES",
176 "IPOLICY_VCPU_RATIO",
182 "STARTUP_PAUSED_OPT",
191 "USE_EXTERNAL_MIP_SCRIPT",
198 "IGNORE_IPOLICY_OPT",
199 "INSTANCE_POLICY_OPTS",
200 # Generic functions for CLI programs
203 "GenericInstanceCreate",
209 "JobSubmittedException",
211 "RunWhileClusterStopped",
215 # Formatting functions
216 "ToStderr", "ToStdout",
219 "FormatParameterDict",
228 # command line options support infrastructure
229 "ARGS_MANY_INSTANCES",
248 "OPT_COMPL_INST_ADD_NODES",
249 "OPT_COMPL_MANY_NODES",
250 "OPT_COMPL_ONE_IALLOCATOR",
251 "OPT_COMPL_ONE_INSTANCE",
252 "OPT_COMPL_ONE_NODE",
253 "OPT_COMPL_ONE_NODEGROUP",
259 "COMMON_CREATE_OPTS",
265 #: Priorities (sorted)
267 ("low", constants.OP_PRIO_LOW),
268 ("normal", constants.OP_PRIO_NORMAL),
269 ("high", constants.OP_PRIO_HIGH),
272 #: Priority dictionary for easier lookup
273 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
274 # we migrate to Python 2.6
275 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
277 # Query result status for clients
280 QR_INCOMPLETE) = range(3)
282 #: Maximum batch size for ChooseJob
287 def __init__(self, min=0, max=None): # pylint: disable=W0622
292 return ("<%s min=%s max=%s>" %
293 (self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    """Initializes this class.

    @param min: minimum number of occurrences of the argument
    @param max: maximum number of occurrences (C{None} for unlimited)
    @param choices: list of values suggested for shell completion

    """
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument.

  """
333 class ArgNode(_Argument):
class ArgGroup(_Argument):
  """Node group argument.

  """
345 class ArgJobId(_Argument):
class ArgFile(_Argument):
  """File path argument.

  """
357 class ArgCommand(_Argument):
363 class ArgHost(_Argument):
369 class ArgOs(_Argument):
376 ARGS_MANY_INSTANCES = [ArgInstance()]
377 ARGS_MANY_NODES = [ArgNode()]
378 ARGS_MANY_GROUPS = [ArgGroup()]
379 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
380 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
382 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
383 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type}
      attribute denoting which object type the tags belong to
  @param args: remaining positional arguments; for node group, node and
      instance tags the first element (the object name) is popped off
  @return: a C{(kind, name)} tuple suitable for the tag opcodes
  @raise errors.ProgrammerError: if C{tag_type} is missing or unknown
  @raise errors.OpPrereqError: if an object name is required but missing

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, so no object name is needed
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: parsed command line options, read for C{tags_source}
  @param args: list of tags, extended in place

  """
  fname = opts.tags_source
  if fname is None:
    # no tag source given, nothing to do
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # always release the file handle, even on read errors
    new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  # sort for stable, readable output
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
490 def check_unit(option, opt, value): # pylint: disable=W0613
491 """OptParsers custom converter for units.
495 return utils.ParseUnit(value)
496 except errors.UnitParseError, err:
497 raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        # elements without '=' encode booleans/removal via their prefix
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  @param option: the option instance (unused, required by optparse)
  @param opt: the option name, used when building error messages
  @param value: string of the form "ident[:key=val,...]"
  @return: tuple of (ident, value), where value is False/None for
      the 'no_'/'-' prefixed forms and a key/value dict otherwise
  @raise errors.ParameterError: if options are passed together with a
      removal prefix, or on invalid key=val data

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the option instance (unused, required by optparse)
  @param opt: the option name, used when building error messages
  @param value: string of the form "key=val,key=val,..."
  @return: dict of parsed key/value pairs, see L{_SplitKeyVal}

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @param option: the option instance (unused, required by optparse)
  @param opt: the option name (unused)
  @param value: the string to parse, case-insensitive
  @return: True or False
  @raise errors.ParameterError: if the value is not a recognized boolean

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  @param option: the option instance (unused, required by optparse)
  @param opt: the option name (unused)
  @param value: the string to split on commas
  @return: list of unescaped elements; empty list for an empty string

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
601 # completion_suggestion is normally a list. Using numeric values not evaluating
602 # to False for dynamic completion.
603 (OPT_COMPL_MANY_NODES,
605 OPT_COMPL_ONE_INSTANCE,
607 OPT_COMPL_ONE_IALLOCATOR,
608 OPT_COMPL_INST_ADD_NODES,
609 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
611 OPT_COMPL_ALL = frozenset([
612 OPT_COMPL_MANY_NODES,
614 OPT_COMPL_ONE_INSTANCE,
616 OPT_COMPL_ONE_IALLOCATOR,
617 OPT_COMPL_INST_ADD_NODES,
618 OPT_COMPL_ONE_NODEGROUP,
class CliOption(Option):
  """Custom option class for optparse.

  Extends optparse's C{Option} with a C{completion_suggest} attribute
  (used for shell completion) and with the custom value types whose
  converters are defined above.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # custom option value types; each entry needs a matching converter in
  # TYPE_CHECKER below
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
644 # optparse.py sets make_option, so we do it for our own option class, too
645 cli_option = CliOption
650 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
651 help="Increase debugging level")
653 NOHDR_OPT = cli_option("--no-headers", default=False,
654 action="store_true", dest="no_headers",
655 help="Don't display column headers")
657 SEP_OPT = cli_option("--separator", default=None,
658 action="store", dest="separator",
659 help=("Separator between output fields"
660 " (defaults to one space)"))
662 USEUNITS_OPT = cli_option("--units", default=None,
663 dest="units", choices=("h", "m", "g", "t"),
664 help="Specify units for output (one of h/m/g/t)")
666 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
667 type="string", metavar="FIELDS",
668 help="Comma separated list of output fields")
670 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
671 default=False, help="Force the operation")
673 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
674 default=False, help="Do not require confirmation")
676 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
677 action="store_true", default=False,
678 help=("Ignore offline nodes and do as much"
681 TAG_ADD_OPT = cli_option("--tags", dest="tags",
682 default=None, help="Comma-separated list of instance"
685 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
686 default=None, help="File with tag names")
688 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
689 default=False, action="store_true",
690 help=("Submit the job and return the job ID, but"
691 " don't wait for the job to finish"))
693 SYNC_OPT = cli_option("--sync", dest="do_locking",
694 default=False, action="store_true",
695 help=("Grab locks while doing the queries"
696 " in order to ensure more consistent results"))
# NOTE(review): fixed the duplicated word in the help text ("it it" -> "if it")
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
704 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
706 help="Increase the verbosity of the operation")
708 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
709 action="store_true", dest="simulate_errors",
710 help="Debugging option that makes the operation"
711 " treat most runtime checks as failed")
713 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
714 default=True, action="store_false",
715 help="Don't wait for sync (DANGEROUS!)")
717 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
718 action="store_true", default=False,
719 help="Enable offline instance")
721 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
722 action="store_true", default=False,
723 help="Disable down instance")
725 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
726 help=("Custom disk setup (%s)" %
727 utils.CommaJoin(constants.DISK_TEMPLATES)),
728 default=None, metavar="TEMPL",
729 choices=list(constants.DISK_TEMPLATES))
731 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
732 help="Do not create any network cards for"
735 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
736 help="Relative path under default cluster-wide"
737 " file storage dir to store file-based disks",
738 default=None, metavar="<DIR>")
740 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
741 help="Driver to use for image files",
742 default="loop", metavar="<DRIVER>",
743 choices=list(constants.FILE_DRIVER))
745 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
746 help="Select nodes for the instance automatically"
747 " using the <NAME> iallocator plugin",
748 default=None, type="string",
749 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
751 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
753 help="Set the default instance allocator plugin",
754 default=None, type="string",
755 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
757 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
759 completion_suggest=OPT_COMPL_ONE_OS)
761 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
762 type="keyval", default={},
763 help="OS parameters")
765 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
766 action="store_true", default=False,
767 help="Force an unknown variant")
769 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
770 action="store_true", default=False,
771 help="Do not install the OS (will"
774 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
775 type="keyval", default={},
776 help="Backend parameters")
778 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
779 default={}, dest="hvparams",
780 help="Hypervisor parameters")
782 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
783 help="Disk template parameters, in the format"
784 " template:option=value,option=value,...",
785 type="identkeyval", action="append", default=[])
787 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
788 type="keyval", default={},
789 help="Memory count specs: min, max, std"
792 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
793 type="keyval", default={},
794 help="CPU count specs: min, max, std")
796 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
797 dest="ispecs_disk_count",
798 type="keyval", default={},
799 help="Disk count specs: min, max, std")
801 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
802 type="keyval", default={},
803 help="Disk size specs: min, max, std (in MB)")
805 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
806 type="keyval", default={},
807 help="NIC count specs: min, max, std")
809 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
810 dest="ipolicy_disk_templates",
811 type="list", default=None,
812 help="Comma-separated list of"
813 " enabled disk templates")
815 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
816 dest="ipolicy_vcpu_ratio",
817 type="float", default=None,
818 help="The maximum allowed vcpu-to-cpu ratio")
820 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
821 help="Hypervisor and hypervisor options, in the"
822 " format hypervisor:option=value,option=value,...",
823 default=None, type="identkeyval")
825 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
826 help="Hypervisor and hypervisor options, in the"
827 " format hypervisor:option=value,option=value,...",
828 default=[], action="append", type="identkeyval")
830 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
831 action="store_false",
832 help="Don't check that the instance's IP"
835 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
836 default=True, action="store_false",
837 help="Don't check that the instance's name"
840 NET_OPT = cli_option("--net",
841 help="NIC parameters", default=[],
842 dest="nics", action="append", type="identkeyval")
844 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
845 dest="disks", action="append", type="identkeyval")
847 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
848 help="Comma-separated list of disks"
849 " indices to act on (e.g. 0,2) (optional,"
850 " defaults to all disks)")
852 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
853 help="Enforces a single-disk configuration using the"
854 " given disk size, in MiB unless a suffix is used",
855 default=None, type="unit", metavar="<size>")
857 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
858 dest="ignore_consistency",
859 action="store_true", default=False,
860 help="Ignore the consistency of the disks on"
863 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
864 dest="allow_failover",
865 action="store_true", default=False,
866 help="If migration is not possible fallback to"
869 NONLIVE_OPT = cli_option("--non-live", dest="live",
870 default=True, action="store_false",
871 help="Do a non-live migration (this usually means"
872 " freeze the instance, save the state, transfer and"
873 " only then resume running on the secondary node)")
# NOTE(review): closed the unbalanced parenthesis in the help text
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
881 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
882 help="Target node and optional secondary node",
883 metavar="<pnode>[:<snode>]",
884 completion_suggest=OPT_COMPL_INST_ADD_NODES)
886 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
887 action="append", metavar="<node>",
888 help="Use only this node (can be used multiple"
889 " times, if not given defaults to all nodes)",
890 completion_suggest=OPT_COMPL_ONE_NODE)
892 NODEGROUP_OPT_NAME = "--node-group"
893 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
895 help="Node group (name or uuid)",
896 metavar="<nodegroup>",
897 default=None, type="string",
898 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
900 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
902 completion_suggest=OPT_COMPL_ONE_NODE)
904 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
905 action="store_false",
906 help="Don't start the instance after creation")
908 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
909 action="store_true", default=False,
910 help="Show command instead of executing it")
912 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
913 default=False, action="store_true",
914 help="Instead of performing the migration, try to"
915 " recover from a failed cleanup. This is safe"
916 " to run even if the instance is healthy, but it"
917 " will create extra replication traffic and "
918 " disrupt briefly the replication (like during the"
921 STATIC_OPT = cli_option("-s", "--static", dest="static",
922 action="store_true", default=False,
923 help="Only show configuration data, not runtime data")
925 ALL_OPT = cli_option("--all", dest="show_all",
926 default=False, action="store_true",
927 help="Show info on all instances on the cluster."
928 " This can take a long time to run, use wisely")
930 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
931 action="store_true", default=False,
932 help="Interactive OS reinstall, lists available"
933 " OS templates for selection")
935 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
936 action="store_true", default=False,
937 help="Remove the instance from the cluster"
938 " configuration even if there are failures"
939 " during the removal process")
941 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
942 dest="ignore_remove_failures",
943 action="store_true", default=False,
944 help="Remove the instance from the"
945 " cluster configuration even if there"
946 " are failures during the removal"
949 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
950 action="store_true", default=False,
951 help="Remove the instance from the cluster")
953 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
954 help="Specifies the new node for the instance",
955 metavar="NODE", default=None,
956 completion_suggest=OPT_COMPL_ONE_NODE)
958 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
959 help="Specifies the new secondary node",
960 metavar="NODE", default=None,
961 completion_suggest=OPT_COMPL_ONE_NODE)
963 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
964 default=False, action="store_true",
965 help="Replace the disk(s) on the primary"
966 " node (applies only to internally mirrored"
967 " disk templates, e.g. %s)" %
968 utils.CommaJoin(constants.DTS_INT_MIRROR))
970 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
971 default=False, action="store_true",
972 help="Replace the disk(s) on the secondary"
973 " node (applies only to internally mirrored"
974 " disk templates, e.g. %s)" %
975 utils.CommaJoin(constants.DTS_INT_MIRROR))
977 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
978 default=False, action="store_true",
979 help="Lock all nodes and auto-promote as needed"
982 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
983 default=False, action="store_true",
984 help="Automatically replace faulty disks"
985 " (applies only to internally mirrored"
986 " disk templates, e.g. %s)" %
987 utils.CommaJoin(constants.DTS_INT_MIRROR))
989 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
990 default=False, action="store_true",
991 help="Ignore current recorded size"
992 " (useful for forcing activation when"
993 " the recorded size is wrong)")
995 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
997 completion_suggest=OPT_COMPL_ONE_NODE)
999 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1002 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1003 help="Specify the secondary ip for the node",
1004 metavar="ADDRESS", default=None)
1006 READD_OPT = cli_option("--readd", dest="readd",
1007 default=False, action="store_true",
1008 help="Readd old node after replacing it")
1010 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1011 default=True, action="store_false",
1012 help="Disable SSH key fingerprint checking")
1014 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1015 default=False, action="store_true",
1016 help="Force the joining of a node")
1018 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1019 type="bool", default=None, metavar=_YORNO,
1020 help="Set the master_candidate flag on the node")
1022 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1023 type="bool", default=None,
1024 help=("Set the offline flag on the node"
1025 " (cluster does not communicate with offline"
1028 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1029 type="bool", default=None,
1030 help=("Set the drained flag on the node"
1031 " (excluded from allocation operations)"))
1033 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1034 type="bool", default=None, metavar=_YORNO,
1035 help="Set the master_capable flag on the node")
1037 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1038 type="bool", default=None, metavar=_YORNO,
1039 help="Set the vm_capable flag on the node")
1041 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1042 type="bool", default=None, metavar=_YORNO,
1043 help="Set the allocatable flag on a volume")
1045 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1046 help="Disable support for lvm based instances"
1048 action="store_false", default=True)
1050 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1051 dest="enabled_hypervisors",
1052 help="Comma-separated list of hypervisors",
1053 type="string", default=None)
1055 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1056 type="keyval", default={},
1057 help="NIC parameters")
1059 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1060 dest="candidate_pool_size", type="int",
1061 help="Set the candidate pool size")
1063 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1064 help=("Enables LVM and specifies the volume group"
1065 " name (cluster-wide) for disk allocation"
1066 " [%s]" % constants.DEFAULT_VG),
1067 metavar="VG", default=None)
1069 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1070 help="Destroy cluster", action="store_true")
1072 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1073 help="Skip node agreement check (dangerous)",
1074 action="store_true", default=False)
1076 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1077 help="Specify the mac prefix for the instance IP"
1078 " addresses, in the format XX:XX:XX",
1082 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1083 help="Specify the node interface (cluster-wide)"
1084 " on which the master IP address will be added"
1085 " (cluster init default: %s)" %
1086 constants.DEFAULT_BRIDGE,
1090 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1091 help="Specify the netmask of the master IP",
1095 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1096 dest="use_external_mip_script",
1097 help="Specify whether to run a user-provided"
1098 " script for the master IP address turnup and"
1099 " turndown operations",
1100 type="bool", metavar=_YORNO, default=None)
1102 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1103 help="Specify the default directory (cluster-"
1104 "wide) for storing the file-based disks [%s]" %
1105 constants.DEFAULT_FILE_STORAGE_DIR,
1107 default=constants.DEFAULT_FILE_STORAGE_DIR)
1109 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1110 dest="shared_file_storage_dir",
1111 help="Specify the default directory (cluster-"
1112 "wide) for storing the shared file-based"
1114 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1115 metavar="SHAREDDIR",
1116 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1118 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1119 help="Don't modify /etc/hosts",
1120 action="store_false", default=True)
1122 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1123 help="Don't initialize SSH keys",
1124 action="store_false", default=True)
1126 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1127 help="Enable parseable error messages",
1128 action="store_true", default=False)
1130 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1131 help="Skip N+1 memory redundancy tests",
1132 action="store_true", default=False)
1134 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1135 help="Type of reboot: soft/hard/full",
1136 default=constants.INSTANCE_REBOOT_HARD,
1138 choices=list(constants.REBOOT_TYPES))
1140 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1141 dest="ignore_secondaries",
1142 default=False, action="store_true",
1143 help="Ignore errors from secondaries")
1145 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1146 action="store_false", default=True,
1147 help="Don't shutdown the instance (unsafe)")
1149 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1150 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1151 help="Maximum time to wait")
1153 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1154 dest="shutdown_timeout", type="int",
1155 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1156 help="Maximum time to wait for instance shutdown")
1158 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1160 help=("Number of seconds between repetions of the"
1163 EARLY_RELEASE_OPT = cli_option("--early-release",
1164 dest="early_release", default=False,
1165 action="store_true",
1166 help="Release the locks on the secondary"
1169 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1170 dest="new_cluster_cert",
1171 default=False, action="store_true",
1172 help="Generate a new cluster certificate")
1174 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1176 help="File containing new RAPI certificate")
1178 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1179 default=None, action="store_true",
1180 help=("Generate a new self-signed RAPI"
1183 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1185 help="File containing new SPICE certificate")
1187 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1189 help="File containing the certificate of the CA"
1190 " which signed the SPICE certificate")
1192 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1193 dest="new_spice_cert", default=None,
1194 action="store_true",
1195 help=("Generate a new self-signed SPICE"
1198 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1199 dest="new_confd_hmac_key",
1200 default=False, action="store_true",
1201 help=("Create a new HMAC key for %s" %
# NOTE(review): removed the duplicated word in the help text ("new new")
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

# type="bool" with metavar=_YORNO: tri-state option (None = keep current).
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management (comma-separated uid or uid-range lists).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Note the inverted sense: the *flag* is --no-drbd-storage but the dest
# "drbd_storage" stores False when the flag is given.
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"

NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

# NOTE(review): default=[] on an optparse Option is shared between parses;
# harmless for one-shot CLI invocations, but worth knowing (here and for
# DISK_STATE_OPT / HV_STATE_OPT below).
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) on success, or (None, None, None)
      when parsing failed or only usage/version output was requested

  """
  # env_override entries must correspond to real commands
  assert not (env_override - set(commands))

  binary = "<command>"
  binary = argv[0].split("/")[-1]

  # "--version" short-circuits everything else
  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)

    return None, None, None

  # get command, unalias it, and look it up in commands
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))

  # allow e.g. GNT_CLUSTER_VERIFY to inject default arguments
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  # presumably returns False on validation failure (caller treats a falsy
  # result as a parse error) — elided lines hide the explicit returns
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max

      check_max = (arg.max is not None)

    elif arg.max is None:
      # only the final argument may be open-ended
      raise errors.ProgrammerError("Only the last argument can have max=None")

  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)

  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Split a --node option value into its two node components.

  @param value: the option value, either "primary:secondary", a plain
      node name, or an empty/None value
  @return: a two-element list [primary, secondary] when a colon is
      present, otherwise the tuple (value, None)

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # no variants: the bare name is the only valid one
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the default fields when nothing was selected, the defaults
      extended by the user's fields when the selection starts with "+",
      or just the user's comma-separated fields otherwise

  """
  # nothing selected: fall back to defaults
  if selected is None:
    return default
  # leading "+" means "defaults plus these extra fields"
  if selected.startswith("+"):
    return default + selected[1:].split(",")
  return selected.split(",")
1619 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last

  """
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  # each choice must be (char, value, description) and may not use '?'
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer is the last choice's value (used when no tty)
  answer = choices[-1][1]
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
    # talk to the controlling terminal directly, not stdin/stdout
    f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      line = f.readline(2).strip().lower()
      # '?' (help) prints the description of every choice
      for entry in choices:
        f.write(" %s - %s\n" % (entry[0], entry[2]))
class JobSubmittedException(Exception):
  """Raised to signal that a job was submitted and the client should exit.

  Carries a single argument: the ID of the submitted job, which the
  handler is expected to print. This is a structured, non-error way for
  clients to terminate after a --submit style invocation.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # all opcodes are submitted as a single job; the caller polls separately
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

    # block until the job's status or log changes
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

      # forward any new log entries, remembering the highest serial seen
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):

    prev_job_info = job_info

  # job reached a final state; fetch the per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # job failed: find the first failed opcode and raise a useful error
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)
        raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  """
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  # Data callbacks that delegate directly to a luxi client (self.cl,
  # assigned in __init__ — assignment line elided in this view).
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that forward log messages to a caller-supplied
  # feedback function; "not changed" notifications are ignored.
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  # Reporting callbacks that print to stdout/stderr; remembers whether the
  # queued/waiting notifications were already shown so each is printed once.
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-ELOG_MESSAGE payloads are stringified first; the result is always
  passed through L{utils.SafeEncode} for safe terminal output.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  # build a reporter from feedback_fn (or stdio) unless one was given;
  # passing both a reporter and feedback_fn is a programming error
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  # apply generic options (debug, dry-run, priority) before submitting
  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,

  # single opcode submitted, so the result list has exactly one entry
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # not an error: signals the caller to print the job ID and exit
    raise JobSubmittedException(job_id)

  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # only propagate an explicitly-set priority
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # Body of the luxi client factory (the enclosing def header is not
  # visible in this view). Falls back to ssconf-based diagnostics when no
  # master daemon is reachable, to give a more helpful error message.
  # TODO: Cache object?
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    # connected-to node is not the master: tell the user where to go
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # dispatch on exception type, from most specific to most generic;
  # output is accumulated in obuf and returned as a single string
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # differentiate "can't resolve myself" from a generic resolution error
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not a real error: report the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  binary = os.path.basename(sys.argv[0])
  binary = sys.argv[0]

  if len(sys.argv) >= 2:
    logname = utils.ShellQuoteArgs([binary, sys.argv[1]])

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])

    binary = "<unknown program>"
    cmdline = "<unknown>"

    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)

  if func is None: # parse error

  # script-supplied overrides win over command line options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  @param optvalue: list of (index, params-dict) pairs as collected from
      the command line

  """
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  # NOTE(review): [{}] * n makes every slot alias ONE dict; safe here only
  # because slots are replaced wholesale (never mutated) — confirm the
  # elided assignment does `nics[nidx] = ndict`
  nics = [{}] * nic_max
  for nidx, ndict in optvalue:

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  """
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # disk validation: diskless templates must not get disk information,
  # and --disk / -s (sd_size) are mutually exclusive
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      # translate the legacy single-disk -s option into the --disk format
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max

    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        # adopted disks get their real size from the adopted volume
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific parameters for the opcode
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # all online nodes except the master (started/stopped separately)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

      # build a descriptive error out of the command, node and exit status
      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    # TODO: Currently, this just blocks. There's no timeout.
    # TODO: Should it be a shared lock?
    watcher_block.Exclusive(blocking=True)

    # Stop master daemons, so that no new jobs can come in and all running
    self.feedback_fn("Stopping master daemons")
    self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])

    # Stop daemons on all nodes
    for node_name in self.online_nodes:
      self.feedback_fn("Stopping daemons on %s" % node_name)
      self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

    # All daemons are shut down now
      return fn(self, *args)
    except Exception, err:
      _, errmsg = FormatError(err)
      logging.exception("Caught exception")
      self.feedback_fn(errmsg)

      # Start cluster again, master node last
      for node_name in self.nonmaster_nodes + [self.master_node]:
        self.feedback_fn("Starting daemons on %s" % node_name)
        self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])

      # release the watcher again
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # the last field doesn't need padding
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2595 def _FormatBool(value):
2596 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
# NOTE: QFT_UNIT is deliberately absent; it needs the runtime unit setting
# and is handled dynamically in _GetColumnFormatter
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report result status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: Whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if not names:
    names = None

  qfilter = qlang.MakeFilter(names, force_filter)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right
2908 def _GetColFormatString(width, align_right):
2909 """Returns the format string for a field.
2917 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # malformed input; return a placeholder instead of raising
    return "?"

  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3105 def _ToStream(stream, txt, *args):
3106 """Write a message to a stream, bypassing the logging system
3108 @type stream: file object
3109 @param stream: the file to which we should write
3111 @param txt: the message
3117 stream.write(txt % args)
3122 except IOError, err:
3123 if err.errno == errno.EPIPE:
3124 # our terminal went away, we'll exit
3125 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # pending (not yet submitted) jobs as (counter, name, ops) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # submitted jobs as (counter, submit status, job id/error, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing counter to preserve submission order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found, just poll the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = " " * level
  for key in sorted(actual):
    # keys missing from param_dict fall back to the (defaulted) actual value
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3324 def ConfirmOperation(names, list_type, text, extra=""):
3325 """Ask the user to confirm an operation on a list of list_type.
3327 This function is used to request confirmation for doing an operation
3328 on a given list of list_type.
3331 @param names: the list of names that we display when
3332 we ask for confirmation
3333 @type list_type: str
3334 @param list_type: Human readable name for elements in the list (e.g. nodes)
3336 @param text: the operation that the user should confirm
3338 @return: True or False depending on user's confirmation.
3342 msg = ("The %s will operate on %d %s.\n%s"
3343 "Do you want to continue?" % (text, count, list_type, extra))
3344 affected = (("\nAffected %s:\n" % list_type) +
3345 "\n".join([" %s" % name for name in names]))
3347 choices = [("y", True, "Yes, execute the %s" % text),
3348 ("n", False, "No, abort the %s" % text)]
3351 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3354 question = msg + affected
3356 choice = AskUser(question, choices)
3359 choice = AskUser(msg + affected, choices)