4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
137 "NOSSH_KEYCHECK_OPT",
151 "PREALLOC_WIPE_DISKS_OPT",
152 "PRIMARY_IP_VERSION_OPT",
158 "REMOVE_INSTANCE_OPT",
164 "SECONDARY_ONLY_OPT",
168 "SHUTDOWN_TIMEOUT_OPT",
170 "SPECS_CPU_COUNT_OPT",
171 "SPECS_DISK_COUNT_OPT",
172 "SPECS_DISK_SIZE_OPT",
173 "SPECS_MEM_SIZE_OPT",
174 "SPECS_NIC_COUNT_OPT",
175 "SPECS_DISK_TEMPLATES",
181 "STARTUP_PAUSED_OPT",
190 "USE_EXTERNAL_MIP_SCRIPT",
197 "IGNORE_IPOLICY_OPT",
198 "INSTANCE_POLICY_OPTS",
199 # Generic functions for CLI programs
202 "GenericInstanceCreate",
208 "JobSubmittedException",
210 "RunWhileClusterStopped",
214 # Formatting functions
215 "ToStderr", "ToStdout",
218 "FormatParameterDict",
227 # command line options support infrastructure
228 "ARGS_MANY_INSTANCES",
247 "OPT_COMPL_INST_ADD_NODES",
248 "OPT_COMPL_MANY_NODES",
249 "OPT_COMPL_ONE_IALLOCATOR",
250 "OPT_COMPL_ONE_INSTANCE",
251 "OPT_COMPL_ONE_NODE",
252 "OPT_COMPL_ONE_NODEGROUP",
258 "COMMON_CREATE_OPTS",
264 #: Priorities (sorted)
266 ("low", constants.OP_PRIO_LOW),
267 ("normal", constants.OP_PRIO_NORMAL),
268 ("high", constants.OP_PRIO_HIGH),
271 #: Priority dictionary for easier lookup
272 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
273 # we migrate to Python 2.6
274 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
276 # Query result status for clients
279 QR_INCOMPLETE) = range(3)
281 #: Maximum batch size for ChooseJob
286 def __init__(self, min=0, max=None): # pylint: disable=W0622
291 return ("<%s min=%s max=%s>" %
292 (self.__class__.__name__, self.min, self.max))
295 class ArgSuggest(_Argument):
296 """Suggesting argument.
298 Value can be any of the ones passed to the constructor.
301 # pylint: disable=W0622
302 def __init__(self, min=0, max=None, choices=None):
303 _Argument.__init__(self, min=min, max=max)
304 self.choices = choices
307 return ("<%s min=%s max=%s choices=%r>" %
308 (self.__class__.__name__, self.min, self.max, self.choices))
311 class ArgChoice(ArgSuggest):
314 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
315 but value must be one of the choices.
320 class ArgUnknown(_Argument):
321 """Unknown argument to program (e.g. determined at runtime).
326 class ArgInstance(_Argument):
327 """Instances argument.
332 class ArgNode(_Argument):
338 class ArgGroup(_Argument):
339 """Node group argument.
344 class ArgJobId(_Argument):
350 class ArgFile(_Argument):
351 """File path argument.
356 class ArgCommand(_Argument):
362 class ArgHost(_Argument):
368 class ArgOs(_Argument):
375 ARGS_MANY_INSTANCES = [ArgInstance()]
376 ARGS_MANY_NODES = [ArgNode()]
377 ARGS_MANY_GROUPS = [ArgGroup()]
378 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
379 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
381 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
382 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
385 def _ExtractTagsObject(opts, args):
386 """Extract the tag type object.
388 Note that this function will modify its args parameter.
391 if not hasattr(opts, "tag_type"):
392 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
394 if kind == constants.TAG_CLUSTER:
396 elif kind in (constants.TAG_NODEGROUP,
398 constants.TAG_INSTANCE):
400 raise errors.OpPrereqError("no arguments passed to the command")
404 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
408 def _ExtendTags(opts, args):
409 """Extend the args if a source file has been given.
411 This function will extend the tags with the contents of the file
412 passed in the 'tags_source' attribute of the opts parameter. A file
413 named '-' will be replaced by stdin.
416 fname = opts.tags_source
422 new_fh = open(fname, "r")
425 # we don't use the nice 'new_data = [line.strip() for line in fh]'
426 # because of python bug 1633941
428 line = new_fh.readline()
431 new_data.append(line.strip())
434 args.extend(new_data)
437 def ListTags(opts, args):
438   """List the tags on a given object.
440   This is a generic implementation that knows how to deal with all
441   three cases of tag objects (cluster, node, instance). The opts
442   argument is expected to contain a tag_type field denoting what
443   object type we work on.
# Resolve (tag kind, object name) from opts.tag_type and the positional
# args; per _ExtractTagsObject's contract this may modify 'args' in place.
446   kind, name = _ExtractTagsObject(opts, args)
# NOTE(review): the client handle 'cl' is created on a line elided from this
# excerpt — presumably via GetClient(); confirm against the full file.
448   result = cl.QueryTags(kind, name)
# Materialize the query result (it may be a set/iterator); the lines that
# sort and print it follow in the elided portion below.
449   result = list(result)
455 def AddTags(opts, args):
456 """Add tags on a given object.
458 This is a generic implementation that knows how to deal with all
459 three cases of tag objects (cluster, node, instance). The opts
460 argument is expected to contain a tag_type field denoting what
461 object type we work on.
464 kind, name = _ExtractTagsObject(opts, args)
465 _ExtendTags(opts, args)
467 raise errors.OpPrereqError("No tags to be added")
468 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
469 SubmitOpCode(op, opts=opts)
472 def RemoveTags(opts, args):
473 """Remove tags from a given object.
475 This is a generic implementation that knows how to deal with all
476 three cases of tag objects (cluster, node, instance). The opts
477 argument is expected to contain a tag_type field denoting what
478 object type we work on.
481 kind, name = _ExtractTagsObject(opts, args)
482 _ExtendTags(opts, args)
484 raise errors.OpPrereqError("No tags to be removed")
485 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
486 SubmitOpCode(op, opts=opts)
489 def check_unit(option, opt, value): # pylint: disable=W0613
490 """OptParsers custom converter for units.
494 return utils.ParseUnit(value)
495 except errors.UnitParseError, err:
496 raise OptionValueError("option %s: %s" % (opt, err))
499 def _SplitKeyVal(opt, data):
500 """Convert a KeyVal string into a dict.
502 This function will convert a key=val[,...] string into a dict. Empty
503 values will be converted specially: keys which have the prefix 'no_'
504 will have the value=False and the prefix stripped, the others will
508 @param opt: a string holding the option name for which we process the
509 data, used in building error messages
511 @param data: a string of the format key=val,key=val,...
513 @return: {key=val, key=val}
514 @raises errors.ParameterError: if there are duplicate keys
# Split on unescaped commas so values may themselves contain escaped commas.
519 for elem in utils.UnescapeAndSplit(data, sep=","):
# Explicit key=value pair: keep the value as the (string) right-hand side.
521 key, val = elem.split("=", 1)
# Bare element: interpret the NO_/UN_ prefixes specially.
# 'no_key' means key=False ...
523 if elem.startswith(NO_PREFIX):
524 key, val = elem[len(NO_PREFIX):], False
# ... and '-key' (UN_PREFIX) means key=None (i.e. "unset this key").
525 elif elem.startswith(UN_PREFIX):
526 key, val = elem[len(UN_PREFIX):], None
# A plain bare key defaults to True.
528 key, val = elem, True
# Duplicate keys are an error, per the docstring contract above.
530 raise errors.ParameterError("Duplicate key '%s' in option %s" %
536 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
537 """Custom parser for ident:key=val,key=val options.
539 This will store the parsed values as a tuple (ident, {key: val}). As such,
540 multiple uses of this option via action=append is possible.
544 ident, rest = value, ""
546 ident, rest = value.split(":", 1)
548 if ident.startswith(NO_PREFIX):
550 msg = "Cannot pass options when removing parameter groups: %s" % value
551 raise errors.ParameterError(msg)
552 retval = (ident[len(NO_PREFIX):], False)
553 elif ident.startswith(UN_PREFIX):
555 msg = "Cannot pass options when removing parameter groups: %s" % value
556 raise errors.ParameterError(msg)
557 retval = (ident[len(UN_PREFIX):], None)
559 kv_dict = _SplitKeyVal(opt, rest)
560 retval = (ident, kv_dict)
564 def check_key_val(option, opt, value): # pylint: disable=W0613
565 """Custom parser class for key=val,key=val options.
567 This will store the parsed values as a dict {key: val}.
570 return _SplitKeyVal(opt, value)
573 def check_bool(option, opt, value): # pylint: disable=W0613
574 """Custom parser for yes/no options.
576 This will store the parsed value as either True or False.
579 value = value.lower()
580 if value == constants.VALUE_FALSE or value == "no":
582 elif value == constants.VALUE_TRUE or value == "yes":
585 raise errors.ParameterError("Invalid boolean value '%s'" % value)
588 def check_list(option, opt, value): # pylint: disable=W0613
589 """Custom parser for comma-separated lists.
592 # we have to make this explicit check since "".split(",") is [""],
593 # not an empty list :(
597 return utils.UnescapeAndSplit(value)
600 # completion_suggestion is normally a list. Using numeric values not evaluating
601 # to False for dynamic completion.
602 (OPT_COMPL_MANY_NODES,
604 OPT_COMPL_ONE_INSTANCE,
606 OPT_COMPL_ONE_IALLOCATOR,
607 OPT_COMPL_INST_ADD_NODES,
608 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
610 OPT_COMPL_ALL = frozenset([
611 OPT_COMPL_MANY_NODES,
613 OPT_COMPL_ONE_INSTANCE,
615 OPT_COMPL_ONE_IALLOCATOR,
616 OPT_COMPL_INST_ADD_NODES,
617 OPT_COMPL_ONE_NODEGROUP,
621 class CliOption(Option):
622 """Custom option class for optparse.
625 ATTRS = Option.ATTRS + [
626 "completion_suggest",
628 TYPES = Option.TYPES + (
635 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
636 TYPE_CHECKER["identkeyval"] = check_ident_key_val
637 TYPE_CHECKER["keyval"] = check_key_val
638 TYPE_CHECKER["unit"] = check_unit
639 TYPE_CHECKER["bool"] = check_bool
640 TYPE_CHECKER["list"] = check_list
643 # optparse.py sets make_option, so we do it for our own option class, too
644 cli_option = CliOption
# Debug verbosity: every repetition of -d/--debug increments opts.debug.
DEBUG_OPT = cli_option("-d", "--debug", action="count", default=0,
                       help="Increase debugging level")
# Suppress the column-header row in tabular listing output.
NOHDR_OPT = cli_option("--no-headers", dest="no_headers",
                       action="store_true", default=False,
                       help="Don't display column headers")
# Field separator for machine-readable output; None means one space.
SEP_OPT = cli_option("--separator", dest="separator", action="store",
                     default=None,
                     help=("Separator between output fields"
                           " (defaults to one space)"))
661 USEUNITS_OPT = cli_option("--units", default=None,
662 dest="units", choices=("h", "m", "g", "t"),
663 help="Specify units for output (one of h/m/g/t)")
665 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
666 type="string", metavar="FIELDS",
667 help="Comma separated list of output fields")
669 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
670 default=False, help="Force the operation")
672 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
673 default=False, help="Do not require confirmation")
675 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
676 action="store_true", default=False,
677 help=("Ignore offline nodes and do as much"
680 TAG_ADD_OPT = cli_option("--tags", dest="tags",
681 default=None, help="Comma-separated list of instance"
684 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
685 default=None, help="File with tag names")
# Fire-and-forget job submission: print the job ID instead of blocking
# until the job completes.
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        action="store_true", default=False,
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))
692 SYNC_OPT = cli_option("--sync", dest="do_locking",
693 default=False, action="store_true",
694 help=("Grab locks while doing the queries"
695 " in order to ensure more consistent results"))
# Run only the prerequisite checks of an operation, without executing it.
# Fixed doubled word in the user-facing help text ("it it" -> "if it").
# NOTE(review): dest/action/closing text reconstructed from elided lines;
# confirm against the full file.
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true", dest="dry_run",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
703 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
705 help="Increase the verbosity of the operation")
707 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
708 action="store_true", dest="simulate_errors",
709 help="Debugging option that makes the operation"
710 " treat most runtime checks as failed")
712 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
713 default=True, action="store_false",
714 help="Don't wait for sync (DANGEROUS!)")
716 ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
717 action="store_true", default=False,
718 help="Enable offline instance")
720 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
721 action="store_true", default=False,
722 help="Disable down instance")
# Disk template selection, restricted to the cluster's known templates;
# the help text enumerates the valid choices dynamically.
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES),
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)))
730 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
731 help="Do not create any network cards for"
734 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
735 help="Relative path under default cluster-wide"
736 " file storage dir to store file-based disks",
737 default=None, metavar="<DIR>")
# Backing driver for file-based disk images ("loop" unless overridden).
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER),
                                  help="Driver to use for image files")
# Automatic node selection via the named iallocator plugin; shell
# completion suggests known iallocators.
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR,
                            help=("Select nodes for the instance automatically"
                                  " using the <NAME> iallocator plugin"))
750 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
752 help="Set the default instance allocator plugin",
753 default=None, type="string",
754 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
756 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
758 completion_suggest=OPT_COMPL_ONE_OS)
# OS parameters, parsed as a key=value dictionary (-O key=val,key=val,...).
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          default={}, type="keyval",
                          help="OS parameters")
764 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
765 action="store_true", default=False,
766 help="Force an unknown variant")
768 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
769 action="store_true", default=False,
770 help="Do not install the OS (will"
773 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
774 type="keyval", default={},
775 help="Backend parameters")
777 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
778 default={}, dest="hvparams",
779 help="Hypervisor parameters")
781 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
782 help="Disk template parameters, in the format"
783 " template:option=value,option=value,...",
784 type="identkeyval", action="append", default=[])
786 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
787 type="keyval", default={},
788 help="Memory count specs: min, max, std"
791 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
792 type="keyval", default={},
793 help="CPU count specs: min, max, std")
795 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
796 dest="ispecs_disk_count",
797 type="keyval", default={},
798 help="Disk count specs: min, max, std")
800 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
801 type="keyval", default={},
802 help="Disk size specs: min, max, std (in MB)")
804 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
805 type="keyval", default={},
806 help="NIC count specs: min, max, std")
808 SPECS_DISK_TEMPLATES = cli_option("--specs-disk-templates",
809 dest="ispecs_disk_templates",
810 type="list", default=None,
811 help="Comma-separated list of"
812 " enabled disk templates")
814 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
815 help="Hypervisor and hypervisor options, in the"
816 " format hypervisor:option=value,option=value,...",
817 default=None, type="identkeyval")
819 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
820 help="Hypervisor and hypervisor options, in the"
821 " format hypervisor:option=value,option=value,...",
822 default=[], action="append", type="identkeyval")
824 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
825 action="store_false",
826 help="Don't check that the instance's IP"
829 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
830 default=True, action="store_false",
831 help="Don't check that the instance's name"
834 NET_OPT = cli_option("--net",
835 help="NIC parameters", default=[],
836 dest="nics", action="append", type="identkeyval")
838 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
839 dest="disks", action="append", type="identkeyval")
841 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
842 help="Comma-separated list of disks"
843 " indices to act on (e.g. 0,2) (optional,"
844 " defaults to all disks)")
846 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
847 help="Enforces a single-disk configuration using the"
848 " given disk size, in MiB unless a suffix is used",
849 default=None, type="unit", metavar="<size>")
851 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
852 dest="ignore_consistency",
853 action="store_true", default=False,
854 help="Ignore the consistency of the disks on"
857 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
858 dest="allow_failover",
859 action="store_true", default=False,
860 help="If migration is not possible fallback to"
863 NONLIVE_OPT = cli_option("--non-live", dest="live",
864 default=True, action="store_false",
865 help="Do a non-live migration (this usually means"
866 " freeze the instance, save the state, transfer and"
867 " only then resume running on the secondary node)")
# Per-invocation override of the hypervisor's default migration mode.
# Fixed unbalanced parenthesis in the user-facing help text.
# NOTE(review): the elided line here presumably held 'default=None,';
# confirm against the full file.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
875 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
876 help="Target node and optional secondary node",
877 metavar="<pnode>[:<snode>]",
878 completion_suggest=OPT_COMPL_INST_ADD_NODES)
880 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
881 action="append", metavar="<node>",
882 help="Use only this node (can be used multiple"
883 " times, if not given defaults to all nodes)",
884 completion_suggest=OPT_COMPL_ONE_NODE)
886 NODEGROUP_OPT_NAME = "--node-group"
887 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
889 help="Node group (name or uuid)",
890 metavar="<nodegroup>",
891 default=None, type="string",
892 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
894 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
896 completion_suggest=OPT_COMPL_ONE_NODE)
898 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
899 action="store_false",
900 help="Don't start the instance after creation")
902 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
903 action="store_true", default=False,
904 help="Show command instead of executing it")
906 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
907 default=False, action="store_true",
908 help="Instead of performing the migration, try to"
909 " recover from a failed cleanup. This is safe"
910 " to run even if the instance is healthy, but it"
911 " will create extra replication traffic and "
912 " disrupt briefly the replication (like during the"
915 STATIC_OPT = cli_option("-s", "--static", dest="static",
916 action="store_true", default=False,
917 help="Only show configuration data, not runtime data")
919 ALL_OPT = cli_option("--all", dest="show_all",
920 default=False, action="store_true",
921 help="Show info on all instances on the cluster."
922 " This can take a long time to run, use wisely")
924 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
925 action="store_true", default=False,
926 help="Interactive OS reinstall, lists available"
927 " OS templates for selection")
929 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
930 action="store_true", default=False,
931 help="Remove the instance from the cluster"
932 " configuration even if there are failures"
933 " during the removal process")
935 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
936 dest="ignore_remove_failures",
937 action="store_true", default=False,
938 help="Remove the instance from the"
939 " cluster configuration even if there"
940 " are failures during the removal"
943 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
944 action="store_true", default=False,
945 help="Remove the instance from the cluster")
947 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
948 help="Specifies the new node for the instance",
949 metavar="NODE", default=None,
950 completion_suggest=OPT_COMPL_ONE_NODE)
952 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
953 help="Specifies the new secondary node",
954 metavar="NODE", default=None,
955 completion_suggest=OPT_COMPL_ONE_NODE)
957 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
958 default=False, action="store_true",
959 help="Replace the disk(s) on the primary"
960 " node (applies only to internally mirrored"
961 " disk templates, e.g. %s)" %
962 utils.CommaJoin(constants.DTS_INT_MIRROR))
964 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
965 default=False, action="store_true",
966 help="Replace the disk(s) on the secondary"
967 " node (applies only to internally mirrored"
968 " disk templates, e.g. %s)" %
969 utils.CommaJoin(constants.DTS_INT_MIRROR))
971 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
972 default=False, action="store_true",
973 help="Lock all nodes and auto-promote as needed"
976 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
977 default=False, action="store_true",
978 help="Automatically replace faulty disks"
979 " (applies only to internally mirrored"
980 " disk templates, e.g. %s)" %
981 utils.CommaJoin(constants.DTS_INT_MIRROR))
983 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
984 default=False, action="store_true",
985 help="Ignore current recorded size"
986 " (useful for forcing activation when"
987 " the recorded size is wrong)")
989 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
991 completion_suggest=OPT_COMPL_ONE_NODE)
993 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
996 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
997 help="Specify the secondary ip for the node",
998 metavar="ADDRESS", default=None)
1000 READD_OPT = cli_option("--readd", dest="readd",
1001 default=False, action="store_true",
1002 help="Readd old node after replacing it")
1004 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1005 default=True, action="store_false",
1006 help="Disable SSH key fingerprint checking")
1008 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1009 default=False, action="store_true",
1010 help="Force the joining of a node")
1012 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1013 type="bool", default=None, metavar=_YORNO,
1014 help="Set the master_candidate flag on the node")
1016 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1017 type="bool", default=None,
1018 help=("Set the offline flag on the node"
1019 " (cluster does not communicate with offline"
1022 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1023 type="bool", default=None,
1024 help=("Set the drained flag on the node"
1025 " (excluded from allocation operations)"))
1027 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1028 type="bool", default=None, metavar=_YORNO,
1029 help="Set the master_capable flag on the node")
1031 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1032 type="bool", default=None, metavar=_YORNO,
1033 help="Set the vm_capable flag on the node")
1035 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1036 type="bool", default=None, metavar=_YORNO,
1037 help="Set the allocatable flag on a volume")
1039 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1040 help="Disable support for lvm based instances"
1042 action="store_false", default=True)
1044 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1045 dest="enabled_hypervisors",
1046 help="Comma-separated list of hypervisors",
1047 type="string", default=None)
1049 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1050 type="keyval", default={},
1051 help="NIC parameters")
1053 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1054 dest="candidate_pool_size", type="int",
1055 help="Set the candidate pool size")
1057 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1058 help=("Enables LVM and specifies the volume group"
1059 " name (cluster-wide) for disk allocation"
1060 " [%s]" % constants.DEFAULT_VG),
1061 metavar="VG", default=None)
1063 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1064 help="Destroy cluster", action="store_true")
1066 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1067 help="Skip node agreement check (dangerous)",
1068 action="store_true", default=False)
1070 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1071 help="Specify the mac prefix for the instance IP"
1072 " addresses, in the format XX:XX:XX",
1076 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1077 help="Specify the node interface (cluster-wide)"
1078 " on which the master IP address will be added"
1079 " (cluster init default: %s)" %
1080 constants.DEFAULT_BRIDGE,
1084 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1085 help="Specify the netmask of the master IP",
1089 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1090 dest="use_external_mip_script",
1091 help="Specify whether to run a user-provided"
1092 " script for the master IP address turnup and"
1093 " turndown operations",
1094 type="bool", metavar=_YORNO, default=None)
1096 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1097 help="Specify the default directory (cluster-"
1098 "wide) for storing the file-based disks [%s]" %
1099 constants.DEFAULT_FILE_STORAGE_DIR,
1101 default=constants.DEFAULT_FILE_STORAGE_DIR)
1103 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1104 dest="shared_file_storage_dir",
1105 help="Specify the default directory (cluster-"
1106 "wide) for storing the shared file-based"
1108 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1109 metavar="SHAREDDIR",
1110 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1112 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1113 help="Don't modify /etc/hosts",
1114 action="store_false", default=True)
1116 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1117 help="Don't initialize SSH keys",
1118 action="store_false", default=True)
1120 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1121 help="Enable parseable error messages",
1122 action="store_true", default=False)
1124 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1125 help="Skip N+1 memory redundancy tests",
1126 action="store_true", default=False)
1128 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1129 help="Type of reboot: soft/hard/full",
1130 default=constants.INSTANCE_REBOOT_HARD,
1132 choices=list(constants.REBOOT_TYPES))
1134 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1135 dest="ignore_secondaries",
1136 default=False, action="store_true",
1137 help="Ignore errors from secondaries")
1139 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1140 action="store_false", default=True,
1141 help="Don't shutdown the instance (unsafe)")
1143 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1144 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1145 help="Maximum time to wait")
1147 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1148 dest="shutdown_timeout", type="int",
1149 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1150 help="Maximum time to wait for instance shutdown")
1152 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1154 help=("Number of seconds between repetions of the"
1157 EARLY_RELEASE_OPT = cli_option("--early-release",
1158 dest="early_release", default=False,
1159 action="store_true",
1160 help="Release the locks on the secondary"
1163 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1164 dest="new_cluster_cert",
1165 default=False, action="store_true",
1166 help="Generate a new cluster certificate")
1168 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1170 help="File containing new RAPI certificate")
1172 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1173 default=None, action="store_true",
1174 help=("Generate a new self-signed RAPI"
1177 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1179 help="File containing new SPICE certificate")
1181 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1183 help="File containing the certificate of the CA"
1184 " which signed the SPICE certificate")
1186 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1187 dest="new_spice_cert", default=None,
1188 action="store_true",
1189 help=("Generate a new self-signed SPICE"
1192 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1193 dest="new_confd_hmac_key",
1194 default=False, action="store_true",
1195 help=("Create a new HMAC key for %s" %
# Load a replacement cluster domain secret from the given file.
# Fixed doubled word in the user-facing help text ("new new" -> "new").
# NOTE(review): the elided line here presumably held 'default=None,';
# confirm against the full file.
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1204 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1205 dest="new_cluster_domain_secret",
1206 default=False, action="store_true",
1207 help=("Create a new cluster domain"
1210 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1211 dest="use_replication_network",
1212 help="Whether to use the replication network"
1213 " for talking to the nodes",
1214 action="store_true", default=False)
1216 MAINTAIN_NODE_HEALTH_OPT = \
1217 cli_option("--maintain-node-health", dest="maintain_node_health",
1218 metavar=_YORNO, default=None, type="bool",
1219 help="Configure the cluster to automatically maintain node"
1220 " health, by shutting down unknown instances, shutting down"
1221 " unknown DRBD devices, etc.")
1223 IDENTIFY_DEFAULTS_OPT = \
1224 cli_option("--identify-defaults", dest="identify_defaults",
1225 default=False, action="store_true",
1226 help="Identify which saved instance parameters are equal to"
1227 " the current cluster defaults and set them as such, instead"
1228 " of marking them as overridden")
1230 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1231 action="store", dest="uid_pool",
1232 help=("A list of user-ids or user-id"
1233 " ranges separated by commas"))
1235 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1236 action="store", dest="add_uids",
1237 help=("A list of user-ids or user-id"
1238 " ranges separated by commas, to be"
1239 " added to the user-id pool"))
1241 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1242 action="store", dest="remove_uids",
1243 help=("A list of user-ids or user-id"
1244 " ranges separated by commas, to be"
1245 " removed from the user-id pool"))
1247 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1248 action="store", dest="reserved_lvs",
1249 help=("A comma-separated list of reserved"
1250 " logical volumes names, that will be"
1251 " ignored by cluster verify"))
1253 ROMAN_OPT = cli_option("--roman",
1254 dest="roman_integers", default=False,
1255 action="store_true",
1256 help="Use roman numbers for positive integers")
1258 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1259 action="store", default=None,
1260 help="Specifies usermode helper for DRBD")
1262 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1263 action="store_false", default=True,
1264 help="Disable support for DRBD")
1266 PRIMARY_IP_VERSION_OPT = \
1267 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1268 action="store", dest="primary_ip_version",
1269 metavar="%d|%d" % (constants.IP4_VERSION,
1270 constants.IP6_VERSION),
1271 help="Cluster-wide IP version for primary IP")
1273 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1274 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1275 choices=_PRIONAME_TO_VALUE.keys(),
1276 help="Priority for opcode processing")
1278 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1279 type="bool", default=None, metavar=_YORNO,
1280 help="Sets the hidden flag on the OS")
1282 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1283 type="bool", default=None, metavar=_YORNO,
1284 help="Sets the blacklisted flag on the OS")
1286 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1287 type="bool", metavar=_YORNO,
1288 dest="prealloc_wipe_disks",
1289 help=("Wipe disks prior to instance"
1292 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1293 type="keyval", default=None,
1294 help="Node parameters")
1296 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1297 action="store", metavar="POLICY", default=None,
1298 help="Allocation policy for the node group")
1300 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1301 type="bool", metavar=_YORNO,
1302 dest="node_powered",
1303 help="Specify if the SoR for node is powered")
1305 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1306 default=constants.OOB_TIMEOUT,
1307 help="Maximum time to wait for out-of-band helper")
1309 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1310 default=constants.OOB_POWER_DELAY,
1311 help="Time in seconds to wait between power-ons")
1313 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1314 action="store_true", default=False,
1315 help=("Whether command argument should be treated"
1318 NO_REMEMBER_OPT = cli_option("--no-remember",
1320 action="store_true", default=False,
1321 help="Perform but do not record the change"
1322 " in the configuration")
1324 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1325 default=False, action="store_true",
1326 help="Evacuate primary instances only")
1328 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1329 default=False, action="store_true",
1330 help="Evacuate secondary instances only"
1331 " (applies only to internally mirrored"
1332 " disk templates, e.g. %s)" %
1333 utils.CommaJoin(constants.DTS_INT_MIRROR))
1335 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1336 action="store_true", default=False,
1337 help="Pause instance at startup")
1339 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1340 help="Destination node group (name or uuid)",
1341 default=None, action="append",
1342 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1344 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1345 action="append", dest="ignore_errors",
1346 choices=list(constants.CV_ALL_ECODES_STRINGS),
1347 help="Error code to be ignored")
1349 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1351 help=("Specify disk state information in the format"
1352 " storage_type/identifier:option=value,..."),
1355 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1357 help=("Specify hypervisor state information in the"
1358 " format hypervisor:option=value,..."),
1361 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1362 action="store_true", default=False,
1363 help="Ignore instance policy violations")
1365 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1366 help="Sets the instance's runtime memory,"
1367 " ballooning it up or down to the new value",
1368 default=None, type="unit", metavar="<size>")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# options
# NOTE(review): most entries of this list were missing from this copy of
# the file; they were restored from context — verify against the full
# option set used by gnt-instance add/import.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  SPECS_DISK_TEMPLATES,
  ]
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of env variables allowed for default args
  @return: (function, options, args) on success, (None, None, None)
      on parse error or when usage was printed

  """
  # Every override variable must correspond to a known command
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      # two-column listing kept within 79 characters
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # e.g. "gnt-instance list" looks at $GNT_INSTANCE_LIST for default args
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1496 def _CheckArguments(cmd, args_def, args):
1497 """Verifies the arguments using the argument definition.
1501 1. Abort with error if values specified by user but none expected.
1503 1. For each argument in definition
1505 1. Keep running count of minimum number of values (min_count)
1506 1. Keep running count of maximum number of values (max_count)
1507 1. If it has an unlimited number of values
1509 1. Abort with error if it's not the last argument in the definition
1511 1. If last argument has limited number of values
1513 1. Abort with error if number of values doesn't match or is too large
1515 1. Abort with error if user didn't pass enough values (min_count)
1518 if args and not args_def:
1519 ToStderr("Error: Command %s expects no arguments", cmd)
1526 last_idx = len(args_def) - 1
1528 for idx, arg in enumerate(args_def):
1529 if min_count is None:
1531 elif arg.min is not None:
1532 min_count += arg.min
1534 if max_count is None:
1536 elif arg.max is not None:
1537 max_count += arg.max
1540 check_max = (arg.max is not None)
1542 elif arg.max is None:
1543 raise errors.ProgrammerError("Only the last argument can have max=None")
1546 # Command with exact number of arguments
1547 if (min_count is not None and max_count is not None and
1548 min_count == max_count and len(args) != min_count):
1549 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1552 # Command with limited number of arguments
1553 if max_count is not None and len(args) > max_count:
1554 ToStderr("Error: Command %s expects only %d argument(s)",
1558 # Command with some required arguments
1559 if min_count is not None and len(args) < min_count:
1560 ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @param value: option value in "primary[:secondary]" form, or a
      false-ish value when no node was given
  @return: a [primary, secondary] list when a colon is present,
      otherwise the (value, None) tuple

  """
  if value and ":" in value:
    # split only on the first colon; the secondary may contain colons
    return value.split(":", 1)
  else:
    return (value, None)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    # no variants: the bare OS name is the only valid name
    return [os_name]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected, the default list
      extended by the selection when it starts with "+", otherwise the
      comma-split selection

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means "default fields plus a and b"
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: wraps a function so it executes inside an RPC runtime
# context (setup/teardown is delegated to rpc.RunWithRPC).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list)

  NOTE(review): several lines of this function were missing from this
  copy; the tty handling below was restored from context — verify
  against the full source.

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice is the default answer (used when no tty is available)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal: fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print per-choice help and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the job ID assigned by the master

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  # NOTE(review): this copy of the function is missing several lines
  # (presumably a "status = None" initializer and the "while True:"
  # polling-loop header); only the visible statements are annotated below.
  result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
  # job not found, go away!
  raise errors.JobLost("Job with id %s lost" % job_id)

  if result == constants.JOB_NOTCHANGED:
    # nothing new: let the reporter decide whether to notify the user
    report_cbs.ReportNotChanged(job_id, status)

  # Split result, a tuple of (field values, log entries)
  (job_info, log_entries) = result
  (status, ) = job_info

  for log_entry in log_entries:
    (serial, timestamp, log_type, message) = log_entry
    report_cbs.ReportLogMessage(job_id, serial, timestamp,
    # remember the highest log serial seen, to avoid re-reporting entries
    prev_logmsg_serial = max(prev_logmsg_serial, serial)

  # TODO: Handle canceled and archived jobs
  elif status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):

  prev_job_info = job_info

  # final state reached: fetch the job's per-opcode status and results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      # re-raise a structured Ganeti error if one was encoded in msg
      errors.MaybeRaise(msg)

      raise errors.OpExecError("partial failure (opcode %d): %s" %

  raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation that queries a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client used for all queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    # straight delegation to the luxi client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore

class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # notify only once per state, to avoid flooding the terminal
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  @param log_type: the message type (one of constants.ELOG_*)
  @param log_msg: the message payload
  @return: a safely-encoded string representation of the message

  """
  if log_type != constants.ELOG_MESSAGE:
    # non-plain messages may be arbitrary structures; stringify first
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, log messages are routed through it
  @param reporter: custom reporting callbacks (mutually exclusive with
      feedback_fn)

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; a new one is created when None
  @param feedback_fn: optional feedback function for log messages
  @param opts: command line options used to set generic opcode options
  @param reporter: custom reporting callbacks for polling
  @return: the result of the (single) opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: with the job ID, when only submitting

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # structured exit: the caller prints the job ID
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      # translate the symbolic priority name into its numeric value
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
  # NOTE(review): this copy of the file is missing the enclosing function
  # header (presumably "def GetClient():"), the "try:" opening this
  # handler, and the trailing "return client" — confirm against the full
  # source before editing.
  client = luxi.Client()
  except luxi.NoMasterError:
    # no master daemon reachable: diagnose why, using ssconf
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception to format
  @return: (exit_code, message) tuple

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the job was submitted, report its ID and exit cleanly
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2122 def GenericMain(commands, override=None, aliases=None,
2123 env_override=frozenset()):
2124 """Generic main function for all the gnt-* commands.
2126 @param commands: a dictionary with a special structure, see the design doc
2127 for command line handling.
2128 @param override: if not None, we expect a dictionary with keys that will
2129 override command line options; this can be used to pass
2130 options from the scripts to generic functions
2131 @param aliases: dictionary with command aliases {'alias': 'target, ...}
2132 @param env_override: list of environment names which are allowed to submit
2133 default args for commands
2136 # save the program name and the entire command line for later logging
2138 binary = os.path.basename(sys.argv[0])
2140 binary = sys.argv[0]
2142 if len(sys.argv) >= 2:
2143 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2147 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2149 binary = "<unknown program>"
2150 cmdline = "<unknown>"
2156 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2157 except errors.ParameterError, err:
2158 result, err_msg = FormatError(err)
2162 if func is None: # parse error
2165 if override is not None:
2166 for key, val in override.iteritems():
2167 setattr(options, key, val)
2169 utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
2170 stderr_logging=True)
2172 logging.info("Command line: %s", cmdline)
2175 result = func(options, args)
2176 except (errors.GenericError, luxi.ProtocolError,
2177 JobSubmittedException), err:
2178 result, err_msg = FormatError(err)
2179 logging.exception("Error during command processing")
2181 except KeyboardInterrupt:
2182 result = constants.EXIT_FAILURE
2183 ToStderr("Aborted. Note that if the operation created any jobs, they"
2184 " might have been submitted and"
2185 " will continue to run in the background.")
2186 except IOError, err:
2187 if err.errno == errno.EPIPE:
2188 # our terminal went away, we'll exit
2189 sys.exit(constants.EXIT_FAILURE)
2196 def ParseNicOption(optvalue):
2197 """Parses the value of the --net option(s).
2201 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2202 except (TypeError, ValueError), err:
2203 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2205 nics = [{}] * nic_max
2206 for nidx, ndict in optvalue:
2209 if not isinstance(ndict, dict):
2210 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2211 " got %s" % (nidx, ndict))
2213 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code

  NOTE(review): this copy of the function is missing many lines
  (conditional headers, "try:" openers, default branches and several
  opcode parameters); the comments below describe only what is visible.

  """
  # split "--node pnode[:snode]" into the two node names
  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor, hvparams = opts.hypervisor

  # build the per-NIC parameter list from the --net option(s)
  nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto

  # disk handling: diskless templates must not receive disk information
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
  if (not opts.disks and not opts.sd_size
      and mode == constants.INSTANCE_CREATE):
    raise errors.OpPrereqError("No disk information specified")
  if opts.disks and opts.sd_size is not None:
    raise errors.OpPrereqError("Please use either the '--disk' or"
  if opts.sd_size is not None:
    # legacy single-disk syntax: convert to the --disk representation
    opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

  disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
  except ValueError, err:
    raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
  disks = [{}] * disk_max

  for didx, ddict in opts.disks:
    if not isinstance(ddict, dict):
      msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
      raise errors.OpPrereqError(msg)
    elif constants.IDISK_SIZE in ddict:
      # "size" and "adopt" are mutually exclusive per disk
      if constants.IDISK_ADOPT in ddict:
        raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                   " (disk %d)" % didx)
      ddict[constants.IDISK_SIZE] = \
        utils.ParseUnit(ddict[constants.IDISK_SIZE])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
    elif constants.IDISK_ADOPT in ddict:
      if mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for instance"
      # adopted volumes carry no size; the backend fills it in
      ddict[constants.IDISK_SIZE] = 0
    else:
      raise errors.OpPrereqError("Missing size or adoption source for"

  if opts.tags is not None:
    tags = opts.tags.split(",")

  # validate backend and hypervisor parameter types early
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # mode-specific settings (OS install vs. import source)
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
  raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception as err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher operation
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # the last field doesn't need padding
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2588 def _FormatBool(value):
2589 """Formats a boolean value as a string.
#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report field status to the collecting callback
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were found

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if not names:
    names = None

  qfilter = qlang.MakeFilter(names, force_filter)

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2882 """Describes a column for L{FormatTable}.
2885 def __init__(self, title, fn, align_right):
2886 """Initializes this class.
2889 @param title: Column title
2891 @param fn: Formatting function
2892 @type align_right: bool
2893 @param align_right: Whether to align values on the right-hand side
2898 self.align_right = align_right
2901 def _GetColFormatString(width, align_right):
2902 """Returns the format string for a field.
2910 return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    # Anything that isn't a (seconds, microseconds) pair is unparseable
    return "?"

  (sec, usec) = ts
  return time.strftime("%F %T", time.localtime(sec)) + (".%06d" % usec)
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  return value
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
3098 def _ToStream(stream, txt, *args):
3099 """Write a message to a stream, bypassing the logging system
3101 @type stream: file object
3102 @param stream: the file to which we should write
3104 @param txt: the message
3110 stream.write(txt % args)
3115 except IOError, err:
3116 if err.errno == errno.EPIPE:
3117 # our terminal went away, we'll exit
3118 sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use; a new one is created when None
    @type verbose: boolean
    @param verbose: whether to print submitted job IDs
    @param opts: generic opcode options applied to queued jobs
    @type feedback_fn: callable
    @param feedback_fn: Feedback function passed to L{PollJob}

    """
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter used to restore submission order in the results
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    @return: C{fmt % name} if name is set, the empty string otherwise

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((next(self._counter), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((next(self._counter), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: whether to submit jobs one by one instead of in one batch

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to the first one
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost as err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError) as err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    # Fall back to the effective value when the parameter isn't set explicitly
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
3317 def ConfirmOperation(names, list_type, text, extra=""):
3318 """Ask the user to confirm an operation on a list of list_type.
3320 This function is used to request confirmation for doing an operation
3321 on a given list of list_type.
3324 @param names: the list of names that we display when
3325 we ask for confirmation
3326 @type list_type: str
3327 @param list_type: Human readable name for elements in the list (e.g. nodes)
3329 @param text: the operation that the user should confirm
3331 @return: True or False depending on user's confirmation.
3335 msg = ("The %s will operate on %d %s.\n%s"
3336 "Do you want to continue?" % (text, count, list_type, extra))
3337 affected = (("\nAffected %s:\n" % list_type) +
3338 "\n".join([" %s" % name for name in names]))
3340 choices = [("y", True, "Yes, execute the %s" % text),
3341 ("n", False, "No, abort the %s" % text)]
3344 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3347 question = msg + affected
3349 choice = AskUser(question, choices)
3352 choice = AskUser(msg + affected, choices)