4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Module dealing with command line parsing"""
33 from cStringIO import StringIO
35 from ganeti import utils
36 from ganeti import errors
37 from ganeti import constants
38 from ganeti import opcodes
39 from ganeti import luxi
40 from ganeti import ssconf
41 from ganeti import rpc
42 from ganeti import ssh
43 from ganeti import compat
44 from ganeti import netutils
45 from ganeti import qlang
47 from optparse import (OptionParser, TitledHelpFormatter,
48 Option, OptionValueError)
52 # Command line options
65 "CLUSTER_DOMAIN_SECRET_OPT",
83 "FILESTORE_DRIVER_OPT",
89 "GLOBAL_SHARED_FILEDIR_OPT",
94 "DEFAULT_IALLOCATOR_OPT",
95 "IDENTIFY_DEFAULTS_OPT",
98 "IGNORE_FAILURES_OPT",
100 "IGNORE_REMOVE_FAILURES_OPT",
101 "IGNORE_SECONDARIES_OPT",
105 "MAINTAIN_NODE_HEALTH_OPT",
107 "MASTER_NETMASK_OPT",
109 "MIGRATION_MODE_OPT",
111 "NEW_CLUSTER_CERT_OPT",
112 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
113 "NEW_CONFD_HMAC_KEY_OPT",
116 "NEW_SPICE_CERT_OPT",
118 "NODE_FORCE_JOIN_OPT",
120 "NODE_PLACEMENT_OPT",
124 "NODRBD_STORAGE_OPT",
130 "NOMODIFY_ETCHOSTS_OPT",
131 "NOMODIFY_SSH_SETUP_OPT",
135 "NORUNTIME_CHGS_OPT",
138 "NOSSH_KEYCHECK_OPT",
152 "PREALLOC_WIPE_DISKS_OPT",
153 "PRIMARY_IP_VERSION_OPT",
159 "REMOVE_INSTANCE_OPT",
165 "SECONDARY_ONLY_OPT",
169 "SHUTDOWN_TIMEOUT_OPT",
171 "SPECS_CPU_COUNT_OPT",
172 "SPECS_DISK_COUNT_OPT",
173 "SPECS_DISK_SIZE_OPT",
174 "SPECS_MEM_SIZE_OPT",
175 "SPECS_NIC_COUNT_OPT",
176 "IPOLICY_DISK_TEMPLATES",
177 "IPOLICY_VCPU_RATIO",
183 "STARTUP_PAUSED_OPT",
192 "USE_EXTERNAL_MIP_SCRIPT",
199 "IGNORE_IPOLICY_OPT",
200 "INSTANCE_POLICY_OPTS",
201 # Generic functions for CLI programs
204 "GenericInstanceCreate",
210 "JobSubmittedException",
212 "RunWhileClusterStopped",
216 # Formatting functions
217 "ToStderr", "ToStdout",
220 "FormatParameterDict",
229 # command line options support infrastructure
230 "ARGS_MANY_INSTANCES",
249 "OPT_COMPL_INST_ADD_NODES",
250 "OPT_COMPL_MANY_NODES",
251 "OPT_COMPL_ONE_IALLOCATOR",
252 "OPT_COMPL_ONE_INSTANCE",
253 "OPT_COMPL_ONE_NODE",
254 "OPT_COMPL_ONE_NODEGROUP",
260 "COMMON_CREATE_OPTS",
266 #: Priorities (sorted)
268 ("low", constants.OP_PRIO_LOW),
269 ("normal", constants.OP_PRIO_NORMAL),
270 ("high", constants.OP_PRIO_HIGH),
273 #: Priority dictionary for easier lookup
274 # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
275 # we migrate to Python 2.6
276 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
278 # Query result status for clients
281 QR_INCOMPLETE) = range(3)
283 #: Maximum batch size for ChooseJob
288 def __init__(self, min=0, max=None): # pylint: disable=W0622
293 return ("<%s min=%s max=%s>" %
294 (self.__class__.__name__, self.min, self.max))
297 class ArgSuggest(_Argument):
298 """Suggesting argument.
300 Value can be any of the ones passed to the constructor.
303 # pylint: disable=W0622
304 def __init__(self, min=0, max=None, choices=None):
305 _Argument.__init__(self, min=min, max=max)
306 self.choices = choices
309 return ("<%s min=%s max=%s choices=%r>" %
310 (self.__class__.__name__, self.min, self.max, self.choices))
313 class ArgChoice(ArgSuggest):
316 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
317 but value must be one of the choices.
322 class ArgUnknown(_Argument):
323 """Unknown argument to program (e.g. determined at runtime).
328 class ArgInstance(_Argument):
329 """Instances argument.
334 class ArgNode(_Argument):
340 class ArgGroup(_Argument):
341 """Node group argument.
346 class ArgJobId(_Argument):
352 class ArgFile(_Argument):
353 """File path argument.
358 class ArgCommand(_Argument):
364 class ArgHost(_Argument):
370 class ArgOs(_Argument):
# Canonical argument-specification lists used by the per-command tables.
# The "MANY" variants accept any number of names (min defaults to 0 in
# _Argument.__init__); the "ONE" variants require exactly one (min=1, max=1).
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
387 def _ExtractTagsObject(opts, args):
388 """Extract the tag type object.
390 Note that this function will modify its args parameter.
393 if not hasattr(opts, "tag_type"):
394 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
396 if kind == constants.TAG_CLUSTER:
398 elif kind in (constants.TAG_NODEGROUP,
400 constants.TAG_INSTANCE):
402 raise errors.OpPrereqError("no arguments passed to the command")
406 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
410 def _ExtendTags(opts, args):
411 """Extend the args if a source file has been given.
413 This function will extend the tags with the contents of the file
414 passed in the 'tags_source' attribute of the opts parameter. A file
415 named '-' will be replaced by stdin.
418 fname = opts.tags_source
424 new_fh = open(fname, "r")
427 # we don't use the nice 'new_data = [line.strip() for line in fh]'
428 # because of python bug 1633941
430 line = new_fh.readline()
433 new_data.append(line.strip())
436 args.extend(new_data)
439 def ListTags(opts, args):
440 """List the tags on a given object.
442 This is a generic implementation that knows how to deal with all
443 three cases of tag objects (cluster, node, instance). The opts
444 argument is expected to contain a tag_type field denoting what
445 object type we work on.
448 kind, name = _ExtractTagsObject(opts, args)
450 result = cl.QueryTags(kind, name)
451 result = list(result)
457 def AddTags(opts, args):
458 """Add tags on a given object.
460 This is a generic implementation that knows how to deal with all
461 three cases of tag objects (cluster, node, instance). The opts
462 argument is expected to contain a tag_type field denoting what
463 object type we work on.
466 kind, name = _ExtractTagsObject(opts, args)
467 _ExtendTags(opts, args)
469 raise errors.OpPrereqError("No tags to be added")
470 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
471 SubmitOpCode(op, opts=opts)
474 def RemoveTags(opts, args):
475 """Remove tags from a given object.
477 This is a generic implementation that knows how to deal with all
478 three cases of tag objects (cluster, node, instance). The opts
479 argument is expected to contain a tag_type field denoting what
480 object type we work on.
483 kind, name = _ExtractTagsObject(opts, args)
484 _ExtendTags(opts, args)
486 raise errors.OpPrereqError("No tags to be removed")
487 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
488 SubmitOpCode(op, opts=opts)
491 def check_unit(option, opt, value): # pylint: disable=W0613
492 """OptParsers custom converter for units.
496 return utils.ParseUnit(value)
497 except errors.UnitParseError, err:
498 raise OptionValueError("option %s: %s" % (opt, err))
501 def _SplitKeyVal(opt, data):
502 """Convert a KeyVal string into a dict.
504 This function will convert a key=val[,...] string into a dict. Empty
505 values will be converted specially: keys which have the prefix 'no_'
506 will have the value=False and the prefix stripped, the others will
510 @param opt: a string holding the option name for which we process the
511 data, used in building error messages
513 @param data: a string of the format key=val,key=val,...
515 @return: {key=val, key=val}
516 @raises errors.ParameterError: if there are duplicate keys
521 for elem in utils.UnescapeAndSplit(data, sep=","):
523 key, val = elem.split("=", 1)
525 if elem.startswith(NO_PREFIX):
526 key, val = elem[len(NO_PREFIX):], False
527 elif elem.startswith(UN_PREFIX):
528 key, val = elem[len(UN_PREFIX):], None
530 key, val = elem, True
532 raise errors.ParameterError("Duplicate key '%s' in option %s" %
538 def check_ident_key_val(option, opt, value): # pylint: disable=W0613
539 """Custom parser for ident:key=val,key=val options.
541 This will store the parsed values as a tuple (ident, {key: val}). As such,
542 multiple uses of this option via action=append is possible.
546 ident, rest = value, ""
548 ident, rest = value.split(":", 1)
550 if ident.startswith(NO_PREFIX):
552 msg = "Cannot pass options when removing parameter groups: %s" % value
553 raise errors.ParameterError(msg)
554 retval = (ident[len(NO_PREFIX):], False)
555 elif ident.startswith(UN_PREFIX):
557 msg = "Cannot pass options when removing parameter groups: %s" % value
558 raise errors.ParameterError(msg)
559 retval = (ident[len(UN_PREFIX):], None)
561 kv_dict = _SplitKeyVal(opt, rest)
562 retval = (ident, kv_dict)
566 def check_key_val(option, opt, value): # pylint: disable=W0613
567 """Custom parser class for key=val,key=val options.
569 This will store the parsed values as a dict {key: val}.
572 return _SplitKeyVal(opt, value)
575 def check_bool(option, opt, value): # pylint: disable=W0613
576 """Custom parser for yes/no options.
578 This will store the parsed value as either True or False.
581 value = value.lower()
582 if value == constants.VALUE_FALSE or value == "no":
584 elif value == constants.VALUE_TRUE or value == "yes":
587 raise errors.ParameterError("Invalid boolean value '%s'" % value)
590 def check_list(option, opt, value): # pylint: disable=W0613
591 """Custom parser for comma-separated lists.
594 # we have to make this explicit check since "".split(",") is [""],
595 # not an empty list :(
599 return utils.UnescapeAndSplit(value)
602 # completion_suggestion is normally a list. Using numeric values not evaluating
603 # to False for dynamic completion.
604 (OPT_COMPL_MANY_NODES,
606 OPT_COMPL_ONE_INSTANCE,
608 OPT_COMPL_ONE_IALLOCATOR,
609 OPT_COMPL_INST_ADD_NODES,
610 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
612 OPT_COMPL_ALL = frozenset([
613 OPT_COMPL_MANY_NODES,
615 OPT_COMPL_ONE_INSTANCE,
617 OPT_COMPL_ONE_IALLOCATOR,
618 OPT_COMPL_INST_ADD_NODES,
619 OPT_COMPL_ONE_NODEGROUP,
623 class CliOption(Option):
624 """Custom option class for optparse.
627 ATTRS = Option.ATTRS + [
628 "completion_suggest",
630 TYPES = Option.TYPES + (
637 TYPE_CHECKER = Option.TYPE_CHECKER.copy()
638 TYPE_CHECKER["identkeyval"] = check_ident_key_val
639 TYPE_CHECKER["keyval"] = check_key_val
640 TYPE_CHECKER["unit"] = check_unit
641 TYPE_CHECKER["bool"] = check_bool
642 TYPE_CHECKER["list"] = check_list
645 # optparse.py sets make_option, so we do it for our own option class, too
646 cli_option = CliOption
# Generic options shared by most client commands.

#: Verbosity: every "-d" on the command line adds one to the debug level.
DEBUG_OPT = cli_option(
  "-d", "--debug",
  action="count", default=0,
  help="Increase debugging level")

#: Suppress the column-header row in list-style output.
NOHDR_OPT = cli_option(
  "--no-headers",
  dest="no_headers", action="store_true", default=False,
  help="Don't display column headers")

#: Custom field separator for list-style output (None means one space).
SEP_OPT = cli_option(
  "--separator",
  dest="separator", action="store", default=None,
  help="Separator between output fields (defaults to one space)")

#: Unit suffix used when formatting size fields (one of h/m/g/t).
USEUNITS_OPT = cli_option(
  "--units",
  dest="units", default=None, choices=("h", "m", "g", "t"),
  help="Specify units for output (one of h/m/g/t)")

#: Comma-separated list of fields to display.
FIELDS_OPT = cli_option(
  "-o", "--output",
  dest="output", action="store", type="string", metavar="FIELDS",
  help="Comma separated list of output fields")

#: Skip safety checks and force the operation.
FORCE_OPT = cli_option(
  "-f", "--force",
  dest="force", action="store_true", default=False,
  help="Force the operation")

#: Pre-answer "yes" to interactive confirmation questions.
CONFIRM_OPT = cli_option(
  "--yes",
  dest="confirm", action="store_true", default=False,
  help="Do not require confirmation")
677 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
678 action="store_true", default=False,
679 help=("Ignore offline nodes and do as much"
682 TAG_ADD_OPT = cli_option("--tags", dest="tags",
683 default=None, help="Comma-separated list of instance"
# --from: read extra tag names from a file; per _ExtendTags, a file named
# "-" is replaced by stdin.
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

# --submit: submit the job asynchronously and print the job ID instead of
# waiting for completion.
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

# --sync: take locks while querying, trading speed for consistency.
SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
699 DRY_RUN_OPT = cli_option("--dry-run", default=False,
701 help=("Do not execute the operation, just run the"
702 " check steps and verify it it could be"
705 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
707 help="Increase the verbosity of the operation")
# --debug-simulate-errors: testing aid; makes most runtime checks report
# failure.
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# --no-wait-for-sync: inverted flag — passing it *clears* wait_for_sync
# (store_false, default True).
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

# --online / --offline: flip an instance's online/offline configuration state.
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

# -t/--disk-template: restricted to the values in constants.DISK_TEMPLATES.
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))
732 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
733 help="Do not create any network cards for"
# --file-storage-dir: subdirectory (relative to the cluster-wide file storage
# dir) holding an instance's file-based disks.
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

# --file-driver: driver for image files; restricted to constants.FILE_DRIVER,
# defaulting to "loop".
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

# -I/--iallocator: let the named allocator plugin choose the nodes.
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
752 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
754 help="Set the default instance allocator plugin",
755 default=None, type="string",
756 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
758 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
760 completion_suggest=OPT_COMPL_ONE_OS)
# -O/--os-parameters: key=value OS parameters (custom "keyval" type, parsed
# by check_key_val into a dict).
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

# --force-variant: accept an OS variant that is not declared on the cluster.
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")
770 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
771 action="store_true", default=False,
772 help="Do not install the OS (will"
# --no-runtime-changes: inverted flag — passing it clears allow_runtime_chgs.
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

# -B/--backend-parameters: key=value backend parameters as a dict.
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

# -H storing a plain key=value dict into "hvparams".
# NOTE(review): other "-H" variants below (HYPERVISOR_OPT, HVLIST_OPT) parse
# "hypervisor:key=value" instead — presumably used by different commands;
# confirm no command registers two of them at once.
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# -D/--disk-parameters: repeatable "template:key=value,..." entries,
# accumulated into a list of (template, dict) tuples via "identkeyval".
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])
793 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
794 type="keyval", default={},
795 help="Memory count specs: min, max, std"
# --specs-*: instance-policy specification dicts; per the help strings the
# expected keys are min, max and std.
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: min, max, std (in MB)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: min, max, std")

# --ipolicy-disk-templates: comma-separated list (custom "list" type) of disk
# templates allowed by the instance policy.
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

# --ipolicy-vcpu-ratio: float cap on the vcpu-to-cpu ratio.
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="float", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

# -H as a single "hypervisor:opt=value,..." specification.
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

# -H, repeatable variant: each occurrence appends one (hypervisor, dict)
# entry to the list.
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
836 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
837 action="store_false",
838 help="Don't check that the instance's IP"
841 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
842 default=True, action="store_false",
843 help="Don't check that the instance's name"
# --net: repeatable NIC specification ("ident:key=value,..." entries).
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

# --disk: repeatable disk specification, same identkeyval format.
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

# --disks: comma-separated list of disk *indices* to act on; None = all.
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

# -s/--os-size: single-disk shortcut; the "unit" type goes through
# utils.ParseUnit (see check_unit), so suffixes like g/t are accepted.
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
863 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
864 dest="ignore_consistency",
865 action="store_true", default=False,
866 help="Ignore the consistency of the disks on"
869 ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
870 dest="allow_failover",
871 action="store_true", default=False,
872 help="If migration is not possible fallback to"
# --non-live: inverted flag — passing it clears "live" to request a
# freeze/save/transfer/resume migration.
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

# --migration-mode: one of constants.HT_MIGRATION_MODES.
# NOTE(review): a line appears to be missing from this declaration in this
# extract (likely "default=None,") — confirm against the full source.
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

# -n/--node as a placement spec: primary node with optional ":secondary".
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

# -n/--node as a repeatable filter: restrict the operation to these nodes.
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)
898 NODEGROUP_OPT_NAME = "--node-group"
899 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
901 help="Node group (name or uuid)",
902 metavar="<nodegroup>",
903 default=None, type="string",
904 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
906 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
908 completion_suggest=OPT_COMPL_ONE_NODE)
# --no-start: inverted flag — passing it clears "start" so the instance is
# created but left stopped.
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

# --show-cmd: print the command that would run instead of executing it.
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
918 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
919 default=False, action="store_true",
920 help="Instead of performing the migration, try to"
921 " recover from a failed cleanup. This is safe"
922 " to run even if the instance is healthy, but it"
923 " will create extra replication traffic and "
924 " disrupt briefly the replication (like during the"
# -s/--static: show configuration data only, skipping runtime queries.
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

# --all: cluster-wide instance info; the help warns it can be slow.
ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

# --select-os: interactive OS selection for reinstall.
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

# --ignore-failures: drop the instance from the configuration even when
# removal steps fail.
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")
947 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
948 dest="ignore_remove_failures",
949 action="store_true", default=False,
950 help="Remove the instance from the"
951 " cluster configuration even if there"
952 " are failures during the removal"
# --remove-instance: remove the instance from the cluster (e.g. after export).
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

# -n/--target-node and -n/--new-secondary both store into "dst_node";
# different commands expose one or the other spelling.
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

# -p / -s: which side's disks to replace (internally mirrored templates only,
# per the help text).
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (applies only to internally mirrored"
                            " disk templates, e.g. %s)" %
                            utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))
983 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
984 default=False, action="store_true",
985 help="Lock all nodes and auto-promote as needed"
# -a/--auto: automatically replace faulty disks (internally mirrored
# templates only, per the help text).
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (applies only to internally mirrored"
                              " disk templates, e.g. %s)" %
                              utils.CommaJoin(constants.DTS_INT_MIRROR))

# --ignore-size: skip the recorded-size check when activating disks.
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")
1001 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1003 completion_suggest=OPT_COMPL_ONE_NODE)
1005 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
# -s/--secondary-ip: secondary (replication) address for a node.
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

# --readd: re-add a node that was previously part of the cluster.
READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

# --no-ssh-key-check: inverted flag — passing it clears ssh_key_check.
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

# --force-join: force joining a node to the cluster.
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

# -C/--master-candidate: tri-state yes/no flag (custom "bool" type parsed by
# check_bool); default None leaves the current value visible to the caller.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")
1028 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1029 type="bool", default=None,
1030 help=("Set the offline flag on the node"
1031 " (cluster does not communicate with offline"
# Tri-state yes/no node/volume flags ("bool" type via check_bool).
# NOTE(review): default=None presumably means "leave unchanged" — confirm in
# the consuming opcode.
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")
1051 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1052 help="Disable support for lvm based instances"
1054 action="store_false", default=True)
# --enabled-hypervisors: comma-separated hypervisor list (kept as a plain
# string here).
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

# -N/--nic-parameters: key=value NIC defaults as a dict.
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

# -C/--candidate-pool-size: integer size of the master-candidate pool.
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

# --vg-name: enables LVM and names the cluster-wide volume group.
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

# --yes-do-it: explicit confirmation for cluster destruction.
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

# --no-voting: skip the node-agreement check (flagged dangerous in the help).
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)
1082 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1083 help="Specify the mac prefix for the instance IP"
1084 " addresses, in the format XX:XX:XX",
1088 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1089 help="Specify the node interface (cluster-wide)"
1090 " on which the master IP address will be added"
1091 " (cluster init default: %s)" %
1092 constants.DEFAULT_BRIDGE,
1096 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1097 help="Specify the netmask of the master IP",
# --use-external-mip-script: tri-state yes/no flag selecting a user-provided
# script for master-IP turnup/turndown.
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a user-provided"
                                     " script for the master IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)
1108 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1109 help="Specify the default directory (cluster-"
1110 "wide) for storing the file-based disks [%s]" %
1111 constants.DEFAULT_FILE_STORAGE_DIR,
1113 default=constants.DEFAULT_FILE_STORAGE_DIR)
1115 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1116 dest="shared_file_storage_dir",
1117 help="Specify the default directory (cluster-"
1118 "wide) for storing the shared file-based"
1120 constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1121 metavar="SHAREDDIR",
1122 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
# --no-etc-hosts: inverted flag — passing it clears modify_etc_hosts.
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

# --no-ssh-init: inverted flag — passing it clears modify_ssh_setup.
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

# --error-codes: machine-parseable error output (cluster verify).
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

# --no-nplus1-mem: skip the N+1 memory redundancy checks.
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)
1140 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1141 help="Type of reboot: soft/hard/full",
1142 default=constants.INSTANCE_REBOOT_HARD,
1144 choices=list(constants.REBOOT_TYPES))
# --ignore-secondaries: proceed despite errors from secondary nodes.
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

# --noshutdown: inverted flag — passing it clears "shutdown" (unsafe, per
# the help text).
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

# --timeout / --shutdown-timeout: integer wait limits, both defaulting to
# constants.DEFAULT_SHUTDOWN_TIMEOUT.
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance shutdown")
1164 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1166 help=("Number of seconds between repetions of the"
1169 EARLY_RELEASE_OPT = cli_option("--early-release",
1170 dest="early_release", default=False,
1171 action="store_true",
1172 help="Release the locks on the secondary"
# --new-cluster-certificate: regenerate the cluster certificate (renew-crypto).
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")
1180 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1182 help="File containing new RAPI certificate")
1184 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1185 default=None, action="store_true",
1186 help=("Generate a new self-signed RAPI"
1189 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1191 help="File containing new SPICE certificate")
1193 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1195 help="File containing the certificate of the CA"
1196 " which signed the SPICE certificate")
1198 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1199 dest="new_spice_cert", default=None,
1200 action="store_true",
1201 help=("Generate a new self-signed SPICE"
1204 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1205 dest="new_confd_hmac_key",
1206 default=False, action="store_true",
1207 help=("Create a new HMAC key for %s" %
1210 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1211 dest="cluster_domain_secret",
1213 help=("Load new new cluster domain"
1214 " secret from file"))
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
# Prefer the replication (secondary) network over the primary network when
# contacting nodes.
USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)
# Ternary yes/no/unset switch for the cluster's automatic self-maintenance.
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")
# Used on instance import: parameters equal to current cluster defaults are
# stored as defaults instead of per-instance overrides.
IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")
# User-id pool management (all three take comma-separated ids/ranges).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")
# Negative option: --no-drbd-storage stores False into "drbd_storage".
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")
# Cluster-wide primary IP family; defaults to IPv4.
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")
# Opcode priority; valid names come from _PRIORITY_NAMES/_PRIONAME_TO_VALUE.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")
# OS definition flags (gnt-os modify).
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")
# "SoR" = State of Record: the recorded powered state of the node.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")
# Out-of-band helper tuning.
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
# Apply the change without recording it in the cluster configuration.
NO_REMEMBER_OPT = cli_option("--no-remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")
# Node-evacuation scope selectors (mutually complementary).
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")
# May be given multiple times (action="append"); tab-completes group names.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")
# "unit" type accepts size suffixes (e.g. 512m, 1g) for ballooning memory.
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
# Common options for creating instances; the "add" and "import" commands
# then add their own on top of these.
COMMON_CREATE_OPTS = [
  FILESTORE_DRIVER_OPT,
# Common instance policy (ipolicy) options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_DISK_TEMPLATES,
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.
  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.
  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args
  # Every command allowed to take default args from the environment must
  # actually exist
  assert not (env_override - set(commands))
  binary = "<command>"
    binary = argv[0].split("/")[-1]
  # Handle --version before any command dispatching
  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
  # Missing or unknown command: print usage and the list of commands
  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...
    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # commands[cmd][4] is the short help text of the command tuple
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s %s", mlen, "", line)
    return None, None, None
  # get command, unalias it, and look it up in commands
    raise errors.ProgrammerError("Alias '%s' overrides an existing"
  if aliases[cmd] not in commands:
    raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                 " command '%s'" % (cmd, aliases[cmd]))
  # Default arguments may be injected through a per-command environment
  # variable, named <BINARY>_<CMD> upper-cased with "-" mapped to "_"
  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  # Stop option parsing at the first positional argument, so sub-command
  # arguments are not swallowed as options
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])
  if not _CheckArguments(cmd, args_def, args):
    return None, None, None
  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.
  1. Abort with error if values specified by user but none expected.
  1. For each argument in definition
  1. Keep running count of minimum number of values (min_count)
  1. Keep running count of maximum number of values (max_count)
  1. If it has an unlimited number of values
  1. Abort with error if it's not the last argument in the definition
  1. If last argument has limited number of values
  1. Abort with error if number of values doesn't match or is too large
  1. Abort with error if user didn't pass enough values (min_count)
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
  last_idx = len(args_def) - 1
  # Accumulate min_count/max_count over all argument definitions; either
  # becomes/stays None when unbounded
  for idx, arg in enumerate(args_def):
    if min_count is None:
    elif arg.min is not None:
      min_count += arg.min
    if max_count is None:
    elif arg.max is not None:
      max_count += arg.max
    check_max = (arg.max is not None)
    # Only the final argument definition may be open-ended
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")
  # Command with exact number of arguments
  if (min_count is not None and max_count is not None and
      min_count == max_count and len(args) != min_count):
    ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
  # Command with limited number of arguments
  if max_count is not None and len(args) > max_count:
    ToStderr("Error: Command %s expects only %d argument(s)",
  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
def SplitNodeOption(value):
  """Splits the value of a --node option.

  @return: a (primary, secondary) pair; the secondary part is None when
      the value contains no ":" separator

  """
  # Values like "pnode:snode" are split once on the first colon; empty or
  # None values (and values without a colon) get an empty secondary slot.
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  # The docstring allows os_variants to be None (or it may be empty); in
  # that case the OS is only known under its base name, so guard before
  # iterating.
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, v) for v in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @rtype: list
  @return: the list of fields to use

  """
  if selected is None:
    # No explicit selection: use the caller-supplied defaults
    return default

  if selected.startswith("+"):
    # A leading "+" means "extend the defaults" instead of replacing them
    return default + selected[1:].split(",")

  return selected.split(",")
1625 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.
  @param text: the question to ask
  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help
  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")
  # Default answer is the last choice's return value (used when no tty)
  answer = choices[-1][1]
  # Re-wrap the question text to 70 columns for terminal display
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
    # Prompt on the controlling terminal, not stdin/stdout
    # NOTE(review): file() is Python 2 only
    f = file("/dev/tty", "a+")
    chars = [entry[0] for entry in choices]
    # Mark the default choice with brackets, e.g. "y/[n]"
    chars[-1] = "[%s]" % chars[-1]
    maps = dict([(entry[0], entry[1]) for entry in choices])
      f.write("/".join(chars))
      line = f.readline(2).strip().lower()
      # "?" prints the per-choice descriptions as inline help
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
# Raised by SubmitOrSend() when only submitting (--submit); carries the job
# ID so the top-level handler can print it and exit cleanly.
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.
  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.
  This is not an error, just a structured way to exit from clients.
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.
  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  # Submit all opcodes as one job and return immediately with its ID
  job_id = cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.
  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  # Track last-seen job info and log serial so WaitForJobChangeOnce can
  # long-poll for changes only
  prev_job_info = None
  prev_logmsg_serial = None
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
    # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)
    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)
    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info
      # Forward any new log entries to the reporting callbacks
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
    prev_job_info = job_info
  # Job reached a final state: fetch overall status plus per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
    raise errors.JobLost("Job with id %s lost" % job_id)
  status, opstatus, result = jobs[0]
  if status == constants.JOB_STATUS_SUCCESS:
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")
  # On failure, surface the first failed opcode's error as precisely as
  # possible (re-raising encoded Ganeti errors where available)
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)
        raise errors.OpExecError("partial failure (opcode %d): %s" %
      raise errors.OpExecError(str(msg))
  # default failure mode
  raise errors.OpExecError(result)
# Abstract data-access interface used by GenericPollJob; concrete
# subclasses (e.g. _LuxiJobPollCb) supply the actual queries.
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.
    """Initializes this class.
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.
    raise NotImplementedError()
  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.
    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields
    raise NotImplementedError()
# Abstract progress-reporting interface for GenericPollJob; see the
# feedback-function and stdio implementations below.
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.
    """Initializes this class.
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.
    raise NotImplementedError()
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available
    raise NotImplementedError()
# JobPollCbBase implementation that forwards all queries to a LUXI client.
class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.
    JobPollCbBase.__init__(self)
  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.
    # Delegate the long-poll directly to the LUXI client
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)
  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.
    return self.cl.QueryJobs(job_ids, fields)
# Reporting callback that funnels log messages into a caller-provided
# feedback function (as (timestamp, log_type, log_msg) tuples).
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.
    JobPollReportCbBase.__init__(self)
    self.feedback_fn = feedback_fn
    assert callable(feedback_fn)
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.
    self.feedback_fn((timestamp, log_type, log_msg))
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
# Reporting callback that prints log messages to stdout and waiting-state
# notifications (queued / acquiring locks) to stderr, each at most once.
class StdioJobPollReportCb(JobPollReportCbBase):
    """Initializes this class.
    JobPollReportCbBase.__init__(self)
    self.notified_queued = False
    self.notified_waitlock = False
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))
  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  # Plain messages pass straight through; any other payload type is
  # rendered via str() first, then safely encoded for display.
  if log_type == constants.ELOG_MESSAGE:
    return utils.SafeEncode(log_msg)
  return utils.SafeEncode(str(log_msg))
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.
  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  # Build a reporter from feedback_fn (or default to stdio) unless the
  # caller supplied one explicitly; supplying both is a programming error
  if reporter is None:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
      reporter = StdioJobPollReportCb()
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.
  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.
  # Apply generic options (debug, dry-run, priority) before submitting
  SetGenericOpcodeOpts([op], opts)
  job_id = SendJob([op], cl=cl)
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
  # Single opcode submitted, so there is exactly one result to return
  return op_results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.
  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.
  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).
  if opts and opts.submit_only:
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    # Not an error: structured exit carrying the job ID for the caller
    raise JobSubmittedException(job_id)
  return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.
  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).
  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)
  # Propagate the generic flags onto every opcode in place
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    # Priority names are translated to their numeric values
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]
  # TODO: Cache object?
    client = luxi.Client()
  # When the master daemon is unreachable, consult ssconf to produce a
  # more helpful error (uninitialized cluster vs. wrong node)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()
    # Try to read ssconf file
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
def FormatError(err):
  """Return a formatted error message for a given error.
  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).
  # Translate each known exception type into user-oriented text; the
  # catch-all at the end handles anything unrecognized
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
        obuf.write(" node: %s, script: %s (no output)\n" %
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish "can't resolve myself" from a generic resolution failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries a structured error type as second element
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not a failure: report the submitted job's ID
    obuf.write("JobID: %s\n" % err.args[0])
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.
  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands
  # save the program name and the entire command line for later logging
    binary = os.path.basename(sys.argv[0])
      binary = sys.argv[0]
    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
    binary = "<unknown program>"
    cmdline = "<unknown>"
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
  if func is None: # parse error
  # Apply programmatic option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)
  utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)
  logging.info("Command line: %s", cmdline)
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
2209 def ParseNicOption(optvalue):
2210 """Parses the value of the --net option(s).
2214 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2215 except (TypeError, ValueError), err:
2216 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2218 nics = [{}] * nic_max
2219 for nidx, ndict in optvalue:
2222 if not isinstance(ndict, dict):
2223 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2224 " got %s" % (nidx, ndict))
2226 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @param args: should contain only one element, the new instance name
  @return: the desired exit code
  (pnode, snode) = SplitNodeOption(opts.node)
    hypervisor, hvparams = opts.hypervisor
    nics = ParseNicOption(opts.nics)
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
  # Validate the disk specification: diskless templates must not get any
  # disk information, and --disk and -s (sd_size) are mutually exclusive
  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    # NOTE(review): "[{}] * disk_max" reuses ONE dict object for every
    # unspecified slot; this is only safe if those empty dicts are never
    # mutated in place -- consider "[{} for _ in range(disk_max)]"
    disks = [{}] * disk_max
    # Each disk must specify exactly one of "size" or "adopt"
    for didx, ddict in opts.disks:
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
        ddict[constants.IDISK_SIZE] = 0
        raise errors.OpPrereqError("Missing size or adoption source for"
  if opts.tags is not None:
    tags = opts.tags.split(",")
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  # Mode-specific parameters: creation installs an OS, import copies from
  # a source node/directory
  if mode == constants.INSTANCE_CREATE:
    force_variant = opts.force_variant
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    identify_defaults = opts.identify_defaults
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disk_template=opts.disk_template,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                force_variant=force_variant,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts)
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.
    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    self.ssh = ssh.SshRunner(self.cluster_name)
    # All online nodes except the master; daemons are restarted on these
    # first, master last
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]
    assert self.master_node not in self.nonmaster_nodes
  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.
    @type node_name: string
    @param node_name: Machine name
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
      # Build a descriptive error covering both the local and remote case
      errmsg = ["Failed to run command %s" % result.cmd]
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))
  def Call(self, fn, *args):
    """Call function while all daemons are stopped.
    @param fn: Function to be called
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)
      # Stop master daemons, so that no new jobs can come in and all running
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
        # All daemons are shut down now
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
      # Resume the watcher
      watcher_block.Close()
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  # NOTE(review): "cl" is presumably a luxi client created just above; its
  # creation is elided in this excerpt
  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.

  assert master_node in online_nodes

  # Delegate the actual stop/call/restart choreography to the helper class
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
  """Prints a table with headers and different fields.

  @param headers: dictionary mapping field names to headers for
  @param fields: the field names corresponding to each row in
  @param separator: the separator to be used; if this is None,
    the default 'smart' algorithm is used which computes optimal
    field width, otherwise just the separator is used between
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
    values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
    values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
    automatic choice (human-readable for non-separator usage, otherwise
    megabytes); this is a one-letter string

  """
  # NOTE(review): several lines are elided in this excerpt; comments below
  # describe only the visible code
  if numfields is None:
  if unitfields is None:

  # Wrap field-name lists into FieldSets so per-field membership tests
  # below go through FieldSet.Matches
  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Choose a %-format specifier per field: plain "%s" when a separator is
  # used, right-aligned "%*s" for numeric fields, left-aligned otherwise
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
      format_fields.append("%-*s")

  if separator is None:
    # "Smart" mode: track maximum column lengths in mlens
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
    # Escape "%" in the separator so the joined format string stays valid
    format_str = separator.replace("%", "%%").join(format_fields)

  # Convert row values to strings, formatting unit fields with FormatUnit,
  # and record the widest value per column (smart mode only)
  for idx, val in enumerate(row):
    if unitfields.Matches(fields[idx]):
      except (TypeError, ValueError):
      val = row[idx] = utils.FormatUnit(val, units)
    val = row[idx] = str(val)
    if separator is None:
      mlens[idx] = max(mlens[idx], len(val))

  # Header row: widen columns to fit the header text as well
  for idx, name in enumerate(fields):
    if separator is None:
      mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
  result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)
    if fields and not numfields.Matches(fields[-1]):
    # Emit each data row through the computed format string
    line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
      result.append(format_str % tuple(args))
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  # (function body is elided in this excerpt; used as the QFT_BOOL
  # formatter in _DEFAULT_FORMAT_QUERY below)
#: Default formatting for query results; (callback, align right)
# QFT_UNIT is deliberately absent: it needs the runtime "unit" argument and
# is handled dynamically in _GetColumnFormatter
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field override takes precedence (the early return when an override
  # is found is elided in this excerpt)
  fmt = override.get(fdef.name, None)

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  # Fall back to the static per-kind table
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)

  # No formatter known for this field kind
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    # NOTE(review): the assignment storing "fn" (used as self._fn in
    # __call__) is elided in this excerpt
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    # "data" is a (result status, value) pair as returned by a query
    (status, value) = data

    # Report the status to the collector callback (used for statistics)
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    # Abnormal statuses carry no value
    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"

  # Look up the (verbose, terse) description pair for this status; the
  # branch returning one of the two texts is elided in this excerpt
  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]

  # Reached for statuses missing from RSS_DESCRIPTION (presumably via a
  # KeyError handler not visible here)
  raise NotImplementedError("Unknown status %s" % status)
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if format_override is None:
    format_override = {}

  # Per-status counters, one entry for every known result status
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    # Callback handed to _QueryColumnFormatter; increments "stats"
    # (its body is elided in this excerpt)

  # Build one TableColumn per queried field, wiring in the formatter and
  # the status-recording callback
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE

  return (status, table)
def _GetUnknownFields(fdefs):
  """Extracts the fields whose kind is unknown from C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @return: list of field definitions with kind L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  # NOTE(review): presumably guarded by "if unknown:" and followed by a
  # return value used by callers (see GenericList) — both elided here
  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  # Turn the name list into a query filter
  qfilter = qlang.MakeFilter(names, force_filter)

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     format_override=format_override,
  # Sanity check: unknown fields were warned about iff the overall status
  # says so
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @param header: Whether to show header row

  """
  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # Fixed three-column layout for field listings (the surrounding
  # "columns = [...]" brackets are elided in this excerpt)
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):

    # Signal to the caller that unknown fields were requested
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @param title: Column title
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # NOTE(review): assignments storing "title" and "fn" (read elsewhere as
    # col.title and col.format) are elided in this excerpt
    self.align_right = align_right
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  # NOTE(review): "sign" is computed in lines elided from this excerpt —
  # presumably "" for right alignment and "-" for left alignment, yielding
  # e.g. "%-10s" or "%10s"
  return "%%%s%ss" % (sign, width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  # Seed output with the header row and its widths (presumably guarded by
  # "if header:" — the guard and "else:" are elided in this excerpt)
  data = [[col.title for col in columns]]
  colwidth = [len(col.title) for col in columns]
    colwidth = [0 for _ in columns]

    # Format each row through the per-column formatting functions
    assert len(row) == len(columns)
    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Reject anything that is not a two-element (seconds, microseconds) pair
  # (the body of this branch is elided in this excerpt)
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:

  # NOTE(review): the unpacking of "sec" and "usec" from ts is elided here
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

  Without any suffix, the value will be taken to be in seconds.

  """
  # Empty input is rejected outright (the guard is elided in this excerpt)
  raise errors.OpPrereqError("Empty time specification passed")

  # NOTE(review): "suffix_map" (suffix character -> multiplier) is defined
  # in lines elided from this excerpt
  if value[-1] not in suffix_map:
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
    multiplier = suffix_map[value[-1]]
    # Strip the suffix before converting the remainder to an integer
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
    value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  @param nodes: if not empty, use only this subset of nodes (minus the
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  # Build the query filter incrementally in "qfilter" (its initialization
  # and the "if nodes:" guard are elided in this excerpt)
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match the node group by either name or UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

    # Exclude the master node (presumably under "if filter_master:")
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine sub-filters with AND when there is more than one
  if len(qfilter) > 1:
    final_filter = [qlang.OP_AND] + qfilter
    assert len(qfilter) == 1
    final_filter = qfilter[0]

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Helpers destructuring query rows of ((_, name), (_, offline), (_, sip));
  # the defs for the name and sip extractors are elided in this excerpt
  def _IsOffline(row):
    (_, (_, offline), _) = row

    ((_, name), _, _) = row

    (_, _, (_, sip)) = row

  # Split rows into offline and online nodes
  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  # "fn" selects either the name or the secondary IP per node (its
  # assignment is elided in this excerpt)
  return map(fn, online)
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
    # %-interpolate the message with the given arguments (the enclosing
    # try/except and the no-args branch are elided in this excerpt)
    stream.write(txt % args)
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  multiple submission cycles.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # NOTE(review): several attribute initializations (e.g. the queue and
    # jobs lists, the luxi client) are elided in this excerpt
    self.verbose = verbose
    self.feedback_fn = feedback_fn
    # Monotonic counter used to keep job results in submission order
    self._counter = itertools.count()

  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    # (body elided in this excerpt; callers use it to append an optional
    # " for <name>" suffix to messages)

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
      # "each=True" path: submit jobs one at a time
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
      # Bulk path: submit all queued jobs in a single luxi call
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queued entry, preserving order
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Query status of (a batch of) the pending jobs
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)

    # no candidate found; fall back to the first job
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
      as the submitted jobs; if a job has failed, instead of the result
      there will be the error message

    """
      self.SubmitPending()

    # Report successfully submitted job IDs (presumably only in verbose
    # mode; the guard is elided in this excerpt)
    ok_jobs = [row[2] for row in self.jobs if row[1]]
      ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

      # Poll jobs one at a time, preferring ones no longer queued/waiting
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)

        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results = [i[1:] for i in results]

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
      return self.GetResults()
      # Non-waiting path: just submit and print per-job submission outcome
      self.SubmitPending()
      for _, status, result, name in self.jobs:
          ToStdout("%s: %s", result, name)
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Writes a formatted parameter dictionary into a buffer.

  One line per key of C{actual} is written; keys not explicitly set in
  C{param_dict} are shown with their default value from C{actual}.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = " " * level
  for name in sorted(actual.keys()):
    try:
      shown = param_dict[name]
    except KeyError:
      shown = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (prefix, name, shown))
3330 def ConfirmOperation(names, list_type, text, extra=""):
3331 """Ask the user to confirm an operation on a list of list_type.
3333 This function is used to request confirmation for doing an operation
3334 on a given list of list_type.
3337 @param names: the list of names that we display when
3338 we ask for confirmation
3339 @type list_type: str
3340 @param list_type: Human readable name for elements in the list (e.g. nodes)
3342 @param text: the operation that the user should confirm
3344 @return: True or False depending on user's confirmation.
3348 msg = ("The %s will operate on %d %s.\n%s"
3349 "Do you want to continue?" % (text, count, list_type, extra))
3350 affected = (("\nAffected %s:\n" % list_type) +
3351 "\n".join([" %s" % name for name in names]))
3353 choices = [("y", True, "Yes, execute the %s" % text),
3354 ("n", False, "No, abort the %s" % text)]
3357 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3360 question = msg + affected
3362 choice = AskUser(question, choices)
3365 choice = AskUser(msg + affected, choices)